From 25489c2b7a5a5dbcb53a33da19662a9b9c9810d8 Mon Sep 17 00:00:00 2001 From: dDostalker <146743193+dDostalker@users.noreply.github.com> Date: Tue, 11 Nov 2025 20:22:32 +0800 Subject: [PATCH 0001/1030] Fix adding a Python virtual environment, may duplicate the "open this dictionary" string when modifying content. (#41840) Release Notes: - Fixed an issue when adding a Python virtual environment that may cause duplicate "open this dictionary" entries - Trigger condition: Type `C:\`, delete `\`, then repeatedly add `\`. -Video bug: https://github.com/user-attachments/assets/f68008bb-9138-4451-a842-25b58574493b fix: https://github.com/user-attachments/assets/2913b8c2-adee-4275-af7e-e055fd78915f --- crates/file_finder/src/open_path_prompt.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/file_finder/src/open_path_prompt.rs b/crates/file_finder/src/open_path_prompt.rs index f29c0e6cd20f423dd9073abced0182f272b588c9..53bad3b34880d69aba169df965db71f69b2296eb 100644 --- a/crates/file_finder/src/open_path_prompt.rs +++ b/crates/file_finder/src/open_path_prompt.rs @@ -399,7 +399,12 @@ impl PickerDelegate for OpenPathDelegate { } }) .unwrap_or(false); - if should_prepend_with_current_dir { + + let current_dir_in_new_entries = new_entries + .iter() + .any(|entry| &entry.path.string == current_dir); + + if should_prepend_with_current_dir && !current_dir_in_new_entries { new_entries.insert( 0, CandidateInfo { From b3dd51560bb00cd90a2a56dd29f79e9c7b3c1cda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Raz=20Guzm=C3=A1n=20Macedo?= Date: Tue, 11 Nov 2025 06:55:02 -0600 Subject: [PATCH 0002/1030] docs: Fix broken links in docs with lychee (#42404) Lychee is a [Rust based](https://lychee.cli.rs) async parallel link checker. I ran it against the codebase to suss out stale links and fixed those up. There's currently 2 remaining cases that I don't know how to resolve: 1. https://flathub.org/apps/dev.zed.Zed - nginx is giving a 502 bad gateway 2. https://github.com/zed-industries/zed/actions/workflows/ci.yml/badge.svg - I don't want to mess with the CI pipeline in this PR. Once again, I'll punt to the Docs Czar to see if this gets incorporated into CI later. --- ## Running `lychee` locally: ``` cargo binstall -y lychee lychee . ``` --- Release Notes: - N/A Signed-off-by: mrg --- docs/src/ai/llm-providers.md | 2 +- docs/src/development/release-notes.md | 2 +- docs/src/uninstall.md | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/ai/llm-providers.md b/docs/src/ai/llm-providers.md index b737be53eac87cd630303556c1d0f8fcd8d406a1..3e40d7ae0283b3dbd1c50ba1bef5ae410d969305 100644 --- a/docs/src/ai/llm-providers.md +++ b/docs/src/ai/llm-providers.md @@ -587,7 +587,7 @@ These routing controls let you fine‑tune cost, capability, and reliability tra ### Vercel v0 {#vercel-v0} -[Vercel v0](https://vercel.com/docs/v0/api) is an expert model for generating full-stack apps, with framework-aware completions optimized for modern stacks like Next.js and Vercel. +[Vercel v0](https://v0.app/docs/api/model) is an expert model for generating full-stack apps, with framework-aware completions optimized for modern stacks like Next.js and Vercel. It supports text and image inputs and provides fast streaming responses. The v0 models are [OpenAI-compatible models](/#openai-api-compatible), but Vercel is listed as first-class provider in the panel's settings view. 
diff --git a/docs/src/development/release-notes.md b/docs/src/development/release-notes.md index 5005fc32d36bafb57754e45423b45fc8b7bf64d9..90e1ad21b102de291f65894748f0abf11519a59f 100644 --- a/docs/src/development/release-notes.md +++ b/docs/src/development/release-notes.md @@ -10,7 +10,7 @@ Release Notes: - N/A _or_ Added/Fixed/Improved ... ``` -On Wednesdays, we run a [`get-preview-channel-changes`](https://github.com/zed-industries/zed/blob/main/script/get-preview-channel-changes) script that scrapes `Release Notes` lines from pull requests landing in preview, as documented in our [Release](https://zed.dev/docs/development/releases) docs. +On Wednesdays, we run a [`get-preview-channel-changes`](https://github.com/zed-industries/zed/blob/main/script/get-preview-channel-changes) script that scrapes `Release Notes` lines from pull requests landing in preview, as documented in our [Release](https://zed.dev/docs/development/release-notes) docs. The script outputs everything below the `Release Notes` line, including additional data such as the pull request author (if not a Zed team member) and a link to the pull request. If you use `N/A`, the script skips your pull request entirely. diff --git a/docs/src/uninstall.md b/docs/src/uninstall.md index f2d7da93e78b71c607e79b0bdd5d017f88d55f4d..c1f71a6609ff1b73e15171802441f9aebc8f09cb 100644 --- a/docs/src/uninstall.md +++ b/docs/src/uninstall.md @@ -110,4 +110,4 @@ If you encounter issues during uninstallation: - **Linux**: If the uninstall script fails, check the error message and consider manual removal of the directories listed above. - **All platforms**: If you want to start fresh while keeping Zed installed, you can delete the configuration directories instead of uninstalling the application entirely. -For additional help, see our [Linux-specific documentation](./linux.md) or visit the [Zed community](https://zed.dev/community). +For additional help, see our [Linux-specific documentation](./linux.md) or visit the [Zed community](https://zed.dev/community-links). From 777b46533f6324fb0ee31150d079830fcfb54b5b Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 13:55:19 +0100 Subject: [PATCH 0003/1030] auto_update: Ignore dir removal errors on windows (#42435) The auto update helper already removes these when successful, so these will always fail in the common case. Additional replaces a mutable const with a static as otherwise we'll rebuild the job list on every access Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/auto_update/src/auto_update.rs | 17 +++-------------- crates/auto_update_helper/src/updater.rs | 14 +++++--------- 2 files changed, 8 insertions(+), 23 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index bd44eb714c08f9a5c698e92570a9edb518c5c806..facb55e0df951633b082f23477e35ce2b55f6f84 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -905,26 +905,15 @@ async fn install_release_macos( #[cfg(target_os = "windows")] async fn cleanup_windows() -> Result<()> { - use util::ResultExt; - let parent = std::env::current_exe()? .parent() .context("No parent dir for Zed.exe")? 
.to_owned(); // keep in sync with crates/auto_update_helper/src/updater.rs - smol::fs::remove_dir(parent.join("updates")) - .await - .context("failed to remove updates dir") - .log_err(); - smol::fs::remove_dir(parent.join("install")) - .await - .context("failed to remove install dir") - .log_err(); - smol::fs::remove_dir(parent.join("old")) - .await - .context("failed to remove old version dir") - .log_err(); + _ = smol::fs::remove_dir(parent.join("updates")).await; + _ = smol::fs::remove_dir(parent.join("install")).await; + _ = smol::fs::remove_dir(parent.join("old")).await; Ok(()) } diff --git a/crates/auto_update_helper/src/updater.rs b/crates/auto_update_helper/src/updater.rs index f146583d3bc69b167b61339278a475827bf28d0b..076e11fb4eef1e5c53e2bdc290be7117330c3e61 100644 --- a/crates/auto_update_helper/src/updater.rs +++ b/crates/auto_update_helper/src/updater.rs @@ -1,6 +1,6 @@ use std::{ - cell::LazyCell, path::Path, + sync::LazyLock, time::{Duration, Instant}, }; @@ -13,8 +13,8 @@ use windows::Win32::{ use crate::windows_impl::WM_JOB_UPDATED; pub(crate) struct Job { - pub apply: Box Result<()>>, - pub rollback: Box Result<()>>, + pub apply: Box Result<()> + Send + Sync>, + pub rollback: Box Result<()> + Send + Sync>, } impl Job { @@ -154,10 +154,8 @@ impl Job { } } -// app is single threaded #[cfg(not(test))] -#[allow(clippy::declare_interior_mutable_const)] -pub(crate) const JOBS: LazyCell<[Job; 22]> = LazyCell::new(|| { +pub(crate) static JOBS: LazyLock<[Job; 22]> = LazyLock::new(|| { fn p(value: &str) -> &Path { Path::new(value) } @@ -206,10 +204,8 @@ pub(crate) const JOBS: LazyCell<[Job; 22]> = LazyCell::new(|| { ] }); -// app is single threaded #[cfg(test)] -#[allow(clippy::declare_interior_mutable_const)] -pub(crate) const JOBS: LazyCell<[Job; 9]> = LazyCell::new(|| { +pub(crate) static JOBS: LazyLock<[Job; 9]> = LazyLock::new(|| { fn p(value: &str) -> &Path { Path::new(value) } From f2ad0d716f132e6e05545f3dd4a73f8765dc0fb3 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Tue, 11 Nov 2025 09:56:20 -0300 Subject: [PATCH 0004/1030] zeta cli: Print log paths when running predict (#42396) Release Notes: - N/A Co-authored-by: Michael Sloan Co-authored-by: Ben Kunkle --- crates/zeta_cli/src/paths.rs | 8 ++++++++ crates/zeta_cli/src/predict.rs | 22 +++++++++++++++++----- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/crates/zeta_cli/src/paths.rs b/crates/zeta_cli/src/paths.rs index 144bf6f5dd97c518d965d7bd23da83ce7f11f66f..fc7f8b3afc3dbcd724649749a58b76dbab275750 100644 --- a/crates/zeta_cli/src/paths.rs +++ b/crates/zeta_cli/src/paths.rs @@ -6,3 +6,11 @@ pub static CACHE_DIR: LazyLock = pub static REPOS_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-repos")); pub static WORKTREES_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-worktrees")); pub static LOGS_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-logs")); +pub static LOGS_SEARCH_PROMPT: LazyLock = + LazyLock::new(|| LOGS_DIR.join("search_prompt.md")); +pub static LOGS_SEARCH_QUERIES: LazyLock = + LazyLock::new(|| LOGS_DIR.join("search_queries.json")); +pub static LOGS_PREDICTION_PROMPT: LazyLock = + LazyLock::new(|| LOGS_DIR.join("prediction_prompt.md")); +pub static LOGS_PREDICTION_RESPONSE: LazyLock = + LazyLock::new(|| LOGS_DIR.join("prediction_response.md")); diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 4efc82fa8a7c5d5cf6773a7f771d12dd89b4e1ed..32f2f564fc53df987579bf2946eb5765519157c6 100644 --- a/crates/zeta_cli/src/predict.rs +++ 
b/crates/zeta_cli/src/predict.rs @@ -1,7 +1,10 @@ use crate::PromptFormat; use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample}; use crate::headless::ZetaCliAppState; -use crate::paths::{CACHE_DIR, LOGS_DIR}; +use crate::paths::{ + CACHE_DIR, LOGS_DIR, LOGS_PREDICTION_PROMPT, LOGS_PREDICTION_RESPONSE, LOGS_SEARCH_PROMPT, + LOGS_SEARCH_QUERIES, +}; use ::serde::Serialize; use anyhow::{Result, anyhow}; use clap::Args; @@ -61,6 +64,15 @@ pub async fn run_zeta2_predict( .await .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); + + println!("## Logs\n"); + println!("Search prompt: {}", LOGS_SEARCH_PROMPT.display()); + println!("Search queries: {}", LOGS_SEARCH_QUERIES.display()); + println!("Prediction prompt: {}", LOGS_PREDICTION_PROMPT.display()); + println!( + "Prediction response: {}", + LOGS_PREDICTION_RESPONSE.display() + ); } thread_local! { @@ -147,12 +159,12 @@ pub async fn zeta2_predict( match event { zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { start_time = Some(info.timestamp); - fs::write(LOGS_DIR.join("search_prompt.md"), &info.search_prompt)?; + fs::write(&*LOGS_SEARCH_PROMPT, &info.search_prompt)?; } zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { search_queries_generated_at = Some(info.timestamp); fs::write( - LOGS_DIR.join("search_queries.json"), + &*LOGS_SEARCH_QUERIES, serde_json::to_string_pretty(&info.search_queries).unwrap(), )?; } @@ -164,7 +176,7 @@ pub async fn zeta2_predict( let prediction_started_at = Instant::now(); start_time.get_or_insert(prediction_started_at); fs::write( - LOGS_DIR.join("prediction_prompt.md"), + &*LOGS_PREDICTION_PROMPT, &request.local_prompt.unwrap_or_default(), )?; @@ -198,7 +210,7 @@ pub async fn zeta2_predict( let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?; let response = zeta2::text_from_response(response).unwrap_or_default(); let prediction_finished_at = Instant::now(); - fs::write(LOGS_DIR.join("prediction_response.md"), &response)?; + fs::write(&*LOGS_PREDICTION_RESPONSE, &response)?; let mut result = result.lock().unwrap(); From 58db38722b57f8531fab877be548c44a9a785229 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 11 Nov 2025 15:38:28 +0200 Subject: [PATCH 0005/1030] Find proper applicable chunks for visible ranges (#42422) Release Notes: - Fixed inlay hints not being queried for certain long-ranged jumps Co-authored-by: Smit Barmase Co-authored-by: Lukas Wirth --- crates/project/src/lsp_store/inlay_hint_cache.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/project/src/lsp_store/inlay_hint_cache.rs b/crates/project/src/lsp_store/inlay_hint_cache.rs index 51189d8fdae788c7c12546f2c9ac1735930c3095..cca9d66e8c330f1a4c723a84c4fb418b976f7c03 100644 --- a/crates/project/src/lsp_store/inlay_hint_cache.rs +++ b/crates/project/src/lsp_store/inlay_hint_cache.rs @@ -6,6 +6,7 @@ use gpui::{App, Entity, Task}; use language::{Buffer, BufferRow, BufferSnapshot}; use lsp::LanguageServerId; use text::OffsetRangeExt; +use util::RangeExt as _; use crate::{InlayHint, InlayId}; @@ -123,18 +124,17 @@ impl BufferInlayHints { let row_ranges = ranges .iter() .map(|range| range.to_point(&self.snapshot)) - .map(|point_range| point_range.start.row..=point_range.end.row) + // Be lenient and yield multiple chunks if they "touch" the exclusive part of the range. + // This will result in LSP hints [re-]queried for more ranges, but also more hints already visible when scrolling around. 
+ .map(|point_range| point_range.start.row..point_range.end.row + 1) .collect::>(); self.buffer_chunks .iter() - .filter(move |chunk| -> bool { - // Be lenient and yield multiple chunks if they "touch" the exclusive part of the range. - // This will result in LSP hints [re-]queried for more ranges, but also more hints already visible when scrolling around. + .filter(move |chunk| { let chunk_range = chunk.start..=chunk.end; - row_ranges.iter().any(|row_range| { - chunk_range.contains(&row_range.start()) - || chunk_range.contains(&row_range.end()) - }) + row_ranges + .iter() + .any(|row_range| chunk_range.overlaps(&row_range)) }) .copied() } From 9be5e31aca85c9d9965c7c28d3e18241afa4c964 Mon Sep 17 00:00:00 2001 From: Libon Date: Tue, 11 Nov 2025 21:42:00 +0800 Subject: [PATCH 0006/1030] Add clear recent files history command (#42176) ![2025-11-07 181619](https://github.com/user-attachments/assets/a9bef7a6-dc0b-4db2-85e5-2e1df7b21cfa) Release Notes: - Added "workspace: clear navigation history" command --- crates/file_finder/src/file_finder_tests.rs | 96 +++++++++++++++++++++ crates/workspace/src/pane.rs | 19 ++++ crates/workspace/src/workspace.rs | 13 +++ 3 files changed, 128 insertions(+) diff --git a/crates/file_finder/src/file_finder_tests.rs b/crates/file_finder/src/file_finder_tests.rs index 690265562e1c36e685574ec590819d8f513c128a..d6971da15fde8406ac4d00fb613906c91e25d8d4 100644 --- a/crates/file_finder/src/file_finder_tests.rs +++ b/crates/file_finder/src/file_finder_tests.rs @@ -3452,3 +3452,99 @@ async fn test_paths_with_starting_slash(cx: &mut TestAppContext) { assert_eq!(active_editor.read(cx).title(cx), "file1.txt"); }); } + +#[gpui::test] +async fn test_clear_navigation_history(cx: &mut TestAppContext) { + let app_state = init_test(cx); + app_state + .fs + .as_fake() + .insert_tree( + path!("/src"), + json!({ + "test": { + "first.rs": "// First file", + "second.rs": "// Second file", + "third.rs": "// Third file", + } + }), + ) + .await; + + let project = Project::test(app_state.fs.clone(), [path!("/src").as_ref()], cx).await; + let (workspace, cx) = cx.add_window_view(|window, cx| Workspace::test_new(project, window, cx)); + + workspace.update_in(cx, |_workspace, window, cx| window.focused(cx)); + + // Open some files to generate navigation history + open_close_queried_buffer("fir", 1, "first.rs", &workspace, cx).await; + open_close_queried_buffer("sec", 1, "second.rs", &workspace, cx).await; + let history_before_clear = + open_close_queried_buffer("thi", 1, "third.rs", &workspace, cx).await; + + assert_eq!( + history_before_clear.len(), + 2, + "Should have history items before clearing" + ); + + // Verify that file finder shows history items + let picker = open_file_picker(&workspace, cx); + cx.simulate_input("fir"); + picker.update(cx, |finder, _| { + let matches = collect_search_matches(finder); + assert!( + !matches.history.is_empty(), + "File finder should show history items before clearing" + ); + }); + workspace.update_in(cx, |_, window, cx| { + window.dispatch_action(menu::Cancel.boxed_clone(), cx); + }); + + // Verify navigation state before clear + workspace.update(cx, |workspace, cx| { + let pane = workspace.active_pane(); + pane.read(cx).can_navigate_backward() + }); + + // Clear navigation history + cx.dispatch_action(workspace::ClearNavigationHistory); + + // Verify that navigation is disabled immediately after clear + workspace.update(cx, |workspace, cx| { + let pane = workspace.active_pane(); + assert!( + !pane.read(cx).can_navigate_backward(), + "Should not be 
able to navigate backward after clearing history" + ); + assert!( + !pane.read(cx).can_navigate_forward(), + "Should not be able to navigate forward after clearing history" + ); + }); + + // Verify that file finder no longer shows history items + let picker = open_file_picker(&workspace, cx); + cx.simulate_input("fir"); + picker.update(cx, |finder, _| { + let matches = collect_search_matches(finder); + assert!( + matches.history.is_empty(), + "File finder should not show history items after clearing" + ); + }); + workspace.update_in(cx, |_, window, cx| { + window.dispatch_action(menu::Cancel.boxed_clone(), cx); + }); + + // Verify history is empty by opening a new file + // (this should not show any previous history) + let history_after_clear = + open_close_queried_buffer("sec", 1, "second.rs", &workspace, cx).await; + assert_eq!( + history_after_clear.len(), + 0, + "Should have no history items after clearing" + ); +} diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 24f4254232b33975d77f227a6fa2af57d49c25fd..d85662733d52390db820957818901fa2e2cfd2a2 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -4041,6 +4041,25 @@ impl NavHistory { self.0.lock().mode = NavigationMode::Normal; } + pub fn clear(&mut self, cx: &mut App) { + let mut state = self.0.lock(); + + if state.backward_stack.is_empty() + && state.forward_stack.is_empty() + && state.closed_stack.is_empty() + && state.paths_by_item.is_empty() + { + return; + } + + state.mode = NavigationMode::Normal; + state.backward_stack.clear(); + state.forward_stack.clear(); + state.closed_stack.clear(); + state.paths_by_item.clear(); + state.did_update(cx); + } + pub fn pop(&mut self, mode: NavigationMode, cx: &mut App) -> Option { let mut state = self.0.lock(); let entry = match mode { diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7e35510652b1118e9dc8ffa18491d3c2a7904c75..316969812ac34e84f4019a191fda225e255700f0 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -199,6 +199,8 @@ actions!( AddFolderToProject, /// Clears all notifications. ClearAllNotifications, + /// Clears all navigation history, including forward/backward navigation, recently opened files, and recently closed tabs. **This action is irreversible**. + ClearNavigationHistory, /// Closes the active dock. CloseActiveDock, /// Closes all docks. @@ -1917,6 +1919,12 @@ impl Workspace { .collect() } + pub fn clear_navigation_history(&mut self, _window: &mut Window, cx: &mut Context) { + for pane in &self.panes { + pane.update(cx, |pane, cx| pane.nav_history_mut().clear(cx)); + } + } + fn navigate_history( &mut self, pane: WeakEntity, @@ -5858,6 +5866,11 @@ impl Workspace { workspace.clear_all_notifications(cx); }, )) + .on_action(cx.listener( + |workspace: &mut Workspace, _: &ClearNavigationHistory, window, cx| { + workspace.clear_navigation_history(window, cx); + }, + )) .on_action(cx.listener( |workspace: &mut Workspace, _: &SuppressNotification, _, cx| { if let Some((notification_id, _)) = workspace.notifications.pop() { From b8fcd3ea044ebc3780588f37d1ba1ab5133a7134 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 14:43:06 +0100 Subject: [PATCH 0007/1030] gpui: Fix `RefCell already borrowed` in `WindowsPlatform::run` (#42440) Fixes ZED-1VX Release Notes: - N/A *or* Added/Fixed/Improved ... 
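For context on the panic: holding a `RefCell` borrow while a callback runs will hit "already borrowed" if the callback (or a window message it triggers) borrows the same state again. The new `with_callback` helper below avoids that by taking the callback out, calling it, and putting it back. A minimal sketch of that take/call/restore pattern (the `State` and `Callbacks` types are illustrative stand-ins, not the actual gpui structures):

```rust
use std::cell::RefCell;

struct Callbacks {
    quit: Option<Box<dyn FnMut()>>,
}

struct State {
    callbacks: Callbacks,
}

fn main() {
    let state = RefCell::new(State {
        callbacks: Callbacks {
            quit: Some(Box::new(|| println!("quit callback ran"))),
        },
    });

    // Take the callback out in its own statement so the `RefMut` is dropped
    // before the callback runs; a re-entrant borrow inside the callback then
    // sees no outstanding mutable borrow.
    let taken = state.borrow_mut().callbacks.quit.take();
    if let Some(mut callback) = taken {
        callback();
        // Restore it so later events can still fire it.
        state.borrow_mut().callbacks.quit = Some(callback);
    }
}
```

The helper in the diff generalizes this with a projection closure so the same dance works for any callback field.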
--- crates/gpui/src/platform/windows/events.rs | 14 ++---- crates/gpui/src/platform/windows/platform.rs | 50 +++++++++++--------- crates/gpui/src/platform/windows/window.rs | 10 +++- 3 files changed, 42 insertions(+), 32 deletions(-) diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index 4e6df63106f4c650ad3130e39d410670ddc4687d..cc17f19bcfac86a6f8ac31ec1059d76c24e79695 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -487,14 +487,12 @@ impl WindowsWindowInner { let scale_factor = lock.scale_factor; let wheel_scroll_amount = match modifiers.shift { true => { - self.system_settings - .borrow() + self.system_settings() .mouse_wheel_settings .wheel_scroll_chars } false => { - self.system_settings - .borrow() + self.system_settings() .mouse_wheel_settings .wheel_scroll_lines } @@ -541,8 +539,7 @@ impl WindowsWindowInner { }; let scale_factor = lock.scale_factor; let wheel_scroll_chars = self - .system_settings - .borrow() + .system_settings() .mouse_wheel_settings .wheel_scroll_chars; drop(lock); @@ -677,8 +674,7 @@ impl WindowsWindowInner { // used by Chrome. However, it may result in one row of pixels being obscured // in our client area. But as Chrome says, "there seems to be no better solution." if is_maximized - && let Some(ref taskbar_position) = - self.system_settings.borrow().auto_hide_taskbar_position + && let Some(ref taskbar_position) = self.system_settings().auto_hide_taskbar_position { // For the auto-hide taskbar, adjust in by 1 pixel on taskbar edge, // so the window isn't treated as a "fullscreen app", which would cause @@ -1072,7 +1068,7 @@ impl WindowsWindowInner { lock.border_offset.update(handle).log_err(); // system settings may emit a window message which wants to take the refcell lock, so drop it drop(lock); - self.system_settings.borrow_mut().update(display, wparam.0); + self.system_settings_mut().update(display, wparam.0); } else { self.handle_system_theme_changed(handle, lparam)?; }; diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b985cc14b01b1171d4013bf5c41a0c5199565503..72f427beb55b18ff5b94a1a90e334e07045b8726 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -342,9 +342,8 @@ impl Platform for WindowsPlatform { } } - if let Some(ref mut callback) = self.inner.state.borrow_mut().callbacks.quit { - callback(); - } + self.inner + .with_callback(|callbacks| &mut callbacks.quit, |callback| callback()); } fn quit(&self) { @@ -578,14 +577,13 @@ impl Platform for WindowsPlatform { fn set_cursor_style(&self, style: CursorStyle) { let hcursor = load_cursor(style); - let mut lock = self.inner.state.borrow_mut(); - if lock.current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { + if self.inner.state.borrow_mut().current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { self.post_message( WM_GPUI_CURSOR_STYLE_CHANGED, WPARAM(0), LPARAM(hcursor.map_or(0, |c| c.0 as isize)), ); - lock.current_cursor = hcursor; + self.inner.state.borrow_mut().current_cursor = hcursor; } } @@ -724,6 +722,18 @@ impl WindowsPlatformInner { })) } + /// Calls `project` to project to the corresponding callback field, removes it from callbacks, calls `f` with the callback and then puts the callback back. 
+ fn with_callback( + &self, + project: impl Fn(&mut PlatformCallbacks) -> &mut Option, + f: impl FnOnce(&mut T), + ) { + if let Some(mut callback) = project(&mut self.state.borrow_mut().callbacks).take() { + f(&mut callback); + *project(&mut self.state.borrow_mut().callbacks) = Some(callback) + } + } + fn handle_msg( self: &Rc, handle: HWND, @@ -807,40 +817,36 @@ impl WindowsPlatformInner { } fn handle_dock_action_event(&self, action_idx: usize) -> Option { - let mut lock = self.state.borrow_mut(); - let mut callback = lock.callbacks.app_menu_action.take()?; - let Some(action) = lock + let Some(action) = self + .state + .borrow_mut() .jump_list .dock_menus .get(action_idx) .map(|dock_menu| dock_menu.action.boxed_clone()) else { - lock.callbacks.app_menu_action = Some(callback); log::error!("Dock menu for index {action_idx} not found"); return Some(1); }; - drop(lock); - callback(&*action); - self.state.borrow_mut().callbacks.app_menu_action = Some(callback); + self.with_callback( + |callbacks| &mut callbacks.app_menu_action, + |callback| callback(&*action), + ); Some(0) } fn handle_keyboard_layout_change(&self) -> Option { - let mut callback = self - .state - .borrow_mut() - .callbacks - .keyboard_layout_change - .take()?; - callback(); - self.state.borrow_mut().callbacks.keyboard_layout_change = Some(callback); + self.with_callback( + |callbacks| &mut callbacks.keyboard_layout_change, + |callback| callback(), + ); Some(0) } fn handle_device_lost(&self, lparam: LPARAM) -> Option { - let mut lock = self.state.borrow_mut(); let directx_devices = lparam.0 as *const DirectXDevices; let directx_devices = unsafe { &*directx_devices }; + let mut lock = self.state.borrow_mut(); lock.directx_devices.take(); lock.directx_devices = Some(directx_devices.clone()); diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index 0050fa4bc0e96b8702314f33637db67998b5941d..4b89fcffb39d9bfbc0734977cec16a00984f5c9a 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -63,7 +63,7 @@ pub(crate) struct WindowsWindowInner { hwnd: HWND, drop_target_helper: IDropTargetHelper, pub(crate) state: RefCell, - pub(crate) system_settings: RefCell, + system_settings: RefCell, pub(crate) handle: AnyWindowHandle, pub(crate) hide_title_bar: bool, pub(crate) is_movable: bool, @@ -321,6 +321,14 @@ impl WindowsWindowInner { } Ok(()) } + + pub(crate) fn system_settings(&self) -> std::cell::Ref<'_, WindowsSystemSettings> { + self.system_settings.borrow() + } + + pub(crate) fn system_settings_mut(&self) -> std::cell::RefMut<'_, WindowsSystemSettings> { + self.system_settings.borrow_mut() + } } #[derive(Default)] From dfd7e85d5de70f642ca3cbef7a1a1535fd108362 Mon Sep 17 00:00:00 2001 From: Terra Date: Tue, 11 Nov 2025 22:43:25 +0900 Subject: [PATCH 0008/1030] Replace deprecated json.schemastore.org with www.schemastore.org (#42336) Release Notes: - N/A According to [microsoft/vscode#254689](https://github.com/microsoft/vscode/issues/254689), the json.schemastore.org domain has been deprecated and should now use www.schemastore.org (or schemastore.org) instead. This PR updates all occurrences of the old domain within the Zed codebase, including code, documentation, and configuration files. 
--- .github/ISSUE_TEMPLATE/config.yml | 2 +- crates/json_schema_store/src/schemas/package.json | 14 +++++++------- crates/json_schema_store/src/schemas/tsconfig.json | 2 +- docs/src/languages/deno.md | 2 +- docs/src/languages/yaml.md | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 3d0b2ce0af79944c9c86dba6187b0fd7d91c5b8c..8602daf90ddd0fe49d9db27c39f3d52d6e7da032 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -# yaml-language-server: $schema=https://json.schemastore.org/github-issue-config.json +# yaml-language-server: $schema=https://www.schemastore.org/github-issue-config.json blank_issues_enabled: false contact_links: - name: Feature Request diff --git a/crates/json_schema_store/src/schemas/package.json b/crates/json_schema_store/src/schemas/package.json index a24583fa8848891d661114291951d4df28f463fd..0906dcf36e30dcc9bdb64153a963368d3647a6d9 100644 --- a/crates/json_schema_store/src/schemas/package.json +++ b/crates/json_schema_store/src/schemas/package.json @@ -1030,22 +1030,22 @@ "$ref": "#" }, "eslintConfig": { - "$ref": "https://json.schemastore.org/eslintrc.json" + "$ref": "https://www.schemastore.org/eslintrc.json" }, "prettier": { - "$ref": "https://json.schemastore.org/prettierrc.json" + "$ref": "https://www.schemastore.org/prettierrc.json" }, "stylelint": { - "$ref": "https://json.schemastore.org/stylelintrc.json" + "$ref": "https://www.schemastore.org/stylelintrc.json" }, "ava": { - "$ref": "https://json.schemastore.org/ava.json" + "$ref": "https://www.schemastore.org/ava.json" }, "release": { - "$ref": "https://json.schemastore.org/semantic-release.json" + "$ref": "https://www.schemastore.org/semantic-release.json" }, "jscpd": { - "$ref": "https://json.schemastore.org/jscpd.json" + "$ref": "https://www.schemastore.org/jscpd.json" }, "pnpm": { "description": "Defines pnpm specific configuration.", @@ -1305,5 +1305,5 @@ ] } ], - "$id": "https://json.schemastore.org/package.json" + "$id": "https://www.schemastore.org/package.json" } diff --git a/crates/json_schema_store/src/schemas/tsconfig.json b/crates/json_schema_store/src/schemas/tsconfig.json index 4b9088725401e27dfc24c14d7c58acfae4355631..9484c027df59c2efe0d2c4024046fb6a839e78a9 100644 --- a/crates/json_schema_store/src/schemas/tsconfig.json +++ b/crates/json_schema_store/src/schemas/tsconfig.json @@ -1466,7 +1466,7 @@ } } }, - "id": "https://json.schemastore.org/tsconfig", + "id": "https://www.schemastore.org/tsconfig", "title": "JSON schema for the TypeScript compiler's configuration file", "type": "object" } diff --git a/docs/src/languages/deno.md b/docs/src/languages/deno.md index a4192257765d6aa131232ff8a80a3af452a38d57..0fa645291e4c4788ae126bc3ccb0120f26a78cb0 100644 --- a/docs/src/languages/deno.md +++ b/docs/src/languages/deno.md @@ -78,7 +78,7 @@ To get completions for `deno.json` or `package.json` you can add the following t "fileMatch": [ "package.json" ], - "url": "http://json.schemastore.org/package" + "url": "https://www.schemastore.org/package" } ] } diff --git a/docs/src/languages/yaml.md b/docs/src/languages/yaml.md index 477d197d11fa4f0ad0e62ee25e416eee7c35ee67..33b92df94ec59f6f4cdf9a0afe83c3ad74dc3bda 100644 --- a/docs/src/languages/yaml.md +++ b/docs/src/languages/yaml.md @@ -19,7 +19,7 @@ You can configure various [yaml-language-server settings](https://github.com/red "singleQuote": true }, "schemas": { - "http://json.schemastore.org/composer": 
["/*"], + "https://getcomposer.org/schema.json": ["/*"], "../relative/path/schema.json": ["/config*.yaml"] } } @@ -70,7 +70,7 @@ By default yaml-language-server will attempt to determine the correct schema for You can override any auto-detected schema via the `schemas` settings key (demonstrated above) or by providing an [inlined schema](https://github.com/redhat-developer/yaml-language-server#using-inlined-schema) reference via a modeline comment at the top of your yaml file: ```yaml -# yaml-language-server: $schema=https://json.schemastore.org/github-action.json +# yaml-language-server: $schema=https://www.schemastore.org/github-action.json name: Issue Assignment on: issues: From 10d5d78dedce7526739c8d7c488b820bef33c7c6 Mon Sep 17 00:00:00 2001 From: liuyanghejerry Date: Tue, 11 Nov 2025 21:45:03 +0800 Subject: [PATCH 0009/1030] Improve error messages on extension loading (#42266) This pull request improves error message when extension loading goes wrong. Before: ``` 2025-11-08T21:16:02+08:00 ERROR [extension_host::extension_host] failed to load arkts extension.toml Caused by: No such file or directory (os error 2) ``` Now: ``` 2025-11-08T22:57:00+08:00 ERROR [extension_host::extension_host] failed to load arkts extension.toml, "/Users/user_name_placeholder/Library/Application Support/Zed/extensions/installed/arkts/extension.toml" Caused by: No such file or directory (os error 2) ``` Release Notes: - N/A --- crates/extension/src/extension_manifest.rs | 14 ++++++-------- crates/extension_host/src/wasm_host.rs | 6 +++--- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/crates/extension/src/extension_manifest.rs b/crates/extension/src/extension_manifest.rs index 7e074ffcab77ceb2a63fd92448faa2e13f4ec8c4..7a15a3c58b7a907fa56a12633343a48d150b6bcf 100644 --- a/crates/extension/src/extension_manifest.rs +++ b/crates/extension/src/extension_manifest.rs @@ -267,10 +267,9 @@ impl ExtensionManifest { let mut extension_manifest_path = extension_dir.join("extension.json"); if fs.is_file(&extension_manifest_path).await { - let manifest_content = fs - .load(&extension_manifest_path) - .await - .with_context(|| format!("failed to load {extension_name} extension.json"))?; + let manifest_content = fs.load(&extension_manifest_path).await.with_context(|| { + format!("loading {extension_name} extension.json, {extension_manifest_path:?}") + })?; let manifest_json = serde_json::from_str::(&manifest_content) .with_context(|| { format!("invalid extension.json for extension {extension_name}") @@ -279,10 +278,9 @@ impl ExtensionManifest { Ok(manifest_from_old_manifest(manifest_json, extension_name)) } else { extension_manifest_path.set_extension("toml"); - let manifest_content = fs - .load(&extension_manifest_path) - .await - .with_context(|| format!("failed to load {extension_name} extension.toml"))?; + let manifest_content = fs.load(&extension_manifest_path).await.with_context(|| { + format!("loading {extension_name} extension.toml, {extension_manifest_path:?}") + })?; toml::from_str(&manifest_content).map_err(|err| { anyhow!("Invalid extension.toml for extension {extension_name}:\n{err}") }) diff --git a/crates/extension_host/src/wasm_host.rs b/crates/extension_host/src/wasm_host.rs index eb26c44f20519b7cdb3a38859f23ce99365fe505..1e4bed7a50b44c710384f19c901e4e74854df0e2 100644 --- a/crates/extension_host/src/wasm_host.rs +++ b/crates/extension_host/src/wasm_host.rs @@ -763,17 +763,17 @@ impl WasmExtension { .fs .open_sync(&path) .await - .context("failed to open wasm file")?; + 
.context(format!("opening wasm file, path: {path:?}"))?; let mut wasm_bytes = Vec::new(); wasm_file .read_to_end(&mut wasm_bytes) - .context("failed to read wasm")?; + .context(format!("reading wasm file, path: {path:?}"))?; wasm_host .load_extension(wasm_bytes, manifest, cx) .await - .with_context(|| format!("failed to load wasm extension {}", manifest.id)) + .with_context(|| format!("loading wasm extension: {}", manifest.id)) } pub async fn call(&self, f: Fn) -> Result From 38e2c7aa66a999a3e1064cf12dea03a48f774927 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 14:56:04 +0100 Subject: [PATCH 0010/1030] editor: Hide file blame on editor cancel (ESC) (#42436) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/editor/src/editor.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 223dbb776550e949d0ce86dca6f68aff6482433d..f7eb309fd1b67272103133d47303ef7f0b9e5f35 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -4076,17 +4076,24 @@ impl Editor { self.selection_mark_mode = false; self.selection_drag_state = SelectionDragState::None; + if self.dismiss_menus_and_popups(true, window, cx) { + cx.notify(); + return; + } if self.clear_expanded_diff_hunks(cx) { cx.notify(); return; } - if self.dismiss_menus_and_popups(true, window, cx) { + if self.show_git_blame_gutter { + self.show_git_blame_gutter = false; + cx.notify(); return; } if self.mode.is_full() && self.change_selections(Default::default(), window, cx, |s| s.try_cancel()) { + cx.notify(); return; } From a19d11184dcff70e3473ac9d43da721af310583a Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 16:09:56 +0100 Subject: [PATCH 0011/1030] remote: Add more context to error logging in wsl (#42450) cc https://github.com/zed-industries/zed/issues/40892 Release Notes: - N/A *or* Added/Fixed/Improved ... 
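Most of these hunks wrap fallible calls with `anyhow`'s `Context` so the eventual log line says which step failed. A small sketch of the pattern, independent of the WSL code (the path and message are made up for illustration; assumes the `anyhow` crate):

```rust
use anyhow::{Context, Result};
use std::path::Path;

fn read_config(path: &Path) -> Result<String> {
    // `with_context` builds the message only if the call actually fails,
    // and prepends it to the underlying error in the reported chain.
    std::fs::read_to_string(path)
        .with_context(|| format!("failed to read config at {}", path.display()))
}

fn main() {
    if let Err(err) = read_config(Path::new("/does/not/exist")) {
        // The alternate formatter prints the whole chain, e.g.
        // "failed to read config at /does/not/exist: No such file or directory (os error 2)"
        eprintln!("{err:#}");
    }
}
```

`.context("...")` is the eager variant; the diff switches between the two depending on whether the message needs formatting.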
--- crates/remote/src/transport/wsl.rs | 41 ++++++++++++++++++++---------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index 702b2dd0601ab94969d1c88de6b6eaae8951c9b2..3e14fcfc8b7b8a7f74b1de2a10107a143461108b 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -19,6 +19,7 @@ use std::{ time::Instant, }; use util::{ + ResultExt as _, paths::{PathStyle, RemotePathBuf}, rel_path::RelPath, shell::ShellKind, @@ -79,20 +80,27 @@ impl WslRemoteConnection { can_exec: true, }; delegate.set_status(Some("Detecting WSL environment"), cx); - this.shell = this.detect_shell().await?; + this.shell = this + .detect_shell() + .await + .context("failed detecting shell")?; this.shell_kind = ShellKind::new(&this.shell, false); - this.can_exec = this.detect_can_exec().await?; - this.platform = this.detect_platform().await?; + this.can_exec = this.detect_can_exec().await; + this.platform = this + .detect_platform() + .await + .context("failed detecting platform")?; this.remote_binary_path = Some( this.ensure_server_binary(&delegate, release_channel, version, commit, cx) - .await?, + .await + .context("failed ensuring server binary")?, ); log::debug!("Detected WSL environment: {this:#?}"); Ok(this) } - async fn detect_can_exec(&self) -> Result { + async fn detect_can_exec(&self) -> bool { let options = &self.connection_options; let program = self.shell_kind.prepend_command_prefix("uname"); let args = &["-m"]; @@ -101,10 +109,13 @@ impl WslRemoteConnection { .await; if !output.is_ok_and(|output| output.status.success()) { - run_wsl_command_impl(options, &program, args, false).await?; - Ok(false) + run_wsl_command_impl(options, &program, args, false) + .await + .context("failed detecting exec status") + .log_err(); + false } else { - Ok(true) + true } } async fn detect_platform(&self) -> Result { @@ -504,7 +515,9 @@ impl RemoteConnection for WslRemoteConnection { /// `wslpath` is a executable available in WSL, it's a linux binary. /// So it doesn't support Windows style paths. async fn sanitize_path(path: &Path) -> Result { - let path = smol::fs::canonicalize(path).await?; + let path = smol::fs::canonicalize(path) + .await + .with_context(|| format!("Failed to canonicalize path {}", path.display()))?; let path_str = path.to_string_lossy(); let sanitized = path_str.strip_prefix(r"\\?\").unwrap_or(&path_str); @@ -526,14 +539,16 @@ async fn run_wsl_command_impl( args: &[&str], exec: bool, ) -> Result { - let output = wsl_command_impl(options, program, args, exec) + let mut command = wsl_command_impl(options, program, args, exec); + let output = command .output() - .await?; + .await + .with_context(|| format!("Failed to run command '{:?}'", command))?; if !output.status.success() { return Err(anyhow!( - "Command '{}' failed: {}", - program, + "Command '{:?}' failed: {}", + command, String::from_utf8_lossy(&output.stderr).trim() )); } From 28d019be2e5ba7208874c5779aefc1d8f6f07ae7 Mon Sep 17 00:00:00 2001 From: tidely <43219534+tidely@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:10:47 +0200 Subject: [PATCH 0012/1030] ollama: Fix tool calling (#42275) Closes #42303 Ollama added tool call identifiers (https://github.com/ollama/ollama/pull/12956) in its latest version [v0.12.10](https://github.com/ollama/ollama/releases/tag/v0.12.10). This broke our json schema and made all tool calls fail. This PR fixes the schema and uses the Ollama provided tool call identifier when available. 
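Roughly, the change boils down to making the identifier optional and falling back to a locally generated one when it is absent. A minimal sketch (field and type names are simplified stand-ins for the crate's `OllamaToolCall`/`OllamaFunctionCall`; assumes `serde` and `serde_json`):

```rust
use serde::Deserialize;
use serde_json::json;
use std::sync::atomic::{AtomicU32, Ordering};

static TOOL_CALL_COUNTER: AtomicU32 = AtomicU32::new(0);

#[derive(Deserialize)]
struct ToolCall {
    // Ollama >= 0.12.10 sends an id; older servers omit the field entirely,
    // which `Option` maps to `None` instead of a deserialization error.
    id: Option<String>,
    function: FunctionCall,
}

#[derive(Deserialize)]
struct FunctionCall {
    name: String,
    arguments: serde_json::Value,
}

fn tool_use_id(call: &ToolCall) -> String {
    call.id.clone().unwrap_or_else(|| {
        // Fallback for older servers: synthesize a locally unique id.
        format!(
            "{}-{}",
            call.function.name,
            TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed)
        )
    })
}

fn main() {
    let new_style: ToolCall = serde_json::from_value(json!({
        "id": "call_llama3.2:3b_145155",
        "function": { "name": "weather", "arguments": { "city": "london" } }
    }))
    .unwrap();
    let old_style: ToolCall = serde_json::from_value(json!({
        "function": { "name": "weather", "arguments": { "city": "london" } }
    }))
    .unwrap();

    assert_eq!(tool_use_id(&new_style), "call_llama3.2:3b_145155");
    assert_eq!(tool_use_id(&old_style), "weather-0");
}
```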
We remain backwards compatible and still use our own identifier with older versions of Ollama. I added a `TODO` to remove the `Option` around the new field when most users have updated their installations to v0.12.10 or above. Note to reviewer: The fix to this issue should likely get cherry-picked into the next release, since Ollama becomes unusable as an agent without it. Release Notes: - Fixed tool calling when using the latest version of Ollama --- crates/language_models/src/provider/ollama.rs | 47 +++++++-------- crates/ollama/src/ollama.rs | 59 ++++++++++++++++++- 2 files changed, 80 insertions(+), 26 deletions(-) diff --git a/crates/language_models/src/provider/ollama.rs b/crates/language_models/src/provider/ollama.rs index a0aada7d1a7b557e1e5aa07f19dd3e38492fc972..b6870f5f72b08d2ca4decc101deae59b6a56c224 100644 --- a/crates/language_models/src/provider/ollama.rs +++ b/crates/language_models/src/provider/ollama.rs @@ -381,10 +381,13 @@ impl OllamaLanguageModel { thinking = Some(text) } MessageContent::ToolUse(tool_use) => { - tool_calls.push(OllamaToolCall::Function(OllamaFunctionCall { - name: tool_use.name.to_string(), - arguments: tool_use.input, - })); + tool_calls.push(OllamaToolCall { + id: Some(tool_use.id.to_string()), + function: OllamaFunctionCall { + name: tool_use.name.to_string(), + arguments: tool_use.input, + }, + }); } _ => (), } @@ -575,25 +578,23 @@ fn map_to_language_model_completion_events( } if let Some(tool_call) = tool_calls.and_then(|v| v.into_iter().next()) { - match tool_call { - OllamaToolCall::Function(function) => { - let tool_id = format!( - "{}-{}", - &function.name, - TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed) - ); - let event = - LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { - id: LanguageModelToolUseId::from(tool_id), - name: Arc::from(function.name), - raw_input: function.arguments.to_string(), - input: function.arguments, - is_input_complete: true, - }); - events.push(Ok(event)); - state.used_tools = true; - } - } + let OllamaToolCall { id, function } = tool_call; + let id = id.unwrap_or_else(|| { + format!( + "{}-{}", + &function.name, + TOOL_CALL_COUNTER.fetch_add(1, Ordering::Relaxed) + ) + }); + let event = LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { + id: LanguageModelToolUseId::from(id), + name: Arc::from(function.name), + raw_input: function.arguments.to_string(), + input: function.arguments, + is_input_complete: true, + }); + events.push(Ok(event)); + state.used_tools = true; } else if !content.is_empty() { events.push(Ok(LanguageModelCompletionEvent::Text(content))); } diff --git a/crates/ollama/src/ollama.rs b/crates/ollama/src/ollama.rs index 0ed3b6da17d952cc874485337ec380ef3ca990a8..f6614379fa999883405a20d17328c61d7da448f2 100644 --- a/crates/ollama/src/ollama.rs +++ b/crates/ollama/src/ollama.rs @@ -102,9 +102,11 @@ pub enum ChatMessage { } #[derive(Serialize, Deserialize, Debug)] -#[serde(rename_all = "lowercase")] -pub enum OllamaToolCall { - Function(OllamaFunctionCall), +pub struct OllamaToolCall { + // TODO: Remove `Option` after most users have updated to Ollama v0.12.10, + // which was released on the 4th of November 2025 + pub id: Option, + pub function: OllamaFunctionCall, } #[derive(Serialize, Deserialize, Debug)] @@ -444,6 +446,7 @@ mod tests { "content": "", "tool_calls": [ { + "id": "call_llama3.2:3b_145155", "function": { "name": "weather", "arguments": { @@ -479,6 +482,56 @@ mod tests { } } + // Backwards compatibility with Ollama versions prior to v0.12.10 November 2025 + // This 
test is a copy of `parse_tool_call()` with the `id` field omitted. + #[test] + fn parse_tool_call_pre_0_12_10() { + let response = serde_json::json!({ + "model": "llama3.2:3b", + "created_at": "2025-04-28T20:02:02.140489Z", + "message": { + "role": "assistant", + "content": "", + "tool_calls": [ + { + "function": { + "name": "weather", + "arguments": { + "city": "london", + } + } + } + ] + }, + "done_reason": "stop", + "done": true, + "total_duration": 2758629166u64, + "load_duration": 1770059875, + "prompt_eval_count": 147, + "prompt_eval_duration": 684637583, + "eval_count": 16, + "eval_duration": 302561917, + }); + + let result: ChatResponseDelta = serde_json::from_value(response).unwrap(); + match result.message { + ChatMessage::Assistant { + content, + tool_calls: Some(tool_calls), + images: _, + thinking, + } => { + assert!(content.is_empty()); + assert!(thinking.is_none()); + + // When the `Option` around `id` is removed, this test should complain + // and be subsequently deleted in favor of `parse_tool_call()` + assert!(tool_calls.first().is_some_and(|call| call.id.is_none())) + } + _ => panic!("Deserialized wrong role"), + } + } + #[test] fn parse_show_model() { let response = serde_json::json!({ From ee2e69065714297679d199c25bb25bde558dacdc Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Tue, 11 Nov 2025 16:25:27 +0100 Subject: [PATCH 0013/1030] agent_servers: Fix panic when setting default mode (#42452) Closes ZED-35A Release Notes: - Fixed an issue where Zed would panic when trying to set the default mode for ACP agents --- crates/agent_servers/src/custom.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/crates/agent_servers/src/custom.rs b/crates/agent_servers/src/custom.rs index a51ed8a51a24d28aa6f2867797207bb15643a67d..7d36cc758389a828b819a822c91c9bb4b3444985 100644 --- a/crates/agent_servers/src/custom.rs +++ b/crates/agent_servers/src/custom.rs @@ -50,13 +50,14 @@ impl crate::AgentServer for CustomAgentServer { fn set_default_mode(&self, mode_id: Option, fs: Arc, cx: &mut App) { let name = self.name(); update_settings_file(fs, cx, move |settings, _| { - settings + if let Some(settings) = settings .agent_servers .get_or_insert_default() .custom .get_mut(&name) - .unwrap() - .default_mode = mode_id.map(|m| m.to_string()) + { + settings.default_mode = mode_id.map(|m| m.to_string()) + } }); } From 8467a3dbd6aa3af529de021226d9196f2dc621d6 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Tue, 11 Nov 2025 12:47:08 -0300 Subject: [PATCH 0014/1030] agent_ui: Allow to uninstall agent servers from the settings view (#42445) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR also adds items within the "Add Agent" menu to: 1. Add more agent servers from extensions, opening up the extensions page with "Agent Servers" already filtered 2. Go to the agent server + ACP docs to learn more about them I feel like having them there is a nice way to promote this knowledge from within the product and have users learn more about them. Screenshot 2025-11-11 at 10  46 3@2x Release Notes: - agent: Enabled uninstalled agent servers from the agent panel's settings view. 
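The uninstall button needs to map an agent entry back to the extension that registered it, which the new `get_extension_id_for_agent` does by downcasting the stored trait object. A stripped-down sketch of that lookup (trait and struct names here are illustrative, not the real `agent_server_store` types):

```rust
use std::any::Any;
use std::collections::HashMap;
use std::sync::Arc;

// Agents are stored as trait objects; `as_any` lets callers recover the
// concrete type when they need provider-specific data.
trait ExternalAgent {
    fn as_any(&self) -> &dyn Any;
}

struct ExtensionAgent {
    extension_id: Arc<str>,
}

impl ExternalAgent for ExtensionAgent {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

struct BuiltinAgent;

impl ExternalAgent for BuiltinAgent {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

fn extension_id_for(
    agents: &HashMap<String, Box<dyn ExternalAgent>>,
    name: &str,
) -> Option<Arc<str>> {
    agents.get(name).and_then(|agent| {
        // Only agents that actually came from an extension have an id to return.
        agent
            .as_any()
            .downcast_ref::<ExtensionAgent>()
            .map(|ext| ext.extension_id.clone())
    })
}

fn main() {
    let mut agents: HashMap<String, Box<dyn ExternalAgent>> = HashMap::new();
    agents.insert(
        "my-agent".into(),
        Box::new(ExtensionAgent {
            extension_id: "acme.my-agent".into(),
        }),
    );
    agents.insert("gemini".into(), Box::new(BuiltinAgent));

    assert_eq!(
        extension_id_for(&agents, "my-agent").as_deref(),
        Some("acme.my-agent")
    );
    assert_eq!(extension_id_for(&agents, "gemini"), None);
}
```

With the id in hand, the button in the diff hands it off to the extension store's existing uninstall path.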
--- crates/agent_ui/src/agent_configuration.rs | 201 ++++++++++++++++----- crates/client/src/zed_urls.rs | 8 + crates/project/src/agent_server_store.rs | 12 ++ 3 files changed, 174 insertions(+), 47 deletions(-) diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index 8ace684234e90c5203528cae360a28b30798bea3..125dc223796f6d9b7e96bee452bee25a2409adb1 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -8,6 +8,7 @@ use std::{ops::Range, sync::Arc}; use agent::ContextServerRegistry; use anyhow::Result; +use client::zed_urls; use cloud_llm_client::{Plan, PlanV1, PlanV2}; use collections::HashMap; use context_server::ContextServerId; @@ -26,18 +27,20 @@ use language_model::{ use language_models::AllLanguageModelSettings; use notifications::status_toast::{StatusToast, ToastIcon}; use project::{ - agent_server_store::{AgentServerStore, CLAUDE_CODE_NAME, CODEX_NAME, GEMINI_NAME}, + agent_server_store::{ + AgentServerStore, CLAUDE_CODE_NAME, CODEX_NAME, ExternalAgentServerName, GEMINI_NAME, + }, context_server_store::{ContextServerConfiguration, ContextServerStatus, ContextServerStore}, }; use settings::{Settings, SettingsStore, update_settings_file}; use ui::{ - Button, ButtonStyle, Chip, CommonAnimationExt, ContextMenu, Disclosure, Divider, DividerColor, - ElevationIndex, IconName, IconPosition, IconSize, Indicator, LabelSize, PopoverMenu, Switch, - SwitchColor, Tooltip, WithScrollbar, prelude::*, + Button, ButtonStyle, Chip, CommonAnimationExt, ContextMenu, ContextMenuEntry, Disclosure, + Divider, DividerColor, ElevationIndex, IconName, IconPosition, IconSize, Indicator, LabelSize, + PopoverMenu, Switch, SwitchColor, Tooltip, WithScrollbar, prelude::*, }; use util::ResultExt as _; use workspace::{Workspace, create_and_open_local_file}; -use zed_actions::ExtensionCategoryFilter; +use zed_actions::{ExtensionCategoryFilter, OpenBrowser}; pub(crate) use configure_context_server_modal::ConfigureContextServerModal; pub(crate) use configure_context_server_tools_modal::ConfigureContextServerToolsModal; @@ -415,6 +418,7 @@ impl AgentConfiguration { cx: &mut Context, ) -> impl IntoElement { let providers = LanguageModelRegistry::read_global(cx).providers(); + let popover_menu = PopoverMenu::new("add-provider-popover") .trigger( Button::new("add-provider", "Add Provider") @@ -425,7 +429,6 @@ impl AgentConfiguration { .icon_color(Color::Muted) .label_size(LabelSize::Small), ) - .anchor(gpui::Corner::TopRight) .menu({ let workspace = self.workspace.clone(); move |window, cx| { @@ -447,6 +450,11 @@ impl AgentConfiguration { }) })) } + }) + .anchor(gpui::Corner::TopRight) + .offset(gpui::Point { + x: px(0.0), + y: px(2.0), }); v_flex() @@ -541,7 +549,6 @@ impl AgentConfiguration { .icon_color(Color::Muted) .label_size(LabelSize::Small), ) - .anchor(gpui::Corner::TopRight) .menu({ move |window, cx| { Some(ContextMenu::build(window, cx, |menu, _window, _cx| { @@ -564,6 +571,11 @@ impl AgentConfiguration { }) })) } + }) + .anchor(gpui::Corner::TopRight) + .offset(gpui::Point { + x: px(0.0), + y: px(2.0), }); v_flex() @@ -943,7 +955,7 @@ impl AgentConfiguration { .cloned() .collect::>(); - let user_defined_agents = user_defined_agents + let user_defined_agents: Vec<_> = user_defined_agents .into_iter() .map(|name| { let icon = if let Some(icon_path) = agent_server_store.agent_icon(&name) { @@ -951,27 +963,93 @@ impl AgentConfiguration { } else { AgentIcon::Name(IconName::Ai) }; - self.render_agent_server(icon, 
name, true) - .into_any_element() + (name, icon) }) - .collect::>(); + .collect(); - let add_agens_button = Button::new("add-agent", "Add Agent") - .style(ButtonStyle::Outlined) - .icon_position(IconPosition::Start) - .icon(IconName::Plus) - .icon_size(IconSize::Small) - .icon_color(Color::Muted) - .label_size(LabelSize::Small) - .on_click(move |_, window, cx| { - if let Some(workspace) = window.root().flatten() { - let workspace = workspace.downgrade(); - window - .spawn(cx, async |cx| { - open_new_agent_servers_entry_in_settings_editor(workspace, cx).await + let add_agent_popover = PopoverMenu::new("add-agent-server-popover") + .trigger( + Button::new("add-agent", "Add Agent") + .style(ButtonStyle::Outlined) + .icon_position(IconPosition::Start) + .icon(IconName::Plus) + .icon_size(IconSize::Small) + .icon_color(Color::Muted) + .label_size(LabelSize::Small), + ) + .menu({ + move |window, cx| { + Some(ContextMenu::build(window, cx, |menu, _window, _cx| { + menu.entry("Install from Extensions", None, { + |window, cx| { + window.dispatch_action( + zed_actions::Extensions { + category_filter: Some( + ExtensionCategoryFilter::AgentServers, + ), + id: None, + } + .boxed_clone(), + cx, + ) + } }) - .detach_and_log_err(cx); + .entry("Add Custom Agent", None, { + move |window, cx| { + if let Some(workspace) = window.root().flatten() { + let workspace = workspace.downgrade(); + window + .spawn(cx, async |cx| { + open_new_agent_servers_entry_in_settings_editor( + workspace, cx, + ) + .await + }) + .detach_and_log_err(cx); + } + } + }) + .separator() + .header("Learn More") + .item( + ContextMenuEntry::new("Agent Servers Docs") + .icon(IconName::ArrowUpRight) + .icon_color(Color::Muted) + .icon_position(IconPosition::End) + .handler({ + move |window, cx| { + window.dispatch_action( + Box::new(OpenBrowser { + url: zed_urls::agent_server_docs(cx), + }), + cx, + ); + } + }), + ) + .item( + ContextMenuEntry::new("ACP Docs") + .icon(IconName::ArrowUpRight) + .icon_color(Color::Muted) + .icon_position(IconPosition::End) + .handler({ + move |window, cx| { + window.dispatch_action( + Box::new(OpenBrowser { + url: "https://agentclientprotocol.com/".into(), + }), + cx, + ); + } + }), + ) + })) } + }) + .anchor(gpui::Corner::TopRight) + .offset(gpui::Point { + x: px(0.0), + y: px(2.0), }); v_flex() @@ -982,7 +1060,7 @@ impl AgentConfiguration { .child(self.render_section_title( "External Agents", "All agents connected through the Agent Client Protocol.", - add_agens_button.into_any_element(), + add_agent_popover.into_any_element(), )) .child( v_flex() @@ -993,26 +1071,29 @@ impl AgentConfiguration { AgentIcon::Name(IconName::AiClaude), "Claude Code", false, + cx, )) .child(Divider::horizontal().color(DividerColor::BorderFaded)) .child(self.render_agent_server( AgentIcon::Name(IconName::AiOpenAi), "Codex CLI", false, + cx, )) .child(Divider::horizontal().color(DividerColor::BorderFaded)) .child(self.render_agent_server( AgentIcon::Name(IconName::AiGemini), "Gemini CLI", false, + cx, )) .map(|mut parent| { - for agent in user_defined_agents { + for (name, icon) in user_defined_agents { parent = parent .child( Divider::horizontal().color(DividerColor::BorderFaded), ) - .child(agent); + .child(self.render_agent_server(icon, name, true, cx)); } parent }), @@ -1025,6 +1106,7 @@ impl AgentConfiguration { icon: AgentIcon, name: impl Into, external: bool, + cx: &mut Context, ) -> impl IntoElement { let name = name.into(); let icon = match icon { @@ -1039,28 +1121,53 @@ impl AgentConfiguration { let tooltip_id = 
SharedString::new(format!("agent-source-{}", name)); let tooltip_message = format!("The {} agent was installed from an extension.", name); + let agent_server_name = ExternalAgentServerName(name.clone()); + + let uninstall_btn_id = SharedString::from(format!("uninstall-{}", name)); + let uninstall_button = IconButton::new(uninstall_btn_id, IconName::Trash) + .icon_color(Color::Muted) + .icon_size(IconSize::Small) + .tooltip(Tooltip::text("Uninstall Agent Extension")) + .on_click(cx.listener(move |this, _, _window, cx| { + let agent_name = agent_server_name.clone(); + + if let Some(ext_id) = this.agent_server_store.update(cx, |store, _cx| { + store.get_extension_id_for_agent(&agent_name) + }) { + ExtensionStore::global(cx) + .update(cx, |store, cx| store.uninstall_extension(ext_id, cx)) + .detach_and_log_err(cx); + } + })); + h_flex() - .gap_1p5() - .child(icon) - .child(Label::new(name)) - .when(external, |this| { - this.child( - div() - .id(tooltip_id) - .flex_none() - .tooltip(Tooltip::text(tooltip_message)) - .child( - Icon::new(IconName::ZedSrcExtension) - .size(IconSize::Small) - .color(Color::Muted), - ), - ) - }) + .gap_1() + .justify_between() .child( - Icon::new(IconName::Check) - .color(Color::Success) - .size(IconSize::Small), + h_flex() + .gap_1p5() + .child(icon) + .child(Label::new(name)) + .when(external, |this| { + this.child( + div() + .id(tooltip_id) + .flex_none() + .tooltip(Tooltip::text(tooltip_message)) + .child( + Icon::new(IconName::ZedSrcExtension) + .size(IconSize::Small) + .color(Color::Muted), + ), + ) + }) + .child( + Icon::new(IconName::Check) + .color(Color::Success) + .size(IconSize::Small), + ), ) + .when(external, |this| this.child(uninstall_button)) } } diff --git a/crates/client/src/zed_urls.rs b/crates/client/src/zed_urls.rs index 7193c099473c95794796c2fc4d3eaaf2f06eb1ac..957d6c68f773db025b4ee604666f5b3d8101148b 100644 --- a/crates/client/src/zed_urls.rs +++ b/crates/client/src/zed_urls.rs @@ -51,3 +51,11 @@ pub fn external_agents_docs(cx: &App) -> String { server_url = server_url(cx) ) } + +/// Returns the URL to Zed agent servers documentation. +pub fn agent_server_docs(cx: &App) -> String { + format!( + "{server_url}/docs/extensions/agent-servers", + server_url = server_url(cx) + ) +} diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index ef8079bd014ecc7b26102aafc931029f9ab1cafa..d3c078ffa101c8c66d1c5ab75fb8b59d7748127a 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -759,6 +759,18 @@ impl AgentServerStore { } }) } + + pub fn get_extension_id_for_agent( + &mut self, + name: &ExternalAgentServerName, + ) -> Option> { + self.external_agents.get_mut(name).and_then(|agent| { + agent + .as_any_mut() + .downcast_ref::() + .map(|ext_agent| ext_agent.extension_id.clone()) + }) + } } fn get_or_npm_install_builtin_agent( From 993919d360da1af40af7578b0d099cf52a512f19 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Tue, 11 Nov 2025 12:50:56 -0300 Subject: [PATCH 0015/1030] agent_ui: Add icon button to trigger the @-mention completions menu (#42449) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/zed-industries/zed/issues/37087 This PR adds an icon button to the footer of the message editor enabling to trigger and interact with the @-mention completions menu with the mouse. 
This is a first step towards making other types of context you can add in Zed's agent panel more discoverable. Next, I want to improve the discoverability of images and selections, given that you wouldn't necessarily know they work in Zed without a clear way to see them. But I think that for now, this is enough to close the issue above, which had lots of productive comments and discussion! Screenshot 2025-11-11 at 10  46 3@2x Release Notes: - agent: Added an icon button in the agent panel that allows to trigger the @-mention menu (for adding context) now also with the mouse. --- assets/icons/at_sign.svg | 4 ++ .../agent_ui/src/acp/completion_provider.rs | 8 ++- crates/agent_ui/src/acp/message_editor.rs | 49 +++++++++++++++++++ crates/agent_ui/src/acp/thread_view.rs | 25 ++++++++++ crates/icons/src/icons.rs | 1 + 5 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 assets/icons/at_sign.svg diff --git a/assets/icons/at_sign.svg b/assets/icons/at_sign.svg new file mode 100644 index 0000000000000000000000000000000000000000..531c10c8dc151fb27f2a53d424ab57acecd7d03c --- /dev/null +++ b/assets/icons/at_sign.svg @@ -0,0 +1,4 @@ + + + + diff --git a/crates/agent_ui/src/acp/completion_provider.rs b/crates/agent_ui/src/acp/completion_provider.rs index 84d75ebe4133b3145b892eec659867b137bce2f0..408dbedcfdd4998ca8d2e094aab4799bad168629 100644 --- a/crates/agent_ui/src/acp/completion_provider.rs +++ b/crates/agent_ui/src/acp/completion_provider.rs @@ -694,14 +694,18 @@ fn build_symbol_label(symbol_name: &str, file_name: &str, line: u32, cx: &App) - } fn build_code_label_for_full_path(file_name: &str, directory: Option<&str>, cx: &App) -> CodeLabel { - let comment_id = cx.theme().syntax().highlight_id("comment").map(HighlightId); + let path = cx + .theme() + .syntax() + .highlight_id("variable") + .map(HighlightId); let mut label = CodeLabelBuilder::default(); label.push_str(file_name, None); label.push_str(" ", None); if let Some(directory) = directory { - label.push_str(directory, comment_id); + label.push_str(directory, path); } label.build() diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 4f919a6c0425e48575d09380339730d7ddb26172..b7037a6413d93fb4ee538af7062049df9f58e818 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -15,6 +15,7 @@ use editor::{ EditorEvent, EditorMode, EditorSnapshot, EditorStyle, ExcerptId, FoldPlaceholder, Inlay, MultiBuffer, ToOffset, actions::Paste, + code_context_menus::CodeContextMenu, display_map::{Crease, CreaseId, FoldId}, scroll::Autoscroll, }; @@ -272,6 +273,15 @@ impl MessageEditor { self.editor.read(cx).is_empty(cx) } + pub fn is_completions_menu_visible(&self, cx: &App) -> bool { + self.editor + .read(cx) + .context_menu() + .borrow() + .as_ref() + .is_some_and(|menu| matches!(menu, CodeContextMenu::Completions(_)) && menu.visible()) + } + pub fn mentions(&self) -> HashSet { self.mention_set .mentions @@ -836,6 +846,45 @@ impl MessageEditor { cx.emit(MessageEditorEvent::Send) } + pub fn trigger_completion_menu(&mut self, window: &mut Window, cx: &mut Context) { + let editor = self.editor.clone(); + + cx.spawn_in(window, async move |_, cx| { + editor + .update_in(cx, |editor, window, cx| { + let menu_is_open = + editor.context_menu().borrow().as_ref().is_some_and(|menu| { + matches!(menu, CodeContextMenu::Completions(_)) && menu.visible() + }); + + let has_at_sign = { + let snapshot = editor.display_snapshot(cx); + let cursor = 
editor.selections.newest::(&snapshot).head(); + let offset = cursor.to_offset(&snapshot); + if offset > 0 { + snapshot + .buffer_snapshot() + .reversed_chars_at(offset) + .next() + .map(|sign| sign == '@') + .unwrap_or(false) + } else { + false + } + }; + + if menu_is_open && has_at_sign { + return; + } + + editor.insert("@", window, cx); + editor.show_completions(&editor::actions::ShowCompletions, window, cx); + }) + .log_err(); + }) + .detach(); + } + fn chat(&mut self, _: &Chat, _: &mut Window, cx: &mut Context) { self.send(cx); } diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 17daf5a18e97829d5e4d64d30d266b5d5d271e7b..4f3bbe718d3c6265f54f3cc4a949256b81c25572 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -4188,6 +4188,8 @@ impl AcpThreadView { .justify_between() .child( h_flex() + .gap_0p5() + .child(self.render_add_context_button(cx)) .child(self.render_follow_toggle(cx)) .children(self.render_burn_mode_toggle(cx)), ) @@ -4502,6 +4504,29 @@ impl AcpThreadView { })) } + fn render_add_context_button(&self, cx: &mut Context) -> impl IntoElement { + let message_editor = self.message_editor.clone(); + let menu_visible = message_editor.read(cx).is_completions_menu_visible(cx); + + IconButton::new("add-context", IconName::AtSign) + .icon_size(IconSize::Small) + .icon_color(Color::Muted) + .when(!menu_visible, |this| { + this.tooltip(move |_window, cx| { + Tooltip::with_meta("Add Context", None, "Or type @ to include context", cx) + }) + }) + .on_click(cx.listener(move |_this, _, window, cx| { + let message_editor_clone = message_editor.clone(); + + window.defer(cx, move |window, cx| { + message_editor_clone.update(cx, |message_editor, cx| { + message_editor.trigger_completion_menu(window, cx); + }); + }); + })) + } + fn render_markdown(&self, markdown: Entity, style: MarkdownStyle) -> MarkdownElement { let workspace = self.workspace.clone(); MarkdownElement::new(markdown, style).on_url_click(move |text, window, cx| { diff --git a/crates/icons/src/icons.rs b/crates/icons/src/icons.rs index fb45ca1eb5f8334190c11ad811a31128396ba23a..a0865773ac394722c113a43fe323de218b2f145a 100644 --- a/crates/icons/src/icons.rs +++ b/crates/icons/src/icons.rs @@ -35,6 +35,7 @@ pub enum IconName { ArrowUp, ArrowUpRight, Attach, + AtSign, AudioOff, AudioOn, Backspace, From 0268b170964f22bd91bac197bd1b803554fb547c Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 11:07:57 -0500 Subject: [PATCH 0016/1030] Add more secrets to eval workflows (#42459) Release Notes: - N/A --- .github/workflows/run_agent_evals.yml | 3 +++ crates/eval/src/eval.rs | 2 +- tooling/xtask/src/tasks/workflows/run_agent_evals.rs | 3 +++ tooling/xtask/src/tasks/workflows/vars.rs | 3 +++ 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run_agent_evals.yml b/.github/workflows/run_agent_evals.yml index 1a875aa2c463d264002f14264993b9c99ae1f49c..034a69c276d7b11325dcc4493b25d3c61d3f6455 100644 --- a/.github/workflows/run_agent_evals.yml +++ b/.github/workflows/run_agent_evals.yml @@ -6,6 +6,9 @@ env: CARGO_INCREMENTAL: '0' RUST_BACKTRACE: '1' ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} ZED_EVAL_TELEMETRY: '1' MODEL_NAME: ${{ inputs.model_name }} diff --git 
a/crates/eval/src/eval.rs b/crates/eval/src/eval.rs index d04dad8e99961480ce5f08328fa97aeabf5eda10..3166a7321782069153deb74b90e6b5b71fc99e06 100644 --- a/crates/eval/src/eval.rs +++ b/crates/eval/src/eval.rs @@ -463,8 +463,8 @@ pub fn find_model( .ok_or_else(|| { anyhow::anyhow!( "No language model with ID {}/{} was available. Available models: {}", - selected.model.0, selected.provider.0, + selected.model.0, model_registry .available_models(cx) .map(|model| format!("{}/{}", model.provider_id().0, model.id().0)) diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index 4601d5a5bf6a60435a87edab9cd6d62b77ef52a8..341ca1d4a517489bc9248a0d52e39f75e8da7074 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -19,6 +19,9 @@ pub(crate) fn run_agent_evals() -> Workflow { .add_env(("CARGO_INCREMENTAL", 0)) .add_env(("RUST_BACKTRACE", 1)) .add_env(("ANTHROPIC_API_KEY", vars::ANTHROPIC_API_KEY)) + .add_env(("OPENAI_API_KEY", vars::OPENAI_API_KEY)) + .add_env(("GOOGLE_API_KEY", vars::GOOGLE_API_KEY)) + .add_env(("GOOGLE_CLOUD_PROJECT", vars::GOOGLE_CLOUD_PROJECT)) .add_env(("ZED_CLIENT_CHECKSUM_SEED", vars::ZED_CLIENT_CHECKSUM_SEED)) .add_env(("ZED_EVAL_TELEMETRY", 1)) .add_env(("MODEL_NAME", model_name.to_string())) diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index 51f6383b1eddafd2a723e31ea23247b90b3a955c..58f2ba7b63aabcff80fb2c1073da892da2e7b25f 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -17,6 +17,9 @@ macro_rules! var { } secret!(ANTHROPIC_API_KEY); +secret!(OPENAI_API_KEY); +secret!(GOOGLE_API_KEY); +secret!(GOOGLE_CLOUD_PROJECT); secret!(APPLE_NOTARIZATION_ISSUER_ID); secret!(APPLE_NOTARIZATION_KEY); secret!(APPLE_NOTARIZATION_KEY_ID); From 03acbb7de35afc27ae7eb32b43b1c4121875da8f Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Tue, 11 Nov 2025 11:13:59 -0500 Subject: [PATCH 0017/1030] collab: Remove unused embeddings queries and model (#42463) This PR removes the queries and database model for embeddings, as they're no longer used. 
Release Notes: - N/A --- crates/collab/src/db/queries.rs | 1 - crates/collab/src/db/queries/embeddings.rs | 94 ------------------- crates/collab/src/db/tables.rs | 1 - crates/collab/src/db/tables/embedding.rs | 18 ---- crates/collab/src/db/tests.rs | 3 - crates/collab/src/db/tests/embedding_tests.rs | 87 ----------------- crates/collab/src/main.rs | 4 +- 7 files changed, 1 insertion(+), 207 deletions(-) delete mode 100644 crates/collab/src/db/queries/embeddings.rs delete mode 100644 crates/collab/src/db/tables/embedding.rs delete mode 100644 crates/collab/src/db/tests/embedding_tests.rs diff --git a/crates/collab/src/db/queries.rs b/crates/collab/src/db/queries.rs index 7b457a5da438e0a9ab7c6cd79368b2845e962318..db91021c22b35b7b7159bd5cd54e28f8fa1a14e2 100644 --- a/crates/collab/src/db/queries.rs +++ b/crates/collab/src/db/queries.rs @@ -5,7 +5,6 @@ pub mod buffers; pub mod channels; pub mod contacts; pub mod contributors; -pub mod embeddings; pub mod extensions; pub mod notifications; pub mod projects; diff --git a/crates/collab/src/db/queries/embeddings.rs b/crates/collab/src/db/queries/embeddings.rs deleted file mode 100644 index 6ae8013284f4652d5cb0d4a19214c3a5c1a42df0..0000000000000000000000000000000000000000 --- a/crates/collab/src/db/queries/embeddings.rs +++ /dev/null @@ -1,94 +0,0 @@ -use super::*; -use time::Duration; -use time::OffsetDateTime; - -impl Database { - pub async fn get_embeddings( - &self, - model: &str, - digests: &[Vec], - ) -> Result, Vec>> { - self.transaction(|tx| async move { - let embeddings = { - let mut db_embeddings = embedding::Entity::find() - .filter( - embedding::Column::Model.eq(model).and( - embedding::Column::Digest - .is_in(digests.iter().map(|digest| digest.as_slice())), - ), - ) - .stream(&*tx) - .await?; - - let mut embeddings = HashMap::default(); - while let Some(db_embedding) = db_embeddings.next().await { - let db_embedding = db_embedding?; - embeddings.insert(db_embedding.digest, db_embedding.dimensions); - } - embeddings - }; - - if !embeddings.is_empty() { - let now = OffsetDateTime::now_utc(); - let retrieved_at = PrimitiveDateTime::new(now.date(), now.time()); - - embedding::Entity::update_many() - .filter( - embedding::Column::Digest - .is_in(embeddings.keys().map(|digest| digest.as_slice())), - ) - .col_expr(embedding::Column::RetrievedAt, Expr::value(retrieved_at)) - .exec(&*tx) - .await?; - } - - Ok(embeddings) - }) - .await - } - - pub async fn save_embeddings( - &self, - model: &str, - embeddings: &HashMap, Vec>, - ) -> Result<()> { - self.transaction(|tx| async move { - embedding::Entity::insert_many(embeddings.iter().map(|(digest, dimensions)| { - let now_offset_datetime = OffsetDateTime::now_utc(); - let retrieved_at = - PrimitiveDateTime::new(now_offset_datetime.date(), now_offset_datetime.time()); - - embedding::ActiveModel { - model: ActiveValue::set(model.to_string()), - digest: ActiveValue::set(digest.clone()), - dimensions: ActiveValue::set(dimensions.clone()), - retrieved_at: ActiveValue::set(retrieved_at), - } - })) - .on_conflict( - OnConflict::columns([embedding::Column::Model, embedding::Column::Digest]) - .do_nothing() - .to_owned(), - ) - .exec_without_returning(&*tx) - .await?; - Ok(()) - }) - .await - } - - pub async fn purge_old_embeddings(&self) -> Result<()> { - self.transaction(|tx| async move { - embedding::Entity::delete_many() - .filter( - embedding::Column::RetrievedAt - .lte(OffsetDateTime::now_utc() - Duration::days(60)), - ) - .exec(&*tx) - .await?; - - Ok(()) - }) - .await - } -} diff --git 
a/crates/collab/src/db/tables.rs b/crates/collab/src/db/tables.rs index e619acaaf2bc237caac67dedcb5c738114d260d5..0220955824af30f489afe32f9695af3dbb52cdc9 100644 --- a/crates/collab/src/db/tables.rs +++ b/crates/collab/src/db/tables.rs @@ -8,7 +8,6 @@ pub mod channel_chat_participant; pub mod channel_member; pub mod contact; pub mod contributor; -pub mod embedding; pub mod extension; pub mod extension_version; pub mod follower; diff --git a/crates/collab/src/db/tables/embedding.rs b/crates/collab/src/db/tables/embedding.rs deleted file mode 100644 index 8743b4b9e65751bf350bff1db532de38ce73f368..0000000000000000000000000000000000000000 --- a/crates/collab/src/db/tables/embedding.rs +++ /dev/null @@ -1,18 +0,0 @@ -use sea_orm::entity::prelude::*; -use time::PrimitiveDateTime; - -#[derive(Clone, Debug, PartialEq, DeriveEntityModel)] -#[sea_orm(table_name = "embeddings")] -pub struct Model { - #[sea_orm(primary_key)] - pub model: String, - #[sea_orm(primary_key)] - pub digest: Vec, - pub dimensions: Vec, - pub retrieved_at: PrimitiveDateTime, -} - -#[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] -pub enum Relation {} - -impl ActiveModelBehavior for ActiveModel {} diff --git a/crates/collab/src/db/tests.rs b/crates/collab/src/db/tests.rs index 67c36576aac0f938bbc040202d7fa83e35af2d3b..7aed2ebc2dd16f31cde4116a70377b40b1cb8b2f 100644 --- a/crates/collab/src/db/tests.rs +++ b/crates/collab/src/db/tests.rs @@ -2,9 +2,6 @@ mod buffer_tests; mod channel_tests; mod contributor_tests; mod db_tests; -// we only run postgres tests on macos right now -#[cfg(target_os = "macos")] -mod embedding_tests; mod extension_tests; use crate::migrations::run_database_migrations; diff --git a/crates/collab/src/db/tests/embedding_tests.rs b/crates/collab/src/db/tests/embedding_tests.rs deleted file mode 100644 index 5d8d69c0304d3a16b55e9d7b1477fe62cc22024a..0000000000000000000000000000000000000000 --- a/crates/collab/src/db/tests/embedding_tests.rs +++ /dev/null @@ -1,87 +0,0 @@ -use super::TestDb; -use crate::db::embedding; -use collections::HashMap; -use sea_orm::{ColumnTrait, EntityTrait, QueryFilter, sea_query::Expr}; -use std::ops::Sub; -use time::{Duration, OffsetDateTime, PrimitiveDateTime}; - -// SQLite does not support array arguments, so we only test this against a real postgres instance -#[gpui::test] -async fn test_get_embeddings_postgres(cx: &mut gpui::TestAppContext) { - let test_db = TestDb::postgres(cx.executor()); - let db = test_db.db(); - - let provider = "test_model"; - let digest1 = vec![1, 2, 3]; - let digest2 = vec![4, 5, 6]; - let embeddings = HashMap::from_iter([ - (digest1.clone(), vec![0.1, 0.2, 0.3]), - (digest2.clone(), vec![0.4, 0.5, 0.6]), - ]); - - // Save embeddings - db.save_embeddings(provider, &embeddings).await.unwrap(); - - // Retrieve embeddings - let retrieved_embeddings = db - .get_embeddings(provider, &[digest1.clone(), digest2.clone()]) - .await - .unwrap(); - assert_eq!(retrieved_embeddings.len(), 2); - assert!(retrieved_embeddings.contains_key(&digest1)); - assert!(retrieved_embeddings.contains_key(&digest2)); - - // Check if the retrieved embeddings are correct - assert_eq!(retrieved_embeddings[&digest1], vec![0.1, 0.2, 0.3]); - assert_eq!(retrieved_embeddings[&digest2], vec![0.4, 0.5, 0.6]); -} - -#[gpui::test] -async fn test_purge_old_embeddings(cx: &mut gpui::TestAppContext) { - let test_db = TestDb::postgres(cx.executor()); - let db = test_db.db(); - - let model = "test_model"; - let digest = vec![7, 8, 9]; - let embeddings = 
HashMap::from_iter([(digest.clone(), vec![0.7, 0.8, 0.9])]); - - // Save old embeddings - db.save_embeddings(model, &embeddings).await.unwrap(); - - // Reach into the DB and change the retrieved at to be > 60 days - db.transaction(|tx| { - let digest = digest.clone(); - async move { - let sixty_days_ago = OffsetDateTime::now_utc().sub(Duration::days(61)); - let retrieved_at = PrimitiveDateTime::new(sixty_days_ago.date(), sixty_days_ago.time()); - - embedding::Entity::update_many() - .filter( - embedding::Column::Model - .eq(model) - .and(embedding::Column::Digest.eq(digest)), - ) - .col_expr(embedding::Column::RetrievedAt, Expr::value(retrieved_at)) - .exec(&*tx) - .await - .unwrap(); - - Ok(()) - } - }) - .await - .unwrap(); - - // Purge old embeddings - db.purge_old_embeddings().await.unwrap(); - - // Try to retrieve the purged embeddings - let retrieved_embeddings = db - .get_embeddings(model, std::slice::from_ref(&digest)) - .await - .unwrap(); - assert!( - retrieved_embeddings.is_empty(), - "Old embeddings should have been purged" - ); -} diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index 6b94459910647c1e48ee69f2b0dd38afd3723821..08047c56e55c016f3fd2b34d0935fb33a61b5dad 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -13,7 +13,7 @@ use collab::llm::db::LlmDatabase; use collab::migrations::run_database_migrations; use collab::{ AppState, Config, Result, api::fetch_extensions_from_blob_store_periodically, db, env, - executor::Executor, rpc::ResultExt, + executor::Executor, }; use db::Database; use std::{ @@ -95,8 +95,6 @@ async fn main() -> Result<()> { let state = AppState::new(config, Executor::Production).await?; if mode.is_collab() { - state.db.purge_old_embeddings().await.trace_err(); - let epoch = state .db .create_server(&state.config.zed_environment) From 83351283e4f3690d12ca2d9fc005f381bf028704 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 17:15:12 +0100 Subject: [PATCH 0018/1030] settings: Skip terminal env vars with substitutions in vscode import (#42464) Closes https://github.com/zed-industries/zed/issues/40547 Release Notes: - Fixed vscode import creating faulty terminal env vars in terminal settings --- crates/auto_update/src/auto_update.rs | 4 +--- crates/settings/src/vscode_import.rs | 8 +++++++- crates/terminal/src/terminal.rs | 10 +++++++++- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index facb55e0df951633b082f23477e35ce2b55f6f84..accda1b1ce4b09db0bc4cc0fb5824290725cb8ee 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -350,8 +350,7 @@ impl AutoUpdater { pub fn start_polling(&self, cx: &mut Context) -> Task> { cx.spawn(async move |this, cx| { - #[cfg(target_os = "windows")] - { + if cfg!(target_os = "windows") { use util::ResultExt; cleanup_windows() @@ -903,7 +902,6 @@ async fn install_release_macos( Ok(None) } -#[cfg(target_os = "windows")] async fn cleanup_windows() -> Result<()> { let parent = std::env::current_exe()? 
.parent() diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 36bd84e1a145a9a64eadbaec9411f904b9a881c9..cbffb33b1795dbf71e48df8089c472ee534306c1 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -753,7 +753,13 @@ impl VsCodeSettings { let env = self .read_value(&format!("terminal.integrated.env.{platform}")) .and_then(|v| v.as_object()) - .map(|v| v.iter().map(|(k, v)| (k.clone(), v.to_string())).collect()); + .map(|v| { + v.iter() + .map(|(k, v)| (k.clone(), v.to_string())) + // zed does not support substitutions, so this can break env vars + .filter(|(_, v)| !v.contains('$')) + .collect() + }); ProjectTerminalSettingsContent { // TODO: handle arguments diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index c635cb9b417f422ad0ddd91611233cb04a771679..59c71474d47b9c79d33b28bd7cbbc1a187b4ebfd 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -1386,7 +1386,15 @@ impl Terminal { /// (This is a no-op for display-only terminals.) fn write_to_pty(&self, input: impl Into>) { if let TerminalType::Pty { pty_tx, .. } = &self.terminal_type { - pty_tx.notify(input.into()); + let input = input.into(); + if log::log_enabled!(log::Level::Debug) { + if let Ok(str) = str::from_utf8(&input) { + log::debug!("Writing to PTY: {:?}", str); + } else { + log::debug!("Writing to PTY: {:?}", input); + } + } + pty_tx.notify(input); } } From 9e1e732db86076a21465baa9e0ab75a417ed64a0 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 11:37:20 -0500 Subject: [PATCH 0019/1030] Use longer timeout on evals (#42465) The GPT-5 ones in particular can take a long time! Release Notes: - N/A --------- Co-authored-by: Bennet Bo Fenner --- .github/workflows/run_agent_evals.yml | 2 +- .github/workflows/run_unit_evals.yml | 4 ++-- crates/gpui/src/executor.rs | 6 +++++- script/run-unit-evals | 2 +- tooling/xtask/src/tasks/workflows/run_agent_evals.rs | 3 ++- tooling/xtask/src/tasks/workflows/vars.rs | 2 +- 6 files changed, 12 insertions(+), 7 deletions(-) diff --git a/.github/workflows/run_agent_evals.yml b/.github/workflows/run_agent_evals.yml index 034a69c276d7b11325dcc4493b25d3c61d3f6455..0ee8f3c5150589bc4565cd93326cf437d384c428 100644 --- a/.github/workflows/run_agent_evals.yml +++ b/.github/workflows/run_agent_evals.yml @@ -7,7 +7,7 @@ env: RUST_BACKTRACE: '1' ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }} + GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} ZED_EVAL_TELEMETRY: '1' diff --git a/.github/workflows/run_unit_evals.yml b/.github/workflows/run_unit_evals.yml index a41b4fb6d7058a97dcd5a98894a0d2c4687ceed4..e3f03c2f9ecd7a48423939f315ce41e13b934d7d 100644 --- a/.github/workflows/run_unit_evals.yml +++ b/.github/workflows/run_unit_evals.yml @@ -1,6 +1,6 @@ -# Generated from xtask::workflows::run_agent_evals +# Generated from xtask::workflows::run_unit_evals # Rebuild with `cargo xtask workflows`. 
-name: run_agent_evals +name: run_unit_evals env: CARGO_TERM_COLOR: always CARGO_INCREMENTAL: '0' diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index c05cf5745d6e19172191e298fa4f31e76513a00b..86cd7451fb3559ffd7da4001bdf6f6bd121e8b39 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -281,7 +281,11 @@ impl BackgroundExecutor { }); let mut cx = std::task::Context::from_waker(&waker); - let duration = Duration::from_secs(180); + let duration = Duration::from_secs( + option_env!("GPUI_TEST_TIMEOUT") + .and_then(|s| s.parse::().ok()) + .unwrap_or(180), + ); let mut test_should_end_by = Instant::now() + duration; loop { diff --git a/script/run-unit-evals b/script/run-unit-evals index 02481e1ce9dde7d2cbde9603f663093bf7a2ee38..7a72d0b6a64b9ae9f3dcf340c16d7426d88d6a0b 100755 --- a/script/run-unit-evals +++ b/script/run-unit-evals @@ -2,4 +2,4 @@ set -euxo pipefail -cargo nextest run --workspace --no-fail-fast --features unit-eval --no-capture -E 'test(::eval_)' +GPUI_TEST_TIMEOUT=1500 cargo nextest run --workspace --no-fail-fast --features unit-eval --no-capture -E 'test(::eval_)' diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index 341ca1d4a517489bc9248a0d52e39f75e8da7074..846001201f62fd65bf9d05af53ace59646ea197c 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -20,7 +20,7 @@ pub(crate) fn run_agent_evals() -> Workflow { .add_env(("RUST_BACKTRACE", 1)) .add_env(("ANTHROPIC_API_KEY", vars::ANTHROPIC_API_KEY)) .add_env(("OPENAI_API_KEY", vars::OPENAI_API_KEY)) - .add_env(("GOOGLE_API_KEY", vars::GOOGLE_API_KEY)) + .add_env(("GOOGLE_AI_API_KEY", vars::GOOGLE_AI_API_KEY)) .add_env(("GOOGLE_CLOUD_PROJECT", vars::GOOGLE_CLOUD_PROJECT)) .add_env(("ZED_CLIENT_CHECKSUM_SEED", vars::ZED_CLIENT_CHECKSUM_SEED)) .add_env(("ZED_EVAL_TELEMETRY", 1)) @@ -53,6 +53,7 @@ pub(crate) fn run_unit_evals() -> Workflow { let unit_evals = unit_evals(); named::workflow() + .name("run_unit_evals") .on(Event::default() .schedule([ // GitHub might drop jobs at busy times, so we choose a random time in the middle of the night. diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index 58f2ba7b63aabcff80fb2c1073da892da2e7b25f..6493bd0ab62699454af6c100d526143c54f8774f 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -18,7 +18,7 @@ macro_rules! var { secret!(ANTHROPIC_API_KEY); secret!(OPENAI_API_KEY); -secret!(GOOGLE_API_KEY); +secret!(GOOGLE_AI_API_KEY); secret!(GOOGLE_CLOUD_PROJECT); secret!(APPLE_NOTARIZATION_ISSUER_ID); secret!(APPLE_NOTARIZATION_KEY); From 7e491ac5001c374616b9aa87593ca1aeccac09e9 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Tue, 11 Nov 2025 11:44:04 -0500 Subject: [PATCH 0020/1030] collab: Drop `embeddings` table (#42466) This PR drops the `embeddings` table, as it is no longer used. 
Release Notes: - N/A --- crates/collab/migrations/20251111161644_drop_embeddings.sql | 1 + 1 file changed, 1 insertion(+) create mode 100644 crates/collab/migrations/20251111161644_drop_embeddings.sql diff --git a/crates/collab/migrations/20251111161644_drop_embeddings.sql b/crates/collab/migrations/20251111161644_drop_embeddings.sql new file mode 100644 index 0000000000000000000000000000000000000000..80f42c7d2c88b258ef8cc63757694a7e229643c7 --- /dev/null +++ b/crates/collab/migrations/20251111161644_drop_embeddings.sql @@ -0,0 +1 @@ +drop table embeddings; From c50f8216138e6069ddb51af07b612e1ec041c522 Mon Sep 17 00:00:00 2001 From: brequet <125278669+brequet@users.noreply.github.com> Date: Tue, 11 Nov 2025 17:58:18 +0100 Subject: [PATCH 0021/1030] docs: Fix typo in `configuring-zed.md` (#42454) Fix a minor typo in the setting key: `auto_install_extension` should be `auto_install_extensions`. Release Notes: - N/A --- docs/src/configuring-zed.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 145620c3962984407db73bf7ac4c0a3bbfa75324..14f11df256167928931280cb906cd996615b101b 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -213,7 +213,7 @@ Note: This setting has no effect in Vim mode, as rewrap is already allowed every ## Auto Install extensions - Description: Define extensions to be autoinstalled or never be installed. -- Setting: `auto_install_extension` +- Setting: `auto_install_extensions` - Default: `{ "html": true }` **Options** From 5f4d0dbaab48190ae17a173c614a2b9025bf4ba4 Mon Sep 17 00:00:00 2001 From: feeiyu <158308373+feeiyu@users.noreply.github.com> Date: Wed, 12 Nov 2025 01:20:38 +0800 Subject: [PATCH 0022/1030] Fix circular reference issue around PopoverMenu (#42461) Follow up to https://github.com/zed-industries/zed/pull/42351 Release Notes: - N/A --- .../src/edit_prediction_button.rs | 24 +++++++++++-------- crates/language_tools/src/lsp_button.rs | 9 +++++-- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 6e9000bc62eea94d5c48dca2416781f46428522c..a0bbe18b4bcaaf433b15db333b3e5a7eb8dfb4d0 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -128,20 +128,21 @@ impl Render for EditPredictionButton { }), ); } - let this = cx.entity(); + let this = cx.weak_entity(); div().child( PopoverMenu::new("copilot") .menu(move |window, cx| { let current_status = Copilot::global(cx)?.read(cx).status(); - Some(match current_status { + match current_status { Status::Authorized => this.update(cx, |this, cx| { this.build_copilot_context_menu(window, cx) }), _ => this.update(cx, |this, cx| { this.build_copilot_start_menu(window, cx) }), - }) + } + .ok() }) .anchor(Corner::BottomRight) .trigger_with_tooltip( @@ -182,7 +183,7 @@ impl Render for EditPredictionButton { let icon = status.to_icon(); let tooltip_text = status.to_tooltip(); let has_menu = status.has_menu(); - let this = cx.entity(); + let this = cx.weak_entity(); let fs = self.fs.clone(); div().child( @@ -209,9 +210,11 @@ impl Render for EditPredictionButton { ) })) } - SupermavenButtonStatus::Ready => Some(this.update(cx, |this, cx| { - this.build_supermaven_context_menu(window, cx) - })), + SupermavenButtonStatus::Ready => this + .update(cx, |this, cx| { + this.build_supermaven_context_menu(window, 
cx) + }) + .ok(), _ => None, }) .anchor(Corner::BottomRight) @@ -233,15 +236,16 @@ impl Render for EditPredictionButton { let enabled = self.editor_enabled.unwrap_or(true); let has_api_key = CodestralCompletionProvider::has_api_key(cx); let fs = self.fs.clone(); - let this = cx.entity(); + let this = cx.weak_entity(); div().child( PopoverMenu::new("codestral") .menu(move |window, cx| { if has_api_key { - Some(this.update(cx, |this, cx| { + this.update(cx, |this, cx| { this.build_codestral_context_menu(window, cx) - })) + }) + .ok() } else { Some(ContextMenu::build(window, cx, |menu, _, _| { let fs = fs.clone(); diff --git a/crates/language_tools/src/lsp_button.rs b/crates/language_tools/src/lsp_button.rs index 7dc2e93a5c707eaa3829caba6d6d2a04773883b1..ee49114b787e764989453fae1d12f61253eea099 100644 --- a/crates/language_tools/src/lsp_button.rs +++ b/crates/language_tools/src/lsp_button.rs @@ -1053,11 +1053,16 @@ impl Render for LspButton { (None, "All Servers Operational") }; - let lsp_button = cx.entity(); + let lsp_button = cx.weak_entity(); div().child( PopoverMenu::new("lsp-tool") - .menu(move |_, cx| lsp_button.read(cx).lsp_menu.clone()) + .menu(move |_, cx| { + lsp_button + .read_with(cx, |lsp_button, _| lsp_button.lsp_menu.clone()) + .ok() + .flatten() + }) .anchor(Corner::BottomLeft) .with_handle(self.popover_menu_handle.clone()) .trigger_with_tooltip( From 908ef035025335ebc94e04e807ee0a0ce5311ee6 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 13:45:48 -0500 Subject: [PATCH 0023/1030] Split out cron and non-cron unit evals (#42472) Release Notes: - N/A --------- Co-authored-by: Bennet Bo Fenner --- .github/workflows/run_agent_evals.yml | 5 + .github/workflows/run_cron_unit_evals.yml | 78 ++++++++++++++++ .github/workflows/run_unit_evals.yml | 21 ++++- script/run-unit-evals | 4 + tooling/xtask/src/tasks/workflows.rs | 4 + .../src/tasks/workflows/run_agent_evals.rs | 91 ++++++++++++++----- 6 files changed, 178 insertions(+), 25 deletions(-) create mode 100644 .github/workflows/run_cron_unit_evals.yml diff --git a/.github/workflows/run_agent_evals.yml b/.github/workflows/run_agent_evals.yml index 0ee8f3c5150589bc4565cd93326cf437d384c428..421d5a1c8003eaa42977339b4ab8e5e0df7ee014 100644 --- a/.github/workflows/run_agent_evals.yml +++ b/.github/workflows/run_agent_evals.yml @@ -51,6 +51,11 @@ jobs: - name: run_agent_evals::agent_evals::run_eval run: cargo run --package=eval -- --repetitions=8 --concurrency=1 --model "${MODEL_NAME}" shell: bash -euxo pipefail {0} + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} - name: steps::cleanup_cargo_config if: always() run: | diff --git a/.github/workflows/run_cron_unit_evals.yml b/.github/workflows/run_cron_unit_evals.yml new file mode 100644 index 0000000000000000000000000000000000000000..9137d1599c920d5f3c72ba7c884bc76d9aed6f54 --- /dev/null +++ b/.github/workflows/run_cron_unit_evals.yml @@ -0,0 +1,78 @@ +# Generated from xtask::workflows::run_cron_unit_evals +# Rebuild with `cargo xtask workflows`. 
+name: run_cron_unit_evals +env: + CARGO_TERM_COLOR: always + CARGO_INCREMENTAL: '0' + RUST_BACKTRACE: '1' + ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} +on: + schedule: + - cron: 47 1 * * 2 + workflow_dispatch: {} +jobs: + cron_unit_evals: + runs-on: namespace-profile-16x32-ubuntu-2204 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: steps::setup_cargo_config + run: | + mkdir -p ./../.cargo + cp ./.cargo/ci-config.toml ./../.cargo/config.toml + shell: bash -euxo pipefail {0} + - name: steps::cache_rust_dependencies_namespace + uses: namespacelabs/nscloud-cache-action@v1 + with: + cache: rust + - name: steps::setup_linux + run: ./script/linux + shell: bash -euxo pipefail {0} + - name: steps::install_mold + run: ./script/install-mold + shell: bash -euxo pipefail {0} + - name: steps::download_wasi_sdk + run: ./script/download-wasi-sdk + shell: bash -euxo pipefail {0} + - name: steps::cargo_install_nextest + run: cargo install cargo-nextest --locked + shell: bash -euxo pipefail {0} + - name: steps::clear_target_dir_if_large + run: ./script/clear-target-dir-if-larger-than 250 + shell: bash -euxo pipefail {0} + - name: ./script/run-unit-evals + run: ./script/run-unit-evals + shell: bash -euxo pipefail {0} + env: + ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} + - name: run_agent_evals::unit_evals::send_failure_to_slack + if: ${{ failure() }} + uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN }} + payload: | + channel: C04UDRNNJFQ + text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" + - name: steps::cleanup_cargo_config + if: always() + run: | + rm -rf ./../.cargo + shell: bash -euxo pipefail {0} + - name: run_agent_evals::cron_unit_evals::send_failure_to_slack + if: ${{ failure() }} + uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 + with: + method: chat.postMessage + token: ${{ secrets.SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN }} + payload: | + channel: C04UDRNNJFQ + text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + cancel-in-progress: true diff --git a/.github/workflows/run_unit_evals.yml b/.github/workflows/run_unit_evals.yml index e3f03c2f9ecd7a48423939f315ce41e13b934d7d..9f2af35dca5429488e169fd1fe6d9ac098a5059a 100644 --- a/.github/workflows/run_unit_evals.yml +++ b/.github/workflows/run_unit_evals.yml @@ -6,12 +6,21 @@ env: CARGO_INCREMENTAL: '0' RUST_BACKTRACE: '1' ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} + ZED_EVAL_TELEMETRY: '1' + MODEL_NAME: ${{ inputs.model_name }} on: - schedule: - - cron: 47 1 * * 2 - workflow_dispatch: {} + workflow_dispatch: + inputs: + model_name: + description: model_name + required: true + type: string + commit_sha: + description: commit_sha + required: true + type: string jobs: - unit_evals: + run_unit_evals: runs-on: namespace-profile-16x32-ubuntu-2204 steps: - name: steps::checkout_repo @@ -47,6 +56,10 @@ jobs: shell: bash -euxo pipefail {0} env: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} + 
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} + GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} + UNIT_EVAL_COMMIT: ${{ inputs.commit_sha }} - name: run_agent_evals::unit_evals::send_failure_to_slack if: ${{ failure() }} uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 diff --git a/script/run-unit-evals b/script/run-unit-evals index 7a72d0b6a64b9ae9f3dcf340c16d7426d88d6a0b..c5178add7a1e4c76151b3907771abe81ba46aaaf 100755 --- a/script/run-unit-evals +++ b/script/run-unit-evals @@ -2,4 +2,8 @@ set -euxo pipefail +if [ -n "${UNIT_EVAL_COMMIT:-}" ]; then + git checkout "$UNIT_EVAL_COMMIT" +fi + GPUI_TEST_TIMEOUT=1500 cargo nextest run --workspace --no-fail-fast --features unit-eval --no-capture -E 'test(::eval_)' diff --git a/tooling/xtask/src/tasks/workflows.rs b/tooling/xtask/src/tasks/workflows.rs index bf6a332075c52cd08dcc44d73fc37239bd60a740..374a22f3ea9c65dcfc9743f77448a5c29117cedf 100644 --- a/tooling/xtask/src/tasks/workflows.rs +++ b/tooling/xtask/src/tasks/workflows.rs @@ -33,6 +33,10 @@ pub fn run_workflows(_: GenerateWorkflowArgs) -> Result<()> { ("cherry_pick.yml", cherry_pick::cherry_pick()), ("compare_perf.yml", compare_perf::compare_perf()), ("run_unit_evals.yml", run_agent_evals::run_unit_evals()), + ( + "run_cron_unit_evals.yml", + run_agent_evals::run_cron_unit_evals(), + ), ("run_agent_evals.yml", run_agent_evals::run_agent_evals()), ("after_release.yml", after_release::after_release()), ]; diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index 846001201f62fd65bf9d05af53ace59646ea197c..b69216e5a00a61762625e92b2592fd4cbe0cef30 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -28,6 +28,36 @@ pub(crate) fn run_agent_evals() -> Workflow { .add_job(agent_evals.name, agent_evals.job) } +pub(crate) fn run_unit_evals() -> Workflow { + let model_name = Input::string("model_name", None); + let commit_sha = Input::string("commit_sha", None); + + let unit_evals = named::job(unit_evals(Some(&commit_sha))); + + named::workflow() + .name("run_unit_evals") + .on(Event::default().workflow_dispatch( + WorkflowDispatch::default() + .add_input(model_name.name, model_name.input()) + .add_input(commit_sha.name, commit_sha.input()), + )) + .concurrency(vars::one_workflow_per_non_main_branch()) + .add_env(("CARGO_TERM_COLOR", "always")) + .add_env(("CARGO_INCREMENTAL", 0)) + .add_env(("RUST_BACKTRACE", 1)) + .add_env(("ZED_CLIENT_CHECKSUM_SEED", vars::ZED_CLIENT_CHECKSUM_SEED)) + .add_env(("ZED_EVAL_TELEMETRY", 1)) + .add_env(("MODEL_NAME", model_name.to_string())) + .add_job(unit_evals.name, unit_evals.job) +} + +fn add_api_keys(step: Step) -> Step { + step.add_env(("ANTHROPIC_API_KEY", vars::ANTHROPIC_API_KEY)) + .add_env(("OPENAI_API_KEY", vars::OPENAI_API_KEY)) + .add_env(("GOOGLE_AI_API_KEY", vars::GOOGLE_AI_API_KEY)) + .add_env(("GOOGLE_CLOUD_PROJECT", vars::GOOGLE_CLOUD_PROJECT)) +} + fn agent_evals() -> NamedJob { fn run_eval() -> Step { named::bash( @@ -44,16 +74,16 @@ fn agent_evals() -> NamedJob { .map(steps::install_linux_dependencies) .add_step(setup_cargo_config(Platform::Linux)) .add_step(steps::script("cargo build --package=eval")) - .add_step(run_eval()) + .add_step(add_api_keys(run_eval())) .add_step(steps::cleanup_cargo_config(Platform::Linux)), ) } -pub(crate) fn run_unit_evals() -> Workflow { - let unit_evals = unit_evals(); +pub(crate) fn 
run_cron_unit_evals() -> Workflow { + let unit_evals = cron_unit_evals(); named::workflow() - .name("run_unit_evals") + .name("run_cron_unit_evals") .on(Event::default() .schedule([ // GitHub might drop jobs at busy times, so we choose a random time in the middle of the night. @@ -68,7 +98,7 @@ pub(crate) fn run_unit_evals() -> Workflow { .add_job(unit_evals.name, unit_evals.job) } -fn unit_evals() -> NamedJob { +fn cron_unit_evals() -> NamedJob { fn send_failure_to_slack() -> Step { named::uses( "slackapi", @@ -84,20 +114,39 @@ fn unit_evals() -> NamedJob { "#})) } - named::job( - Job::default() - .runs_on(runners::LINUX_DEFAULT) - .add_step(steps::checkout_repo()) - .add_step(steps::setup_cargo_config(Platform::Linux)) - .add_step(steps::cache_rust_dependencies_namespace()) - .map(steps::install_linux_dependencies) - .add_step(steps::cargo_install_nextest(Platform::Linux)) - .add_step(steps::clear_target_dir_if_large(Platform::Linux)) - .add_step( - steps::script("./script/run-unit-evals") - .add_env(("ANTHROPIC_API_KEY", vars::ANTHROPIC_API_KEY)), - ) - .add_step(send_failure_to_slack()) - .add_step(steps::cleanup_cargo_config(Platform::Linux)), - ) + named::job(unit_evals(None).add_step(send_failure_to_slack())) +} + +fn unit_evals(commit: Option<&Input>) -> Job { + fn send_failure_to_slack() -> Step { + named::uses( + "slackapi", + "slack-github-action", + "b0fa283ad8fea605de13dc3f449259339835fc52", + ) + .if_condition(Expression::new("${{ failure() }}")) + .add_with(("method", "chat.postMessage")) + .add_with(("token", vars::SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN)) + .add_with(("payload", indoc::indoc!{r#" + channel: C04UDRNNJFQ + text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" + "#})) + } + + let script_step = add_api_keys(steps::script("./script/run-unit-evals")); + + Job::default() + .runs_on(runners::LINUX_DEFAULT) + .add_step(steps::checkout_repo()) + .add_step(steps::setup_cargo_config(Platform::Linux)) + .add_step(steps::cache_rust_dependencies_namespace()) + .map(steps::install_linux_dependencies) + .add_step(steps::cargo_install_nextest(Platform::Linux)) + .add_step(steps::clear_target_dir_if_large(Platform::Linux)) + .add_step(match commit { + Some(commit) => script_step.add_env(("UNIT_EVAL_COMMIT", commit)), + None => script_step, + }) + .add_step(send_failure_to_slack()) + .add_step(steps::cleanup_cargo_config(Platform::Linux)) } From e01e0b83c4ceeba28fef70385362ebf98aab92ed Mon Sep 17 00:00:00 2001 From: Lay Sheth Date: Wed, 12 Nov 2025 00:21:57 +0530 Subject: [PATCH 0024/1030] Avoid panics in LSP store path handling (#42117) Release Notes: - Fixed incorrect journal paths handling --- crates/journal/src/journal.rs | 73 +++++++++++++++++++++++++++++++-- crates/project/src/lsp_store.rs | 5 +-- docs/src/configuring-zed.md | 3 +- 3 files changed, 74 insertions(+), 7 deletions(-) diff --git a/crates/journal/src/journal.rs b/crates/journal/src/journal.rs index 9e73e0da550e806b4a642942766414a4b28249ae..2e30b91dab833d18f5fc9c35ad7ea4934d197fa8 100644 --- a/crates/journal/src/journal.rs +++ b/crates/journal/src/journal.rs @@ -173,9 +173,15 @@ pub fn new_journal_entry(workspace: &Workspace, window: &mut Window, cx: &mut Ap } fn journal_dir(path: &str) -> Option { - shellexpand::full(path) //TODO handle this better - .ok() - .map(|dir| Path::new(&dir.to_string()).to_path_buf().join("journal")) + let expanded = shellexpand::full(path).ok()?; + let base_path = Path::new(expanded.as_ref()); + let absolute_path = if base_path.is_absolute() 
{ + base_path.to_path_buf() + } else { + log::warn!("Invalid journal path {path:?} (not absolute), falling back to home directory",); + std::env::home_dir()? + }; + Some(absolute_path.join("journal")) } fn heading_entry(now: NaiveTime, hour_format: &HourFormat) -> String { @@ -224,4 +230,65 @@ mod tests { assert_eq!(actual_heading_entry, expected_heading_entry); } } + + mod journal_dir_tests { + use super::super::*; + + #[test] + #[cfg(target_family = "unix")] + fn test_absolute_unix_path() { + let result = journal_dir("/home/user"); + assert!(result.is_some()); + let path = result.unwrap(); + assert!(path.is_absolute()); + assert_eq!(path, PathBuf::from("/home/user/journal")); + } + + #[test] + fn test_tilde_expansion() { + let result = journal_dir("~/documents"); + assert!(result.is_some()); + let path = result.unwrap(); + + assert!(path.is_absolute(), "Tilde should expand to absolute path"); + + if let Some(home) = std::env::home_dir() { + assert_eq!(path, home.join("documents").join("journal")); + } + } + + #[test] + fn test_relative_path_falls_back_to_home() { + for relative_path in ["relative/path", "NONEXT/some/path", "../some/path"] { + let result = journal_dir(relative_path); + assert!(result.is_some(), "Failed for path: {}", relative_path); + let path = result.unwrap(); + + assert!( + path.is_absolute(), + "Path should be absolute for input '{}', got: {:?}", + relative_path, + path + ); + + if let Some(home) = std::env::home_dir() { + assert_eq!( + path, + home.join("journal"), + "Should fall back to home directory for input '{}'", + relative_path + ); + } + } + } + + #[test] + #[cfg(target_os = "windows")] + fn test_absolute_path_windows_style() { + let result = journal_dir("C:\\Users\\user\\Documents"); + assert!(result.is_some()); + let path = result.unwrap(); + assert_eq!(path, PathBuf::from("C:\\Users\\user\\Documents\\journal")); + } + } } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 90675e364b4d962b5c67cafb941b2b6cb9e1df9b..540a1a8eb0ac205d5f777e1728bbe7322bbe6187 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -7654,14 +7654,13 @@ impl LspStore { let uri = lsp::Uri::from_file_path(&abs_path) .ok() .with_context(|| format!("Failed to convert path to URI: {}", abs_path.display())) - .unwrap(); + .log_err()?; let next_snapshot = buffer.text_snapshot(); for language_server in language_servers { let language_server = language_server.clone(); let buffer_snapshots = self - .as_local_mut() - .unwrap() + .as_local_mut()? .buffer_snapshots .get_mut(&buffer.remote_id()) .and_then(|m| m.get_mut(&language_server.server_id()))?; diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 14f11df256167928931280cb906cd996615b101b..07d93fd6d167bafeb0a8e4bc72f80f52265edee1 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -2519,11 +2519,12 @@ Unspecified values have a `false` value, hints won't be toggled if all the modif "path": "~", "hour_format": "hour12" } + ``` ### Path -- Description: The path of the directory where journal entries are stored. +- Description: The path of the directory where journal entries are stored. If an invalid path is specified, the journal will fall back to using `~` (the home directory). 
- Setting: `path` - Default: `~` From 0d56ed7d910e70f2e255dbbd9d806d418db0c058 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 15:19:34 -0500 Subject: [PATCH 0025/1030] Only send unit eval failures to Slack for cron job (#42479) Release Notes: - N/A --- .github/workflows/run_cron_unit_evals.yml | 9 --------- .github/workflows/run_unit_evals.yml | 9 --------- .../xtask/src/tasks/workflows/run_agent_evals.rs | 16 ---------------- 3 files changed, 34 deletions(-) diff --git a/.github/workflows/run_cron_unit_evals.yml b/.github/workflows/run_cron_unit_evals.yml index 9137d1599c920d5f3c72ba7c884bc76d9aed6f54..a692a6707e3d223e7cea039419dc933f68e82896 100644 --- a/.github/workflows/run_cron_unit_evals.yml +++ b/.github/workflows/run_cron_unit_evals.yml @@ -50,15 +50,6 @@ jobs: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} - - name: run_agent_evals::unit_evals::send_failure_to_slack - if: ${{ failure() }} - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 - with: - method: chat.postMessage - token: ${{ secrets.SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN }} - payload: | - channel: C04UDRNNJFQ - text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" - name: steps::cleanup_cargo_config if: always() run: | diff --git a/.github/workflows/run_unit_evals.yml b/.github/workflows/run_unit_evals.yml index 9f2af35dca5429488e169fd1fe6d9ac098a5059a..b4da67e25c48909d5f01cdd6e8c2cbab9d0b8c67 100644 --- a/.github/workflows/run_unit_evals.yml +++ b/.github/workflows/run_unit_evals.yml @@ -60,15 +60,6 @@ jobs: GOOGLE_AI_API_KEY: ${{ secrets.GOOGLE_AI_API_KEY }} GOOGLE_CLOUD_PROJECT: ${{ secrets.GOOGLE_CLOUD_PROJECT }} UNIT_EVAL_COMMIT: ${{ inputs.commit_sha }} - - name: run_agent_evals::unit_evals::send_failure_to_slack - if: ${{ failure() }} - uses: slackapi/slack-github-action@b0fa283ad8fea605de13dc3f449259339835fc52 - with: - method: chat.postMessage - token: ${{ secrets.SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN }} - payload: | - channel: C04UDRNNJFQ - text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" - name: steps::cleanup_cargo_config if: always() run: | diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index b69216e5a00a61762625e92b2592fd4cbe0cef30..e3f001f8c2b722584a8cb117ad6980c71df95854 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -118,21 +118,6 @@ fn cron_unit_evals() -> NamedJob { } fn unit_evals(commit: Option<&Input>) -> Job { - fn send_failure_to_slack() -> Step { - named::uses( - "slackapi", - "slack-github-action", - "b0fa283ad8fea605de13dc3f449259339835fc52", - ) - .if_condition(Expression::new("${{ failure() }}")) - .add_with(("method", "chat.postMessage")) - .add_with(("token", vars::SLACK_APP_ZED_UNIT_EVALS_BOT_TOKEN)) - .add_with(("payload", indoc::indoc!{r#" - channel: C04UDRNNJFQ - text: "Unit Evals Failed: https://github.com/zed-industries/zed/actions/runs/${{ github.run_id }}" - "#})) - } - let script_step = add_api_keys(steps::script("./script/run-unit-evals")); Job::default() @@ -147,6 +132,5 @@ fn unit_evals(commit: Option<&Input>) -> Job { Some(commit) => script_step.add_env(("UNIT_EVAL_COMMIT", commit)), None => script_step, }) - .add_step(send_failure_to_slack()) 
.add_step(steps::cleanup_cargo_config(Platform::Linux)) } From b4b7a23c39b3339d09c307c386913713f4368a49 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Wed, 12 Nov 2025 02:04:37 +0530 Subject: [PATCH 0026/1030] editor: Improve multi-buffer header filename click to jump to the latest selection from that buffer (#42480) Closes https://github.com/zed-industries/zed/pull/42099 Regressed in https://github.com/zed-industries/zed/pull/42283 Release Notes: - Clicking the multi-buffer header file name or the "Open file" button now jumps to the most recent selection in that buffer, if one exists. --- crates/editor/src/element.rs | 145 ++++++++++++++++++++++++----------- 1 file changed, 102 insertions(+), 43 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 7442ccc7442a11ab2f845cc637e5ad416085af02..6fd259dae9333933fa7f29041c2deb591b42bf6d 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -6,10 +6,10 @@ use crate::{ EditDisplayMode, EditPrediction, Editor, EditorMode, EditorSettings, EditorSnapshot, EditorStyle, FILE_HEADER_HEIGHT, FocusedBlock, GutterDimensions, HalfPageDown, HalfPageUp, HandleInput, HoveredCursor, InlayHintRefreshReason, JumpData, LineDown, LineHighlight, LineUp, - MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, - OpenExcerptsSplit, PageDown, PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, - SelectPhase, SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, - SizingBehavior, SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, + MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, PageDown, + PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, SelectPhase, + SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, SizingBehavior, + SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, code_context_menus::{CodeActionsMenu, MENU_ASIDE_MAX_WIDTH, MENU_ASIDE_MIN_WIDTH, MENU_GAP}, display_map::{ Block, BlockContext, BlockStyle, ChunkRendererId, DisplaySnapshot, EditorMargins, @@ -3664,6 +3664,7 @@ impl EditorElement { row_block_types: &mut HashMap, selections: &[Selection], selected_buffer_ids: &Vec, + selection_anchors: &[Selection], is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -3739,7 +3740,13 @@ impl EditorElement { let selected = selected_buffer_ids.contains(&first_excerpt.buffer_id); let result = v_flex().id(block_id).w_full().pr(editor_margins.right); - let jump_data = header_jump_data(snapshot, block_row_start, *height, first_excerpt); + let jump_data = header_jump_data( + snapshot, + block_row_start, + *height, + first_excerpt, + selection_anchors, + ); result .child(self.render_buffer_header( first_excerpt, @@ -3774,7 +3781,13 @@ impl EditorElement { Block::BufferHeader { excerpt, height } => { let mut result = v_flex().id(block_id).w_full(); - let jump_data = header_jump_data(snapshot, block_row_start, *height, excerpt); + let jump_data = header_jump_data( + snapshot, + block_row_start, + *height, + excerpt, + selection_anchors, + ); if sticky_header_excerpt_id != Some(excerpt.id) { let selected = selected_buffer_ids.contains(&excerpt.buffer_id); @@ -4042,24 +4055,18 @@ impl EditorElement { ) .group_hover("", |div| div.underline()), ) - .on_click({ - let focus_handle = focus_handle.clone(); - move |event, window, cx| { - if event.modifiers().secondary() { - focus_handle.dispatch_action( - 
&OpenExcerptsSplit, - window, - cx, - ); - } else { - focus_handle.dispatch_action( - &OpenExcerpts, - window, - cx, - ); - } + .on_click(window.listener_for(&self.editor, { + let jump_data = jump_data.clone(); + + move |editor, e: &ClickEvent, window, cx| { + editor.open_excerpts_common( + Some(jump_data.clone()), + e.modifiers().secondary(), + window, + cx, + ); } - }), + })), ) .when_some(parent_path, |then, path| { then.child(div().child(path).text_color( @@ -4087,24 +4094,18 @@ impl EditorElement { cx, )), ) - .on_click({ - let focus_handle = focus_handle.clone(); - move |event, window, cx| { - if event.modifiers().secondary() { - focus_handle.dispatch_action( - &OpenExcerptsSplit, - window, - cx, - ); - } else { - focus_handle.dispatch_action( - &OpenExcerpts, - window, - cx, - ); - } + .on_click(window.listener_for(&self.editor, { + let jump_data = jump_data.clone(); + + move |editor, e: &ClickEvent, window, cx| { + editor.open_excerpts_common( + Some(jump_data.clone()), + e.modifiers().secondary(), + window, + cx, + ); } - }), + })), ) }, ) @@ -4250,6 +4251,7 @@ impl EditorElement { line_layouts: &mut [LineWithInvisibles], selections: &[Selection], selected_buffer_ids: &Vec, + selection_anchors: &[Selection], is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -4293,6 +4295,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4350,6 +4353,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4405,6 +4409,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4487,6 +4492,7 @@ impl EditorElement { hitbox: &Hitbox, selected_buffer_ids: &Vec, blocks: &[BlockLayout], + selection_anchors: &[Selection], window: &mut Window, cx: &mut App, ) -> AnyElement { @@ -4495,6 +4501,7 @@ impl EditorElement { DisplayRow(scroll_position.y as u32), FILE_HEADER_HEIGHT + MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, excerpt, + selection_anchors, ); let editor_bg_color = cx.theme().colors().editor_background; @@ -7798,7 +7805,34 @@ fn header_jump_data( block_row_start: DisplayRow, height: u32, for_excerpt: &ExcerptInfo, + selection_anchors: &[Selection], ) -> JumpData { + if let Some(cursor_anchor) = latest_anchor_for_buffer(selection_anchors, for_excerpt.buffer_id) + { + let buffer_point = + language::ToPoint::to_point(&cursor_anchor.text_anchor, &for_excerpt.buffer); + let multibuffer_point = snapshot + .buffer_snapshot() + .summary_for_anchor::(&cursor_anchor); + let display_row = snapshot + .display_snapshot + .point_to_display_point(multibuffer_point, Bias::Left) + .row() + .0; + let scroll_row = snapshot + .scroll_anchor + .scroll_position(&snapshot.display_snapshot) + .y as u32; + let line_offset_from_top = display_row.saturating_sub(scroll_row); + + return JumpData::MultiBufferPoint { + excerpt_id: cursor_anchor.excerpt_id, + anchor: cursor_anchor.text_anchor, + position: buffer_point, + line_offset_from_top, + }; + } + let range = &for_excerpt.range; let buffer = &for_excerpt.buffer; let jump_anchor = range.primary.start; @@ -7828,6 +7862,20 @@ fn header_jump_data( } } +fn latest_anchor_for_buffer( + selection_anchors: &[Selection], + buffer_id: BufferId, +) -> Option { + selection_anchors + .iter() + .filter_map(|selection| { + let 
head = selection.head(); + (head.buffer_id == Some(buffer_id)).then_some((selection.id, head)) + }) + .max_by_key(|(id, _)| *id) + .map(|(_, anchor)| anchor) +} + pub struct AcceptEditPredictionBinding(pub(crate) Option); impl AcceptEditPredictionBinding { @@ -9139,15 +9187,18 @@ impl Element for EditorElement { cx, ); - let (local_selections, selected_buffer_ids): ( + let (local_selections, selected_buffer_ids, selection_anchors): ( Vec>, Vec, + Arc<[Selection]>, ) = self .editor_with_selections(cx) .map(|editor| { editor.update(cx, |editor, cx| { let all_selections = editor.selections.all::(&snapshot.display_snapshot); + let all_anchor_selections = + editor.selections.all_anchors(&snapshot.display_snapshot); let selected_buffer_ids = if editor.buffer_kind(cx) == ItemBufferKind::Singleton { Vec::new() @@ -9176,10 +9227,16 @@ impl Element for EditorElement { selections .extend(editor.selections.pending(&snapshot.display_snapshot)); - (selections, selected_buffer_ids) + (selections, selected_buffer_ids, all_anchor_selections) }) }) - .unwrap_or_default(); + .unwrap_or_else(|| { + ( + Vec::new(), + Vec::new(), + Arc::<[Selection]>::from(Vec::new()), + ) + }); let (selections, mut active_rows, newest_selection_head) = self .layout_selections( @@ -9410,6 +9467,7 @@ impl Element for EditorElement { &mut line_layouts, &local_selections, &selected_buffer_ids, + selection_anchors.as_ref(), is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -9443,6 +9501,7 @@ impl Element for EditorElement { &hitbox, &selected_buffer_ids, &blocks, + selection_anchors.as_ref(), window, cx, ) From a56693d9e830fa4defc1b748144d571b9dc8271d Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 16:24:37 -0500 Subject: [PATCH 0027/1030] Fix panic when opening an invalid URL (#42483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now instead of a panic we see this: Screenshot 2025-11-11 at 3 47 25 PM Release Notes: - Trying to open invalid URLs in a browser now shows an error instead of panicking --- crates/gpui/src/platform/mac/platform.rs | 9 ++++++--- crates/zed/src/zed.rs | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/crates/gpui/src/platform/mac/platform.rs b/crates/gpui/src/platform/mac/platform.rs index 101520cdbbc220a49b1fa56584729ad93d507fe7..21dab9d3b617d505b80cb2d48b579916a9eb1238 100644 --- a/crates/gpui/src/platform/mac/platform.rs +++ b/crates/gpui/src/platform/mac/platform.rs @@ -651,9 +651,12 @@ impl Platform for MacPlatform { fn open_url(&self, url: &str) { unsafe { - let url = NSURL::alloc(nil) - .initWithString_(ns_string(url)) - .autorelease(); + let ns_url = NSURL::alloc(nil).initWithString_(ns_string(url)); + if ns_url.is_null() { + log::error!("Failed to create NSURL from string: {}", url); + return; + } + let url = ns_url.autorelease(); let workspace: id = msg_send![class!(NSWorkspace), sharedWorkspace]; msg_send![workspace, openURL: url] } diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 7cdaea920f4b90de4393dd08e0c855ecd1cb2f88..cbd6fba694136c87c64c4d6ca2bfee6d1e1072dd 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -726,7 +726,24 @@ fn register_actions( ..Default::default() }) }) - .register_action(|_, action: &OpenBrowser, _window, cx| cx.open_url(&action.url)) + .register_action(|workspace, action: &OpenBrowser, _window, cx| { + // Parse and validate the URL to ensure it's properly formatted + match url::Url::parse(&action.url) { + Ok(parsed_url) => { + // 
Use the parsed URL's string representation which is properly escaped + cx.open_url(parsed_url.as_str()); + } + Err(e) => { + workspace.show_error( + &anyhow::anyhow!( + "Opening this URL in a browser failed because the URL is invalid: {}\n\nError was: {e}", + action.url + ), + cx, + ); + } + } + }) .register_action(|workspace, _: &workspace::Open, window, cx| { telemetry::event!("Project Opened"); let paths = workspace.prompt_for_open_path( From f62bfe1dfa59a6f84412892f6f8726f6ffb92512 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 16:31:42 -0500 Subject: [PATCH 0028/1030] Use `enterprise_uri` for settings when provided (#42485) Closes #34945 Release Notes: - Fixed `enterprise_uri` not being used for GitHub settings URL when provided --- .../src/edit_prediction_button.rs | 114 +++++++++++++++++- 1 file changed, 109 insertions(+), 5 deletions(-) diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index a0bbe18b4bcaaf433b15db333b3e5a7eb8dfb4d0..685d408205863e5ad110a5e57891c0695f998cfb 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -43,7 +43,8 @@ actions!( ] ); -const COPILOT_SETTINGS_URL: &str = "https://github.com/settings/copilot"; +const COPILOT_SETTINGS_PATH: &str = "/settings/copilot"; +const COPILOT_SETTINGS_URL: &str = concat!("https://github.com", "/settings/copilot"); const PRIVACY_DOCS: &str = "https://zed.dev/docs/ai/privacy-and-security"; struct CopilotErrorToast; @@ -836,6 +837,16 @@ impl EditPredictionButton { window: &mut Window, cx: &mut Context, ) -> Entity { + let all_language_settings = all_language_settings(None, cx); + let copilot_config = copilot::copilot_chat::CopilotChatConfiguration { + enterprise_uri: all_language_settings + .edit_predictions + .copilot + .enterprise_uri + .clone(), + }; + let settings_url = copilot_settings_url(copilot_config.enterprise_uri.as_deref()); + ContextMenu::build(window, cx, |menu, window, cx| { let menu = self.build_language_settings_menu(menu, window, cx); let menu = @@ -844,10 +855,7 @@ impl EditPredictionButton { menu.separator() .link( "Go to Copilot Settings", - OpenBrowser { - url: COPILOT_SETTINGS_URL.to_string(), - } - .boxed_clone(), + OpenBrowser { url: settings_url }.boxed_clone(), ) .action("Sign Out", copilot::SignOut.boxed_clone()) }) @@ -1176,3 +1184,99 @@ fn toggle_edit_prediction_mode(fs: Arc, mode: EditPredictionsMode, cx: & }); } } + +fn copilot_settings_url(enterprise_uri: Option<&str>) -> String { + match enterprise_uri { + Some(uri) => { + format!("{}{}", uri.trim_end_matches('/'), COPILOT_SETTINGS_PATH) + } + None => COPILOT_SETTINGS_URL.to_string(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use gpui::TestAppContext; + + #[gpui::test] + async fn test_copilot_settings_url_with_enterprise_uri(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + }); + + cx.update_global(|settings_store: &mut SettingsStore, cx| { + settings_store + .set_user_settings( + r#"{"edit_predictions":{"copilot":{"enterprise_uri":"https://my-company.ghe.com"}}}"#, + cx, + ) + .unwrap(); + }); + + let url = cx.update(|cx| { + let all_language_settings = all_language_settings(None, cx); + copilot_settings_url( + all_language_settings + .edit_predictions + .copilot + .enterprise_uri + .as_deref(), + ) + }); + + assert_eq!(url, 
"https://my-company.ghe.com/settings/copilot"); + } + + #[gpui::test] + async fn test_copilot_settings_url_with_enterprise_uri_trailing_slash(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + }); + + cx.update_global(|settings_store: &mut SettingsStore, cx| { + settings_store + .set_user_settings( + r#"{"edit_predictions":{"copilot":{"enterprise_uri":"https://my-company.ghe.com/"}}}"#, + cx, + ) + .unwrap(); + }); + + let url = cx.update(|cx| { + let all_language_settings = all_language_settings(None, cx); + copilot_settings_url( + all_language_settings + .edit_predictions + .copilot + .enterprise_uri + .as_deref(), + ) + }); + + assert_eq!(url, "https://my-company.ghe.com/settings/copilot"); + } + + #[gpui::test] + async fn test_copilot_settings_url_without_enterprise_uri(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + }); + + let url = cx.update(|cx| { + let all_language_settings = all_language_settings(None, cx); + copilot_settings_url( + all_language_settings + .edit_predictions + .copilot + .enterprise_uri + .as_deref(), + ) + }); + + assert_eq!(url, "https://github.com/settings/copilot"); + } +} From da94f898e6318de5dc85fd8ed2fe38a85365d4a6 Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Tue, 11 Nov 2025 16:34:25 -0500 Subject: [PATCH 0029/1030] Add support for multi-word snippet prefixes (#42398) Supercedes #41126 Closes #39559, #35397, and #41426 Release Notes: - Added support for multi-word snippet prefixes --------- Co-authored-by: Agus Zubiaga Co-authored-by: Conrad Irwin Co-authored-by: Cole Miller --- .../agent_ui/src/acp/completion_provider.rs | 16 ++ .../src/context_picker/completion_provider.rs | 14 + crates/agent_ui/src/slash_command.rs | 4 + .../src/session/running/console.rs | 4 + crates/editor/src/code_completion_tests.rs | 2 + crates/editor/src/code_context_menus.rs | 201 ++++++------- crates/editor/src/editor.rs | 271 ++++++++++++------ crates/editor/src/editor_tests.rs | 271 ++++++++++++++++++ crates/editor/src/test/editor_test_context.rs | 11 + crates/inspector_ui/src/div_inspector.rs | 2 + crates/keymap_editor/src/keymap_editor.rs | 2 + crates/project/Cargo.toml | 2 + crates/project/src/lsp_store.rs | 6 + crates/project/src/project.rs | 16 ++ crates/snippet_provider/Cargo.toml | 3 + crates/snippet_provider/src/lib.rs | 13 + 16 files changed, 652 insertions(+), 186 deletions(-) diff --git a/crates/agent_ui/src/acp/completion_provider.rs b/crates/agent_ui/src/acp/completion_provider.rs index 408dbedcfdd4998ca8d2e094aab4799bad168629..e87526957ce844a10c7c4f07f7ec6790927b142e 100644 --- a/crates/agent_ui/src/acp/completion_provider.rs +++ b/crates/agent_ui/src/acp/completion_provider.rs @@ -109,6 +109,8 @@ impl ContextPickerCompletionProvider { icon_path: Some(mode.icon().path().into()), documentation: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, // This ensures that when a user accepts this completion, the // completion menu will still be shown after "@category " is @@ -146,6 +148,8 @@ impl ContextPickerCompletionProvider { documentation: None, insert_text_mode: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, icon_path: Some(icon_for_completion), confirm: Some(confirm_completion_callback( thread_entry.title().clone(), @@ -177,6 
+181,8 @@ impl ContextPickerCompletionProvider { documentation: None, insert_text_mode: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, icon_path: Some(icon_path), confirm: Some(confirm_completion_callback( rule.title, @@ -233,6 +239,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(completion_icon_path), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( file_name, @@ -284,6 +292,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(icon_path), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( symbol.name.into(), @@ -316,6 +326,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(icon_path), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( url_to_fetch.to_string().into(), @@ -384,6 +396,8 @@ impl ContextPickerCompletionProvider { icon_path: Some(action.icon().path().into()), documentation: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, // This ensures that when a user accepts this completion, the // completion menu will still be shown after "@category " is @@ -774,6 +788,8 @@ impl CompletionProvider for ContextPickerCompletionProvider { )), source: project::CompletionSource::Custom, icon_path: None, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(Arc::new({ let editor = editor.clone(); diff --git a/crates/agent_ui/src/context_picker/completion_provider.rs b/crates/agent_ui/src/context_picker/completion_provider.rs index 1fa128cde82dba900136ad6d136aad858512f169..5dee769b4d0f0d2556b407721eac5dc70f647060 100644 --- a/crates/agent_ui/src/context_picker/completion_provider.rs +++ b/crates/agent_ui/src/context_picker/completion_provider.rs @@ -278,6 +278,8 @@ impl ContextPickerCompletionProvider { icon_path: Some(mode.icon().path().into()), documentation: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, // This ensures that when a user accepts this completion, the // completion menu will still be shown after "@category " is @@ -386,6 +388,8 @@ impl ContextPickerCompletionProvider { icon_path: Some(action.icon().path().into()), documentation: None, source: project::CompletionSource::Custom, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, // This ensures that when a user accepts this completion, the // completion menu will still be shown after "@category " is @@ -417,6 +421,8 @@ impl ContextPickerCompletionProvider { replace_range: source_range.clone(), new_text, label: CodeLabel::plain(thread_entry.title().to_string(), None), + match_start: None, + snippet_deduplication_key: None, documentation: None, insert_text_mode: None, source: project::CompletionSource::Custom, @@ -484,6 +490,8 @@ impl ContextPickerCompletionProvider { replace_range: source_range.clone(), new_text, label: CodeLabel::plain(rules.title.to_string(), None), + match_start: None, + snippet_deduplication_key: None, documentation: None, insert_text_mode: None, source: project::CompletionSource::Custom, @@ -524,6 
+532,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(IconName::ToolWeb.path().into()), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( IconName::ToolWeb.path().into(), @@ -612,6 +622,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(completion_icon_path), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( crease_icon_path, @@ -689,6 +701,8 @@ impl ContextPickerCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: Some(IconName::Code.path().into()), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(confirm_completion_callback( IconName::Code.path().into(), diff --git a/crates/agent_ui/src/slash_command.rs b/crates/agent_ui/src/slash_command.rs index c2f26c4f2ed33860196790746dd296e8c617b810..7d3ea0105a0aafb4cfccf4076cb95e28c99dec28 100644 --- a/crates/agent_ui/src/slash_command.rs +++ b/crates/agent_ui/src/slash_command.rs @@ -127,6 +127,8 @@ impl SlashCommandCompletionProvider { new_text, label: command.label(cx), icon_path: None, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm, source: CompletionSource::Custom, @@ -232,6 +234,8 @@ impl SlashCommandCompletionProvider { icon_path: None, new_text, documentation: None, + match_start: None, + snippet_deduplication_key: None, confirm, insert_text_mode: None, source: CompletionSource::Custom, diff --git a/crates/debugger_ui/src/session/running/console.rs b/crates/debugger_ui/src/session/running/console.rs index e157d832b440b8016f152c88b376a9418ee3c843..23b3ca481722c7869caf43958754889f92dc2fe5 100644 --- a/crates/debugger_ui/src/session/running/console.rs +++ b/crates/debugger_ui/src/session/running/console.rs @@ -677,6 +677,8 @@ impl ConsoleQueryBarCompletionProvider { ), new_text: string_match.string.clone(), label: CodeLabel::plain(string_match.string.clone(), None), + match_start: None, + snippet_deduplication_key: None, icon_path: None, documentation: Some(CompletionDocumentation::MultiLineMarkdown( variable_value.into(), @@ -790,6 +792,8 @@ impl ConsoleQueryBarCompletionProvider { documentation: completion.detail.map(|detail| { CompletionDocumentation::MultiLineMarkdown(detail.into()) }), + match_start: None, + snippet_deduplication_key: None, confirm: None, source: project::CompletionSource::Dap { sort_text }, insert_text_mode: None, diff --git a/crates/editor/src/code_completion_tests.rs b/crates/editor/src/code_completion_tests.rs index ec97c0ebb31952da9ad8e9e6f4f75b4b0078c4a3..364b310f367ff195f9aee8693a815be94db0b44d 100644 --- a/crates/editor/src/code_completion_tests.rs +++ b/crates/editor/src/code_completion_tests.rs @@ -305,6 +305,8 @@ impl CompletionBuilder { icon_path: None, insert_text_mode: None, confirm: None, + match_start: None, + snippet_deduplication_key: None, } } } diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index 9e29cd955a80c7025ef2ff1ee5aaf38c665bed1a..ac8f26764b5a037a0a1618052a34466effd80563 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -17,7 +17,6 @@ use project::{CompletionDisplayOptions, CompletionSource}; use task::DebugScenario; use task::TaskContext; -use std::collections::VecDeque; use 
std::sync::Arc; use std::sync::atomic::{AtomicBool, Ordering}; use std::{ @@ -36,12 +35,13 @@ use util::ResultExt; use crate::hover_popover::{hover_markdown_style, open_markdown_url}; use crate::{ - CodeActionProvider, CompletionId, CompletionItemKind, CompletionProvider, DisplayRow, Editor, - EditorStyle, ResolvedTasks, + CodeActionProvider, CompletionId, CompletionProvider, DisplayRow, Editor, EditorStyle, + ResolvedTasks, actions::{ConfirmCodeAction, ConfirmCompletion}, split_words, styled_runs_for_code_label, }; use crate::{CodeActionSource, EditorSettings}; +use collections::{HashSet, VecDeque}; use settings::{Settings, SnippetSortOrder}; pub const MENU_GAP: Pixels = px(4.); @@ -220,7 +220,9 @@ pub struct CompletionsMenu { pub is_incomplete: bool, pub buffer: Entity, pub completions: Rc>>, - match_candidates: Arc<[StringMatchCandidate]>, + /// String match candidate for each completion, grouped by `match_start`. + match_candidates: Arc<[(Option, Vec)]>, + /// Entries displayed in the menu, which is a filtered and sorted subset of `match_candidates`. pub entries: Rc>>, pub selected_item: usize, filter_task: Task<()>, @@ -308,6 +310,8 @@ impl CompletionsMenu { .iter() .enumerate() .map(|(id, completion)| StringMatchCandidate::new(id, completion.label.filter_text())) + .into_group_map_by(|candidate| completions[candidate.id].match_start) + .into_iter() .collect(); let completions_menu = Self { @@ -355,6 +359,8 @@ impl CompletionsMenu { replace_range: selection.start.text_anchor..selection.end.text_anchor, new_text: choice.to_string(), label: CodeLabel::plain(choice.to_string(), None), + match_start: None, + snippet_deduplication_key: None, icon_path: None, documentation: None, confirm: None, @@ -363,11 +369,14 @@ impl CompletionsMenu { }) .collect(); - let match_candidates = choices - .iter() - .enumerate() - .map(|(id, completion)| StringMatchCandidate::new(id, completion)) - .collect(); + let match_candidates = Arc::new([( + None, + choices + .iter() + .enumerate() + .map(|(id, completion)| StringMatchCandidate::new(id, completion)) + .collect(), + )]); let entries = choices .iter() .enumerate() @@ -948,7 +957,7 @@ impl CompletionsMenu { } let mat = &self.entries.borrow()[self.selected_item]; - let completions = self.completions.borrow_mut(); + let completions = self.completions.borrow(); let multiline_docs = match completions[mat.candidate_id].documentation.as_ref() { Some(CompletionDocumentation::MultiLinePlainText(text)) => div().child(text.clone()), Some(CompletionDocumentation::SingleLineAndMultiLinePlainText { @@ -1026,57 +1035,74 @@ impl CompletionsMenu { pub fn filter( &mut self, - query: Option>, + query: Arc, + query_end: text::Anchor, + buffer: &Entity, provider: Option>, window: &mut Window, cx: &mut Context, ) { self.cancel_filter.store(true, Ordering::Relaxed); - if let Some(query) = query { - self.cancel_filter = Arc::new(AtomicBool::new(false)); - let matches = self.do_async_filtering(query, cx); - let id = self.id; - self.filter_task = cx.spawn_in(window, async move |editor, cx| { - let matches = matches.await; - editor - .update_in(cx, |editor, window, cx| { - editor.with_completions_menu_matching_id(id, |this| { - if let Some(this) = this { - this.set_filter_results(matches, provider, window, cx); - } - }); - }) - .ok(); - }); - } else { - self.filter_task = Task::ready(()); - let matches = self.unfiltered_matches(); - self.set_filter_results(matches, provider, window, cx); - } + self.cancel_filter = Arc::new(AtomicBool::new(false)); + let matches = 
self.do_async_filtering(query, query_end, buffer, cx); + let id = self.id; + self.filter_task = cx.spawn_in(window, async move |editor, cx| { + let matches = matches.await; + editor + .update_in(cx, |editor, window, cx| { + editor.with_completions_menu_matching_id(id, |this| { + if let Some(this) = this { + this.set_filter_results(matches, provider, window, cx); + } + }); + }) + .ok(); + }); } pub fn do_async_filtering( &self, query: Arc, + query_end: text::Anchor, + buffer: &Entity, cx: &Context, ) -> Task> { - let matches_task = cx.background_spawn({ - let query = query.clone(); - let match_candidates = self.match_candidates.clone(); - let cancel_filter = self.cancel_filter.clone(); - let background_executor = cx.background_executor().clone(); - async move { - fuzzy::match_strings( - &match_candidates, - &query, - query.chars().any(|c| c.is_uppercase()), - false, - 1000, - &cancel_filter, - background_executor, - ) - .await + let buffer_snapshot = buffer.read(cx).snapshot(); + let background_executor = cx.background_executor().clone(); + let match_candidates = self.match_candidates.clone(); + let cancel_filter = self.cancel_filter.clone(); + let default_query = query.clone(); + + let matches_task = cx.background_spawn(async move { + let queries_and_candidates = match_candidates + .iter() + .map(|(query_start, candidates)| { + let query_for_batch = match query_start { + Some(start) => { + Arc::new(buffer_snapshot.text_for_range(*start..query_end).collect()) + } + None => default_query.clone(), + }; + (query_for_batch, candidates) + }) + .collect_vec(); + + let mut results = vec![]; + for (query, match_candidates) in queries_and_candidates { + results.extend( + fuzzy::match_strings( + &match_candidates, + &query, + query.chars().any(|c| c.is_uppercase()), + false, + 1000, + &cancel_filter, + background_executor.clone(), + ) + .await, + ); } + results }); let completions = self.completions.clone(); @@ -1085,45 +1111,31 @@ impl CompletionsMenu { cx.foreground_executor().spawn(async move { let mut matches = matches_task.await; + let completions_ref = completions.borrow(); + if sort_completions { matches = Self::sort_string_matches( matches, - Some(&query), + Some(&query), // used for non-snippets only snippet_sort_order, - completions.borrow().as_ref(), + &completions_ref, ); } + // Remove duplicate snippet prefixes (e.g., "cool code" will match + // the text "c c" in two places; we should only show the longer one) + let mut snippets_seen = HashSet::<(usize, usize)>::default(); + matches.retain(|result| { + match completions_ref[result.candidate_id].snippet_deduplication_key { + Some(key) => snippets_seen.insert(key), + None => true, + } + }); + matches }) } - /// Like `do_async_filtering` but there is no filter query, so no need to spawn tasks. 
- pub fn unfiltered_matches(&self) -> Vec { - let mut matches = self - .match_candidates - .iter() - .enumerate() - .map(|(candidate_id, candidate)| StringMatch { - candidate_id, - score: Default::default(), - positions: Default::default(), - string: candidate.string.clone(), - }) - .collect(); - - if self.sort_completions { - matches = Self::sort_string_matches( - matches, - None, - self.snippet_sort_order, - self.completions.borrow().as_ref(), - ); - } - - matches - } - pub fn set_filter_results( &mut self, matches: Vec, @@ -1166,28 +1178,13 @@ impl CompletionsMenu { .and_then(|c| c.to_lowercase().next()); if snippet_sort_order == SnippetSortOrder::None { - matches.retain(|string_match| { - let completion = &completions[string_match.candidate_id]; - - let is_snippet = matches!( - &completion.source, - CompletionSource::Lsp { lsp_completion, .. } - if lsp_completion.kind == Some(CompletionItemKind::SNIPPET) - ); - - !is_snippet - }); + matches + .retain(|string_match| !completions[string_match.candidate_id].is_snippet_kind()); } matches.sort_unstable_by_key(|string_match| { let completion = &completions[string_match.candidate_id]; - let is_snippet = matches!( - &completion.source, - CompletionSource::Lsp { lsp_completion, .. } - if lsp_completion.kind == Some(CompletionItemKind::SNIPPET) - ); - let sort_text = match &completion.source { CompletionSource::Lsp { lsp_completion, .. } => lsp_completion.sort_text.as_deref(), CompletionSource::Dap { sort_text } => Some(sort_text.as_str()), @@ -1199,14 +1196,17 @@ impl CompletionsMenu { let score = string_match.score; let sort_score = Reverse(OrderedFloat(score)); - let query_start_doesnt_match_split_words = query_start_lower - .map(|query_char| { - !split_words(&string_match.string).any(|word| { - word.chars().next().and_then(|c| c.to_lowercase().next()) - == Some(query_char) + // Snippets do their own first-letter matching logic elsewhere. + let is_snippet = completion.is_snippet_kind(); + let query_start_doesnt_match_split_words = !is_snippet + && query_start_lower + .map(|query_char| { + !split_words(&string_match.string).any(|word| { + word.chars().next().and_then(|c| c.to_lowercase().next()) + == Some(query_char) + }) }) - }) - .unwrap_or(false); + .unwrap_or(false); if query_start_doesnt_match_split_words { MatchTier::OtherMatch { sort_score } @@ -1218,6 +1218,7 @@ impl CompletionsMenu { SnippetSortOrder::None => Reverse(0), }; let sort_positions = string_match.positions.clone(); + // This exact matching won't work for multi-word snippets, but it's fine let sort_exact = Reverse(if Some(completion.label.filter_text()) == query { 1 } else { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index f7eb309fd1b67272103133d47303ef7f0b9e5f35..d4647300996ecfb14dbc470ef8d9cc8a5db3d1dd 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -5517,7 +5517,14 @@ impl Editor { if let Some(CodeContextMenu::Completions(menu)) = self.context_menu.borrow_mut().as_mut() { if filter_completions { - menu.filter(query.clone(), provider.clone(), window, cx); + menu.filter( + query.clone().unwrap_or_default(), + buffer_position.text_anchor, + &buffer, + provider.clone(), + window, + cx, + ); } // When `is_incomplete` is false, no need to re-query completions when the current query // is a suffix of the initial query. @@ -5526,7 +5533,7 @@ impl Editor { // If the new query is a suffix of the old query (typing more characters) and // the previous result was complete, the existing completions can be filtered. 
// - // Note that this is always true for snippet completions. + // Note that snippet completions are always complete. let query_matches = match (&menu.initial_query, &query) { (Some(initial_query), Some(query)) => query.starts_with(initial_query.as_ref()), (None, _) => true, @@ -5656,12 +5663,15 @@ impl Editor { }; let mut words = if load_word_completions { - cx.background_spawn(async move { - buffer_snapshot.words_in_range(WordsQuery { - fuzzy_contents: None, - range: word_search_range, - skip_digits, - }) + cx.background_spawn({ + let buffer_snapshot = buffer_snapshot.clone(); + async move { + buffer_snapshot.words_in_range(WordsQuery { + fuzzy_contents: None, + range: word_search_range, + skip_digits, + }) + } }) } else { Task::ready(BTreeMap::default()) @@ -5671,8 +5681,11 @@ impl Editor { && provider.show_snippets() && let Some(project) = self.project() { + let char_classifier = buffer_snapshot + .char_classifier_at(buffer_position) + .scope_context(Some(CharScopeContext::Completion)); project.update(cx, |project, cx| { - snippet_completions(project, &buffer, buffer_position, cx) + snippet_completions(project, &buffer, buffer_position, char_classifier, cx) }) } else { Task::ready(Ok(CompletionResponse { @@ -5727,6 +5740,8 @@ impl Editor { replace_range: word_replace_range.clone(), new_text: word.clone(), label: CodeLabel::plain(word, None), + match_start: None, + snippet_deduplication_key: None, icon_path: None, documentation: None, source: CompletionSource::BufferWord { @@ -5775,11 +5790,12 @@ impl Editor { ); let query = if filter_completions { query } else { None }; - let matches_task = if let Some(query) = query { - menu.do_async_filtering(query, cx) - } else { - Task::ready(menu.unfiltered_matches()) - }; + let matches_task = menu.do_async_filtering( + query.unwrap_or_default(), + buffer_position, + &buffer, + cx, + ); (menu, matches_task) }) else { return; @@ -5796,7 +5812,7 @@ impl Editor { return; }; - // Only valid to take prev_menu because it the new menu is immediately set + // Only valid to take prev_menu because either the new menu is immediately set // below, or the menu is hidden. 
if let Some(CodeContextMenu::Completions(prev_menu)) = editor.context_menu.borrow_mut().take() @@ -23201,10 +23217,11 @@ impl CodeActionProvider for Entity { fn snippet_completions( project: &Project, buffer: &Entity, - buffer_position: text::Anchor, + buffer_anchor: text::Anchor, + classifier: CharClassifier, cx: &mut App, ) -> Task> { - let languages = buffer.read(cx).languages_at(buffer_position); + let languages = buffer.read(cx).languages_at(buffer_anchor); let snippet_store = project.snippets().read(cx); let scopes: Vec<_> = languages @@ -23233,97 +23250,146 @@ fn snippet_completions( let executor = cx.background_executor().clone(); cx.background_spawn(async move { + let is_word_char = |c| classifier.is_word(c); + let mut is_incomplete = false; let mut completions: Vec = Vec::new(); - for (scope, snippets) in scopes.into_iter() { - let classifier = - CharClassifier::new(Some(scope)).scope_context(Some(CharScopeContext::Completion)); - - const MAX_WORD_PREFIX_LEN: usize = 128; - let last_word: String = snapshot - .reversed_chars_for_range(text::Anchor::MIN..buffer_position) - .take(MAX_WORD_PREFIX_LEN) - .take_while(|c| classifier.is_word(*c)) - .collect::() - .chars() - .rev() - .collect(); - if last_word.is_empty() { - return Ok(CompletionResponse { - completions: vec![], - display_options: CompletionDisplayOptions::default(), - is_incomplete: true, - }); - } + const MAX_PREFIX_LEN: usize = 128; + let buffer_offset = text::ToOffset::to_offset(&buffer_anchor, &snapshot); + let window_start = buffer_offset.saturating_sub(MAX_PREFIX_LEN); + let window_start = snapshot.clip_offset(window_start, Bias::Left); - let as_offset = text::ToOffset::to_offset(&buffer_position, &snapshot); - let to_lsp = |point: &text::Anchor| { - let end = text::ToPointUtf16::to_point_utf16(point, &snapshot); - point_to_lsp(end) - }; - let lsp_end = to_lsp(&buffer_position); + let max_buffer_window: String = snapshot + .text_for_range(window_start..buffer_offset) + .collect(); + + if max_buffer_window.is_empty() { + return Ok(CompletionResponse { + completions: vec![], + display_options: CompletionDisplayOptions::default(), + is_incomplete: true, + }); + } - let candidates = snippets + for (_scope, snippets) in scopes.into_iter() { + // Sort snippets by word count to match longer snippet prefixes first. + let mut sorted_snippet_candidates = snippets .iter() .enumerate() - .flat_map(|(ix, snippet)| { + .flat_map(|(snippet_ix, snippet)| { snippet .prefix .iter() - .map(move |prefix| StringMatchCandidate::new(ix, prefix)) + .enumerate() + .map(move |(prefix_ix, prefix)| { + let word_count = + snippet_candidate_suffixes(prefix, is_word_char).count(); + ((snippet_ix, prefix_ix), prefix, word_count) + }) }) - .collect::>(); + .collect_vec(); + sorted_snippet_candidates + .sort_unstable_by_key(|(_, _, word_count)| Reverse(*word_count)); + + // Each prefix may be matched multiple times; the completion menu must filter out duplicates. 
+ + let buffer_windows = snippet_candidate_suffixes(&max_buffer_window, is_word_char) + .take( + sorted_snippet_candidates + .first() + .map(|(_, _, word_count)| *word_count) + .unwrap_or_default(), + ) + .collect_vec(); const MAX_RESULTS: usize = 100; - let mut matches = fuzzy::match_strings( - &candidates, - &last_word, - last_word.chars().any(|c| c.is_uppercase()), - true, - MAX_RESULTS, - &Default::default(), - executor.clone(), - ) - .await; + // Each match also remembers how many characters from the buffer it consumed + let mut matches: Vec<(StringMatch, usize)> = vec![]; + + let mut snippet_list_cutoff_index = 0; + for (buffer_index, buffer_window) in buffer_windows.iter().enumerate().rev() { + let word_count = buffer_index + 1; + // Increase `snippet_list_cutoff_index` until we have all of the + // snippets with sufficiently many words. + while sorted_snippet_candidates + .get(snippet_list_cutoff_index) + .is_some_and(|(_ix, _prefix, snippet_word_count)| { + *snippet_word_count >= word_count + }) + { + snippet_list_cutoff_index += 1; + } - if matches.len() >= MAX_RESULTS { - is_incomplete = true; - } + // Take only the candidates with at least `word_count` many words + let snippet_candidates_at_word_len = + &sorted_snippet_candidates[..snippet_list_cutoff_index]; - // Remove all candidates where the query's start does not match the start of any word in the candidate - if let Some(query_start) = last_word.chars().next() { - matches.retain(|string_match| { - split_words(&string_match.string).any(|word| { - // Check that the first codepoint of the word as lowercase matches the first - // codepoint of the query as lowercase - word.chars() - .flat_map(|codepoint| codepoint.to_lowercase()) - .zip(query_start.to_lowercase()) - .all(|(word_cp, query_cp)| word_cp == query_cp) + let candidates = snippet_candidates_at_word_len + .iter() + .map(|(_snippet_ix, prefix, _snippet_word_count)| prefix) + .enumerate() // index in `sorted_snippet_candidates` + // First char must match + .filter(|(_ix, prefix)| { + itertools::equal( + prefix + .chars() + .next() + .into_iter() + .flat_map(|c| c.to_lowercase()), + buffer_window + .chars() + .next() + .into_iter() + .flat_map(|c| c.to_lowercase()), + ) }) - }); + .map(|(ix, prefix)| StringMatchCandidate::new(ix, prefix)) + .collect::>(); + + matches.extend( + fuzzy::match_strings( + &candidates, + &buffer_window, + buffer_window.chars().any(|c| c.is_uppercase()), + true, + MAX_RESULTS - matches.len(), // always prioritize longer snippets + &Default::default(), + executor.clone(), + ) + .await + .into_iter() + .map(|string_match| (string_match, buffer_window.len())), + ); + + if matches.len() >= MAX_RESULTS { + break; + } } - let matched_strings = matches - .into_iter() - .map(|m| m.string) - .collect::>(); + let to_lsp = |point: &text::Anchor| { + let end = text::ToPointUtf16::to_point_utf16(point, &snapshot); + point_to_lsp(end) + }; + let lsp_end = to_lsp(&buffer_anchor); - completions.extend(snippets.iter().filter_map(|snippet| { - let matching_prefix = snippet - .prefix - .iter() - .find(|prefix| matched_strings.contains(*prefix))?; - let start = as_offset - last_word.len(); + if matches.len() >= MAX_RESULTS { + is_incomplete = true; + } + + completions.extend(matches.iter().map(|(string_match, buffer_window_len)| { + let ((snippet_index, prefix_index), matching_prefix, _snippet_word_count) = + sorted_snippet_candidates[string_match.candidate_id]; + let snippet = &snippets[snippet_index]; + let start = buffer_offset - buffer_window_len; let start = 
snapshot.anchor_before(start); - let range = start..buffer_position; + let range = start..buffer_anchor; let lsp_start = to_lsp(&start); let lsp_range = lsp::Range { start: lsp_start, end: lsp_end, }; - Some(Completion { + Completion { replace_range: range, new_text: snippet.body.clone(), source: CompletionSource::Lsp { @@ -23353,7 +23419,11 @@ fn snippet_completions( }), lsp_defaults: None, }, - label: CodeLabel::plain(matching_prefix.clone(), None), + label: CodeLabel { + text: matching_prefix.clone(), + runs: Vec::new(), + filter_range: 0..matching_prefix.len(), + }, icon_path: None, documentation: Some(CompletionDocumentation::SingleLineAndMultiLinePlainText { single_line: snippet.name.clone().into(), @@ -23364,8 +23434,10 @@ fn snippet_completions( }), insert_text_mode: None, confirm: None, - }) - })) + match_start: Some(start), + snippet_deduplication_key: Some((snippet_index, prefix_index)), + } + })); } Ok(CompletionResponse { @@ -24611,6 +24683,33 @@ pub(crate) fn split_words(text: &str) -> impl std::iter::Iterator + }) } +/// Given a string of text immediately before the cursor, iterates over possible +/// strings a snippet could match to. More precisely: returns an iterator over +/// suffixes of `text` created by splitting at word boundaries (before & after +/// every non-word character). +/// +/// Shorter suffixes are returned first. +pub(crate) fn snippet_candidate_suffixes( + text: &str, + is_word_char: impl Fn(char) -> bool, +) -> impl std::iter::Iterator { + let mut prev_index = text.len(); + let mut prev_codepoint = None; + text.char_indices() + .rev() + .chain([(0, '\0')]) + .filter_map(move |(index, codepoint)| { + let prev_index = std::mem::replace(&mut prev_index, index); + let prev_codepoint = prev_codepoint.replace(codepoint)?; + if is_word_char(prev_codepoint) && is_word_char(codepoint) { + None + } else { + let chunk = &text[prev_index..]; // go to end of string + Some(chunk) + } + }) +} + pub trait RangeToAnchorExt: Sized { fn to_anchors(self, snapshot: &MultiBufferSnapshot) -> Range; diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 4510e61b74c9bd9ca8ace634f7554f63c4981dd7..36d7023db33587e43260640782f47522dbb41c6b 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -11414,6 +11414,53 @@ async fn test_snippet_indentation(cx: &mut TestAppContext) { ˇ"}); } +#[gpui::test] +async fn test_snippet_with_multi_word_prefix(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + + let mut cx = EditorTestContext::new(cx).await; + cx.update_editor(|editor, _, cx| { + editor.project().unwrap().update(cx, |project, cx| { + project.snippets().update(cx, |snippets, _cx| { + let snippet = project::snippet_provider::Snippet { + prefix: vec!["multi word".to_string()], + body: "this is many words".to_string(), + description: Some("description".to_string()), + name: "multi-word snippet test".to_string(), + }; + snippets.add_snippet_for_test( + None, + PathBuf::from("test_snippets.json"), + vec![Arc::new(snippet)], + ); + }); + }) + }); + + for (input_to_simulate, should_match_snippet) in [ + ("m", true), + ("m ", true), + ("m w", true), + ("aa m w", true), + ("aa m g", false), + ] { + cx.set_state("ˇ"); + cx.simulate_input(input_to_simulate); // fails correctly + + cx.update_editor(|editor, _, _| { + let Some(CodeContextMenu::Completions(context_menu)) = &*editor.context_menu.borrow() + else { + assert!(!should_match_snippet); // no completions! 
don't even show the menu + return; + }; + assert!(context_menu.visible()); + let completions = context_menu.completions.borrow(); + + assert_eq!(!completions.is_empty(), should_match_snippet); + }); + } +} + #[gpui::test] async fn test_document_format_during_save(cx: &mut TestAppContext) { init_test(cx, |_| {}); @@ -17369,6 +17416,41 @@ fn test_split_words() { assert_eq!(split(":do_the_thing"), &[":", "do_", "the_", "thing"]); } +#[test] +fn test_split_words_for_snippet_prefix() { + fn split(text: &str) -> Vec<&str> { + snippet_candidate_suffixes(text, |c| c.is_alphanumeric() || c == '_').collect() + } + + assert_eq!(split("HelloWorld"), &["HelloWorld"]); + assert_eq!(split("hello_world"), &["hello_world"]); + assert_eq!(split("_hello_world_"), &["_hello_world_"]); + assert_eq!(split("Hello_World"), &["Hello_World"]); + assert_eq!(split("helloWOrld"), &["helloWOrld"]); + assert_eq!(split("helloworld"), &["helloworld"]); + assert_eq!( + split("this@is!@#$^many . symbols"), + &[ + "symbols", + " symbols", + ". symbols", + " . symbols", + " . symbols", + " . symbols", + "many . symbols", + "^many . symbols", + "$^many . symbols", + "#$^many . symbols", + "@#$^many . symbols", + "!@#$^many . symbols", + "is!@#$^many . symbols", + "@is!@#$^many . symbols", + "this@is!@#$^many . symbols", + ], + ); + assert_eq!(split("a.s"), &["s", ".s", "a.s"]); +} + #[gpui::test] async fn test_move_to_enclosing_bracket(cx: &mut TestAppContext) { init_test(cx, |_| {}); @@ -25620,6 +25702,195 @@ pub fn check_displayed_completions(expected: Vec<&'static str>, cx: &mut EditorL }); } +#[gpui::test] +async fn test_mixed_completions_with_multi_word_snippet(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + let mut cx = EditorLspTestContext::new_rust( + lsp::ServerCapabilities { + completion_provider: Some(lsp::CompletionOptions { + ..Default::default() + }), + ..Default::default() + }, + cx, + ) + .await; + cx.lsp + .set_request_handler::(move |_, _| async move { + Ok(Some(lsp::CompletionResponse::Array(vec![ + lsp::CompletionItem { + label: "unsafe".into(), + text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { + range: lsp::Range { + start: lsp::Position { + line: 0, + character: 9, + }, + end: lsp::Position { + line: 0, + character: 11, + }, + }, + new_text: "unsafe".to_string(), + })), + insert_text_mode: Some(lsp::InsertTextMode::AS_IS), + ..Default::default() + }, + ]))) + }); + + cx.update_editor(|editor, _, cx| { + editor.project().unwrap().update(cx, |project, cx| { + project.snippets().update(cx, |snippets, _cx| { + snippets.add_snippet_for_test( + None, + PathBuf::from("test_snippets.json"), + vec![ + Arc::new(project::snippet_provider::Snippet { + prefix: vec![ + "unlimited word count".to_string(), + "unlimit word count".to_string(), + "unlimited unknown".to_string(), + ], + body: "this is many words".to_string(), + description: Some("description".to_string()), + name: "multi-word snippet test".to_string(), + }), + Arc::new(project::snippet_provider::Snippet { + prefix: vec!["unsnip".to_string(), "@few".to_string()], + body: "fewer words".to_string(), + description: Some("alt description".to_string()), + name: "other name".to_string(), + }), + Arc::new(project::snippet_provider::Snippet { + prefix: vec!["ab aa".to_string()], + body: "abcd".to_string(), + description: None, + name: "alphabet".to_string(), + }), + ], + ); + }); + }) + }); + + let get_completions = |cx: &mut EditorLspTestContext| { + cx.update_editor(|editor, _, _| match &*editor.context_menu.borrow() { + 
Some(CodeContextMenu::Completions(context_menu)) => { + let entries = context_menu.entries.borrow(); + entries + .iter() + .map(|entry| entry.string.clone()) + .collect_vec() + } + _ => vec![], + }) + }; + + // snippets: + // @foo + // foo bar + // + // when typing: + // + // when typing: + // - if I type a symbol "open the completions with snippets only" + // - if I type a word character "open the completions menu" (if it had been open snippets only, clear it out) + // + // stuff we need: + // - filtering logic change? + // - remember how far back the completion started. + + let test_cases: &[(&str, &[&str])] = &[ + ( + "un", + &[ + "unsafe", + "unlimit word count", + "unlimited unknown", + "unlimited word count", + "unsnip", + ], + ), + ( + "u ", + &[ + "unlimit word count", + "unlimited unknown", + "unlimited word count", + ], + ), + ("u a", &["ab aa", "unsafe"]), // unsAfe + ( + "u u", + &[ + "unsafe", + "unlimit word count", + "unlimited unknown", // ranked highest among snippets + "unlimited word count", + "unsnip", + ], + ), + ("uw c", &["unlimit word count", "unlimited word count"]), + ( + "u w", + &[ + "unlimit word count", + "unlimited word count", + "unlimited unknown", + ], + ), + ("u w ", &["unlimit word count", "unlimited word count"]), + ( + "u ", + &[ + "unlimit word count", + "unlimited unknown", + "unlimited word count", + ], + ), + ("wor", &[]), + ("uf", &["unsafe"]), + ("af", &["unsafe"]), + ("afu", &[]), + ( + "ue", + &["unsafe", "unlimited unknown", "unlimited word count"], + ), + ("@", &["@few"]), + ("@few", &["@few"]), + ("@ ", &[]), + ("a@", &["@few"]), + ("a@f", &["@few", "unsafe"]), + ("a@fw", &["@few"]), + ("a", &["ab aa", "unsafe"]), + ("aa", &["ab aa"]), + ("aaa", &["ab aa"]), + ("ab", &["ab aa"]), + ("ab ", &["ab aa"]), + ("ab a", &["ab aa", "unsafe"]), + ("ab ab", &["ab aa"]), + ("ab ab aa", &["ab aa"]), + ]; + + for &(input_to_simulate, expected_completions) in test_cases { + cx.set_state("fn a() { ˇ }\n"); + for c in input_to_simulate.split("") { + cx.simulate_input(c); + cx.run_until_parked(); + } + let expected_completions = expected_completions + .iter() + .map(|s| s.to_string()) + .collect_vec(); + assert_eq!( + get_completions(&mut cx), + expected_completions, + "< actual / expected >, input = {input_to_simulate:?}", + ); + } +} + /// Handle completion request passing a marked string specifying where the completion /// should be triggered from using '|' character, what range should be replaced, and what completions /// should be returned using '<' and '>' to delimit the range. 
diff --git a/crates/editor/src/test/editor_test_context.rs b/crates/editor/src/test/editor_test_context.rs index 7f5bb227fb98d1ebe5df51d59bdae22825bc4fef..200c1f08cfb87dec47d66760c385aa357e45ce95 100644 --- a/crates/editor/src/test/editor_test_context.rs +++ b/crates/editor/src/test/editor_test_context.rs @@ -59,6 +59,17 @@ impl EditorTestContext { }) .await .unwrap(); + + let language = project + .read_with(cx, |project, _cx| { + project.languages().language_for_name("Plain Text") + }) + .await + .unwrap(); + buffer.update(cx, |buffer, cx| { + buffer.set_language(Some(language), cx); + }); + let editor = cx.add_window(|window, cx| { let editor = build_editor_with_project( project, diff --git a/crates/inspector_ui/src/div_inspector.rs b/crates/inspector_ui/src/div_inspector.rs index da99c5b92c1e6ad4d8a3e92ed2e565bcb518e227..8c75c2674dfe0c0b7cd7e42897f868b3990b54b8 100644 --- a/crates/inspector_ui/src/div_inspector.rs +++ b/crates/inspector_ui/src/div_inspector.rs @@ -664,6 +664,8 @@ impl CompletionProvider for RustStyleCompletionProvider { replace_range: replace_range.clone(), new_text: format!(".{}()", method.name), label: CodeLabel::plain(method.name.to_string(), None), + match_start: None, + snippet_deduplication_key: None, icon_path: None, documentation: method.documentation.map(|documentation| { CompletionDocumentation::MultiLineMarkdown(documentation.into()) diff --git a/crates/keymap_editor/src/keymap_editor.rs b/crates/keymap_editor/src/keymap_editor.rs index 3d840de64d67f5bad7646339d66229ff47831028..b5b9f92a491b1f8f5b3f68828095b5a7b6cecb39 100644 --- a/crates/keymap_editor/src/keymap_editor.rs +++ b/crates/keymap_editor/src/keymap_editor.rs @@ -2993,6 +2993,8 @@ impl CompletionProvider for KeyContextCompletionProvider { documentation: None, source: project::CompletionSource::Custom, icon_path: None, + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, confirm: None, }) diff --git a/crates/project/Cargo.toml b/crates/project/Cargo.toml index d9285a8c24ec5130dd8ce8abf5bbd77c830e0f3f..9b67fde1e0bd31856bfa19d01818c1a5c6564218 100644 --- a/crates/project/Cargo.toml +++ b/crates/project/Cargo.toml @@ -18,6 +18,7 @@ test-support = [ "client/test-support", "language/test-support", "settings/test-support", + "snippet_provider/test-support", "text/test-support", "prettier/test-support", "worktree/test-support", @@ -107,6 +108,7 @@ pretty_assertions.workspace = true release_channel.workspace = true rpc = { workspace = true, features = ["test-support"] } settings = { workspace = true, features = ["test-support"] } +snippet_provider = { workspace = true, features = ["test-support"] } unindent.workspace = true util = { workspace = true, features = ["test-support"] } worktree = { workspace = true, features = ["test-support"] } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 540a1a8eb0ac205d5f777e1728bbe7322bbe6187..358bf164d9a26c58f1bbf1bd5829184f6d86e7e4 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -10143,6 +10143,8 @@ impl LspStore { source: completion.source, documentation: None, label: CodeLabel::default(), + match_start: None, + snippet_deduplication_key: None, insert_text_mode: None, icon_path: None, confirm: None, @@ -12847,6 +12849,8 @@ async fn populate_labels_for_completions( source: completion.source, icon_path: None, confirm: None, + match_start: None, + snippet_deduplication_key: None, }); } None => { @@ -12861,6 +12865,8 @@ async fn populate_labels_for_completions( 
insert_text_mode: None, icon_path: None, confirm: None, + match_start: None, + snippet_deduplication_key: None, }); } } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 13ed42847d522c371226988d8ca133a1748d5fec..25afd501d6c66d699a9238314f6a3d6886b8baa1 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -103,6 +103,7 @@ use search_history::SearchHistory; use settings::{InvalidSettingsError, RegisterSetting, Settings, SettingsLocation, SettingsStore}; use smol::channel::Receiver; use snippet::Snippet; +pub use snippet_provider; use snippet_provider::SnippetProvider; use std::{ borrow::Cow, @@ -476,6 +477,12 @@ pub struct Completion { pub source: CompletionSource, /// A path to an icon for this completion that is shown in the menu. pub icon_path: Option, + /// Text starting here and ending at the cursor will be used as the query for filtering this completion. + /// + /// If None, the start of the surrounding word is used. + pub match_start: Option, + /// Key used for de-duplicating snippets. If None, always considered unique. + pub snippet_deduplication_key: Option<(usize, usize)>, /// Whether to adjust indentation (the default) or not. pub insert_text_mode: Option, /// An optional callback to invoke when this completion is confirmed. @@ -5643,6 +5650,15 @@ impl Completion { } /// Whether this completion is a snippet. + pub fn is_snippet_kind(&self) -> bool { + matches!( + &self.source, + CompletionSource::Lsp { lsp_completion, .. } + if lsp_completion.kind == Some(CompletionItemKind::SNIPPET) + ) + } + + /// Whether this completion is a snippet or snippet-style LSP completion. pub fn is_snippet(&self) -> bool { self.source // `lsp::CompletionListItemDefaults` has `insert_text_format` field diff --git a/crates/snippet_provider/Cargo.toml b/crates/snippet_provider/Cargo.toml index d71439118e90213335213e1365c766eb760bff44..c1f04117d483998ad076e9f4ed2c8d9677695503 100644 --- a/crates/snippet_provider/Cargo.toml +++ b/crates/snippet_provider/Cargo.toml @@ -8,6 +8,9 @@ license = "GPL-3.0-or-later" [lints] workspace = true +[features] +test-support = [] + [dependencies] anyhow.workspace = true collections.workspace = true diff --git a/crates/snippet_provider/src/lib.rs b/crates/snippet_provider/src/lib.rs index eac06924a7906aba08d90c0d1c3d1f1743531954..64711cfc3a7247f6250b65e4f7325dd0bfdc1dcb 100644 --- a/crates/snippet_provider/src/lib.rs +++ b/crates/snippet_provider/src/lib.rs @@ -235,6 +235,19 @@ impl SnippetProvider { user_snippets } + #[cfg(any(test, feature = "test-support"))] + pub fn add_snippet_for_test( + &mut self, + language: SnippetKind, + path: PathBuf, + snippet: Vec>, + ) { + self.snippets + .entry(language) + .or_default() + .insert(path, snippet); + } + pub fn snippets_for(&self, language: SnippetKind, cx: &App) -> Vec> { let mut requested_snippets = self.lookup_snippets::(&language, cx); From 854c6873c799c955f4c8030c2de2e26a6682a24e Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 11 Nov 2025 22:42:59 +0100 Subject: [PATCH 0030/1030] Revert "gpui: Fix `RefCell already borrowed` in `WindowsPlatform::run`" (#42481) Reverts zed-industries/zed#42440 There are invalid temporaries in here keeping the borrows alive for longer --- crates/gpui/src/platform/windows/events.rs | 14 ++++-- crates/gpui/src/platform/windows/platform.rs | 50 +++++++++----------- crates/gpui/src/platform/windows/window.rs | 10 +--- 3 files changed, 32 insertions(+), 42 deletions(-) diff --git a/crates/gpui/src/platform/windows/events.rs 
b/crates/gpui/src/platform/windows/events.rs index cc17f19bcfac86a6f8ac31ec1059d76c24e79695..4e6df63106f4c650ad3130e39d410670ddc4687d 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -487,12 +487,14 @@ impl WindowsWindowInner { let scale_factor = lock.scale_factor; let wheel_scroll_amount = match modifiers.shift { true => { - self.system_settings() + self.system_settings + .borrow() .mouse_wheel_settings .wheel_scroll_chars } false => { - self.system_settings() + self.system_settings + .borrow() .mouse_wheel_settings .wheel_scroll_lines } @@ -539,7 +541,8 @@ impl WindowsWindowInner { }; let scale_factor = lock.scale_factor; let wheel_scroll_chars = self - .system_settings() + .system_settings + .borrow() .mouse_wheel_settings .wheel_scroll_chars; drop(lock); @@ -674,7 +677,8 @@ impl WindowsWindowInner { // used by Chrome. However, it may result in one row of pixels being obscured // in our client area. But as Chrome says, "there seems to be no better solution." if is_maximized - && let Some(ref taskbar_position) = self.system_settings().auto_hide_taskbar_position + && let Some(ref taskbar_position) = + self.system_settings.borrow().auto_hide_taskbar_position { // For the auto-hide taskbar, adjust in by 1 pixel on taskbar edge, // so the window isn't treated as a "fullscreen app", which would cause @@ -1068,7 +1072,7 @@ impl WindowsWindowInner { lock.border_offset.update(handle).log_err(); // system settings may emit a window message which wants to take the refcell lock, so drop it drop(lock); - self.system_settings_mut().update(display, wparam.0); + self.system_settings.borrow_mut().update(display, wparam.0); } else { self.handle_system_theme_changed(handle, lparam)?; }; diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 72f427beb55b18ff5b94a1a90e334e07045b8726..b985cc14b01b1171d4013bf5c41a0c5199565503 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -342,8 +342,9 @@ impl Platform for WindowsPlatform { } } - self.inner - .with_callback(|callbacks| &mut callbacks.quit, |callback| callback()); + if let Some(ref mut callback) = self.inner.state.borrow_mut().callbacks.quit { + callback(); + } } fn quit(&self) { @@ -577,13 +578,14 @@ impl Platform for WindowsPlatform { fn set_cursor_style(&self, style: CursorStyle) { let hcursor = load_cursor(style); - if self.inner.state.borrow_mut().current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { + let mut lock = self.inner.state.borrow_mut(); + if lock.current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { self.post_message( WM_GPUI_CURSOR_STYLE_CHANGED, WPARAM(0), LPARAM(hcursor.map_or(0, |c| c.0 as isize)), ); - self.inner.state.borrow_mut().current_cursor = hcursor; + lock.current_cursor = hcursor; } } @@ -722,18 +724,6 @@ impl WindowsPlatformInner { })) } - /// Calls `project` to project to the corresponding callback field, removes it from callbacks, calls `f` with the callback and then puts the callback back. 
- fn with_callback( - &self, - project: impl Fn(&mut PlatformCallbacks) -> &mut Option, - f: impl FnOnce(&mut T), - ) { - if let Some(mut callback) = project(&mut self.state.borrow_mut().callbacks).take() { - f(&mut callback); - *project(&mut self.state.borrow_mut().callbacks) = Some(callback) - } - } - fn handle_msg( self: &Rc, handle: HWND, @@ -817,36 +807,40 @@ impl WindowsPlatformInner { } fn handle_dock_action_event(&self, action_idx: usize) -> Option { - let Some(action) = self - .state - .borrow_mut() + let mut lock = self.state.borrow_mut(); + let mut callback = lock.callbacks.app_menu_action.take()?; + let Some(action) = lock .jump_list .dock_menus .get(action_idx) .map(|dock_menu| dock_menu.action.boxed_clone()) else { + lock.callbacks.app_menu_action = Some(callback); log::error!("Dock menu for index {action_idx} not found"); return Some(1); }; - self.with_callback( - |callbacks| &mut callbacks.app_menu_action, - |callback| callback(&*action), - ); + drop(lock); + callback(&*action); + self.state.borrow_mut().callbacks.app_menu_action = Some(callback); Some(0) } fn handle_keyboard_layout_change(&self) -> Option { - self.with_callback( - |callbacks| &mut callbacks.keyboard_layout_change, - |callback| callback(), - ); + let mut callback = self + .state + .borrow_mut() + .callbacks + .keyboard_layout_change + .take()?; + callback(); + self.state.borrow_mut().callbacks.keyboard_layout_change = Some(callback); Some(0) } fn handle_device_lost(&self, lparam: LPARAM) -> Option { + let mut lock = self.state.borrow_mut(); let directx_devices = lparam.0 as *const DirectXDevices; let directx_devices = unsafe { &*directx_devices }; - let mut lock = self.state.borrow_mut(); lock.directx_devices.take(); lock.directx_devices = Some(directx_devices.clone()); diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index 4b89fcffb39d9bfbc0734977cec16a00984f5c9a..0050fa4bc0e96b8702314f33637db67998b5941d 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -63,7 +63,7 @@ pub(crate) struct WindowsWindowInner { hwnd: HWND, drop_target_helper: IDropTargetHelper, pub(crate) state: RefCell, - system_settings: RefCell, + pub(crate) system_settings: RefCell, pub(crate) handle: AnyWindowHandle, pub(crate) hide_title_bar: bool, pub(crate) is_movable: bool, @@ -321,14 +321,6 @@ impl WindowsWindowInner { } Ok(()) } - - pub(crate) fn system_settings(&self) -> std::cell::Ref<'_, WindowsSystemSettings> { - self.system_settings.borrow() - } - - pub(crate) fn system_settings_mut(&self) -> std::cell::RefMut<'_, WindowsSystemSettings> { - self.system_settings.borrow_mut() - } } #[derive(Default)] From 2ad7ecbcf02541d3a8eeecffbdff649f998d26a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20C=C3=A1rdenas?= Date: Tue, 11 Nov 2025 16:53:40 -0500 Subject: [PATCH 0031/1030] project_panel: Add `auto_open` settings (#40435) - Based on #40234, and improvement of #40331 Release Notes: - Added granular settings to control when files auto-open in the project panel (project_panel.auto_open.on_create, on_paste, on_drop) Screenshot_2025-10-16_17-28-31 --------- Co-authored-by: Smit Barmase --- Cargo.lock | 1 + assets/settings/default.json | 11 +- crates/migrator/src/migrations.rs | 6 + .../src/migrations/m_2025_11_12/settings.rs | 84 ++++++ crates/migrator/src/migrator.rs | 53 ++++ crates/project_panel/Cargo.toml | 1 + crates/project_panel/src/project_panel.rs | 29 +- .../src/project_panel_settings.rs | 35 ++- 
.../project_panel/src/project_panel_tests.rs | 257 +++++++++++++++++- .../src/settings_content/workspace.rs | 23 +- crates/settings/src/vscode_import.rs | 2 +- crates/settings_ui/src/page_data.rs | 48 +++- docs/src/configuring-zed.md | 26 +- 13 files changed, 542 insertions(+), 34 deletions(-) create mode 100644 crates/migrator/src/migrations/m_2025_11_12/settings.rs diff --git a/Cargo.lock b/Cargo.lock index a3300a818c12f39406cc39848cae86eeb26a0a56..865dfe211ef606f4469be6017129dfac2916522b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13078,6 +13078,7 @@ dependencies = [ "settings", "smallvec", "telemetry", + "tempfile", "theme", "ui", "util", diff --git a/assets/settings/default.json b/assets/settings/default.json index d8c800081246dcf937f7380399d726dd3d349679..70011f3209c7f64fd4e86d3acbb62a9ff2d5a487 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -748,8 +748,15 @@ "hide_root": false, // Whether to hide the hidden entries in the project panel. "hide_hidden": false, - // Whether to automatically open files when pasting them in the project panel. - "open_file_on_paste": true + // Settings for automatically opening files. + "auto_open": { + // Whether to automatically open newly created files in the editor. + "on_create": true, + // Whether to automatically open files after pasting or duplicating them. + "on_paste": true, + // Whether to automatically open files dropped from external sources. + "on_drop": true + } }, "outline_panel": { // Whether to show the outline panel button in the status bar diff --git a/crates/migrator/src/migrations.rs b/crates/migrator/src/migrations.rs index e4358b36b94c9a738ad784eb7269652b29e7cdfb..2587e7a30829d4fa0e0832b91ab0294a86abc97e 100644 --- a/crates/migrator/src/migrations.rs +++ b/crates/migrator/src/migrations.rs @@ -135,3 +135,9 @@ pub(crate) mod m_2025_10_21 { pub(crate) use settings::make_relative_line_numbers_an_enum; } + +pub(crate) mod m_2025_11_12 { + mod settings; + + pub(crate) use settings::SETTINGS_PATTERNS; +} diff --git a/crates/migrator/src/migrations/m_2025_11_12/settings.rs b/crates/migrator/src/migrations/m_2025_11_12/settings.rs new file mode 100644 index 0000000000000000000000000000000000000000..6483f9e44bec64407313334e7b78d181e7a05815 --- /dev/null +++ b/crates/migrator/src/migrations/m_2025_11_12/settings.rs @@ -0,0 +1,84 @@ +use std::ops::Range; +use tree_sitter::{Query, QueryMatch}; + +use crate::MigrationPatterns; +use crate::patterns::SETTINGS_NESTED_KEY_VALUE_PATTERN; + +pub const SETTINGS_PATTERNS: MigrationPatterns = &[ + ( + SETTINGS_NESTED_KEY_VALUE_PATTERN, + rename_open_file_on_paste_setting, + ), + ( + SETTINGS_NESTED_KEY_VALUE_PATTERN, + replace_open_file_on_paste_setting_value, + ), +]; + +fn rename_open_file_on_paste_setting( + contents: &str, + mat: &QueryMatch, + query: &Query, +) -> Option<(Range, String)> { + if !is_project_panel_open_file_on_paste(contents, mat, query) { + return None; + } + + let setting_name_ix = query.capture_index_for_name("setting_name")?; + let setting_name_range = mat + .nodes_for_capture_index(setting_name_ix) + .next()? 
+ .byte_range(); + + Some((setting_name_range, "auto_open".to_string())) +} + +fn replace_open_file_on_paste_setting_value( + contents: &str, + mat: &QueryMatch, + query: &Query, +) -> Option<(Range, String)> { + if !is_project_panel_open_file_on_paste(contents, mat, query) { + return None; + } + + let value_ix = query.capture_index_for_name("setting_value")?; + let value_node = mat.nodes_for_capture_index(value_ix).next()?; + let value_range = value_node.byte_range(); + let value_text = contents.get(value_range.clone())?.trim(); + + let normalized_value = match value_text { + "true" => "true", + "false" => "false", + _ => return None, + }; + + Some(( + value_range, + format!("{{ \"on_paste\": {normalized_value} }}"), + )) +} + +fn is_project_panel_open_file_on_paste(contents: &str, mat: &QueryMatch, query: &Query) -> bool { + let parent_key_ix = match query.capture_index_for_name("parent_key") { + Some(ix) => ix, + None => return false, + }; + let parent_range = match mat.nodes_for_capture_index(parent_key_ix).next() { + Some(node) => node.byte_range(), + None => return false, + }; + if contents.get(parent_range) != Some("project_panel") { + return false; + } + + let setting_name_ix = match query.capture_index_for_name("setting_name") { + Some(ix) => ix, + None => return false, + }; + let setting_name_range = match mat.nodes_for_capture_index(setting_name_ix).next() { + Some(node) => node.byte_range(), + None => return false, + }; + contents.get(setting_name_range) == Some("open_file_on_paste") +} diff --git a/crates/migrator/src/migrator.rs b/crates/migrator/src/migrator.rs index 3f5c1edaa7939e442c3e5c007579516fcdeb2151..74b73114cae81b57e5d0dc4227bafcd2cca31d10 100644 --- a/crates/migrator/src/migrator.rs +++ b/crates/migrator/src/migrator.rs @@ -215,6 +215,10 @@ pub fn migrate_settings(text: &str) -> Result> { MigrationType::Json(migrations::m_2025_10_16::restore_code_actions_on_format), MigrationType::Json(migrations::m_2025_10_17::make_file_finder_include_ignored_an_enum), MigrationType::Json(migrations::m_2025_10_21::make_relative_line_numbers_an_enum), + MigrationType::TreeSitter( + migrations::m_2025_11_12::SETTINGS_PATTERNS, + &SETTINGS_QUERY_2025_11_12, + ), ]; run_migrations(text, migrations) } @@ -333,6 +337,10 @@ define_query!( SETTINGS_QUERY_2025_10_03, migrations::m_2025_10_03::SETTINGS_PATTERNS ); +define_query!( + SETTINGS_QUERY_2025_11_12, + migrations::m_2025_11_12::SETTINGS_PATTERNS +); // custom query static EDIT_PREDICTION_SETTINGS_MIGRATION_QUERY: LazyLock = LazyLock::new(|| { @@ -2193,4 +2201,49 @@ mod tests { ), ); } + + #[test] + fn test_project_panel_open_file_on_paste_migration() { + assert_migrate_settings( + &r#" + { + "project_panel": { + "open_file_on_paste": true + } + } + "# + .unindent(), + Some( + &r#" + { + "project_panel": { + "auto_open": { "on_paste": true } + } + } + "# + .unindent(), + ), + ); + + assert_migrate_settings( + &r#" + { + "project_panel": { + "open_file_on_paste": false + } + } + "# + .unindent(), + Some( + &r#" + { + "project_panel": { + "auto_open": { "on_paste": false } + } + } + "# + .unindent(), + ), + ); + } } diff --git a/crates/project_panel/Cargo.toml b/crates/project_panel/Cargo.toml index a1238990db8617977494d151b1ab9e46a17d715f..2c47efd0b0e2490bbfd6125069fa5ca1438ffb51 100644 --- a/crates/project_panel/Cargo.toml +++ b/crates/project_panel/Cargo.toml @@ -53,4 +53,5 @@ editor = { workspace = true, features = ["test-support"] } gpui = { workspace = true, features = ["test-support"] } language = { workspace = true, features = 
["test-support"] } serde_json.workspace = true +tempfile.workspace = true workspace = { workspace = true, features = ["test-support"] } diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 38fd1d08c9802bd04c7e5faf60c171d492ed996f..8830de5aeffcd26b0f5c342fc1c8d16cdb762b40 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1655,7 +1655,10 @@ impl ProjectPanel { } project_panel.update_visible_entries(None, false, false, window, cx); if is_new_entry && !is_dir { - project_panel.open_entry(new_entry.id, true, false, cx); + let settings = ProjectPanelSettings::get_global(cx); + if settings.auto_open.should_open_on_create() { + project_panel.open_entry(new_entry.id, true, false, cx); + } } cx.notify(); })?; @@ -2709,15 +2712,16 @@ impl ProjectPanel { if item_count == 1 { // open entry if not dir, setting is enabled, and only focus if rename is not pending - if !entry.is_dir() - && ProjectPanelSettings::get_global(cx).open_file_on_paste - { - project_panel.open_entry( - entry.id, - disambiguation_range.is_none(), - false, - cx, - ); + if !entry.is_dir() { + let settings = ProjectPanelSettings::get_global(cx); + if settings.auto_open.should_open_on_paste() { + project_panel.open_entry( + entry.id, + disambiguation_range.is_none(), + false, + cx, + ); + } } // if only one entry was pasted and it was disambiguated, open the rename editor @@ -3593,7 +3597,10 @@ impl ProjectPanel { let opened_entries = task.await.with_context(|| "failed to copy external paths")?; this.update(cx, |this, cx| { if open_file_after_drop && !opened_entries.is_empty() { - this.open_entry(opened_entries[0], true, false, cx); + let settings = ProjectPanelSettings::get_global(cx); + if settings.auto_open.should_open_on_drop() { + this.open_entry(opened_entries[0], true, false, cx); + } } }) } diff --git a/crates/project_panel/src/project_panel_settings.rs b/crates/project_panel/src/project_panel_settings.rs index 623fdda310ec872ee3919cb944d8f1f817d10448..266ab761a103fa4ca2a2e9a4e09b96514bfd25c1 100644 --- a/crates/project_panel/src/project_panel_settings.rs +++ b/crates/project_panel/src/project_panel_settings.rs @@ -32,7 +32,7 @@ pub struct ProjectPanelSettings { pub hide_root: bool, pub hide_hidden: bool, pub drag_and_drop: bool, - pub open_file_on_paste: bool, + pub auto_open: AutoOpenSettings, } #[derive(Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] @@ -48,6 +48,30 @@ pub struct ScrollbarSettings { pub show: Option, } +#[derive(Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] +pub struct AutoOpenSettings { + pub on_create: bool, + pub on_paste: bool, + pub on_drop: bool, +} + +impl AutoOpenSettings { + #[inline] + pub fn should_open_on_create(self) -> bool { + self.on_create + } + + #[inline] + pub fn should_open_on_paste(self) -> bool { + self.on_paste + } + + #[inline] + pub fn should_open_on_drop(self) -> bool { + self.on_drop + } +} + impl ScrollbarVisibility for ProjectPanelSettings { fn visibility(&self, cx: &ui::App) -> ShowScrollbar { self.scrollbar @@ -83,7 +107,14 @@ impl Settings for ProjectPanelSettings { hide_root: project_panel.hide_root.unwrap(), hide_hidden: project_panel.hide_hidden.unwrap(), drag_and_drop: project_panel.drag_and_drop.unwrap(), - open_file_on_paste: project_panel.open_file_on_paste.unwrap(), + auto_open: { + let auto_open = project_panel.auto_open.unwrap(); + AutoOpenSettings { + on_create: auto_open.on_create.unwrap(), + on_paste: 
auto_open.on_paste.unwrap(), + on_drop: auto_open.on_drop.unwrap(), + } + }, } } } diff --git a/crates/project_panel/src/project_panel_tests.rs b/crates/project_panel/src/project_panel_tests.rs index 51f028afd5ac07c15e55f15d68f75293fab3481a..675ed9c35208917aa80002d9daa7932f92a29495 100644 --- a/crates/project_panel/src/project_panel_tests.rs +++ b/crates/project_panel/src/project_panel_tests.rs @@ -4,7 +4,7 @@ use gpui::{Empty, Entity, TestAppContext, VisualTestContext, WindowHandle}; use pretty_assertions::assert_eq; use project::FakeFs; use serde_json::json; -use settings::SettingsStore; +use settings::{ProjectPanelAutoOpenSettings, SettingsStore}; use std::path::{Path, PathBuf}; use util::{path, paths::PathStyle, rel_path::rel_path}; use workspace::{ @@ -1998,6 +1998,248 @@ async fn test_remove_opened_file(cx: &mut gpui::TestAppContext) { ensure_no_open_items_and_panes(&workspace, cx); } +#[gpui::test] +async fn test_auto_open_new_file_when_enabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + ProjectPanelAutoOpenSettings { + on_create: Some(true), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree(path!("/root"), json!({})).await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + panel.update_in(cx, |panel, window, cx| panel.new_file(&NewFile, window, cx)); + cx.run_until_parked(); + panel + .update_in(cx, |panel, window, cx| { + panel.filename_editor.update(cx, |editor, cx| { + editor.set_text("auto-open.rs", window, cx); + }); + panel.confirm_edit(true, window, cx).unwrap() + }) + .await + .unwrap(); + cx.run_until_parked(); + + ensure_single_file_is_opened(&workspace, "auto-open.rs", cx); +} + +#[gpui::test] +async fn test_auto_open_new_file_when_disabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + ProjectPanelAutoOpenSettings { + on_create: Some(false), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree(path!("/root"), json!({})).await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + panel.update_in(cx, |panel, window, cx| panel.new_file(&NewFile, window, cx)); + cx.run_until_parked(); + panel + .update_in(cx, |panel, window, cx| { + panel.filename_editor.update(cx, |editor, cx| { + editor.set_text("manual-open.rs", window, cx); + }); + panel.confirm_edit(true, window, cx).unwrap() + }) + .await + .unwrap(); + cx.run_until_parked(); + + ensure_no_open_items_and_panes(&workspace, cx); +} + +#[gpui::test] +async fn test_auto_open_on_paste_when_enabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + ProjectPanelAutoOpenSettings { + on_paste: Some(true), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/root"), + json!({ + "src": { + "original.rs": "" + }, + "target": {} + }), + ) + .await; + + let project = Project::test(fs.clone(), 
[path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + toggle_expand_dir(&panel, "root/src", cx); + toggle_expand_dir(&panel, "root/target", cx); + + select_path(&panel, "root/src/original.rs", cx); + panel.update_in(cx, |panel, window, cx| { + panel.copy(&Default::default(), window, cx); + }); + + select_path(&panel, "root/target", cx); + panel.update_in(cx, |panel, window, cx| { + panel.paste(&Default::default(), window, cx); + }); + cx.executor().run_until_parked(); + + ensure_single_file_is_opened(&workspace, "target/original.rs", cx); +} + +#[gpui::test] +async fn test_auto_open_on_paste_when_disabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + ProjectPanelAutoOpenSettings { + on_paste: Some(false), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/root"), + json!({ + "src": { + "original.rs": "" + }, + "target": {} + }), + ) + .await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + toggle_expand_dir(&panel, "root/src", cx); + toggle_expand_dir(&panel, "root/target", cx); + + select_path(&panel, "root/src/original.rs", cx); + panel.update_in(cx, |panel, window, cx| { + panel.copy(&Default::default(), window, cx); + }); + + select_path(&panel, "root/target", cx); + panel.update_in(cx, |panel, window, cx| { + panel.paste(&Default::default(), window, cx); + }); + cx.executor().run_until_parked(); + + ensure_no_open_items_and_panes(&workspace, cx); + assert!( + find_project_entry(&panel, "root/target/original.rs", cx).is_some(), + "Pasted entry should exist even when auto-open is disabled" + ); +} + +#[gpui::test] +async fn test_auto_open_on_drop_when_enabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + ProjectPanelAutoOpenSettings { + on_drop: Some(true), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree(path!("/root"), json!({})).await; + + let temp_dir = tempfile::tempdir().unwrap(); + let external_path = temp_dir.path().join("dropped.rs"); + std::fs::write(&external_path, "// dropped").unwrap(); + fs.insert_tree_from_real_fs(temp_dir.path(), temp_dir.path()) + .await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + let root_entry = find_project_entry(&panel, "root", cx).unwrap(); + panel.update_in(cx, |panel, window, cx| { + panel.drop_external_files(std::slice::from_ref(&external_path), root_entry, window, cx); + }); + cx.executor().run_until_parked(); + + ensure_single_file_is_opened(&workspace, "dropped.rs", cx); +} + +#[gpui::test] +async fn test_auto_open_on_drop_when_disabled(cx: &mut gpui::TestAppContext) { + init_test_with_editor(cx); + set_auto_open_settings( + cx, + 
ProjectPanelAutoOpenSettings { + on_drop: Some(false), + ..Default::default() + }, + ); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree(path!("/root"), json!({})).await; + + let temp_dir = tempfile::tempdir().unwrap(); + let external_path = temp_dir.path().join("manual.rs"); + std::fs::write(&external_path, "// dropped").unwrap(); + fs.insert_tree_from_real_fs(temp_dir.path(), temp_dir.path()) + .await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + let root_entry = find_project_entry(&panel, "root", cx).unwrap(); + panel.update_in(cx, |panel, window, cx| { + panel.drop_external_files(std::slice::from_ref(&external_path), root_entry, window, cx); + }); + cx.executor().run_until_parked(); + + ensure_no_open_items_and_panes(&workspace, cx); + assert!( + find_project_entry(&panel, "root/manual.rs", cx).is_some(), + "Dropped entry should exist even when auto-open is disabled" + ); +} + #[gpui::test] async fn test_create_duplicate_items(cx: &mut gpui::TestAppContext) { init_test_with_editor(cx); @@ -7368,6 +7610,19 @@ fn init_test_with_editor(cx: &mut TestAppContext) { }); } +fn set_auto_open_settings( + cx: &mut TestAppContext, + auto_open_settings: ProjectPanelAutoOpenSettings, +) { + cx.update(|cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |settings| { + settings.project_panel.get_or_insert_default().auto_open = Some(auto_open_settings); + }); + }) + }); +} + fn ensure_single_file_is_opened( window: &WindowHandle, expected_path: &str, diff --git a/crates/settings/src/settings_content/workspace.rs b/crates/settings/src/settings_content/workspace.rs index c901d7010b37c685180ca67a3c4775da41be87ee..01c40528cb4a9b614270efbbf0d39b1b424bb7dc 100644 --- a/crates/settings/src/settings_content/workspace.rs +++ b/crates/settings/src/settings_content/workspace.rs @@ -510,6 +510,23 @@ impl OnLastWindowClosed { } } +#[skip_serializing_none] +#[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] +pub struct ProjectPanelAutoOpenSettings { + /// Whether to automatically open newly created files in the editor. + /// + /// Default: true + pub on_create: Option, + /// Whether to automatically open files after pasting or duplicating them. + /// + /// Default: true + pub on_paste: Option, + /// Whether to automatically open files dropped from external sources. + /// + /// Default: true + pub on_drop: Option, +} + #[skip_serializing_none] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct ProjectPanelSettingsContent { @@ -590,10 +607,8 @@ pub struct ProjectPanelSettingsContent { /// /// Default: true pub drag_and_drop: Option, - /// Whether to automatically open files when pasting them in the project panel. - /// - /// Default: true - pub open_file_on_paste: Option, + /// Settings for automatically opening files. 
+ pub auto_open: Option, } #[derive( diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index cbffb33b1795dbf71e48df8089c472ee534306c1..31f1ab82b50b5fca32203c770cd41795e1cf92c3 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -664,13 +664,13 @@ impl VsCodeSettings { hide_root: None, indent_guides: None, indent_size: None, - open_file_on_paste: None, scrollbar: None, show_diagnostics: self .read_bool("problems.decorations.enabled") .and_then(|b| if b { Some(ShowDiagnostics::Off) } else { None }), starts_open: None, sticky_scroll: None, + auto_open: None, }; if let (Some(false), Some(false)) = ( diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index e3165fbc79850484950e90bdcdbb81338df9974d..973f40a20a5cc6052f30ba2ff17a5116c96eeb2b 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -3776,23 +3776,47 @@ pub(crate) fn settings_data(cx: &App) -> Vec { metadata: None, files: USER, }), + SettingsPageItem::SectionHeader("Auto Open Files"), SettingsPageItem::SettingItem(SettingItem { - title: "Open File on Paste", - description: "Whether to automatically open files when pasting them in the project panel.", + title: "On Create", + description: "Whether to automatically open newly created files in the editor.", field: Box::new(SettingField { - json_path: Some("project_panel.open_file_on_paste"), + json_path: Some("project_panel.auto_open.on_create"), pick: |settings_content| { - settings_content - .project_panel - .as_ref()? - .open_file_on_paste - .as_ref() + settings_content.project_panel.as_ref()?.auto_open.as_ref()?.on_create.as_ref() }, write: |settings_content, value| { - settings_content - .project_panel - .get_or_insert_default() - .open_file_on_paste = value; + settings_content.project_panel.get_or_insert_default().auto_open.get_or_insert_default().on_create = value; + }, + }), + metadata: None, + files: USER, + }), + SettingsPageItem::SettingItem(SettingItem { + title: "On Paste", + description: "Whether to automatically open files after pasting or duplicating them.", + field: Box::new(SettingField { + json_path: Some("project_panel.auto_open.on_paste"), + pick: |settings_content| { + settings_content.project_panel.as_ref()?.auto_open.as_ref()?.on_paste.as_ref() + }, + write: |settings_content, value| { + settings_content.project_panel.get_or_insert_default().auto_open.get_or_insert_default().on_paste = value; + }, + }), + metadata: None, + files: USER, + }), + SettingsPageItem::SettingItem(SettingItem { + title: "On Drop", + description: "Whether to automatically open files dropped from external sources.", + field: Box::new(SettingField { + json_path: Some("project_panel.auto_open.on_drop"), + pick: |settings_content| { + settings_content.project_panel.as_ref()?.auto_open.as_ref()?.on_drop.as_ref() + }, + write: |settings_content, value| { + settings_content.project_panel.get_or_insert_default().auto_open.get_or_insert_default().on_drop = value; }, }), metadata: None, diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 07d93fd6d167bafeb0a8e4bc72f80f52265edee1..6841c9a3cb0364d8eab63a9319df2e6a38d5612e 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4280,7 +4280,11 @@ Run the {#action theme_selector::Toggle} action in the command palette to see a "hide_root": false, "hide_hidden": false, "starts_open": true, - "open_file_on_paste": true + "auto_open": { + "on_create": true, 
+ "on_paste": true, + "on_drop": true + } } } ``` @@ -4489,6 +4493,26 @@ Run the {#action theme_selector::Toggle} action in the command palette to see a } ``` +### Auto Open + +- Description: Control whether files are opened automatically after different creation flows in the project panel. +- Setting: `auto_open` +- Default: + +```json [settings] +"auto_open": { + "on_create": true, + "on_paste": true, + "on_drop": true +} +``` + +**Options** + +- `on_create`: Whether to automatically open newly created files in the editor. +- `on_paste`: Whether to automatically open files after pasting or duplicating them. +- `on_drop`: Whether to automatically open files dropped from external sources. + ## Agent Visit [the Configuration page](./ai/configuration.md) under the AI section to learn more about all the agent-related settings. From cf6ae01d07b5fd02629535250ebddc65f9d0d9ed Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 11 Nov 2025 17:10:46 -0500 Subject: [PATCH 0032/1030] Show recommended models under normal category too (#42489) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Screenshot 2025-11-11 at 4 04 57 PM Discussed with @danilo-leal and we're going with the "it's checked in both places" design! Closes #40910 Release Notes: - Recommended AI models now still appear in their normal category in addition to "Recommended:" --- crates/agent/src/agent.rs | 4 +- .../agent_ui/src/language_model_selector.rs | 82 ++++++++----------- 2 files changed, 34 insertions(+), 52 deletions(-) diff --git a/crates/agent/src/agent.rs b/crates/agent/src/agent.rs index fc0b66f4073ea137f53b29286b0c17b53d11bf83..a85c01bb225ab58a372a8b09fb07e7bc155a7aeb 100644 --- a/crates/agent/src/agent.rs +++ b/crates/agent/src/agent.rs @@ -133,9 +133,7 @@ impl LanguageModels { for model in provider.provided_models(cx) { let model_info = Self::map_language_model_to_info(&model, &provider); let model_id = model_info.id.clone(); - if !recommended_models.contains(&(model.provider_id(), model.id())) { - provider_models.push(model_info); - } + provider_models.push(model_info); models.insert(model_id, model); } if !provider_models.is_empty() { diff --git a/crates/agent_ui/src/language_model_selector.rs b/crates/agent_ui/src/language_model_selector.rs index 0f7b83e3edba6c8d97c2c12a939a65cb71c39dca..1de6bee791f782713d869bac7974ad3ec4e08b9f 100644 --- a/crates/agent_ui/src/language_model_selector.rs +++ b/crates/agent_ui/src/language_model_selector.rs @@ -1,6 +1,6 @@ use std::{cmp::Reverse, sync::Arc}; -use collections::{HashSet, IndexMap}; +use collections::IndexMap; use fuzzy::{StringMatch, StringMatchCandidate, match_strings}; use gpui::{Action, AnyElement, App, BackgroundExecutor, DismissEvent, Subscription, Task}; use language_model::{ @@ -57,7 +57,7 @@ fn all_models(cx: &App) -> GroupedModels { }) .collect(); - let other = providers + let all = providers .iter() .flat_map(|provider| { provider @@ -70,7 +70,7 @@ fn all_models(cx: &App) -> GroupedModels { }) .collect(); - GroupedModels::new(other, recommended) + GroupedModels::new(all, recommended) } #[derive(Clone)] @@ -210,33 +210,24 @@ impl LanguageModelPickerDelegate { struct GroupedModels { recommended: Vec, - other: IndexMap>, + all: IndexMap>, } impl GroupedModels { - pub fn new(other: Vec, recommended: Vec) -> Self { - let recommended_ids = recommended - .iter() - .map(|info| (info.model.provider_id(), info.model.id())) - .collect::>(); - - let mut other_by_provider: IndexMap<_, Vec> = IndexMap::default(); - for model in 
other { - if recommended_ids.contains(&(model.model.provider_id(), model.model.id())) { - continue; - } - + pub fn new(all: Vec, recommended: Vec) -> Self { + let mut all_by_provider: IndexMap<_, Vec> = IndexMap::default(); + for model in all { let provider = model.model.provider_id(); - if let Some(models) = other_by_provider.get_mut(&provider) { + if let Some(models) = all_by_provider.get_mut(&provider) { models.push(model); } else { - other_by_provider.insert(provider, vec![model]); + all_by_provider.insert(provider, vec![model]); } } Self { recommended, - other: other_by_provider, + all: all_by_provider, } } @@ -252,7 +243,7 @@ impl GroupedModels { ); } - for models in self.other.values() { + for models in self.all.values() { if models.is_empty() { continue; } @@ -267,20 +258,6 @@ impl GroupedModels { } entries } - - fn model_infos(&self) -> Vec { - let other = self - .other - .values() - .flat_map(|model| model.iter()) - .cloned() - .collect::>(); - self.recommended - .iter() - .chain(&other) - .cloned() - .collect::>() - } } enum LanguageModelPickerEntry { @@ -425,8 +402,9 @@ impl PickerDelegate for LanguageModelPickerDelegate { .collect::>(); let available_models = all_models - .model_infos() - .iter() + .all + .values() + .flat_map(|models| models.iter()) .filter(|m| configured_provider_ids.contains(&m.model.provider_id())) .cloned() .collect::>(); @@ -764,46 +742,52 @@ mod tests { } #[gpui::test] - fn test_exclude_recommended_models(_cx: &mut TestAppContext) { + fn test_recommended_models_also_appear_in_other(_cx: &mut TestAppContext) { let recommended_models = create_models(vec![("zed", "claude")]); let all_models = create_models(vec![ - ("zed", "claude"), // Should be filtered out from "other" + ("zed", "claude"), // Should also appear in "other" ("zed", "gemini"), ("copilot", "o3"), ]); let grouped_models = GroupedModels::new(all_models, recommended_models); - let actual_other_models = grouped_models - .other + let actual_all_models = grouped_models + .all .values() .flatten() .cloned() .collect::>(); - // Recommended models should not appear in "other" - assert_models_eq(actual_other_models, vec!["zed/gemini", "copilot/o3"]); + // Recommended models should also appear in "all" + assert_models_eq( + actual_all_models, + vec!["zed/claude", "zed/gemini", "copilot/o3"], + ); } #[gpui::test] - fn test_dont_exclude_models_from_other_providers(_cx: &mut TestAppContext) { + fn test_models_from_different_providers(_cx: &mut TestAppContext) { let recommended_models = create_models(vec![("zed", "claude")]); let all_models = create_models(vec![ - ("zed", "claude"), // Should be filtered out from "other" + ("zed", "claude"), // Should also appear in "other" ("zed", "gemini"), - ("copilot", "claude"), // Should not be filtered out from "other" + ("copilot", "claude"), // Different provider, should appear in "other" ]); let grouped_models = GroupedModels::new(all_models, recommended_models); - let actual_other_models = grouped_models - .other + let actual_all_models = grouped_models + .all .values() .flatten() .cloned() .collect::>(); - // Recommended models should not appear in "other" - assert_models_eq(actual_other_models, vec!["zed/gemini", "copilot/claude"]); + // All models should appear in "all" regardless of recommended status + assert_models_eq( + actual_all_models, + vec!["zed/claude", "zed/gemini", "copilot/claude"], + ); } } From 2bcfc129510a6bfb2054e8011a1d3e7437b6e9bc Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Tue, 11 Nov 2025 
20:36:22 -0500 Subject: [PATCH 0033/1030] Absolutize LSP and DAP paths more conservatively (#42482) Fixes a regression caused by #42135 where LSP and DAP binaries weren't being used from `PATH` env var Now we absolutize the path if (path is relative AND (path has multiple components OR path exists in worktree)). - Relative paths with multiple components might not exist in the worktree because they are ignored. Paths with a single component will at least have an entry saying that they exist and are ignored. - Relative paths with multiple components will never use the `PATH` env var, so they can be safely absolutized Release Notes: - N/A --- crates/language/src/language.rs | 1 + crates/project/src/debugger/dap_store.rs | 4 ++-- crates/project/src/lsp_store.rs | 7 ++++-- crates/project/src/project_tests.rs | 29 +++++++++++++++++++----- crates/worktree/src/worktree.rs | 21 +++++++++++++++++ 5 files changed, 52 insertions(+), 10 deletions(-) diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index 2a2f870d6b55abc57a14e623375f77b9fb2d5dbc..ac94378c9cc1ae300f9dcbd5a088f25761f309b4 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -291,6 +291,7 @@ pub trait LspAdapterDelegate: Send + Sync { fn http_client(&self) -> Arc; fn worktree_id(&self) -> WorktreeId; fn worktree_root_path(&self) -> &Path; + fn resolve_executable_path(&self, path: PathBuf) -> PathBuf; fn update_status(&self, language: LanguageServerName, status: BinaryStatus); fn registered_lsp_adapters(&self) -> Vec>; async fn language_server_download_dir(&self, name: &LanguageServerName) -> Option>; diff --git a/crates/project/src/debugger/dap_store.rs b/crates/project/src/debugger/dap_store.rs index 04901a5fef60cfc1692f712f3cdd4a3ec1071632..a82286441d625561009f4f9259f5c06fe424ff10 100644 --- a/crates/project/src/debugger/dap_store.rs +++ b/crates/project/src/debugger/dap_store.rs @@ -262,8 +262,8 @@ impl DapStore { let user_installed_path = dap_settings.and_then(|s| match &s.binary { DapBinary::Default => None, DapBinary::Custom(binary) => { - // if `binary` is absolute, `.join()` will keep it unmodified - Some(worktree.read(cx).abs_path().join(PathBuf::from(binary))) + let path = PathBuf::from(binary); + Some(worktree.read(cx).resolve_executable_path(path)) } }); let user_args = dap_settings.map(|s| s.args.clone()); diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 358bf164d9a26c58f1bbf1bd5829184f6d86e7e4..cae4d64c67d3261f59d87273a38865992da18284 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -573,8 +573,7 @@ impl LocalLspStore { env.extend(settings.env.unwrap_or_default()); Ok(LanguageServerBinary { - // if `path` is absolute, `.join()` will keep it unmodified - path: delegate.worktree_root_path().join(path), + path: delegate.resolve_executable_path(path), env: Some(env), arguments: settings .arguments @@ -13516,6 +13515,10 @@ impl LspAdapterDelegate for LocalLspAdapterDelegate { self.worktree.abs_path().as_ref() } + fn resolve_executable_path(&self, path: PathBuf) -> PathBuf { + self.worktree.resolve_executable_path(path) + } + async fn shell_env(&self) -> HashMap { let task = self.load_shell_env_task.clone(); task.await.unwrap_or_default() diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index 332fdb3e0ffd158cfb0d4df199752b3ccddfb743..02c0e42c10f06006fa5b61a549684e2bb336f509 100644 --- a/crates/project/src/project_tests.rs +++ 
b/crates/project/src/project_tests.rs @@ -1215,13 +1215,20 @@ async fn test_language_server_relative_path(cx: &mut gpui::TestAppContext) { let settings_json_contents = json!({ "languages": { "Rust": { - "language_servers": ["my_fake_lsp"] + "language_servers": ["my_fake_lsp", "lsp_on_path"] } }, "lsp": { "my_fake_lsp": { "binary": { - "path": path!("relative_path/to/my_fake_lsp_binary.exe").to_string(), + // file exists, so this is treated as a relative path + "path": path!(".relative_path/to/my_fake_lsp_binary.exe").to_string(), + } + }, + "lsp_on_path": { + "binary": { + // file doesn't exist, so it will fall back on PATH env var + "path": path!("lsp_on_path.exe").to_string(), } } }, @@ -1234,7 +1241,7 @@ async fn test_language_server_relative_path(cx: &mut gpui::TestAppContext) { ".zed": { "settings.json": settings_json_contents.to_string(), }, - "relative_path": { + ".relative_path": { "to": { "my_fake_lsp.exe": "", }, @@ -1250,13 +1257,20 @@ async fn test_language_server_relative_path(cx: &mut gpui::TestAppContext) { let language_registry = project.read_with(cx, |project, _| project.languages().clone()); language_registry.add(rust_lang()); - let mut fake_rust_servers = language_registry.register_fake_lsp( + let mut my_fake_lsp = language_registry.register_fake_lsp( "Rust", FakeLspAdapter { name: "my_fake_lsp", ..Default::default() }, ); + let mut lsp_on_path = language_registry.register_fake_lsp( + "Rust", + FakeLspAdapter { + name: "lsp_on_path", + ..Default::default() + }, + ); cx.run_until_parked(); @@ -1268,11 +1282,14 @@ async fn test_language_server_relative_path(cx: &mut gpui::TestAppContext) { .await .unwrap(); - let lsp_path = fake_rust_servers.next().await.unwrap().binary.path; + let lsp_path = my_fake_lsp.next().await.unwrap().binary.path; assert_eq!( lsp_path.to_string_lossy(), - path!("/the-root/relative_path/to/my_fake_lsp_binary.exe"), + path!("/the-root/.relative_path/to/my_fake_lsp_binary.exe"), ); + + let lsp_path = lsp_on_path.next().await.unwrap().binary.path; + assert_eq!(lsp_path.to_string_lossy(), path!("lsp_on_path.exe")); } #[gpui::test] diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 69fee07583a33106689c463732fe6defbdcfbb40..7b412e187f0d2cab5c34800309525a16201a83c0 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -2384,6 +2384,27 @@ impl Snapshot { }) } + /// Resolves a path to an executable using the following heuristics: + /// + /// 1. If the path is relative and contains more than one component, + /// it is joined to the worktree root path. + /// 2. If the path is relative and exists in the worktree + /// (even if falls under an exclusion filter), + /// it is joined to the worktree root path. + /// 3. Otherwise the path is returned unmodified. + /// + /// Relative paths that do not exist in the worktree may + /// still be found using the `PATH` environment variable. 
+ pub fn resolve_executable_path(&self, path: PathBuf) -> PathBuf { + if let Ok(rel_path) = RelPath::new(&path, self.path_style) + && (path.components().count() > 1 || self.entry_for_path(&rel_path).is_some()) + { + self.abs_path().join(path) + } else { + path + } + } + pub fn entry_for_id(&self, id: ProjectEntryId) -> Option<&Entry> { let entry = self.entries_by_id.get(&id, ())?; self.entry_for_path(&entry.path) From 231d1b1d58f8c73895fb10304a0e891c0d1b5939 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 12 Nov 2025 09:11:50 +0100 Subject: [PATCH 0034/1030] diagnostics: Close diagnosticsless buffers on refresh (#42503) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/diagnostics/src/diagnostics.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 2e729cbdf420264d96db6e6fec8317d250ec642c..344ce652969e9a6d54a22769741616def48ab3b1 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -182,7 +182,6 @@ impl ProjectDiagnosticsEditor { project::Event::DiskBasedDiagnosticsFinished { language_server_id } => { log::debug!("disk based diagnostics finished for server {language_server_id}"); this.close_diagnosticless_buffers( - window, cx, this.editor.focus_handle(cx).contains_focused(window, cx) || this.focus_handle.contains_focused(window, cx), @@ -247,10 +246,10 @@ impl ProjectDiagnosticsEditor { window.focus(&this.focus_handle); } } - EditorEvent::Blurred => this.close_diagnosticless_buffers(window, cx, false), - EditorEvent::Saved => this.close_diagnosticless_buffers(window, cx, true), + EditorEvent::Blurred => this.close_diagnosticless_buffers(cx, false), + EditorEvent::Saved => this.close_diagnosticless_buffers(cx, true), EditorEvent::SelectionsChanged { .. } => { - this.close_diagnosticless_buffers(window, cx, true) + this.close_diagnosticless_buffers(cx, true) } _ => {} } @@ -298,12 +297,7 @@ impl ProjectDiagnosticsEditor { /// - have no diagnostics anymore /// - are saved (not dirty) /// - and, if `retain_selections` is true, do not have selections within them - fn close_diagnosticless_buffers( - &mut self, - _window: &mut Window, - cx: &mut Context, - retain_selections: bool, - ) { + fn close_diagnosticless_buffers(&mut self, cx: &mut Context, retain_selections: bool) { let snapshot = self .editor .update(cx, |editor, cx| editor.display_snapshot(cx)); @@ -447,7 +441,7 @@ impl ProjectDiagnosticsEditor { fn focus_out(&mut self, _: FocusOutEvent, window: &mut Window, cx: &mut Context) { if !self.focus_handle.is_focused(window) && !self.editor.focus_handle(cx).is_focused(window) { - self.close_diagnosticless_buffers(window, cx, false); + self.close_diagnosticless_buffers(cx, false); } } @@ -461,8 +455,7 @@ impl ProjectDiagnosticsEditor { }); } }); - self.multibuffer - .update(cx, |multibuffer, cx| multibuffer.clear(cx)); + self.close_diagnosticless_buffers(cx, false); self.project.update(cx, |project, cx| { self.paths_to_update = project .diagnostic_summaries(false, cx) From f2cadad49a91a5ed095a54616c7a59fdd163a216 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 12 Nov 2025 09:19:32 +0100 Subject: [PATCH 0035/1030] gpui: Fix `RefCell already borrowed` in `WindowsPlatform::run` (#42506) Relands #42440 Fixes ZED-1VX Release Notes: - N/A *or* Added/Fixed/Improved ... 
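Note on the fix: compared with the helper that was reverted earlier, the relanded `with_callback` binds the result of `.take()` to a `let` before invoking the callback, so the temporary `RefMut` from `borrow_mut()` is dropped before the callback runs instead of living for the whole `if let` body. That is presumably what avoids the reentrant "already borrowed" panic. Below is a minimal standalone sketch of the two shapes; the `Callbacks` struct, its `quit` field, and the `Rc<RefCell<_>>` wiring are illustrative stand-ins for the real platform state, not the actual gpui types.

```rs
use std::cell::RefCell;
use std::rc::Rc;

// Illustrative stand-in for the callback table guarded by a RefCell.
#[derive(Default)]
struct Callbacks {
    quit: Option<Box<dyn FnMut()>>,
}

fn main() {
    let state = Rc::new(RefCell::new(Callbacks::default()));

    // A callback that re-borrows the shared state while it runs, much like the
    // real callbacks can trigger work that takes the same RefCell again.
    let state_for_cb = Rc::clone(&state);
    let quit_cb: Box<dyn FnMut()> = Box::new(move || {
        let _reentrant = state_for_cb.borrow_mut();
        println!("quit callback ran with a reentrant borrow");
    });
    state.borrow_mut().quit = Some(quit_cb);

    // Shape used by the original helper (since reverted): the `RefMut` temporary
    // created in the `if let` scrutinee lives for the whole success block, so the
    // reentrant `borrow_mut()` above would panic:
    //
    // if let Some(mut cb) = state.borrow_mut().quit.take() {
    //     cb();
    //     state.borrow_mut().quit = Some(cb);
    // }

    // Shape used by the relanded helper: take the callback in its own statement so
    // the borrow ends before the callback is invoked, then put it back afterwards.
    let taken = state.borrow_mut().quit.take();
    if let Some(mut cb) = taken {
        cb(); // reentrant borrow succeeds, no RefMut is outstanding
        state.borrow_mut().quit = Some(cb);
    }
}
```

In the diff below, `handle_dock_action_event` and `handle_keyboard_layout_change` go back to routing through this helper, relying on the callback being free to re-borrow the platform state while it runs.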
--- crates/gpui/src/platform/windows/events.rs | 14 ++---- crates/gpui/src/platform/windows/platform.rs | 51 +++++++++++--------- crates/gpui/src/platform/windows/window.rs | 10 +++- 3 files changed, 43 insertions(+), 32 deletions(-) diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index 4e6df63106f4c650ad3130e39d410670ddc4687d..cc17f19bcfac86a6f8ac31ec1059d76c24e79695 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -487,14 +487,12 @@ impl WindowsWindowInner { let scale_factor = lock.scale_factor; let wheel_scroll_amount = match modifiers.shift { true => { - self.system_settings - .borrow() + self.system_settings() .mouse_wheel_settings .wheel_scroll_chars } false => { - self.system_settings - .borrow() + self.system_settings() .mouse_wheel_settings .wheel_scroll_lines } @@ -541,8 +539,7 @@ impl WindowsWindowInner { }; let scale_factor = lock.scale_factor; let wheel_scroll_chars = self - .system_settings - .borrow() + .system_settings() .mouse_wheel_settings .wheel_scroll_chars; drop(lock); @@ -677,8 +674,7 @@ impl WindowsWindowInner { // used by Chrome. However, it may result in one row of pixels being obscured // in our client area. But as Chrome says, "there seems to be no better solution." if is_maximized - && let Some(ref taskbar_position) = - self.system_settings.borrow().auto_hide_taskbar_position + && let Some(ref taskbar_position) = self.system_settings().auto_hide_taskbar_position { // For the auto-hide taskbar, adjust in by 1 pixel on taskbar edge, // so the window isn't treated as a "fullscreen app", which would cause @@ -1072,7 +1068,7 @@ impl WindowsWindowInner { lock.border_offset.update(handle).log_err(); // system settings may emit a window message which wants to take the refcell lock, so drop it drop(lock); - self.system_settings.borrow_mut().update(display, wparam.0); + self.system_settings_mut().update(display, wparam.0); } else { self.handle_system_theme_changed(handle, lparam)?; }; diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b985cc14b01b1171d4013bf5c41a0c5199565503..b1d0c80ffc997976be2ee1b557ca2ea86e46ee3d 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -342,9 +342,8 @@ impl Platform for WindowsPlatform { } } - if let Some(ref mut callback) = self.inner.state.borrow_mut().callbacks.quit { - callback(); - } + self.inner + .with_callback(|callbacks| &mut callbacks.quit, |callback| callback()); } fn quit(&self) { @@ -578,14 +577,13 @@ impl Platform for WindowsPlatform { fn set_cursor_style(&self, style: CursorStyle) { let hcursor = load_cursor(style); - let mut lock = self.inner.state.borrow_mut(); - if lock.current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { + if self.inner.state.borrow_mut().current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) { self.post_message( WM_GPUI_CURSOR_STYLE_CHANGED, WPARAM(0), LPARAM(hcursor.map_or(0, |c| c.0 as isize)), ); - lock.current_cursor = hcursor; + self.inner.state.borrow_mut().current_cursor = hcursor; } } @@ -724,6 +722,19 @@ impl WindowsPlatformInner { })) } + /// Calls `project` to project to the corresponding callback field, removes it from callbacks, calls `f` with the callback and then puts the callback back. 
+ fn with_callback( + &self, + project: impl Fn(&mut PlatformCallbacks) -> &mut Option, + f: impl FnOnce(&mut T), + ) { + let callback = project(&mut self.state.borrow_mut().callbacks).take(); + if let Some(mut callback) = callback { + f(&mut callback); + *project(&mut self.state.borrow_mut().callbacks) = Some(callback) + } + } + fn handle_msg( self: &Rc, handle: HWND, @@ -807,40 +818,36 @@ impl WindowsPlatformInner { } fn handle_dock_action_event(&self, action_idx: usize) -> Option { - let mut lock = self.state.borrow_mut(); - let mut callback = lock.callbacks.app_menu_action.take()?; - let Some(action) = lock + let Some(action) = self + .state + .borrow_mut() .jump_list .dock_menus .get(action_idx) .map(|dock_menu| dock_menu.action.boxed_clone()) else { - lock.callbacks.app_menu_action = Some(callback); log::error!("Dock menu for index {action_idx} not found"); return Some(1); }; - drop(lock); - callback(&*action); - self.state.borrow_mut().callbacks.app_menu_action = Some(callback); + self.with_callback( + |callbacks| &mut callbacks.app_menu_action, + |callback| callback(&*action), + ); Some(0) } fn handle_keyboard_layout_change(&self) -> Option { - let mut callback = self - .state - .borrow_mut() - .callbacks - .keyboard_layout_change - .take()?; - callback(); - self.state.borrow_mut().callbacks.keyboard_layout_change = Some(callback); + self.with_callback( + |callbacks| &mut callbacks.keyboard_layout_change, + |callback| callback(), + ); Some(0) } fn handle_device_lost(&self, lparam: LPARAM) -> Option { - let mut lock = self.state.borrow_mut(); let directx_devices = lparam.0 as *const DirectXDevices; let directx_devices = unsafe { &*directx_devices }; + let mut lock = self.state.borrow_mut(); lock.directx_devices.take(); lock.directx_devices = Some(directx_devices.clone()); diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index 0050fa4bc0e96b8702314f33637db67998b5941d..4b89fcffb39d9bfbc0734977cec16a00984f5c9a 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -63,7 +63,7 @@ pub(crate) struct WindowsWindowInner { hwnd: HWND, drop_target_helper: IDropTargetHelper, pub(crate) state: RefCell, - pub(crate) system_settings: RefCell, + system_settings: RefCell, pub(crate) handle: AnyWindowHandle, pub(crate) hide_title_bar: bool, pub(crate) is_movable: bool, @@ -321,6 +321,14 @@ impl WindowsWindowInner { } Ok(()) } + + pub(crate) fn system_settings(&self) -> std::cell::Ref<'_, WindowsSystemSettings> { + self.system_settings.borrow() + } + + pub(crate) fn system_settings_mut(&self) -> std::cell::RefMut<'_, WindowsSystemSettings> { + self.system_settings.borrow_mut() + } } #[derive(Default)] From ddf762e368352ae372b7631f7eebda4f5366e8bd Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Wed, 12 Nov 2025 10:24:06 +0200 Subject: [PATCH 0036/1030] Revert "gpui: Unify the index_for_x methods (#42162)" (#42505) This reverts commit 082b80ec89748bf238f0068da80e12211fb3c7d6. This broke clicking, e.g. in snippets like ```rs let x = vec![ 1, 2, // 3, ]; ``` clicking between `2` and `,` is quite off now. 
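For context, the two methods restored below answer different questions: `index_for_x` returns the index of the character that contains the given x (and `None` past the end of the line), while `closest_index_for_x` returns the character boundary nearest to x, which can be one position further right. The editor element's hit-testing uses the containing-character form, so snapping to the nearest boundary would explain clicks landing one position off as described above. A simplified sketch of the difference, using made-up per-character widths and plain `f32` positions instead of gpui's shaped glyph runs:

```rs
// Simplified model: `starts[i]` is the x position where character `i` begins and
// `width` is the total line width. The real methods walk shaped glyph runs.
fn index_for_x(starts: &[f32], width: f32, x: f32) -> Option<usize> {
    if x >= width {
        return None;
    }
    // Character *containing* x: the last start position at or before x.
    Some(starts.iter().rposition(|&s| s <= x).unwrap_or(0))
}

fn closest_index_for_x(starts: &[f32], width: f32, x: f32) -> usize {
    // Character boundary *closest* to x, which may be one past the containing char.
    let mut closest = 0;
    let mut closest_dist = f32::MAX;
    for (i, &boundary) in starts.iter().chain(std::iter::once(&width)).enumerate() {
        let dist = (boundary - x).abs();
        if dist < closest_dist {
            closest_dist = dist;
            closest = i;
        }
    }
    closest
}

fn main() {
    // Four characters, each 10 px wide (made-up metrics).
    let starts = [0.0, 10.0, 20.0, 30.0];
    let width = 40.0;
    // A click at x = 27.0 falls inside character 2 but nearer its right edge:
    assert_eq!(index_for_x(&starts, width, 27.0), Some(2));
    assert_eq!(closest_index_for_x(&starts, width, 27.0), 3);
}
```

After the revert, element.rs hit-testing keeps the `Option`-returning `index_for_x`, while callers that want snapping (up/down cursor alignment, columnar selections) keep `closest_index_for_x`.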
Release Notes: - N/A --- crates/editor/src/display_map.rs | 2 +- crates/editor/src/element.rs | 2 +- crates/editor/src/selections_collection.rs | 4 +- crates/gpui/examples/input.rs | 4 +- crates/gpui/src/text_system/line_layout.rs | 55 ++++++++++++++++++++-- crates/vim/src/visual.rs | 8 ++-- 6 files changed, 62 insertions(+), 13 deletions(-) diff --git a/crates/editor/src/display_map.rs b/crates/editor/src/display_map.rs index ebcc53b09bfbb9466a80d639d17cadfe2927a27e..c4c49eb7911e0d7c5ed375d83697584fbb493b81 100644 --- a/crates/editor/src/display_map.rs +++ b/crates/editor/src/display_map.rs @@ -1097,7 +1097,7 @@ impl DisplaySnapshot { details: &TextLayoutDetails, ) -> u32 { let layout_line = self.layout_row(display_row, details); - layout_line.index_for_x(x) as u32 + layout_line.closest_index_for_x(x) as u32 } pub fn grapheme_at(&self, mut point: DisplayPoint) -> Option { diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 6fd259dae9333933fa7f29041c2deb591b42bf6d..680570d0926257b3bde4532b03681b4515111930 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -8668,7 +8668,7 @@ impl LineWithInvisibles { let fragment_end_x = fragment_start_x + shaped_line.width; if x < fragment_end_x { return Some( - fragment_start_index + shaped_line.index_for_x(x - fragment_start_x), + fragment_start_index + shaped_line.index_for_x(x - fragment_start_x)?, ); } fragment_start_x = fragment_end_x; diff --git a/crates/editor/src/selections_collection.rs b/crates/editor/src/selections_collection.rs index 75fffdc7fea17fe35f9942125499ba15c9a77422..7bb90deda0da84fa8719b9530dffef567c467c36 100644 --- a/crates/editor/src/selections_collection.rs +++ b/crates/editor/src/selections_collection.rs @@ -372,7 +372,7 @@ impl SelectionsCollection { let is_empty = positions.start == positions.end; let line_len = display_map.line_len(row); let line = display_map.layout_row(row, text_layout_details); - let start_col = line.index_for_x(positions.start) as u32; + let start_col = line.closest_index_for_x(positions.start) as u32; let (start, end) = if is_empty { let point = DisplayPoint::new(row, std::cmp::min(start_col, line_len)); @@ -382,7 +382,7 @@ impl SelectionsCollection { return None; } let start = DisplayPoint::new(row, start_col); - let end_col = line.index_for_x(positions.end) as u32; + let end_col = line.closest_index_for_x(positions.end) as u32; let end = DisplayPoint::new(row, end_col); (start, end) }; diff --git a/crates/gpui/examples/input.rs b/crates/gpui/examples/input.rs index 16af30166c6ccdbd06469f4e2fd4cd3df8352127..37115feaa551a787562e7299c9d44bcc97b5fca3 100644 --- a/crates/gpui/examples/input.rs +++ b/crates/gpui/examples/input.rs @@ -178,7 +178,7 @@ impl TextInput { if position.y > bounds.bottom() { return self.content.len(); } - line.index_for_x(position.x - bounds.left()) + line.closest_index_for_x(position.x - bounds.left()) } fn select_to(&mut self, offset: usize, cx: &mut Context) { @@ -380,7 +380,7 @@ impl EntityInputHandler for TextInput { let last_layout = self.last_layout.as_ref()?; assert_eq!(last_layout.text, self.content); - let utf8_index = last_layout.index_for_x(point.x - line_point.x); + let utf8_index = last_layout.index_for_x(point.x - line_point.x)?; Some(self.offset_to_utf16(utf8_index)) } } diff --git a/crates/gpui/src/text_system/line_layout.rs b/crates/gpui/src/text_system/line_layout.rs index 61edd614d804434d414b34a9804e51b0b0148ea4..375a9bdc7bccdddb9d34409c5ced138b2d5aebd2 100644 --- a/crates/gpui/src/text_system/line_layout.rs 
+++ b/crates/gpui/src/text_system/line_layout.rs @@ -54,9 +54,25 @@ pub struct ShapedGlyph { } impl LineLayout { + /// The index for the character at the given x coordinate + pub fn index_for_x(&self, x: Pixels) -> Option { + if x >= self.width { + None + } else { + for run in self.runs.iter().rev() { + for glyph in run.glyphs.iter().rev() { + if glyph.position.x <= x { + return Some(glyph.index); + } + } + } + Some(0) + } + } + /// closest_index_for_x returns the character boundary closest to the given x coordinate /// (e.g. to handle aligning up/down arrow keys) - pub fn index_for_x(&self, x: Pixels) -> usize { + pub fn closest_index_for_x(&self, x: Pixels) -> usize { let mut prev_index = 0; let mut prev_x = px(0.); @@ -262,10 +278,34 @@ impl WrappedLineLayout { } /// The index corresponding to a given position in this layout for the given line height. + /// + /// See also [`Self::closest_index_for_position`]. pub fn index_for_position( + &self, + position: Point, + line_height: Pixels, + ) -> Result { + self._index_for_position(position, line_height, false) + } + + /// The closest index to a given position in this layout for the given line height. + /// + /// Closest means the character boundary closest to the given position. + /// + /// See also [`LineLayout::closest_index_for_x`]. + pub fn closest_index_for_position( + &self, + position: Point, + line_height: Pixels, + ) -> Result { + self._index_for_position(position, line_height, true) + } + + fn _index_for_position( &self, mut position: Point, line_height: Pixels, + closest: bool, ) -> Result { let wrapped_line_ix = (position.y / line_height) as usize; @@ -305,9 +345,16 @@ impl WrappedLineLayout { } else if position_in_unwrapped_line.x >= wrapped_line_end_x { Err(wrapped_line_end_index) } else { - Ok(self - .unwrapped_layout - .index_for_x(position_in_unwrapped_line.x)) + if closest { + Ok(self + .unwrapped_layout + .closest_index_for_x(position_in_unwrapped_line.x)) + } else { + Ok(self + .unwrapped_layout + .index_for_x(position_in_unwrapped_line.x) + .unwrap()) + } } } diff --git a/crates/vim/src/visual.rs b/crates/vim/src/visual.rs index 498c4b4dc6ec6ad8af4f47bb6ea5044a5fcd3c0a..4172de80afdc1beacbf3ea342846de03953e1fc6 100644 --- a/crates/vim/src/visual.rs +++ b/crates/vim/src/visual.rs @@ -371,10 +371,12 @@ impl Vim { loop { let laid_out_line = map.layout_row(row, &text_layout_details); - let start = - DisplayPoint::new(row, laid_out_line.index_for_x(positions.start) as u32); + let start = DisplayPoint::new( + row, + laid_out_line.closest_index_for_x(positions.start) as u32, + ); let mut end = - DisplayPoint::new(row, laid_out_line.index_for_x(positions.end) as u32); + DisplayPoint::new(row, laid_out_line.closest_index_for_x(positions.end) as u32); if end <= start { if start.column() == map.line_len(start.row()) { end = start; From 4f158c19839237a600fc99701b6fca0e63398fb9 Mon Sep 17 00:00:00 2001 From: CnsMaple <92523839+CnsMaple@users.noreply.github.com> Date: Wed, 12 Nov 2025 17:05:17 +0800 Subject: [PATCH 0037/1030] docs: Update basedpyright settings examples (#42497) The [example](https://docs.basedpyright.com/latest/configuration/language-server-settings/#zed) on the official website of basedpyright is correct. 
Release Notes: - Update basedpyright settings examples --- docs/src/languages/python.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/src/languages/python.md b/docs/src/languages/python.md index 204258a8e736dc68ef51e338ecb0ee8ba7e0a737..5051a72209121176e05d41f57cb8d341db2ca351 100644 --- a/docs/src/languages/python.md +++ b/docs/src/languages/python.md @@ -128,9 +128,11 @@ You can use the following configuration: "lsp": { "basedpyright": { "settings": { - "analysis": { + "basedpyright.analysis": { "diagnosticMode": "workspace", - "inlayHints.callArgumentNames": false + "inlayHints": { + "callArgumentNames": false + } } } } From 78f466559acc62cc89428d79be62707113598971 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 12 Nov 2025 10:54:22 +0100 Subject: [PATCH 0038/1030] vim: Fix empty selections panic in `insert_at_previous` (#42504) Fixes ZED-15C Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/vim/src/normal.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/crates/vim/src/normal.rs b/crates/vim/src/normal.rs index 8b4aefcaac371383dd3114c2b12abd166ef9aa72..e200c24b94468b141020e12c0230fb1908ffbe8e 100644 --- a/crates/vim/src/normal.rs +++ b/crates/vim/src/normal.rs @@ -671,13 +671,13 @@ impl Vim { self.start_recording(cx); self.switch_mode(Mode::Insert, false, window, cx); self.update_editor(cx, |vim, editor, cx| { - let Some(Mark::Local(marks)) = vim.get_mark("^", editor, window, cx) else { - return; - }; - - editor.change_selections(Default::default(), window, cx, |s| { - s.select_anchor_ranges(marks.iter().map(|mark| *mark..*mark)) - }); + if let Some(Mark::Local(marks)) = vim.get_mark("^", editor, window, cx) + && !marks.is_empty() + { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_anchor_ranges(marks.iter().map(|mark| *mark..*mark)) + }); + } }); } From f46990bac88668827e2fa5adb4623fc906ca84a1 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 12 Nov 2025 11:12:02 +0100 Subject: [PATCH 0039/1030] extensions_ui: Add XML extension suggestion for XML files (#42514) Closes #41798 Release Notes: - N/A --- crates/extensions_ui/src/extension_suggest.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/extensions_ui/src/extension_suggest.rs b/crates/extensions_ui/src/extension_suggest.rs index 5dcd1e210527ee89a35a3b89008a901cf1f9f036..7ad4c1540a419f0cdeedb2aeff7661aafac5ef4c 100644 --- a/crates/extensions_ui/src/extension_suggest.rs +++ b/crates/extensions_ui/src/extension_suggest.rs @@ -75,6 +75,7 @@ const SUGGESTIONS_BY_EXTENSION_ID: &[(&str, &[&str])] = &[ ("vue", &["vue"]), ("wgsl", &["wgsl"]), ("wit", &["wit"]), + ("xml", &["xml"]), ("zig", &["zig"]), ]; From 70feff3c7adc2894feb3145fb57c10c58bce01aa Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Wed, 12 Nov 2025 11:40:31 +0100 Subject: [PATCH 0040/1030] Add a one-off cleanup script for GH issue types (#42515) Mainly for historical purposes and in case we want to do something similar enough in the future. 
Release Notes: - N/A --- script/github-clean-issue-types.py | 105 +++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100755 script/github-clean-issue-types.py diff --git a/script/github-clean-issue-types.py b/script/github-clean-issue-types.py new file mode 100755 index 0000000000000000000000000000000000000000..dfd573628b6bb01af7fdd7fd3ad495bbc877465d --- /dev/null +++ b/script/github-clean-issue-types.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python3 +"""Replace 'bug/feature/crash' labels with 'Bug/Feature/Crash' types on open +GitHub issues. + +Requires `requests` library and a GitHub access token with "Issues (write)" +permission passed as an environment variable. +Was used as a quick-and-dirty one-off-bulk-operation script to clean up issue +types in the `zed` repository. Leaving it here for reference only; there's no +error handling, you've been warned. +""" + + +import logging +import os + +import requests + +logging.basicConfig(level=logging.INFO) +log = logging.getLogger(__name__) + +GITHUB_API_BASE_URL = "https://api.github.com" +REPO_OWNER = "zed-industries" +REPO_NAME = "zed" +GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") +HEADERS = { + "Authorization": f"token {GITHUB_TOKEN}", + "Accept": "application/vnd.github+json" +} +LABELS_TO_TYPES = { + 'bug': 'Bug', + 'feature': 'Feature', + 'crash': 'Crash', + } + + +def get_open_issues_without_type(repo): + """Get open issues without type via GitHub's REST API.""" + issues = [] + issues_url = f"{GITHUB_API_BASE_URL}/repos/{REPO_OWNER}/{repo}/issues" + + log.info("Start fetching issues from the GitHub API.") + params = { + "state": "open", + "type": "none", + "page": 1, + "per_page": 100, # worked fine despite the docs saying 30 + } + while True: + response = requests.get(issues_url, headers=HEADERS, params=params) + response.raise_for_status() + issues.extend(response.json()) + log.info(f"Fetched the next page, total issues so far: {len(issues)}.") + + # is there a next page? + link_header = response.headers.get('Link', '') + if 'rel="next"' not in link_header: + break + params['page'] += 1 + + log.info("Done fetching issues.") + return issues + + +def replace_labels_with_types(issues, labels_to_types): + """Replace labels with types, a new attribute of issues. + + Only changes the issues with one type-sounding label, leaving those with + two labels (e.g. `bug` *and* `crash`) alone, logging a warning. 
+ """ + for issue in issues: + log.debug(f"Processing issue {issue['number']}.") + # for GitHub, all PRs are issues but not all issues are PRs; skip PRs + if 'pull_request' in issue: + continue + issue_labels = (label['name'] for label in issue['labels']) + matching_labels = labels_to_types.keys() & set(issue_labels) + if len(matching_labels) != 1: + log.warning( + f"Issue {issue['url']} has either no or multiple type-sounding " + "labels, won't be processed.") + continue + label_to_replace = matching_labels.pop() + issue_type = labels_to_types[label_to_replace] + log.debug( + f"Replacing label {label_to_replace} with type {issue_type} " + f"for issue {issue['title']}.") + + # add the type + api_url_issue = f"{GITHUB_API_BASE_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}" + add_type_response = requests.patch( + api_url_issue, headers=HEADERS, json={"type": issue_type}) + add_type_response.raise_for_status() + log.debug(f"Added type {issue_type} to issue {issue['title']}.") + + # delete the label + api_url_delete_label = f"{GITHUB_API_BASE_URL}/repos/{REPO_OWNER}/{REPO_NAME}/issues/{issue['number']}/labels/{label_to_replace}" + delete_response = requests.delete(api_url_delete_label, headers=HEADERS) + delete_response.raise_for_status() + log.info( + f"Deleted label {label_to_replace} from issue {issue['title']}.") + + +if __name__ == "__main__": + open_issues_without_type = get_open_issues_without_type(REPO_NAME) + replace_labels_with_types(open_issues_without_type, LABELS_TO_TYPES) From a0be53a19008709b5934f04cdf13dba6d03e161b Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:40:26 +0100 Subject: [PATCH 0041/1030] Wake up stalebot with an updated config (#42516) - switch the bot from looking at the `bug/crash` labels which we don't use anymore to the Bug/Crash issue types which we do use - shorten the period of time after which a bug is suspected to be stale (with our pace they can indeed be outdated in 60 days) - extend the grace period for someone to come around and say nope, this problem still exists (people might be away for a couple of weeks). Release Notes: - N/A --- .github/workflows/community_close_stale_issues.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/community_close_stale_issues.yml b/.github/workflows/community_close_stale_issues.yml index a38354c31709502d7c35bb43104691c0e63d9f4b..faa42669d0206965ba99ab683fea7165ef351c8f 100644 --- a/.github/workflows/community_close_stale_issues.yml +++ b/.github/workflows/community_close_stale_issues.yml @@ -15,13 +15,13 @@ jobs: stale-issue-message: > Hi there! 👋 - We're working to clean up our issue tracker by closing older issues that might not be relevant anymore. If you are able to reproduce this issue in the latest version of Zed, please let us know by commenting on this issue, and we will keep it open. If you can't reproduce it, feel free to close the issue yourself. Otherwise, we'll close it in 7 days. + We're working to clean up our issue tracker by closing older bugs that might not be relevant anymore. If you are able to reproduce this issue in the latest version of Zed, please let us know by commenting on this issue, and it will be kept open. If you can't reproduce it, feel free to close the issue yourself. Otherwise, it will close automatically in 14 days. Thanks for your help! close-issue-message: "This issue was closed due to inactivity. 
If you're still experiencing this problem, please open a new issue with a link to this issue." - days-before-stale: 120 - days-before-close: 7 - any-of-issue-labels: "bug,panic / crash" + days-before-stale: 60 + days-before-close: 14 + only-issue-types: "Bug,Crash" operations-per-run: 1000 ascending: true enable-statistics: true From c2980cba18e3794433be571f0c89533233c5d3a3 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 12 Nov 2025 12:57:53 +0100 Subject: [PATCH 0042/1030] remote_server: Bump fork to 0.4.0 (#42520) Release Notes: - N/A --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 865dfe211ef606f4469be6017129dfac2916522b..84ca9fbc10ddf4d2b72564167d90acbeb04669c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6359,9 +6359,9 @@ checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b" [[package]] name = "fork" -version = "0.2.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05dc8b302e04a1c27f4fe694439ef0f29779ca4edc205b7b58f00db04e29656d" +checksum = "30268f1eefccc9d72f43692e8b89e659aeb52e84016c3b32b6e7e9f1c8f38f94" dependencies = [ "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 579bc394e6f13963188f03b1b812a05eec43a7b4..8511bd8d21d79a854453434827767eaca4adf3d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -504,7 +504,7 @@ emojis = "0.6.1" env_logger = "0.11" exec = "0.3.1" fancy-regex = "0.14.0" -fork = "0.2.0" +fork = "0.4.0" futures = "0.3" futures-batch = "0.6.1" futures-lite = "1.13" From 7be76c74d6fbdf8e844062182fe0bd16c69e80dc Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Wed, 12 Nov 2025 04:52:19 -0800 Subject: [PATCH 0043/1030] Use `set -x` in `script/clear-target-dir-if-larger-than` (#42525) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... --- script/clear-target-dir-if-larger-than | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/clear-target-dir-if-larger-than b/script/clear-target-dir-if-larger-than index b77b4cff0f3d2bf22756538a49e38e8a11c200ef..f5219dcc537178e50ea300aa3df3871ace9182a2 100755 --- a/script/clear-target-dir-if-larger-than +++ b/script/clear-target-dir-if-larger-than @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -eu +set -euxo pipefail if [[ $# -ne 1 ]]; then echo "usage: $0 " From e833d1af8d8ba7b3445b07df7356108886cb40aa Mon Sep 17 00:00:00 2001 From: Hans Date: Wed, 12 Nov 2025 21:04:24 +0800 Subject: [PATCH 0044/1030] vim: Fix change surround adding unwanted spaces with quotes (#42431) Update `Vim.change_surround` in order to ensure that there's no overlapping edits by keeping track of where the open string range ends and ensuring that the closing string range start does not go lower than the open string range end. Closes #42316 Release Notes: - Fix vim's change surrounds `cs` inserting spaces with quotes by preventing overlapping edits --------- Co-authored-by: dino --- crates/vim/src/surrounds.rs | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/crates/vim/src/surrounds.rs b/crates/vim/src/surrounds.rs index bc817e2d4871a0be07e8c100b332f5630dcec711..579ab7842096f1e5cb1bb4c70e2fd8f4256355d0 100644 --- a/crates/vim/src/surrounds.rs +++ b/crates/vim/src/surrounds.rs @@ -282,6 +282,7 @@ impl Vim { // that the end replacement string does not exceed // this value. Helpful when dealing with newlines. 
let mut edit_len = 0; + let mut open_range_end = 0; let mut chars_and_offset = display_map .buffer_chars_at(range.start.to_offset(&display_map, Bias::Left)) .peekable(); @@ -290,11 +291,11 @@ impl Vim { if ch.to_string() == will_replace_pair.start { let mut open_str = pair.start.clone(); let start = offset; - let mut end = start + 1; + open_range_end = start + 1; while let Some((next_ch, _)) = chars_and_offset.next() - && next_ch.to_string() == " " + && next_ch == ' ' { - end += 1; + open_range_end += 1; if preserve_space { open_str.push(next_ch); @@ -305,8 +306,8 @@ impl Vim { open_str.push(' '); }; - edit_len = end - start; - edits.push((start..end, open_str)); + edit_len = open_range_end - start; + edits.push((start..open_range_end, open_str)); anchors.push(start..start); break; } @@ -323,8 +324,9 @@ impl Vim { let mut start = offset; let end = start + 1; while let Some((next_ch, _)) = reverse_chars_and_offsets.next() - && next_ch.to_string() == " " + && next_ch == ' ' && close_str.len() < edit_len - 1 + && start > open_range_end { start -= 1; @@ -1236,6 +1238,23 @@ mod test { Mode::Normal, ); + // test spaces with quote change surrounds + cx.set_state( + indoc! {" + fn test_surround() { + \"ˇ \" + };"}, + Mode::Normal, + ); + cx.simulate_keystrokes("c s \" '"); + cx.assert_state( + indoc! {" + fn test_surround() { + ˇ' ' + };"}, + Mode::Normal, + ); + // Currently, the same test case but using the closing bracket `]` // actually removes a whitespace before the closing bracket, something // that might need to be fixed? From 2119ac42d70e8e40c927cf9e423eb6a32de9a21b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 12 Nov 2025 15:13:29 +0100 Subject: [PATCH 0045/1030] git_panel: Fix partially staged changes not showing up (#42530) Release Notes: - N/A --- crates/git_ui/src/git_panel.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 85cfb3b499f5cc2baefdc23f8e0ffc91f09b620d..81732732aff257b513faaf5f87ce85596a842dd8 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3980,9 +3980,9 @@ impl GitPanel { .map(|ops| ops.staging() || ops.staged()) .or_else(|| { repo.status_for_path(&entry.repo_path) - .map(|status| status.status.staging().has_staged()) + .and_then(|status| status.status.staging().as_bool()) }) - .unwrap_or(entry.staging.has_staged()); + .or_else(|| entry.staging.as_bool()); let mut is_staged: ToggleState = is_staging_or_staged.into(); if self.show_placeholders && !self.has_staged_changes() && !entry.status.is_created() { is_staged = ToggleState::Selected; @@ -4102,7 +4102,9 @@ impl GitPanel { } }) .tooltip(move |_window, cx| { - let action = if is_staging_or_staged { + // If is_staging_or_staged is None, this implies the file was partially staged, and so + // we allow the user to stage it in full by displaying `Stage` in the tooltip. 
+ let action = if is_staging_or_staged.unwrap_or(false) { "Unstage" } else { "Stage" From 49634f60417aa0df7aa68a478a44f46820f4b7e9 Mon Sep 17 00:00:00 2001 From: localcc Date: Wed, 12 Nov 2025 15:31:20 +0100 Subject: [PATCH 0046/1030] Miniprofiler (#42385) Release Notes: - Added hang detection and a built in performance profiler --- Cargo.lock | 29 +- Cargo.toml | 2 + crates/gpui/Cargo.toml | 2 + crates/gpui/src/executor.rs | 55 ++- crates/gpui/src/gpui.rs | 2 + crates/gpui/src/platform.rs | 28 +- crates/gpui/src/platform/linux/dispatcher.rs | 119 +++++- .../src/platform/linux/headless/client.rs | 5 +- crates/gpui/src/platform/linux/platform.rs | 8 +- .../gpui/src/platform/linux/wayland/client.rs | 37 +- crates/gpui/src/platform/linux/x11/client.rs | 34 +- crates/gpui/src/platform/mac/dispatcher.rs | 148 ++++++- crates/gpui/src/platform/test/dispatcher.rs | 33 +- .../gpui/src/platform/windows/dispatcher.rs | 97 ++++- crates/gpui/src/platform/windows/events.rs | 4 +- crates/gpui/src/platform/windows/platform.rs | 16 +- crates/gpui/src/platform/windows/window.rs | 5 +- crates/gpui/src/profiler.rs | 218 ++++++++++ crates/miniprofiler_ui/Cargo.toml | 23 + crates/miniprofiler_ui/LICENSE-GPL | 1 + crates/miniprofiler_ui/src/miniprofiler_ui.rs | 393 ++++++++++++++++++ crates/paths/src/paths.rs | 6 + crates/remote_server/src/unix.rs | 1 + crates/repl/src/repl.rs | 8 +- crates/zed/Cargo.toml | 2 + crates/zed/src/main.rs | 9 +- crates/zed/src/reliability.rs | 97 ++++- crates/zed_actions/src/lib.rs | 2 + 28 files changed, 1282 insertions(+), 102 deletions(-) create mode 100644 crates/gpui/src/profiler.rs create mode 100644 crates/miniprofiler_ui/Cargo.toml create mode 120000 crates/miniprofiler_ui/LICENSE-GPL create mode 100644 crates/miniprofiler_ui/src/miniprofiler_ui.rs diff --git a/Cargo.lock b/Cargo.lock index 84ca9fbc10ddf4d2b72564167d90acbeb04669c2..6f665286ad2a47e345d6c43f9c296af01c423c64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6248,7 +6248,7 @@ dependencies = [ "futures-core", "futures-sink", "nanorand", - "spin", + "spin 0.9.8", ] [[package]] @@ -7287,6 +7287,7 @@ dependencies = [ "calloop", "calloop-wayland-source", "cbindgen", + "circular-buffer", "cocoa 0.26.0", "cocoa-foundation 0.2.0", "collections", @@ -7342,6 +7343,7 @@ dependencies = [ "slotmap", "smallvec", "smol", + "spin 0.10.0", "stacksafe", "strum 0.27.2", "sum_tree", @@ -9072,7 +9074,7 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" dependencies = [ - "spin", + "spin 0.9.8", ] [[package]] @@ -10014,6 +10016,18 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" +[[package]] +name = "miniprofiler_ui" +version = "0.1.0" +dependencies = [ + "gpui", + "serde_json", + "smol", + "util", + "workspace", + "zed_actions", +] + [[package]] name = "miniz_oxide" version = "0.8.9" @@ -15854,6 +15868,15 @@ dependencies = [ "lock_api", ] +[[package]] +name = "spin" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe4ccb98d9c292d56fec89a5e07da7fc4cf0dc11e156b41793132775d3e591" +dependencies = [ + "lock_api", +] + [[package]] name = "spirv" version = "0.3.0+sdk-1.3.268.0" @@ -21165,6 +21188,7 @@ dependencies = [ "breadcrumbs", "call", "channel", + "chrono", "clap", "cli", "client", @@ -21222,6 +21246,7 @@ dependencies = [ "menu", "migrator", "mimalloc", + 
"miniprofiler_ui", "nc", "nix 0.29.0", "node_runtime", diff --git a/Cargo.toml b/Cargo.toml index 8511bd8d21d79a854453434827767eaca4adf3d8..c6471cd043152ca9c52cee671e3d494ab15be81a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,6 +110,7 @@ members = [ "crates/menu", "crates/migrator", "crates/mistral", + "crates/miniprofiler_ui", "crates/multi_buffer", "crates/nc", "crates/net", @@ -341,6 +342,7 @@ menu = { path = "crates/menu" } migrator = { path = "crates/migrator" } mistral = { path = "crates/mistral" } multi_buffer = { path = "crates/multi_buffer" } +miniprofiler_ui = { path = "crates/miniprofiler_ui" } nc = { path = "crates/nc" } net = { path = "crates/net" } node_runtime = { path = "crates/node_runtime" } diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml index 6523bbe526848c15053a4bad45dce208a5ecd7e0..a6649f011d42410671bc7e317a3492803418cc2a 100644 --- a/crates/gpui/Cargo.toml +++ b/crates/gpui/Cargo.toml @@ -138,6 +138,8 @@ waker-fn = "1.2.0" lyon = "1.0" libc.workspace = true pin-project = "1.1.10" +circular-buffer.workspace = true +spin = "0.10.0" [target.'cfg(target_os = "macos")'.dependencies] block = "0.1" diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 86cd7451fb3559ffd7da4001bdf6f6bd121e8b39..feeac47e01b3ba4a680b4a159ba0f6d09f66375f 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -1,4 +1,4 @@ -use crate::{App, PlatformDispatcher}; +use crate::{App, PlatformDispatcher, RunnableMeta, RunnableVariant}; use async_task::Runnable; use futures::channel::mpsc; use smol::prelude::*; @@ -62,7 +62,7 @@ enum TaskState { Ready(Option), /// A task that is currently running. - Spawned(async_task::Task), + Spawned(async_task::Task), } impl Task { @@ -146,6 +146,7 @@ impl BackgroundExecutor { } /// Enqueues the given future to be run to completion on a background thread. + #[track_caller] pub fn spawn(&self, future: impl Future + Send + 'static) -> Task where R: Send + 'static, @@ -155,6 +156,7 @@ impl BackgroundExecutor { /// Enqueues the given future to be run to completion on a background thread. /// The given label can be used to control the priority of the task in tests. 
+ #[track_caller] pub fn spawn_labeled( &self, label: TaskLabel, @@ -166,14 +168,20 @@ impl BackgroundExecutor { self.spawn_internal::(Box::pin(future), Some(label)) } + #[track_caller] fn spawn_internal( &self, future: AnyFuture, label: Option, ) -> Task { let dispatcher = self.dispatcher.clone(); - let (runnable, task) = - async_task::spawn(future, move |runnable| dispatcher.dispatch(runnable, label)); + let location = core::panic::Location::caller(); + let (runnable, task) = async_task::Builder::new() + .metadata(RunnableMeta { location }) + .spawn( + move |_| future, + move |runnable| dispatcher.dispatch(RunnableVariant::Meta(runnable), label), + ); runnable.schedule(); Task(TaskState::Spawned(task)) } @@ -374,10 +382,13 @@ impl BackgroundExecutor { if duration.is_zero() { return Task::ready(()); } - let (runnable, task) = async_task::spawn(async move {}, { - let dispatcher = self.dispatcher.clone(); - move |runnable| dispatcher.dispatch_after(duration, runnable) - }); + let location = core::panic::Location::caller(); + let (runnable, task) = async_task::Builder::new() + .metadata(RunnableMeta { location }) + .spawn(move |_| async move {}, { + let dispatcher = self.dispatcher.clone(); + move |runnable| dispatcher.dispatch_after(duration, RunnableVariant::Meta(runnable)) + }); runnable.schedule(); Task(TaskState::Spawned(task)) } @@ -483,24 +494,29 @@ impl ForegroundExecutor { } /// Enqueues the given Task to run on the main thread at some point in the future. + #[track_caller] pub fn spawn(&self, future: impl Future + 'static) -> Task where R: 'static, { let dispatcher = self.dispatcher.clone(); + let location = core::panic::Location::caller(); #[track_caller] fn inner( dispatcher: Arc, future: AnyLocalFuture, + location: &'static core::panic::Location<'static>, ) -> Task { - let (runnable, task) = spawn_local_with_source_location(future, move |runnable| { - dispatcher.dispatch_on_main_thread(runnable) - }); + let (runnable, task) = spawn_local_with_source_location( + future, + move |runnable| dispatcher.dispatch_on_main_thread(RunnableVariant::Meta(runnable)), + RunnableMeta { location }, + ); runnable.schedule(); Task(TaskState::Spawned(task)) } - inner::(dispatcher, Box::pin(future)) + inner::(dispatcher, Box::pin(future), location) } } @@ -509,14 +525,16 @@ impl ForegroundExecutor { /// Copy-modified from: /// #[track_caller] -fn spawn_local_with_source_location( +fn spawn_local_with_source_location( future: Fut, schedule: S, -) -> (Runnable<()>, async_task::Task) + metadata: M, +) -> (Runnable, async_task::Task) where Fut: Future + 'static, Fut::Output: 'static, - S: async_task::Schedule<()> + Send + Sync + 'static, + S: async_task::Schedule + Send + Sync + 'static, + M: 'static, { #[inline] fn thread_id() -> ThreadId { @@ -564,7 +582,11 @@ where location: Location::caller(), }; - unsafe { async_task::spawn_unchecked(future, schedule) } + unsafe { + async_task::Builder::new() + .metadata(metadata) + .spawn_unchecked(move |_| future, schedule) + } } /// Scope manages a set of tasks that are enqueued and waited on together. See [`BackgroundExecutor::scoped`]. @@ -594,6 +616,7 @@ impl<'a> Scope<'a> { } /// Spawn a future into this scope. 
+ #[track_caller] pub fn spawn(&mut self, f: F) where F: Future + Send + 'a, diff --git a/crates/gpui/src/gpui.rs b/crates/gpui/src/gpui.rs index 098c0780b2cc52c4dbfff4f65c8b59277fd9fa84..bc70362047d7826519f6f7c734b7c5a84281b31f 100644 --- a/crates/gpui/src/gpui.rs +++ b/crates/gpui/src/gpui.rs @@ -30,6 +30,7 @@ mod keymap; mod path_builder; mod platform; pub mod prelude; +mod profiler; mod scene; mod shared_string; mod shared_uri; @@ -87,6 +88,7 @@ use key_dispatch::*; pub use keymap::*; pub use path_builder::*; pub use platform::*; +pub use profiler::*; pub use refineable::*; pub use scene::*; pub use shared_string::*; diff --git a/crates/gpui/src/platform.rs b/crates/gpui/src/platform.rs index decdc547353f9290b710b337c7dd99cdae188918..04ae4480faf08015f6e4b6e62e7210b55997e3d4 100644 --- a/crates/gpui/src/platform.rs +++ b/crates/gpui/src/platform.rs @@ -40,8 +40,8 @@ use crate::{ DEFAULT_WINDOW_SIZE, DevicePixels, DispatchEventResult, Font, FontId, FontMetrics, FontRun, ForegroundExecutor, GlyphId, GpuSpecs, ImageSource, Keymap, LineLayout, Pixels, PlatformInput, Point, RenderGlyphParams, RenderImage, RenderImageParams, RenderSvgParams, Scene, ShapedGlyph, - ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task, TaskLabel, Window, - WindowControlArea, hash, point, px, size, + ShapedRun, SharedString, Size, SvgRenderer, SystemWindowTab, Task, TaskLabel, TaskTiming, + ThreadTaskTimings, Window, WindowControlArea, hash, point, px, size, }; use anyhow::Result; use async_task::Runnable; @@ -559,14 +559,32 @@ pub(crate) trait PlatformWindow: HasWindowHandle + HasDisplayHandle { } } +/// This type is public so that our test macro can generate and use it, but it should not +/// be considered part of our public API. +#[doc(hidden)] +#[derive(Debug)] +pub struct RunnableMeta { + /// Location of the runnable + pub location: &'static core::panic::Location<'static>, +} + +#[doc(hidden)] +pub enum RunnableVariant { + Meta(Runnable), + Compat(Runnable), +} + /// This type is public so that our test macro can generate and use it, but it should not /// be considered part of our public API. 
#[doc(hidden)] pub trait PlatformDispatcher: Send + Sync { + fn get_all_timings(&self) -> Vec; + fn get_current_thread_timings(&self) -> Vec; fn is_main_thread(&self) -> bool; - fn dispatch(&self, runnable: Runnable, label: Option); - fn dispatch_on_main_thread(&self, runnable: Runnable); - fn dispatch_after(&self, duration: Duration, runnable: Runnable); + fn dispatch(&self, runnable: RunnableVariant, label: Option); + fn dispatch_on_main_thread(&self, runnable: RunnableVariant); + fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant); + fn now(&self) -> Instant { Instant::now() } diff --git a/crates/gpui/src/platform/linux/dispatcher.rs b/crates/gpui/src/platform/linux/dispatcher.rs index 9ca1f76fd6996ffbd376d8254cbbe63a1c8d8fd0..c300109ffe32b3537acbbca47b4c39674cad2fd1 100644 --- a/crates/gpui/src/platform/linux/dispatcher.rs +++ b/crates/gpui/src/platform/linux/dispatcher.rs @@ -1,5 +1,7 @@ -use crate::{PlatformDispatcher, TaskLabel}; -use async_task::Runnable; +use crate::{ + GLOBAL_THREAD_TIMINGS, PlatformDispatcher, RunnableVariant, THREAD_TIMINGS, TaskLabel, + TaskTiming, ThreadTaskTimings, +}; use calloop::{ EventLoop, channel::{self, Sender}, @@ -13,20 +15,20 @@ use util::ResultExt; struct TimerAfter { duration: Duration, - runnable: Runnable, + runnable: RunnableVariant, } pub(crate) struct LinuxDispatcher { - main_sender: Sender, + main_sender: Sender, timer_sender: Sender, - background_sender: flume::Sender, + background_sender: flume::Sender, _background_threads: Vec>, main_thread_id: thread::ThreadId, } impl LinuxDispatcher { - pub fn new(main_sender: Sender) -> Self { - let (background_sender, background_receiver) = flume::unbounded::(); + pub fn new(main_sender: Sender) -> Self { + let (background_sender, background_receiver) = flume::unbounded::(); let thread_count = std::thread::available_parallelism() .map(|i| i.get()) .unwrap_or(1); @@ -40,7 +42,36 @@ impl LinuxDispatcher { for runnable in receiver { let start = Instant::now(); - runnable.run(); + let mut location = match runnable { + RunnableVariant::Meta(runnable) => { + let location = runnable.metadata().location; + let timing = TaskTiming { + location, + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + timing + } + RunnableVariant::Compat(runnable) => { + let location = core::panic::Location::caller(); + let timing = TaskTiming { + location, + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + timing + } + }; + + let end = Instant::now(); + location.end = Some(end); + Self::add_task_timing(location); log::trace!( "background thread {}: ran runnable. 
took: {:?}", @@ -72,7 +103,36 @@ impl LinuxDispatcher { calloop::timer::Timer::from_duration(timer.duration), move |_, _, _| { if let Some(runnable) = runnable.take() { - runnable.run(); + let start = Instant::now(); + let mut timing = match runnable { + RunnableVariant::Meta(runnable) => { + let location = runnable.metadata().location; + let timing = TaskTiming { + location, + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + timing + } + RunnableVariant::Compat(runnable) => { + let timing = TaskTiming { + location: core::panic::Location::caller(), + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + timing + } + }; + let end = Instant::now(); + + timing.end = Some(end); + Self::add_task_timing(timing); } TimeoutAction::Drop }, @@ -96,18 +156,53 @@ impl LinuxDispatcher { main_thread_id: thread::current().id(), } } + + pub(crate) fn add_task_timing(timing: TaskTiming) { + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + + if let Some(last_timing) = timings.iter_mut().rev().next() { + if last_timing.location == timing.location { + last_timing.end = timing.end; + return; + } + } + + timings.push_back(timing); + }); + } } impl PlatformDispatcher for LinuxDispatcher { + fn get_all_timings(&self) -> Vec { + let global_timings = GLOBAL_THREAD_TIMINGS.lock(); + ThreadTaskTimings::convert(&global_timings) + } + + fn get_current_thread_timings(&self) -> Vec { + THREAD_TIMINGS.with(|timings| { + let timings = timings.lock(); + let timings = &timings.timings; + + let mut vec = Vec::with_capacity(timings.len()); + + let (s1, s2) = timings.as_slices(); + vec.extend_from_slice(s1); + vec.extend_from_slice(s2); + vec + }) + } + fn is_main_thread(&self) -> bool { thread::current().id() == self.main_thread_id } - fn dispatch(&self, runnable: Runnable, _: Option) { + fn dispatch(&self, runnable: RunnableVariant, _: Option) { self.background_sender.send(runnable).unwrap(); } - fn dispatch_on_main_thread(&self, runnable: Runnable) { + fn dispatch_on_main_thread(&self, runnable: RunnableVariant) { self.main_sender.send(runnable).unwrap_or_else(|runnable| { // NOTE: Runnable may wrap a Future that is !Send. 
// @@ -121,7 +216,7 @@ impl PlatformDispatcher for LinuxDispatcher { }); } - fn dispatch_after(&self, duration: Duration, runnable: Runnable) { + fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant) { self.timer_sender .send(TimerAfter { duration, runnable }) .ok(); diff --git a/crates/gpui/src/platform/linux/headless/client.rs b/crates/gpui/src/platform/linux/headless/client.rs index da54db371033bac53e2ac3324306fa86eb57fb57..33f1bb17e3230d0b9c9b2c53bcd0603a9cc7f22c 100644 --- a/crates/gpui/src/platform/linux/headless/client.rs +++ b/crates/gpui/src/platform/linux/headless/client.rs @@ -31,7 +31,10 @@ impl HeadlessClient { handle .insert_source(main_receiver, |event, _, _: &mut HeadlessClient| { if let calloop::channel::Event::Msg(runnable) = event { - runnable.run(); + match runnable { + crate::RunnableVariant::Meta(runnable) => runnable.run(), + crate::RunnableVariant::Compat(runnable) => runnable.run(), + }; } }) .ok(); diff --git a/crates/gpui/src/platform/linux/platform.rs b/crates/gpui/src/platform/linux/platform.rs index 322f5d76110ee36e3cfdf26449bbec85c3d51af5..6c2d13d2e78f003a950e5c1dc135b503ae6d4087 100644 --- a/crates/gpui/src/platform/linux/platform.rs +++ b/crates/gpui/src/platform/linux/platform.rs @@ -15,7 +15,6 @@ use std::{ }; use anyhow::{Context as _, anyhow}; -use async_task::Runnable; use calloop::{LoopSignal, channel::Channel}; use futures::channel::oneshot; use util::ResultExt as _; @@ -26,7 +25,8 @@ use crate::{ Action, AnyWindowHandle, BackgroundExecutor, ClipboardItem, CursorStyle, DisplayId, ForegroundExecutor, Keymap, LinuxDispatcher, Menu, MenuItem, OwnedMenu, PathPromptOptions, Pixels, Platform, PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, - PlatformTextSystem, PlatformWindow, Point, Result, Task, WindowAppearance, WindowParams, px, + PlatformTextSystem, PlatformWindow, Point, Result, RunnableVariant, Task, WindowAppearance, + WindowParams, px, }; #[cfg(any(feature = "wayland", feature = "x11"))] @@ -105,8 +105,8 @@ pub(crate) struct LinuxCommon { } impl LinuxCommon { - pub fn new(signal: LoopSignal) -> (Self, Channel) { - let (main_sender, main_receiver) = calloop::channel::channel::(); + pub fn new(signal: LoopSignal) -> (Self, Channel) { + let (main_sender, main_receiver) = calloop::channel::channel::(); #[cfg(any(feature = "wayland", feature = "x11"))] let text_system = Arc::new(crate::CosmicTextSystem::new()); diff --git a/crates/gpui/src/platform/linux/wayland/client.rs b/crates/gpui/src/platform/linux/wayland/client.rs index fd4d9fb2b31bfa04fe1ecc7d192db11f997d8d59..9a9ec213edd27d9ab7ac2e1437f408ac7d78f08e 100644 --- a/crates/gpui/src/platform/linux/wayland/client.rs +++ b/crates/gpui/src/platform/linux/wayland/client.rs @@ -71,7 +71,6 @@ use super::{ window::{ImeInput, WaylandWindowStatePtr}, }; -use crate::platform::{PlatformWindow, blade::BladeContext}; use crate::{ AnyWindowHandle, Bounds, Capslock, CursorStyle, DOUBLE_CLICK_INTERVAL, DevicePixels, DisplayId, FileDropEvent, ForegroundExecutor, KeyDownEvent, KeyUpEvent, Keystroke, LinuxCommon, @@ -80,6 +79,10 @@ use crate::{ PlatformInput, PlatformKeyboardLayout, Point, SCROLL_LINES, ScrollDelta, ScrollWheelEvent, Size, TouchPhase, WindowParams, point, px, size, }; +use crate::{ + LinuxDispatcher, RunnableVariant, TaskTiming, + platform::{PlatformWindow, blade::BladeContext}, +}; use crate::{ SharedString, platform::linux::{ @@ -491,7 +494,37 @@ impl WaylandClient { move |event, _, _: &mut WaylandClientStatePtr| { if let calloop::channel::Event::Msg(runnable) = 
event { handle.insert_idle(|_| { - runnable.run(); + let start = Instant::now(); + let mut timing = match runnable { + RunnableVariant::Meta(runnable) => { + let location = runnable.metadata().location; + let timing = TaskTiming { + location, + start, + end: None, + }; + LinuxDispatcher::add_task_timing(timing); + + runnable.run(); + timing + } + RunnableVariant::Compat(runnable) => { + let location = core::panic::Location::caller(); + let timing = TaskTiming { + location, + start, + end: None, + }; + LinuxDispatcher::add_task_timing(timing); + + runnable.run(); + timing + } + }; + + let end = Instant::now(); + timing.end = Some(end); + LinuxDispatcher::add_task_timing(timing); }); } } diff --git a/crates/gpui/src/platform/linux/x11/client.rs b/crates/gpui/src/platform/linux/x11/client.rs index 5b0be84b2fc08d220800271a402496e5ba487b15..32f50cdf5d9d9439909c7ecaf35df0d75a9c9eae 100644 --- a/crates/gpui/src/platform/linux/x11/client.rs +++ b/crates/gpui/src/platform/linux/x11/client.rs @@ -1,4 +1,4 @@ -use crate::{Capslock, xcb_flush}; +use crate::{Capslock, LinuxDispatcher, RunnableVariant, TaskTiming, xcb_flush}; use anyhow::{Context as _, anyhow}; use ashpd::WindowIdentifier; use calloop::{ @@ -313,7 +313,37 @@ impl X11Client { // events have higher priority and runnables are only worked off after the event // callbacks. handle.insert_idle(|_| { - runnable.run(); + let start = Instant::now(); + let mut timing = match runnable { + RunnableVariant::Meta(runnable) => { + let location = runnable.metadata().location; + let timing = TaskTiming { + location, + start, + end: None, + }; + LinuxDispatcher::add_task_timing(timing); + + runnable.run(); + timing + } + RunnableVariant::Compat(runnable) => { + let location = core::panic::Location::caller(); + let timing = TaskTiming { + location, + start, + end: None, + }; + LinuxDispatcher::add_task_timing(timing); + + runnable.run(); + timing + } + }; + + let end = Instant::now(); + timing.end = Some(end); + LinuxDispatcher::add_task_timing(timing); }); } } diff --git a/crates/gpui/src/platform/mac/dispatcher.rs b/crates/gpui/src/platform/mac/dispatcher.rs index c72f791f850469287cf66021558032902982ccec..8a2f42234eea960669cb212853c437ec680a7fd7 100644 --- a/crates/gpui/src/platform/mac/dispatcher.rs +++ b/crates/gpui/src/platform/mac/dispatcher.rs @@ -2,7 +2,11 @@ #![allow(non_camel_case_types)] #![allow(non_snake_case)] -use crate::{PlatformDispatcher, TaskLabel}; +use crate::{ + GLOBAL_THREAD_TIMINGS, PlatformDispatcher, RunnableMeta, RunnableVariant, THREAD_TIMINGS, + TaskLabel, TaskTiming, ThreadTaskTimings, +}; + use async_task::Runnable; use objc::{ class, msg_send, @@ -12,7 +16,7 @@ use objc::{ use std::{ ffi::c_void, ptr::{NonNull, addr_of}, - time::Duration, + time::{Duration, Instant}, }; /// All items in the generated file are marked as pub, so we're gonna wrap it in a separate mod to prevent @@ -29,47 +33,155 @@ pub(crate) fn dispatch_get_main_queue() -> dispatch_queue_t { pub(crate) struct MacDispatcher; impl PlatformDispatcher for MacDispatcher { + fn get_all_timings(&self) -> Vec { + let global_timings = GLOBAL_THREAD_TIMINGS.lock(); + ThreadTaskTimings::convert(&global_timings) + } + + fn get_current_thread_timings(&self) -> Vec { + THREAD_TIMINGS.with(|timings| { + let timings = &timings.lock().timings; + + let mut vec = Vec::with_capacity(timings.len()); + + let (s1, s2) = timings.as_slices(); + vec.extend_from_slice(s1); + vec.extend_from_slice(s2); + vec + }) + } + fn is_main_thread(&self) -> bool { let is_main_thread: BOOL = unsafe 
{ msg_send![class!(NSThread), isMainThread] }; is_main_thread == YES } - fn dispatch(&self, runnable: Runnable, _: Option) { + fn dispatch(&self, runnable: RunnableVariant, _: Option) { + let (context, trampoline) = match runnable { + RunnableVariant::Meta(runnable) => ( + runnable.into_raw().as_ptr() as *mut c_void, + Some(trampoline as unsafe extern "C" fn(*mut c_void)), + ), + RunnableVariant::Compat(runnable) => ( + runnable.into_raw().as_ptr() as *mut c_void, + Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)), + ), + }; unsafe { dispatch_async_f( dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH.try_into().unwrap(), 0), - runnable.into_raw().as_ptr() as *mut c_void, - Some(trampoline), + context, + trampoline, ); } } - fn dispatch_on_main_thread(&self, runnable: Runnable) { - unsafe { - dispatch_async_f( - dispatch_get_main_queue(), + fn dispatch_on_main_thread(&self, runnable: RunnableVariant) { + let (context, trampoline) = match runnable { + RunnableVariant::Meta(runnable) => ( runnable.into_raw().as_ptr() as *mut c_void, - Some(trampoline), - ); + Some(trampoline as unsafe extern "C" fn(*mut c_void)), + ), + RunnableVariant::Compat(runnable) => ( + runnable.into_raw().as_ptr() as *mut c_void, + Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)), + ), + }; + unsafe { + dispatch_async_f(dispatch_get_main_queue(), context, trampoline); } } - fn dispatch_after(&self, duration: Duration, runnable: Runnable) { + fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant) { + let (context, trampoline) = match runnable { + RunnableVariant::Meta(runnable) => ( + runnable.into_raw().as_ptr() as *mut c_void, + Some(trampoline as unsafe extern "C" fn(*mut c_void)), + ), + RunnableVariant::Compat(runnable) => ( + runnable.into_raw().as_ptr() as *mut c_void, + Some(trampoline_compat as unsafe extern "C" fn(*mut c_void)), + ), + }; unsafe { let queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH.try_into().unwrap(), 0); let when = dispatch_time(DISPATCH_TIME_NOW as u64, duration.as_nanos() as i64); - dispatch_after_f( - when, - queue, - runnable.into_raw().as_ptr() as *mut c_void, - Some(trampoline), - ); + dispatch_after_f(when, queue, context, trampoline); } } } extern "C" fn trampoline(runnable: *mut c_void) { + let task = + unsafe { Runnable::::from_raw(NonNull::new_unchecked(runnable as *mut ())) }; + + let location = task.metadata().location; + + let start = Instant::now(); + let timing = TaskTiming { + location, + start, + end: None, + }; + + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + if let Some(last_timing) = timings.iter_mut().rev().next() { + if last_timing.location == timing.location { + return; + } + } + + timings.push_back(timing); + }); + + task.run(); + let end = Instant::now(); + + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + let Some(last_timing) = timings.iter_mut().rev().next() else { + return; + }; + last_timing.end = Some(end); + }); +} + +extern "C" fn trampoline_compat(runnable: *mut c_void) { let task = unsafe { Runnable::<()>::from_raw(NonNull::new_unchecked(runnable as *mut ())) }; + + let location = core::panic::Location::caller(); + + let start = Instant::now(); + let timing = TaskTiming { + location, + start, + end: None, + }; + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + if let Some(last_timing) = 
timings.iter_mut().rev().next() { + if last_timing.location == timing.location { + return; + } + } + + timings.push_back(timing); + }); + task.run(); + let end = Instant::now(); + + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + let Some(last_timing) = timings.iter_mut().rev().next() else { + return; + }; + last_timing.end = Some(end); + }); } diff --git a/crates/gpui/src/platform/test/dispatcher.rs b/crates/gpui/src/platform/test/dispatcher.rs index 017c29bfb558f77874a9729a52b518d9d41fb256..fc01d112d9d4198d0f06c370e5feb1193b29c677 100644 --- a/crates/gpui/src/platform/test/dispatcher.rs +++ b/crates/gpui/src/platform/test/dispatcher.rs @@ -1,5 +1,4 @@ -use crate::{PlatformDispatcher, TaskLabel}; -use async_task::Runnable; +use crate::{PlatformDispatcher, RunnableVariant, TaskLabel}; use backtrace::Backtrace; use collections::{HashMap, HashSet, VecDeque}; use parking::Unparker; @@ -26,10 +25,10 @@ pub struct TestDispatcher { struct TestDispatcherState { random: StdRng, - foreground: HashMap>, - background: Vec, - deprioritized_background: Vec, - delayed: Vec<(Duration, Runnable)>, + foreground: HashMap>, + background: Vec, + deprioritized_background: Vec, + delayed: Vec<(Duration, RunnableVariant)>, start_time: Instant, time: Duration, is_main_thread: bool, @@ -175,7 +174,13 @@ impl TestDispatcher { let was_main_thread = state.is_main_thread; state.is_main_thread = main_thread; drop(state); - runnable.run(); + + // todo(localcc): add timings to tests + match runnable { + RunnableVariant::Meta(runnable) => runnable.run(), + RunnableVariant::Compat(runnable) => runnable.run(), + }; + self.state.lock().is_main_thread = was_main_thread; true @@ -268,6 +273,14 @@ impl Clone for TestDispatcher { } impl PlatformDispatcher for TestDispatcher { + fn get_all_timings(&self) -> Vec { + Vec::new() + } + + fn get_current_thread_timings(&self) -> Vec { + Vec::new() + } + fn is_main_thread(&self) -> bool { self.state.lock().is_main_thread } @@ -277,7 +290,7 @@ impl PlatformDispatcher for TestDispatcher { state.start_time + state.time } - fn dispatch(&self, runnable: Runnable, label: Option) { + fn dispatch(&self, runnable: RunnableVariant, label: Option) { { let mut state = self.state.lock(); if label.is_some_and(|label| state.deprioritized_task_labels.contains(&label)) { @@ -289,7 +302,7 @@ impl PlatformDispatcher for TestDispatcher { self.unpark_last(); } - fn dispatch_on_main_thread(&self, runnable: Runnable) { + fn dispatch_on_main_thread(&self, runnable: RunnableVariant) { self.state .lock() .foreground @@ -299,7 +312,7 @@ impl PlatformDispatcher for TestDispatcher { self.unpark_last(); } - fn dispatch_after(&self, duration: std::time::Duration, runnable: Runnable) { + fn dispatch_after(&self, duration: std::time::Duration, runnable: RunnableVariant) { let mut state = self.state.lock(); let next_time = state.time + duration; let ix = match state.delayed.binary_search_by_key(&next_time, |e| e.0) { diff --git a/crates/gpui/src/platform/windows/dispatcher.rs b/crates/gpui/src/platform/windows/dispatcher.rs index f60d6bd884d7bfe6b313a7ca555067991172fe31..f543202e77c75acfd007d632b2023317dcba43b6 100644 --- a/crates/gpui/src/platform/windows/dispatcher.rs +++ b/crates/gpui/src/platform/windows/dispatcher.rs @@ -1,10 +1,9 @@ use std::{ sync::atomic::{AtomicBool, Ordering}, thread::{ThreadId, current}, - time::Duration, + time::{Duration, Instant}, }; -use async_task::Runnable; use flume::Sender; use util::ResultExt; use windows::{ @@ -18,12 +17,13 
@@ use windows::{ }; use crate::{ - HWND, PlatformDispatcher, SafeHwnd, TaskLabel, WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, + GLOBAL_THREAD_TIMINGS, HWND, PlatformDispatcher, RunnableVariant, SafeHwnd, THREAD_TIMINGS, + TaskLabel, TaskTiming, ThreadTaskTimings, WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, }; pub(crate) struct WindowsDispatcher { pub(crate) wake_posted: AtomicBool, - main_sender: Sender, + main_sender: Sender, main_thread_id: ThreadId, platform_window_handle: SafeHwnd, validation_number: usize, @@ -31,7 +31,7 @@ pub(crate) struct WindowsDispatcher { impl WindowsDispatcher { pub(crate) fn new( - main_sender: Sender, + main_sender: Sender, platform_window_handle: HWND, validation_number: usize, ) -> Self { @@ -47,42 +47,115 @@ impl WindowsDispatcher { } } - fn dispatch_on_threadpool(&self, runnable: Runnable) { + fn dispatch_on_threadpool(&self, runnable: RunnableVariant) { let handler = { let mut task_wrapper = Some(runnable); WorkItemHandler::new(move |_| { - task_wrapper.take().unwrap().run(); + Self::execute_runnable(task_wrapper.take().unwrap()); Ok(()) }) }; ThreadPool::RunWithPriorityAsync(&handler, WorkItemPriority::High).log_err(); } - fn dispatch_on_threadpool_after(&self, runnable: Runnable, duration: Duration) { + fn dispatch_on_threadpool_after(&self, runnable: RunnableVariant, duration: Duration) { let handler = { let mut task_wrapper = Some(runnable); TimerElapsedHandler::new(move |_| { - task_wrapper.take().unwrap().run(); + Self::execute_runnable(task_wrapper.take().unwrap()); Ok(()) }) }; ThreadPoolTimer::CreateTimer(&handler, duration.into()).log_err(); } + + #[inline(always)] + pub(crate) fn execute_runnable(runnable: RunnableVariant) { + let start = Instant::now(); + + let mut timing = match runnable { + RunnableVariant::Meta(runnable) => { + let location = runnable.metadata().location; + let timing = TaskTiming { + location, + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + + timing + } + RunnableVariant::Compat(runnable) => { + let timing = TaskTiming { + location: core::panic::Location::caller(), + start, + end: None, + }; + Self::add_task_timing(timing); + + runnable.run(); + + timing + } + }; + + let end = Instant::now(); + timing.end = Some(end); + + Self::add_task_timing(timing); + } + + pub(crate) fn add_task_timing(timing: TaskTiming) { + THREAD_TIMINGS.with(|timings| { + let mut timings = timings.lock(); + let timings = &mut timings.timings; + + if let Some(last_timing) = timings.iter_mut().rev().next() { + if last_timing.location == timing.location { + last_timing.end = timing.end; + return; + } + } + + timings.push_back(timing); + }); + } } impl PlatformDispatcher for WindowsDispatcher { + fn get_all_timings(&self) -> Vec { + let global_thread_timings = GLOBAL_THREAD_TIMINGS.lock(); + ThreadTaskTimings::convert(&global_thread_timings) + } + + fn get_current_thread_timings(&self) -> Vec { + THREAD_TIMINGS.with(|timings| { + let timings = timings.lock(); + let timings = &timings.timings; + + let mut vec = Vec::with_capacity(timings.len()); + + let (s1, s2) = timings.as_slices(); + vec.extend_from_slice(s1); + vec.extend_from_slice(s2); + vec + }) + } + fn is_main_thread(&self) -> bool { current().id() == self.main_thread_id } - fn dispatch(&self, runnable: Runnable, label: Option) { + fn dispatch(&self, runnable: RunnableVariant, label: Option) { self.dispatch_on_threadpool(runnable); if let Some(label) = label { log::debug!("TaskLabel: {label:?}"); } } - fn dispatch_on_main_thread(&self, runnable: Runnable) { + fn 
dispatch_on_main_thread(&self, runnable: RunnableVariant) { match self.main_sender.send(runnable) { Ok(_) => { if !self.wake_posted.swap(true, Ordering::AcqRel) { @@ -111,7 +184,7 @@ impl PlatformDispatcher for WindowsDispatcher { } } - fn dispatch_after(&self, duration: Duration, runnable: Runnable) { + fn dispatch_after(&self, duration: Duration, runnable: RunnableVariant) { self.dispatch_on_threadpool_after(runnable, duration); } } diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index cc17f19bcfac86a6f8ac31ec1059d76c24e79695..f80348fdc1f88aedc1231d6579c82af4c76f3c34 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -239,7 +239,7 @@ impl WindowsWindowInner { fn handle_timer_msg(&self, handle: HWND, wparam: WPARAM) -> Option { if wparam.0 == SIZE_MOVE_LOOP_TIMER_ID { for runnable in self.main_receiver.drain() { - runnable.run(); + WindowsDispatcher::execute_runnable(runnable); } self.handle_paint_msg(handle) } else { @@ -1142,8 +1142,10 @@ impl WindowsWindowInner { require_presentation: false, force_render, }); + self.state.borrow_mut().callbacks.request_frame = Some(request_frame); unsafe { ValidateRect(Some(handle), None).ok().log_err() }; + Some(0) } diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b1d0c80ffc997976be2ee1b557ca2ea86e46ee3d..d845c9520f736d06a6cee637328871af7e329241 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -8,7 +8,6 @@ use std::{ use ::util::{ResultExt, paths::SanitizedPath}; use anyhow::{Context as _, Result, anyhow}; -use async_task::Runnable; use futures::channel::oneshot::{self, Receiver}; use itertools::Itertools; use parking_lot::RwLock; @@ -46,7 +45,7 @@ struct WindowsPlatformInner { raw_window_handles: std::sync::Weak>>, // The below members will never change throughout the entire lifecycle of the app. validation_number: usize, - main_receiver: flume::Receiver, + main_receiver: flume::Receiver, dispatcher: Arc, } @@ -93,7 +92,7 @@ impl WindowsPlatform { OleInitialize(None).context("unable to initialize Windows OLE")?; } let directx_devices = DirectXDevices::new().context("Creating DirectX devices")?; - let (main_sender, main_receiver) = flume::unbounded::(); + let (main_sender, main_receiver) = flume::unbounded::(); let validation_number = if usize::BITS == 64 { rand::random::() as usize } else { @@ -794,7 +793,7 @@ impl WindowsPlatformInner { fn run_foreground_task(&self) -> Option { loop { for runnable in self.main_receiver.drain() { - runnable.run(); + WindowsDispatcher::execute_runnable(runnable); } // Someone could enqueue a Runnable here. The flag is still true, so they will not PostMessage. 
@@ -805,7 +804,8 @@ impl WindowsPlatformInner { match self.main_receiver.try_recv() { Ok(runnable) => { let _ = dispatcher.wake_posted.swap(true, Ordering::AcqRel); - runnable.run(); + + WindowsDispatcher::execute_runnable(runnable); continue; } _ => { @@ -873,7 +873,7 @@ pub(crate) struct WindowCreationInfo { pub(crate) windows_version: WindowsVersion, pub(crate) drop_target_helper: IDropTargetHelper, pub(crate) validation_number: usize, - pub(crate) main_receiver: flume::Receiver, + pub(crate) main_receiver: flume::Receiver, pub(crate) platform_window_handle: HWND, pub(crate) disable_direct_composition: bool, pub(crate) directx_devices: DirectXDevices, @@ -883,8 +883,8 @@ struct PlatformWindowCreateContext { inner: Option>>, raw_window_handles: std::sync::Weak>>, validation_number: usize, - main_sender: Option>, - main_receiver: Option>, + main_sender: Option>, + main_receiver: Option>, directx_devices: Option, dispatcher: Option>, } diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index 4b89fcffb39d9bfbc0734977cec16a00984f5c9a..241293f0caa6c13de350c8b2fc44cb9d5abd82ec 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -12,7 +12,6 @@ use std::{ use ::util::ResultExt; use anyhow::{Context as _, Result}; -use async_task::Runnable; use futures::channel::oneshot::{self, Receiver}; use raw_window_handle as rwh; use smallvec::SmallVec; @@ -70,7 +69,7 @@ pub(crate) struct WindowsWindowInner { pub(crate) executor: ForegroundExecutor, pub(crate) windows_version: WindowsVersion, pub(crate) validation_number: usize, - pub(crate) main_receiver: flume::Receiver, + pub(crate) main_receiver: flume::Receiver, pub(crate) platform_window_handle: HWND, } @@ -357,7 +356,7 @@ struct WindowCreateContext { windows_version: WindowsVersion, drop_target_helper: IDropTargetHelper, validation_number: usize, - main_receiver: flume::Receiver, + main_receiver: flume::Receiver, platform_window_handle: HWND, appearance: WindowAppearance, disable_direct_composition: bool, diff --git a/crates/gpui/src/profiler.rs b/crates/gpui/src/profiler.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e3f00c412cd19c8269497ff292ce9dbdd785fbe --- /dev/null +++ b/crates/gpui/src/profiler.rs @@ -0,0 +1,218 @@ +use std::{ + cell::LazyCell, + hash::Hasher, + hash::{DefaultHasher, Hash}, + sync::Arc, + thread::ThreadId, + time::Instant, +}; + +use serde::{Deserialize, Serialize}; + +#[doc(hidden)] +#[derive(Debug, Copy, Clone)] +pub struct TaskTiming { + pub location: &'static core::panic::Location<'static>, + pub start: Instant, + pub end: Option, +} + +#[doc(hidden)] +#[derive(Debug, Clone)] +pub struct ThreadTaskTimings { + pub thread_name: Option, + pub thread_id: ThreadId, + pub timings: Vec, +} + +impl ThreadTaskTimings { + pub(crate) fn convert(timings: &[GlobalThreadTimings]) -> Vec { + timings + .iter() + .filter_map(|t| match t.timings.upgrade() { + Some(timings) => Some((t.thread_id, timings)), + _ => None, + }) + .map(|(thread_id, timings)| { + let timings = timings.lock(); + let thread_name = timings.thread_name.clone(); + let timings = &timings.timings; + + let mut vec = Vec::with_capacity(timings.len()); + + let (s1, s2) = timings.as_slices(); + vec.extend_from_slice(s1); + vec.extend_from_slice(s2); + + ThreadTaskTimings { + thread_name, + thread_id, + timings: vec, + } + }) + .collect() + } +} + +/// Serializable variant of [`core::panic::Location`] +#[derive(Debug, Copy, Clone, Serialize, 
Deserialize)] +pub struct SerializedLocation<'a> { + /// Name of the source file + pub file: &'a str, + /// Line in the source file + pub line: u32, + /// Column in the source file + pub column: u32, +} + +impl<'a> From<&'a core::panic::Location<'a>> for SerializedLocation<'a> { + fn from(value: &'a core::panic::Location<'a>) -> Self { + SerializedLocation { + file: value.file(), + line: value.line(), + column: value.column(), + } + } +} + +/// Serializable variant of [`TaskTiming`] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializedTaskTiming<'a> { + /// Location of the timing + #[serde(borrow)] + pub location: SerializedLocation<'a>, + /// Time at which the measurement was reported in nanoseconds + pub start: u128, + /// Duration of the measurement in nanoseconds + pub duration: u128, +} + +impl<'a> SerializedTaskTiming<'a> { + /// Convert an array of [`TaskTiming`] into their serializable format + /// + /// # Params + /// + /// `anchor` - [`Instant`] that should be earlier than all timings to use as base anchor + pub fn convert(anchor: Instant, timings: &[TaskTiming]) -> Vec> { + let serialized = timings + .iter() + .map(|timing| { + let start = timing.start.duration_since(anchor).as_nanos(); + let duration = timing + .end + .unwrap_or_else(|| Instant::now()) + .duration_since(timing.start) + .as_nanos(); + SerializedTaskTiming { + location: timing.location.into(), + start, + duration, + } + }) + .collect::>(); + + serialized + } +} + +/// Serializable variant of [`ThreadTaskTimings`] +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SerializedThreadTaskTimings<'a> { + /// Thread name + pub thread_name: Option, + /// Hash of the thread id + pub thread_id: u64, + /// Timing records for this thread + #[serde(borrow)] + pub timings: Vec>, +} + +impl<'a> SerializedThreadTaskTimings<'a> { + /// Convert [`ThreadTaskTimings`] into their serializable format + /// + /// # Params + /// + /// `anchor` - [`Instant`] that should be earlier than all timings to use as base anchor + pub fn convert( + anchor: Instant, + timings: ThreadTaskTimings, + ) -> SerializedThreadTaskTimings<'static> { + let serialized_timings = SerializedTaskTiming::convert(anchor, &timings.timings); + + let mut hasher = DefaultHasher::new(); + timings.thread_id.hash(&mut hasher); + let thread_id = hasher.finish(); + + SerializedThreadTaskTimings { + thread_name: timings.thread_name, + thread_id, + timings: serialized_timings, + } + } +} + +// Allow 20mb of task timing entries +const MAX_TASK_TIMINGS: usize = (20 * 1024 * 1024) / core::mem::size_of::(); + +pub(crate) type TaskTimings = circular_buffer::CircularBuffer; +pub(crate) type GuardedTaskTimings = spin::Mutex; + +pub(crate) struct GlobalThreadTimings { + pub thread_id: ThreadId, + pub timings: std::sync::Weak, +} + +pub(crate) static GLOBAL_THREAD_TIMINGS: spin::Mutex> = + spin::Mutex::new(Vec::new()); + +thread_local! 
{ + pub(crate) static THREAD_TIMINGS: LazyCell> = LazyCell::new(|| { + let current_thread = std::thread::current(); + let thread_name = current_thread.name(); + let thread_id = current_thread.id(); + let timings = ThreadTimings::new(thread_name.map(|e| e.to_string()), thread_id); + let timings = Arc::new(spin::Mutex::new(timings)); + + { + let timings = Arc::downgrade(&timings); + let global_timings = GlobalThreadTimings { + thread_id: std::thread::current().id(), + timings, + }; + GLOBAL_THREAD_TIMINGS.lock().push(global_timings); + } + + timings + }); +} + +pub(crate) struct ThreadTimings { + pub thread_name: Option, + pub thread_id: ThreadId, + pub timings: Box, +} + +impl ThreadTimings { + pub(crate) fn new(thread_name: Option, thread_id: ThreadId) -> Self { + ThreadTimings { + thread_name, + thread_id, + timings: TaskTimings::boxed(), + } + } +} + +impl Drop for ThreadTimings { + fn drop(&mut self) { + let mut thread_timings = GLOBAL_THREAD_TIMINGS.lock(); + + let Some((index, _)) = thread_timings + .iter() + .enumerate() + .find(|(_, t)| t.thread_id == self.thread_id) + else { + return; + }; + thread_timings.swap_remove(index); + } +} diff --git a/crates/miniprofiler_ui/Cargo.toml b/crates/miniprofiler_ui/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..bb508a188e91d2169cca61845e21905aae2c97e3 --- /dev/null +++ b/crates/miniprofiler_ui/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "miniprofiler_ui" +version = "0.1.0" +publish.workspace = true +edition.workspace = true +license = "GPL-3.0-or-later" + +[lints] +workspace = true + +[lib] +path = "src/miniprofiler_ui.rs" + +[dependencies] +gpui.workspace = true +zed_actions.workspace = true +workspace.workspace = true +util.workspace = true +serde_json.workspace = true +smol.workspace = true + +[dev-dependencies] +gpui = { workspace = true, features = ["test-support"] } diff --git a/crates/miniprofiler_ui/LICENSE-GPL b/crates/miniprofiler_ui/LICENSE-GPL new file mode 120000 index 0000000000000000000000000000000000000000..89e542f750cd3860a0598eff0dc34b56d7336dc4 --- /dev/null +++ b/crates/miniprofiler_ui/LICENSE-GPL @@ -0,0 +1 @@ +../../LICENSE-GPL \ No newline at end of file diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs new file mode 100644 index 0000000000000000000000000000000000000000..b3dab02058651177fbcbc53453d8f451dcdcf8a3 --- /dev/null +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -0,0 +1,393 @@ +use std::{ + ops::Range, + path::PathBuf, + time::{Duration, Instant}, +}; + +use gpui::{ + App, AppContext, Context, Entity, Hsla, InteractiveElement, IntoElement, ParentElement, Render, + ScrollHandle, SerializedTaskTiming, StatefulInteractiveElement, Styled, Task, TaskTiming, + TitlebarOptions, WindowBounds, WindowHandle, WindowOptions, div, prelude::FluentBuilder, px, + relative, size, +}; +use util::ResultExt; +use workspace::{ + Workspace, + ui::{ + ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ToggleState, + WithScrollbar, h_flex, v_flex, + }, +}; +use zed_actions::OpenPerformanceProfiler; + +pub fn init(startup_time: Instant, cx: &mut App) { + cx.observe_new(move |workspace: &mut workspace::Workspace, _, _| { + workspace.register_action(move |workspace, _: &OpenPerformanceProfiler, window, cx| { + let window_handle = window + .window_handle() + .downcast::() + .expect("Workspaces are root Windows"); + open_performance_profiler(startup_time, workspace, window_handle, cx); + }); + }) + .detach(); +} + +fn 
open_performance_profiler( + startup_time: Instant, + _workspace: &mut workspace::Workspace, + workspace_handle: WindowHandle, + cx: &mut App, +) { + let existing_window = cx + .windows() + .into_iter() + .find_map(|window| window.downcast::()); + + if let Some(existing_window) = existing_window { + existing_window + .update(cx, |profiler_window, window, _cx| { + profiler_window.workspace = Some(workspace_handle); + window.activate_window(); + }) + .log_err(); + return; + } + + let default_bounds = size(px(1280.), px(720.)); // 16:9 + + cx.open_window( + WindowOptions { + titlebar: Some(TitlebarOptions { + title: Some("Profiler Window".into()), + appears_transparent: false, + traffic_light_position: None, + }), + focus: true, + show: true, + is_movable: true, + kind: gpui::WindowKind::Normal, + window_background: cx.theme().window_background_appearance(), + window_decorations: None, + window_min_size: Some(default_bounds), + window_bounds: Some(WindowBounds::centered(default_bounds, cx)), + ..Default::default() + }, + |_window, cx| ProfilerWindow::new(startup_time, Some(workspace_handle), cx), + ) + .log_err(); +} + +enum DataMode { + Realtime(Option>), + Snapshot(Vec), +} + +struct TimingBar { + location: &'static core::panic::Location<'static>, + start: Instant, + end: Instant, + color: Hsla, +} + +pub struct ProfilerWindow { + startup_time: Instant, + data: DataMode, + include_self_timings: ToggleState, + autoscroll: bool, + scroll_handle: ScrollHandle, + workspace: Option>, + _refresh: Option>, +} + +impl ProfilerWindow { + pub fn new( + startup_time: Instant, + workspace_handle: Option>, + cx: &mut App, + ) -> Entity { + let entity = cx.new(|cx| ProfilerWindow { + startup_time, + data: DataMode::Realtime(None), + include_self_timings: ToggleState::Unselected, + autoscroll: true, + scroll_handle: ScrollHandle::new(), + workspace: workspace_handle, + _refresh: Some(Self::begin_listen(cx)), + }); + + entity + } + + fn begin_listen(cx: &mut Context) -> Task<()> { + cx.spawn(async move |this, cx| { + loop { + let data = cx + .foreground_executor() + .dispatcher + .get_current_thread_timings(); + + this.update(cx, |this: &mut ProfilerWindow, cx| { + let scroll_offset = this.scroll_handle.offset(); + let max_offset = this.scroll_handle.max_offset(); + this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); + + this.data = DataMode::Realtime(Some(data)); + + if this.autoscroll { + this.scroll_handle.scroll_to_bottom(); + } + + cx.notify(); + }) + .ok(); + + // yield to the executor + cx.background_executor() + .timer(Duration::from_micros(1)) + .await; + } + }) + } + + fn get_timings(&self) -> Option<&Vec> { + match &self.data { + DataMode::Realtime(data) => data.as_ref(), + DataMode::Snapshot(data) => Some(data), + } + } + + fn render_timing( + &self, + value_range: Range, + item: TimingBar, + cx: &App, + ) -> impl IntoElement { + let time_ms = item.end.duration_since(item.start).as_secs_f32() * 1000f32; + + let remap = value_range + .end + .duration_since(value_range.start) + .as_secs_f32() + * 1000f32; + + let start = (item.start.duration_since(value_range.start).as_secs_f32() * 1000f32) / remap; + let end = (item.end.duration_since(value_range.start).as_secs_f32() * 1000f32) / remap; + + let bar_width = end - start.abs(); + + let location = item + .location + .file() + .rsplit_once("/") + .unwrap_or(("", item.location.file())) + .1; + let location = location.rsplit_once("\\").unwrap_or(("", location)).1; + + let label = format!( + "{}:{}:{}", + location, + 
item.location.line(), + item.location.column() + ); + + h_flex() + .gap_2() + .w_full() + .h(px(32.0)) + .child( + div() + .w(px(200.0)) + .flex_shrink_0() + .overflow_hidden() + .child(div().text_ellipsis().child(label)), + ) + .child( + div() + .flex_1() + .h(px(24.0)) + .bg(cx.theme().colors().background) + .rounded_md() + .p(px(2.0)) + .relative() + .child( + div() + .absolute() + .h_full() + .rounded_sm() + .bg(item.color) + .left(relative(start.max(0f32))) + .w(relative(bar_width)), + ), + ) + .child( + div() + .min_w(px(60.0)) + .flex_shrink_0() + .text_right() + .child(format!("{:.1}ms", time_ms)), + ) + } +} + +impl Render for ProfilerWindow { + fn render( + &mut self, + window: &mut gpui::Window, + cx: &mut gpui::Context, + ) -> impl gpui::IntoElement { + v_flex() + .id("profiler") + .w_full() + .h_full() + .gap_2() + .bg(cx.theme().colors().surface_background) + .text_color(cx.theme().colors().text) + .child( + h_flex() + .w_full() + .justify_between() + .child( + h_flex() + .gap_2() + .child( + Button::new( + "switch-mode", + match self.data { + DataMode::Snapshot { .. } => "Resume", + DataMode::Realtime(_) => "Pause", + }, + ) + .style(ButtonStyle::Filled) + .on_click(cx.listener( + |this, _, _window, cx| { + match &this.data { + DataMode::Realtime(Some(data)) => { + this._refresh = None; + this.data = DataMode::Snapshot(data.clone()); + } + DataMode::Snapshot { .. } => { + this._refresh = Some(Self::begin_listen(cx)); + this.data = DataMode::Realtime(None); + } + _ => {} + }; + cx.notify(); + }, + )), + ) + .child( + Button::new("export-data", "Save") + .style(ButtonStyle::Filled) + .on_click(cx.listener(|this, _, _window, cx| { + let Some(workspace) = this.workspace else { + return; + }; + + let Some(data) = this.get_timings() else { + return; + }; + let timings = + SerializedTaskTiming::convert(this.startup_time, &data); + + let active_path = workspace + .read_with(cx, |workspace, cx| { + workspace.most_recent_active_path(cx) + }) + .log_err() + .flatten() + .and_then(|p| p.parent().map(|p| p.to_owned())) + .unwrap_or_else(|| PathBuf::default()); + + let path = cx.prompt_for_new_path( + &active_path, + Some("performance_profile.miniprof"), + ); + + cx.background_spawn(async move { + let path = path.await; + let path = + path.log_err().and_then(|p| p.log_err()).flatten(); + + let Some(path) = path else { + return; + }; + + let Some(timings) = + serde_json::to_string(&timings).log_err() + else { + return; + }; + + smol::fs::write(path, &timings).await.log_err(); + }) + .detach(); + })), + ), + ) + .child( + Checkbox::new("include-self", self.include_self_timings) + .label("Include profiler timings") + .on_click(cx.listener(|this, checked, _window, cx| { + this.include_self_timings = *checked; + cx.notify(); + })), + ), + ) + .when_some(self.get_timings(), |div, e| { + if e.len() == 0 { + return div; + } + + let min = e[0].start; + let max = e[e.len() - 1].end.unwrap_or_else(|| Instant::now()); + div.child( + v_flex() + .id("timings.bars") + .overflow_scroll() + .w_full() + .h_full() + .gap_2() + .track_scroll(&self.scroll_handle) + .on_scroll_wheel(cx.listener(|this, _, _, _cx| { + let scroll_offset = this.scroll_handle.offset(); + let max_offset = this.scroll_handle.max_offset(); + this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); + })) + .children( + e.iter() + .filter(|timing| { + timing + .end + .unwrap_or_else(|| Instant::now()) + .duration_since(timing.start) + .as_millis() + >= 1 + }) + .filter(|timing| { + if self.include_self_timings.selected() { + 
true + } else { + !timing.location.file().ends_with("miniprofiler_ui.rs") + } + }) + .enumerate() + .map(|(i, timing)| { + self.render_timing( + max.checked_sub(Duration::from_secs(10)).unwrap_or(min) + ..max, + TimingBar { + location: timing.location, + start: timing.start, + end: timing.end.unwrap_or_else(|| Instant::now()), + color: cx.theme().accents().color_for_index(i as u32), + }, + cx, + ) + }), + ), + ) + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + }) + } +} diff --git a/crates/paths/src/paths.rs b/crates/paths/src/paths.rs index a4c8f2424967cc3e0c4c2a76650811a7639ef5cb..7b5188b0f2b0db1c8b20876e6284209ce91fee6e 100644 --- a/crates/paths/src/paths.rs +++ b/crates/paths/src/paths.rs @@ -155,6 +155,12 @@ pub fn temp_dir() -> &'static PathBuf { }) } +/// Returns the path to the hang traces directory. +pub fn hang_traces_dir() -> &'static PathBuf { + static LOGS_DIR: OnceLock = OnceLock::new(); + LOGS_DIR.get_or_init(|| data_dir().join("hang_traces")) +} + /// Returns the path to the logs directory. pub fn logs_dir() -> &'static PathBuf { static LOGS_DIR: OnceLock = OnceLock::new(); diff --git a/crates/remote_server/src/unix.rs b/crates/remote_server/src/unix.rs index 729758c0d16c05d625b2836de66cdc4578f852d3..c51b93d4554aca86d13cefeb9dd4aaadacda399c 100644 --- a/crates/remote_server/src/unix.rs +++ b/crates/remote_server/src/unix.rs @@ -321,6 +321,7 @@ fn init_paths() -> anyhow::Result<()> { paths::languages_dir(), paths::logs_dir(), paths::temp_dir(), + paths::hang_traces_dir(), paths::remote_extensions_dir(), paths::remote_extensions_uploads_dir(), ] diff --git a/crates/repl/src/repl.rs b/crates/repl/src/repl.rs index be6491d9cdc61ec4b2a9f9fe950f8da00d0fb691..db21e198cc726df306bd94503615aa8633e0cbd6 100644 --- a/crates/repl/src/repl.rs +++ b/crates/repl/src/repl.rs @@ -12,7 +12,7 @@ mod session; use std::{sync::Arc, time::Duration}; use async_dispatcher::{Dispatcher, Runnable, set_dispatcher}; -use gpui::{App, PlatformDispatcher}; +use gpui::{App, PlatformDispatcher, RunnableVariant}; use project::Fs; pub use runtimelib::ExecutionState; @@ -45,11 +45,13 @@ fn zed_dispatcher(cx: &mut App) -> impl Dispatcher { // other crates in Zed. 
impl Dispatcher for ZedDispatcher { fn dispatch(&self, runnable: Runnable) { - self.dispatcher.dispatch(runnable, None) + self.dispatcher + .dispatch(RunnableVariant::Compat(runnable), None); } fn dispatch_after(&self, duration: Duration, runnable: Runnable) { - self.dispatcher.dispatch_after(duration, runnable); + self.dispatcher + .dispatch_after(duration, RunnableVariant::Compat(runnable)); } } diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 8b641dd8ce1f769dbc517f06e0a4e5824a61380c..722880ca1bad7552490dd67e6f34f9b0d509848f 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -97,6 +97,7 @@ markdown.workspace = true markdown_preview.workspace = true menu.workspace = true migrator.workspace = true +miniprofiler_ui.workspace = true mimalloc = { version = "0.1", optional = true } nc.workspace = true nix = { workspace = true, features = ["pthread", "signal"] } @@ -166,6 +167,7 @@ zeta.workspace = true zeta2.workspace = true zlog.workspace = true zlog_settings.workspace = true +chrono.workspace = true [target.'cfg(target_os = "windows")'.dependencies] windows.workspace = true diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 14e718ec2457b7d0f49c60cbc923cc7f215f9a15..a2e0c449e982594d7197da355ff4720c4da87163 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -37,7 +37,8 @@ use std::{ io::{self, IsTerminal}, path::{Path, PathBuf}, process, - sync::Arc, + sync::{Arc, OnceLock}, + time::Instant, }; use theme::{ActiveTheme, GlobalTheme, ThemeRegistry}; use util::{ResultExt, TryFutureExt, maybe}; @@ -162,7 +163,11 @@ fn fail_to_open_window(e: anyhow::Error, _cx: &mut App) { } } +pub static STARTUP_TIME: OnceLock = OnceLock::new(); + pub fn main() { + STARTUP_TIME.get_or_init(|| Instant::now()); + #[cfg(unix)] util::prevent_root_execution(); @@ -637,6 +642,7 @@ pub fn main() { zeta::init(cx); inspector_ui::init(app_state.clone(), cx); json_schema_store::init(cx); + miniprofiler_ui::init(*STARTUP_TIME.get().unwrap(), cx); cx.observe_global::({ let http = app_state.client.http_client(); @@ -1226,6 +1232,7 @@ fn init_paths() -> HashMap> { paths::database_dir(), paths::logs_dir(), paths::temp_dir(), + paths::hang_traces_dir(), ] .into_iter() .fold(HashMap::default(), |mut errors, path| { diff --git a/crates/zed/src/reliability.rs b/crates/zed/src/reliability.rs index dcabe93aab4ff35de44b77b87eb8495f537564fe..5a45c0c558389cd9d2aff02efaed5753bdb5d1f2 100644 --- a/crates/zed/src/reliability.rs +++ b/crates/zed/src/reliability.rs @@ -1,17 +1,22 @@ use anyhow::{Context as _, Result}; use client::{TelemetrySettings, telemetry::MINIDUMP_ENDPOINT}; use futures::AsyncReadExt; -use gpui::{App, AppContext as _}; +use gpui::{App, AppContext as _, SerializedThreadTaskTimings}; use http_client::{self, HttpClient, HttpClientWithUrl}; +use log::info; use project::Project; use proto::{CrashReport, GetCrashFilesResponse}; use reqwest::multipart::{Form, Part}; use settings::Settings; use smol::stream::StreamExt; -use std::{ffi::OsStr, fs, sync::Arc}; +use std::{ffi::OsStr, fs, sync::Arc, thread::ThreadId, time::Duration}; use util::ResultExt; +use crate::STARTUP_TIME; + pub fn init(http_client: Arc, installation_id: Option, cx: &mut App) { + monitor_hangs(cx); + #[cfg(target_os = "macos")] monitor_main_thread_hangs(http_client.clone(), installation_id.clone(), cx); @@ -272,6 +277,94 @@ pub fn monitor_main_thread_hangs( .detach() } +fn monitor_hangs(cx: &App) { + let main_thread_id = std::thread::current().id(); + + let foreground_executor = 
cx.foreground_executor(); + let background_executor = cx.background_executor(); + + // 3 seconds hang + let (mut tx, mut rx) = futures::channel::mpsc::channel(3); + foreground_executor + .spawn(async move { while (rx.next().await).is_some() {} }) + .detach(); + + background_executor + .spawn({ + let background_executor = background_executor.clone(); + async move { + let mut hang_time = None; + + let mut hanging = false; + loop { + background_executor.timer(Duration::from_secs(1)).await; + match tx.try_send(()) { + Ok(_) => { + hang_time = None; + hanging = false; + continue; + } + Err(e) => { + let is_full = e.into_send_error().is_full(); + if is_full && !hanging { + hanging = true; + hang_time = Some(chrono::Local::now()); + } + + if is_full { + save_hang_trace( + main_thread_id, + &background_executor, + hang_time.unwrap(), + ); + } + } + } + } + } + }) + .detach(); +} + +fn save_hang_trace( + main_thread_id: ThreadId, + background_executor: &gpui::BackgroundExecutor, + hang_time: chrono::DateTime, +) { + let thread_timings = background_executor.dispatcher.get_all_timings(); + let thread_timings = thread_timings + .into_iter() + .map(|mut timings| { + if timings.thread_id == main_thread_id { + timings.thread_name = Some("main".to_string()); + } + + SerializedThreadTaskTimings::convert(*STARTUP_TIME.get().unwrap(), timings) + }) + .collect::>(); + + let trace_path = paths::hang_traces_dir().join(&format!( + "hang-{}.miniprof", + hang_time.format("%Y-%m-%d_%H-%M-%S") + )); + + let Some(timings) = serde_json::to_string(&thread_timings) + .context("hang timings serialization") + .log_err() + else { + return; + }; + + std::fs::write(&trace_path, timings) + .context("hang trace file writing") + .log_err(); + + info!( + "hang detected, trace file saved at: {}", + trace_path.display() + ); +} + pub async fn upload_previous_minidumps( http: Arc, installation_id: Option, diff --git a/crates/zed_actions/src/lib.rs b/crates/zed_actions/src/lib.rs index c0739c74c7ac6c103e34c7a2cd730096503ef565..f00b2a7bfd3371359659f310a37ee36ef75b04f5 100644 --- a/crates/zed_actions/src/lib.rs +++ b/crates/zed_actions/src/lib.rs @@ -65,6 +65,8 @@ actions!( OpenLicenses, /// Opens the telemetry log. OpenTelemetryLog, + /// Opens the performance profiler. + OpenPerformanceProfiler, ] ); From 1fdd95a9b3489aad8d7616010904b611f5d7aee7 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Wed, 12 Nov 2025 20:31:43 +0530 Subject: [PATCH 0047/1030] Revert "editor: Improve multi-buffer header filename click to jump to the latest selection from that buffer" (#42534) Reverts zed-industries/zed#42480 This panics on Nightly in cases where anchor might not be valid for that snapshot. Taking it back before the cutoff. 
Release Notes: - N/A --- crates/editor/src/element.rs | 145 +++++++++++------------------------ 1 file changed, 43 insertions(+), 102 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 680570d0926257b3bde4532b03681b4515111930..a211db1dec5f956affdaabf237d7768a80efc793 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -6,10 +6,10 @@ use crate::{ EditDisplayMode, EditPrediction, Editor, EditorMode, EditorSettings, EditorSnapshot, EditorStyle, FILE_HEADER_HEIGHT, FocusedBlock, GutterDimensions, HalfPageDown, HalfPageUp, HandleInput, HoveredCursor, InlayHintRefreshReason, JumpData, LineDown, LineHighlight, LineUp, - MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, PageDown, - PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, SelectPhase, - SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, SizingBehavior, - SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, + MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, + OpenExcerptsSplit, PageDown, PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, + SelectPhase, SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, + SizingBehavior, SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, code_context_menus::{CodeActionsMenu, MENU_ASIDE_MAX_WIDTH, MENU_ASIDE_MIN_WIDTH, MENU_GAP}, display_map::{ Block, BlockContext, BlockStyle, ChunkRendererId, DisplaySnapshot, EditorMargins, @@ -3664,7 +3664,6 @@ impl EditorElement { row_block_types: &mut HashMap, selections: &[Selection], selected_buffer_ids: &Vec, - selection_anchors: &[Selection], is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -3740,13 +3739,7 @@ impl EditorElement { let selected = selected_buffer_ids.contains(&first_excerpt.buffer_id); let result = v_flex().id(block_id).w_full().pr(editor_margins.right); - let jump_data = header_jump_data( - snapshot, - block_row_start, - *height, - first_excerpt, - selection_anchors, - ); + let jump_data = header_jump_data(snapshot, block_row_start, *height, first_excerpt); result .child(self.render_buffer_header( first_excerpt, @@ -3781,13 +3774,7 @@ impl EditorElement { Block::BufferHeader { excerpt, height } => { let mut result = v_flex().id(block_id).w_full(); - let jump_data = header_jump_data( - snapshot, - block_row_start, - *height, - excerpt, - selection_anchors, - ); + let jump_data = header_jump_data(snapshot, block_row_start, *height, excerpt); if sticky_header_excerpt_id != Some(excerpt.id) { let selected = selected_buffer_ids.contains(&excerpt.buffer_id); @@ -4055,18 +4042,24 @@ impl EditorElement { ) .group_hover("", |div| div.underline()), ) - .on_click(window.listener_for(&self.editor, { - let jump_data = jump_data.clone(); - - move |editor, e: &ClickEvent, window, cx| { - editor.open_excerpts_common( - Some(jump_data.clone()), - e.modifiers().secondary(), - window, - cx, - ); + .on_click({ + let focus_handle = focus_handle.clone(); + move |event, window, cx| { + if event.modifiers().secondary() { + focus_handle.dispatch_action( + &OpenExcerptsSplit, + window, + cx, + ); + } else { + focus_handle.dispatch_action( + &OpenExcerpts, + window, + cx, + ); + } } - })), + }), ) .when_some(parent_path, |then, path| { then.child(div().child(path).text_color( @@ -4094,18 +4087,24 @@ impl EditorElement { cx, )), ) - .on_click(window.listener_for(&self.editor, { - let 
jump_data = jump_data.clone(); - - move |editor, e: &ClickEvent, window, cx| { - editor.open_excerpts_common( - Some(jump_data.clone()), - e.modifiers().secondary(), - window, - cx, - ); + .on_click({ + let focus_handle = focus_handle.clone(); + move |event, window, cx| { + if event.modifiers().secondary() { + focus_handle.dispatch_action( + &OpenExcerptsSplit, + window, + cx, + ); + } else { + focus_handle.dispatch_action( + &OpenExcerpts, + window, + cx, + ); + } } - })), + }), ) }, ) @@ -4251,7 +4250,6 @@ impl EditorElement { line_layouts: &mut [LineWithInvisibles], selections: &[Selection], selected_buffer_ids: &Vec, - selection_anchors: &[Selection], is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -4295,7 +4293,6 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, - selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4353,7 +4350,6 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, - selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4409,7 +4405,6 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, - selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4492,7 +4487,6 @@ impl EditorElement { hitbox: &Hitbox, selected_buffer_ids: &Vec, blocks: &[BlockLayout], - selection_anchors: &[Selection], window: &mut Window, cx: &mut App, ) -> AnyElement { @@ -4501,7 +4495,6 @@ impl EditorElement { DisplayRow(scroll_position.y as u32), FILE_HEADER_HEIGHT + MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, excerpt, - selection_anchors, ); let editor_bg_color = cx.theme().colors().editor_background; @@ -7805,34 +7798,7 @@ fn header_jump_data( block_row_start: DisplayRow, height: u32, for_excerpt: &ExcerptInfo, - selection_anchors: &[Selection], ) -> JumpData { - if let Some(cursor_anchor) = latest_anchor_for_buffer(selection_anchors, for_excerpt.buffer_id) - { - let buffer_point = - language::ToPoint::to_point(&cursor_anchor.text_anchor, &for_excerpt.buffer); - let multibuffer_point = snapshot - .buffer_snapshot() - .summary_for_anchor::(&cursor_anchor); - let display_row = snapshot - .display_snapshot - .point_to_display_point(multibuffer_point, Bias::Left) - .row() - .0; - let scroll_row = snapshot - .scroll_anchor - .scroll_position(&snapshot.display_snapshot) - .y as u32; - let line_offset_from_top = display_row.saturating_sub(scroll_row); - - return JumpData::MultiBufferPoint { - excerpt_id: cursor_anchor.excerpt_id, - anchor: cursor_anchor.text_anchor, - position: buffer_point, - line_offset_from_top, - }; - } - let range = &for_excerpt.range; let buffer = &for_excerpt.buffer; let jump_anchor = range.primary.start; @@ -7862,20 +7828,6 @@ fn header_jump_data( } } -fn latest_anchor_for_buffer( - selection_anchors: &[Selection], - buffer_id: BufferId, -) -> Option { - selection_anchors - .iter() - .filter_map(|selection| { - let head = selection.head(); - (head.buffer_id == Some(buffer_id)).then_some((selection.id, head)) - }) - .max_by_key(|(id, _)| *id) - .map(|(_, anchor)| anchor) -} - pub struct AcceptEditPredictionBinding(pub(crate) Option); impl AcceptEditPredictionBinding { @@ -9187,18 +9139,15 @@ impl Element for EditorElement { cx, ); - let (local_selections, selected_buffer_ids, selection_anchors): ( + let (local_selections, selected_buffer_ids): ( Vec>, Vec, - Arc<[Selection]>, ) = self .editor_with_selections(cx) .map(|editor| { editor.update(cx, |editor, cx| { let all_selections 
= editor.selections.all::(&snapshot.display_snapshot); - let all_anchor_selections = - editor.selections.all_anchors(&snapshot.display_snapshot); let selected_buffer_ids = if editor.buffer_kind(cx) == ItemBufferKind::Singleton { Vec::new() @@ -9227,16 +9176,10 @@ impl Element for EditorElement { selections .extend(editor.selections.pending(&snapshot.display_snapshot)); - (selections, selected_buffer_ids, all_anchor_selections) + (selections, selected_buffer_ids) }) }) - .unwrap_or_else(|| { - ( - Vec::new(), - Vec::new(), - Arc::<[Selection]>::from(Vec::new()), - ) - }); + .unwrap_or_default(); let (selections, mut active_rows, newest_selection_head) = self .layout_selections( @@ -9467,7 +9410,6 @@ impl Element for EditorElement { &mut line_layouts, &local_selections, &selected_buffer_ids, - selection_anchors.as_ref(), is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -9501,7 +9443,6 @@ impl Element for EditorElement { &hitbox, &selected_buffer_ids, &blocks, - selection_anchors.as_ref(), window, cx, ) From c5ab1d46799cc4730b11f9d4dca25c30eae03a23 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 12 Nov 2025 10:13:40 -0500 Subject: [PATCH 0048/1030] Stop thread on Restore Checkpoint (#42537) Closes #35142 In addition to cleaning up the terminals, also stops the conversation. Release Notes: - Restoring a checkpoint now stops the agent conversation. --- crates/acp_thread/src/acp_thread.rs | 330 ++++++++++++++++++++++++++++ 1 file changed, 330 insertions(+) diff --git a/crates/acp_thread/src/acp_thread.rs b/crates/acp_thread/src/acp_thread.rs index a63dabf1fb25258b6f4255a5c67682165371b255..56e33fda47f095eef1873f7a0724b021e88a0bdc 100644 --- a/crates/acp_thread/src/acp_thread.rs +++ b/crates/acp_thread/src/acp_thread.rs @@ -1866,10 +1866,14 @@ impl AcpThread { .checkpoint .as_ref() .map(|c| c.git_checkpoint.clone()); + + // Cancel any in-progress generation before restoring + let cancel_task = self.cancel(cx); let rewind = self.rewind(id.clone(), cx); let git_store = self.project.read(cx).git_store().clone(); cx.spawn(async move |_, cx| { + cancel_task.await; rewind.await?; if let Some(checkpoint) = checkpoint { git_store @@ -1894,9 +1898,25 @@ impl AcpThread { cx.update(|cx| truncate.run(id.clone(), cx))?.await?; this.update(cx, |this, cx| { if let Some((ix, _)) = this.user_message_mut(&id) { + // Collect all terminals from entries that will be removed + let terminals_to_remove: Vec = this.entries[ix..] + .iter() + .flat_map(|entry| entry.terminals()) + .filter_map(|terminal| terminal.read(cx).id().clone().into()) + .collect(); + let range = ix..this.entries.len(); this.entries.truncate(ix); cx.emit(AcpThreadEvent::EntriesRemoved(range)); + + // Kill and remove the terminals + for terminal_id in terminals_to_remove { + if let Some(terminal) = this.terminals.remove(&terminal_id) { + terminal.update(cx, |terminal, cx| { + terminal.kill(cx); + }); + } + } } this.action_log().update(cx, |action_log, cx| { action_log.reject_all_edits(Some(telemetry), cx) @@ -3803,4 +3823,314 @@ mod tests { } }); } + + /// Tests that restoring a checkpoint properly cleans up terminals that were + /// created after that checkpoint, and cancels any in-progress generation. + /// + /// Reproduces issue #35142: When a checkpoint is restored, any terminal processes + /// that were started after that checkpoint should be terminated, and any in-progress + /// AI generation should be canceled. 
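+    ///
+    /// Rough outline of the steps below (comments only; the terminal IDs and
+    /// labels are this test's own fixtures):
+    /// 1. Send two user messages so there is a checkpoint to restore to.
+    /// 2. Register two terminals that have already exited before that checkpoint.
+    /// 3. Register a third, still-running terminal after the checkpoint and
+    ///    attach it to an in-progress tool call.
+    /// 4. Restore the checkpoint, then assert the send task is cleared, the
+    ///    entries are truncated, the two completed terminals survive, and the
+    ///    running terminal is removed.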
+ #[gpui::test] + async fn test_restore_checkpoint_kills_terminal(cx: &mut TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + let project = Project::test(fs, [], cx).await; + let connection = Rc::new(FakeAgentConnection::new()); + let thread = cx + .update(|cx| connection.new_thread(project, Path::new(path!("/test")), cx)) + .await + .unwrap(); + + // Send first user message to create a checkpoint + cx.update(|cx| { + thread.update(cx, |thread, cx| { + thread.send(vec!["first message".into()], cx) + }) + }) + .await + .unwrap(); + + // Send second message (creates another checkpoint) - we'll restore to this one + cx.update(|cx| { + thread.update(cx, |thread, cx| { + thread.send(vec!["second message".into()], cx) + }) + }) + .await + .unwrap(); + + // Create 2 terminals BEFORE the checkpoint that have completed running + let terminal_id_1 = acp::TerminalId(uuid::Uuid::new_v4().to_string().into()); + let mock_terminal_1 = cx.new(|cx| { + let builder = ::terminal::TerminalBuilder::new_display_only( + ::terminal::terminal_settings::CursorShape::default(), + ::terminal::terminal_settings::AlternateScroll::On, + None, + 0, + ) + .unwrap(); + builder.subscribe(cx) + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Created { + terminal_id: terminal_id_1.clone(), + label: "echo 'first'".to_string(), + cwd: Some(PathBuf::from("/test")), + output_byte_limit: None, + terminal: mock_terminal_1.clone(), + }, + cx, + ); + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Output { + terminal_id: terminal_id_1.clone(), + data: b"first\n".to_vec(), + }, + cx, + ); + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Exit { + terminal_id: terminal_id_1.clone(), + status: acp::TerminalExitStatus { + exit_code: Some(0), + signal: None, + meta: None, + }, + }, + cx, + ); + }); + + let terminal_id_2 = acp::TerminalId(uuid::Uuid::new_v4().to_string().into()); + let mock_terminal_2 = cx.new(|cx| { + let builder = ::terminal::TerminalBuilder::new_display_only( + ::terminal::terminal_settings::CursorShape::default(), + ::terminal::terminal_settings::AlternateScroll::On, + None, + 0, + ) + .unwrap(); + builder.subscribe(cx) + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Created { + terminal_id: terminal_id_2.clone(), + label: "echo 'second'".to_string(), + cwd: Some(PathBuf::from("/test")), + output_byte_limit: None, + terminal: mock_terminal_2.clone(), + }, + cx, + ); + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Output { + terminal_id: terminal_id_2.clone(), + data: b"second\n".to_vec(), + }, + cx, + ); + }); + + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Exit { + terminal_id: terminal_id_2.clone(), + status: acp::TerminalExitStatus { + exit_code: Some(0), + signal: None, + meta: None, + }, + }, + cx, + ); + }); + + // Get the second message ID to restore to + let second_message_id = thread.read_with(cx, |thread, _| { + // At this point we have: + // - Index 0: First user message (with checkpoint) + // - Index 1: Second user message (with checkpoint) + // No assistant responses because FakeAgentConnection just returns EndTurn + let AgentThreadEntry::UserMessage(message) = &thread.entries[1] else { + panic!("expected user message at index 1"); + }; + 
message.id.clone().unwrap() + }); + + // Create a terminal AFTER the checkpoint we'll restore to. + // This simulates the AI agent starting a long-running terminal command. + let terminal_id = acp::TerminalId(uuid::Uuid::new_v4().to_string().into()); + let mock_terminal = cx.new(|cx| { + let builder = ::terminal::TerminalBuilder::new_display_only( + ::terminal::terminal_settings::CursorShape::default(), + ::terminal::terminal_settings::AlternateScroll::On, + None, + 0, + ) + .unwrap(); + builder.subscribe(cx) + }); + + // Register the terminal as created + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Created { + terminal_id: terminal_id.clone(), + label: "sleep 1000".to_string(), + cwd: Some(PathBuf::from("/test")), + output_byte_limit: None, + terminal: mock_terminal.clone(), + }, + cx, + ); + }); + + // Simulate the terminal producing output (still running) + thread.update(cx, |thread, cx| { + thread.on_terminal_provider_event( + TerminalProviderEvent::Output { + terminal_id: terminal_id.clone(), + data: b"terminal is running...\n".to_vec(), + }, + cx, + ); + }); + + // Create a tool call entry that references this terminal + // This represents the agent requesting a terminal command + thread.update(cx, |thread, cx| { + thread + .handle_session_update( + acp::SessionUpdate::ToolCall(acp::ToolCall { + id: acp::ToolCallId("terminal-tool-1".into()), + title: "Running command".into(), + kind: acp::ToolKind::Execute, + status: acp::ToolCallStatus::InProgress, + content: vec![acp::ToolCallContent::Terminal { + terminal_id: terminal_id.clone(), + }], + locations: vec![], + raw_input: Some( + serde_json::json!({"command": "sleep 1000", "cd": "/test"}), + ), + raw_output: None, + meta: None, + }), + cx, + ) + .unwrap(); + }); + + // Verify terminal exists and is in the thread + let terminal_exists_before = + thread.read_with(cx, |thread, _| thread.terminals.contains_key(&terminal_id)); + assert!( + terminal_exists_before, + "Terminal should exist before checkpoint restore" + ); + + // Verify the terminal's underlying task is still running (not completed) + let terminal_running_before = thread.read_with(cx, |thread, _cx| { + let terminal_entity = thread.terminals.get(&terminal_id).unwrap(); + terminal_entity.read_with(cx, |term, _cx| { + term.output().is_none() // output is None means it's still running + }) + }); + assert!( + terminal_running_before, + "Terminal should be running before checkpoint restore" + ); + + // Verify we have the expected entries before restore + let entry_count_before = thread.read_with(cx, |thread, _| thread.entries.len()); + assert!( + entry_count_before > 1, + "Should have multiple entries before restore" + ); + + // Restore the checkpoint to the second message. + // This should: + // 1. Cancel any in-progress generation (via the cancel() call) + // 2. 
Remove the terminal that was created after that point + thread + .update(cx, |thread, cx| { + thread.restore_checkpoint(second_message_id, cx) + }) + .await + .unwrap(); + + // Verify that no send_task is in progress after restore + // (cancel() clears the send_task) + let has_send_task_after = thread.read_with(cx, |thread, _| thread.send_task.is_some()); + assert!( + !has_send_task_after, + "Should not have a send_task after restore (cancel should have cleared it)" + ); + + // Verify the entries were truncated (restoring to index 1 truncates at 1, keeping only index 0) + let entry_count = thread.read_with(cx, |thread, _| thread.entries.len()); + assert_eq!( + entry_count, 1, + "Should have 1 entry after restore (only the first user message)" + ); + + // Verify the 2 completed terminals from before the checkpoint still exist + let terminal_1_exists = thread.read_with(cx, |thread, _| { + thread.terminals.contains_key(&terminal_id_1) + }); + assert!( + terminal_1_exists, + "Terminal 1 (from before checkpoint) should still exist" + ); + + let terminal_2_exists = thread.read_with(cx, |thread, _| { + thread.terminals.contains_key(&terminal_id_2) + }); + assert!( + terminal_2_exists, + "Terminal 2 (from before checkpoint) should still exist" + ); + + // Verify they're still in completed state + let terminal_1_completed = thread.read_with(cx, |thread, _cx| { + let terminal_entity = thread.terminals.get(&terminal_id_1).unwrap(); + terminal_entity.read_with(cx, |term, _cx| term.output().is_some()) + }); + assert!(terminal_1_completed, "Terminal 1 should still be completed"); + + let terminal_2_completed = thread.read_with(cx, |thread, _cx| { + let terminal_entity = thread.terminals.get(&terminal_id_2).unwrap(); + terminal_entity.read_with(cx, |term, _cx| term.output().is_some()) + }); + assert!(terminal_2_completed, "Terminal 2 should still be completed"); + + // Verify the running terminal (created after checkpoint) was removed + let terminal_3_exists = + thread.read_with(cx, |thread, _| thread.terminals.contains_key(&terminal_id)); + assert!( + !terminal_3_exists, + "Terminal 3 (created after checkpoint) should have been removed" + ); + + // Verify total count is 2 (the two from before the checkpoint) + let terminal_count = thread.read_with(cx, |thread, _| thread.terminals.len()); + assert_eq!( + terminal_count, 2, + "Should have exactly 2 terminals (the completed ones from before checkpoint)" + ); + } } From 1d75a9c4b2cd8c51352060cbe4abda46954392dd Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Wed, 12 Nov 2025 20:47:29 +0530 Subject: [PATCH 0049/1030] Reverts "add OpenExcerptsSplit and dispatches on click" (#42538) Partially reverts https://github.com/zed-industries/zed/pull/42283 to restore the old behavior of excerpt clicking. 
Release Notes: - N/A --- crates/editor/src/element.rs | 62 ++++++++++++++---------------------- 1 file changed, 24 insertions(+), 38 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index a211db1dec5f956affdaabf237d7768a80efc793..3080e0048230e35f6cd28c553a5ccc054a292e60 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -6,10 +6,10 @@ use crate::{ EditDisplayMode, EditPrediction, Editor, EditorMode, EditorSettings, EditorSnapshot, EditorStyle, FILE_HEADER_HEIGHT, FocusedBlock, GutterDimensions, HalfPageDown, HalfPageUp, HandleInput, HoveredCursor, InlayHintRefreshReason, JumpData, LineDown, LineHighlight, LineUp, - MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, - OpenExcerptsSplit, PageDown, PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, - SelectPhase, SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, - SizingBehavior, SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, + MAX_LINE_LEN, MINIMAP_FONT_SIZE, MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, OpenExcerpts, PageDown, + PageUp, PhantomBreakpointIndicator, Point, RowExt, RowRangeExt, SelectPhase, + SelectedTextHighlight, Selection, SelectionDragState, SelectionEffects, SizingBehavior, + SoftWrap, StickyHeaderExcerpt, ToPoint, ToggleFold, ToggleFoldAll, code_context_menus::{CodeActionsMenu, MENU_ASIDE_MAX_WIDTH, MENU_ASIDE_MIN_WIDTH, MENU_GAP}, display_map::{ Block, BlockContext, BlockStyle, ChunkRendererId, DisplaySnapshot, EditorMargins, @@ -4042,24 +4042,17 @@ impl EditorElement { ) .group_hover("", |div| div.underline()), ) - .on_click({ - let focus_handle = focus_handle.clone(); - move |event, window, cx| { - if event.modifiers().secondary() { - focus_handle.dispatch_action( - &OpenExcerptsSplit, - window, - cx, - ); - } else { - focus_handle.dispatch_action( - &OpenExcerpts, - window, - cx, - ); - } + .on_click(window.listener_for(&self.editor, { + let jump_data = jump_data.clone(); + move |editor, e: &ClickEvent, window, cx| { + editor.open_excerpts_common( + Some(jump_data.clone()), + e.modifiers().secondary(), + window, + cx, + ); } - }), + })), ) .when_some(parent_path, |then, path| { then.child(div().child(path).text_color( @@ -4087,24 +4080,17 @@ impl EditorElement { cx, )), ) - .on_click({ - let focus_handle = focus_handle.clone(); - move |event, window, cx| { - if event.modifiers().secondary() { - focus_handle.dispatch_action( - &OpenExcerptsSplit, - window, - cx, - ); - } else { - focus_handle.dispatch_action( - &OpenExcerpts, - window, - cx, - ); - } + .on_click(window.listener_for(&self.editor, { + let jump_data = jump_data.clone(); + move |editor, e: &ClickEvent, window, cx| { + editor.open_excerpts_common( + Some(jump_data.clone()), + e.modifiers().secondary(), + window, + cx, + ); } - }), + })), ) }, ) From 53652cdb3ff097468a5bb50f0661541baef1db88 Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Wed, 12 Nov 2025 10:36:28 -0500 Subject: [PATCH 0050/1030] Bump Zed to v0.214 (#42539) Release Notes: - N/A --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6f665286ad2a47e345d6c43f9c296af01c423c64..25bb2b4063ce437f5de1411a04d129a8de06aff0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21170,7 +21170,7 @@ dependencies = [ [[package]] name = "zed" -version = "0.213.0" +version = "0.214.0" dependencies = [ "acp_tools", "activity_indicator", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 722880ca1bad7552490dd67e6f34f9b0d509848f..1ccfe8d1f5f60f825072f5034f629296f1229269 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -2,7 +2,7 @@ description = "The fast, collaborative code editor." edition.workspace = true name = "zed" -version = "0.213.0" +version = "0.214.0" publish.workspace = true license = "GPL-3.0-or-later" authors = ["Zed Team "] From 80b32ddaadd0fbe74848c24a85413e0325b4f532 Mon Sep 17 00:00:00 2001 From: Vasyl Protsiv Date: Wed, 12 Nov 2025 17:37:14 +0200 Subject: [PATCH 0051/1030] gpui: Add 'Nearest' scrolling strategy to 'UniformList' (#41844) This PR introduces `Nearest` scrolling strategy to `UniformList`. This is now used in completions menu and the picker to choose the appropriate scrolling strategy depending on movement direction. Previously, selecting the next element after the last visible item caused the menu to scroll with `ScrollStrategy::Top`, which scrolled the whole page and placed the next element at the top. This behavior is inconsistent, because using `ScrollStrategy::Top` when moving up only scrolls one element, not the whole page. https://github.com/user-attachments/assets/ccfb238f-8f76-4a18-a18d-bbcb63340c5a The solution is to introduce the `Nearest` scrolling strategy which will internally choose the scrolling strategy depending on whether the new selected item is below or above currently visible items. This ensures a single-item scroll regardless of movement direction. https://github.com/user-attachments/assets/8502efb8-e2c0-4ab1-bd8d-93103841a9c4 I also noticed that some functions in the file have different logic depending on `y_flipped`. This appears related to reversing the order of elements in the list when the completion menu appears above the cursor. This was a feature suggested in #11200 and implemented in #23446. It looks like this feature was reverted in #27765 and there currently seem to be no way to have `y_flipped` to be set to `true`. My understanding is that the opposite scroll strategy should be used if `y_flipped`, but since there is no way to enable this feature to test it and I don't know if the feature is ever going to be reintroduced I decided not to include it in this PR. 
Release Notes: - gpui: Add 'Nearest' scrolling strategy to 'UniformList' --- crates/editor/src/code_context_menus.rs | 2 +- crates/gpui/src/elements/uniform_list.rs | 219 +++++++++++++++++++---- crates/picker/src/picker.rs | 2 +- 3 files changed, 187 insertions(+), 36 deletions(-) diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index ac8f26764b5a037a0a1618052a34466effd80563..f220cadee5acca5c7c1d3c91b9350380bc0bf10e 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -506,7 +506,7 @@ impl CompletionsMenu { cx: &mut Context, ) { self.scroll_handle - .scroll_to_item(self.selected_item, ScrollStrategy::Top); + .scroll_to_item(self.selected_item, ScrollStrategy::Nearest); if let Some(provider) = provider { let entries = self.entries.borrow(); let entry = if self.selected_item < entries.len() { diff --git a/crates/gpui/src/elements/uniform_list.rs b/crates/gpui/src/elements/uniform_list.rs index 93082563c02f4168b1d73e2929a6bf9dbd153237..ba002a67f3c614e614dd591d795f839e7f1ea73d 100644 --- a/crates/gpui/src/elements/uniform_list.rs +++ b/crates/gpui/src/elements/uniform_list.rs @@ -92,6 +92,10 @@ pub enum ScrollStrategy { /// May not be possible if there's not enough list items above the item scrolled to: /// in this case, the element will be placed at the closest possible position. Bottom, + /// If the element is not visible attempt to place it at: + /// - The top of the list's viewport if the target element is above currently visible elements. + /// - The bottom of the list's viewport if the target element is above currently visible elements. + Nearest, } #[derive(Clone, Copy, Debug)] @@ -391,39 +395,42 @@ impl Element for UniformList { scroll_offset.x = Pixels::ZERO; } - if let Some(deferred_scroll) = shared_scroll_to_item { - let mut ix = deferred_scroll.item_index; + if let Some(DeferredScrollToItem { + mut item_index, + mut strategy, + offset, + scroll_strict, + }) = shared_scroll_to_item + { if y_flipped { - ix = self.item_count.saturating_sub(ix + 1); + item_index = self.item_count.saturating_sub(item_index + 1); } let list_height = padded_bounds.size.height; let mut updated_scroll_offset = shared_scroll_offset.borrow_mut(); - let item_top = item_height * ix; + let item_top = item_height * item_index; let item_bottom = item_top + item_height; let scroll_top = -updated_scroll_offset.y; - let offset_pixels = item_height * deferred_scroll.offset; - let mut scrolled_to_top = false; - - if item_top < scroll_top + offset_pixels { - scrolled_to_top = true; - // todo: using the padding here is wrong - this only works well for few scenarios - updated_scroll_offset.y = -item_top + padding.top + offset_pixels; - } else if item_bottom > scroll_top + list_height { - scrolled_to_top = true; - updated_scroll_offset.y = -(item_bottom - list_height); - } + let offset_pixels = item_height * offset; + + // is the selected item above/below currently visible items + let is_above = item_top < scroll_top + offset_pixels; + let is_below = item_bottom > scroll_top + list_height; + + if scroll_strict || is_above || is_below { + if strategy == ScrollStrategy::Nearest { + if is_above { + strategy = ScrollStrategy::Top; + } else if is_below { + strategy = ScrollStrategy::Bottom; + } + } - if deferred_scroll.scroll_strict - || (scrolled_to_top - && (item_top < scroll_top + offset_pixels - || item_bottom > scroll_top + list_height)) - { - match deferred_scroll.strategy { + let max_scroll_offset = + (content_height - 
list_height).max(Pixels::ZERO); + match strategy { ScrollStrategy::Top => { updated_scroll_offset.y = -(item_top - offset_pixels) - .max(Pixels::ZERO) - .min(content_height - list_height) - .max(Pixels::ZERO); + .clamp(Pixels::ZERO, max_scroll_offset); } ScrollStrategy::Center => { let item_center = item_top + item_height / 2.0; @@ -431,18 +438,15 @@ impl Element for UniformList { let viewport_height = list_height - offset_pixels; let viewport_center = offset_pixels + viewport_height / 2.0; let target_scroll_top = item_center - viewport_center; - - updated_scroll_offset.y = -target_scroll_top - .max(Pixels::ZERO) - .min(content_height - list_height) - .max(Pixels::ZERO); + updated_scroll_offset.y = + -target_scroll_top.clamp(Pixels::ZERO, max_scroll_offset); } ScrollStrategy::Bottom => { - updated_scroll_offset.y = -(item_bottom - list_height - + offset_pixels) - .max(Pixels::ZERO) - .min(content_height - list_height) - .max(Pixels::ZERO); + updated_scroll_offset.y = -(item_bottom - list_height) + .clamp(Pixels::ZERO, max_scroll_offset); + } + ScrollStrategy::Nearest => { + // Nearest, but the item is visible -> no scroll is required } } } @@ -695,3 +699,150 @@ impl InteractiveElement for UniformList { &mut self.interactivity } } + +#[cfg(test)] +mod test { + use crate::TestAppContext; + + #[gpui::test] + fn test_scroll_strategy_nearest(cx: &mut TestAppContext) { + use crate::{ + Context, FocusHandle, ScrollStrategy, UniformListScrollHandle, Window, actions, div, + prelude::*, px, uniform_list, + }; + use std::ops::Range; + + actions!(example, [SelectNext, SelectPrev]); + + struct TestView { + index: usize, + length: usize, + scroll_handle: UniformListScrollHandle, + focus_handle: FocusHandle, + visible_range: Range, + } + + impl TestView { + pub fn select_next( + &mut self, + _: &SelectNext, + window: &mut Window, + _: &mut Context, + ) { + if self.index + 1 == self.length { + self.index = 0 + } else { + self.index += 1; + } + self.scroll_handle + .scroll_to_item(self.index, ScrollStrategy::Nearest); + window.refresh(); + } + + pub fn select_previous( + &mut self, + _: &SelectPrev, + window: &mut Window, + _: &mut Context, + ) { + if self.index == 0 { + self.index = self.length - 1 + } else { + self.index -= 1; + } + self.scroll_handle + .scroll_to_item(self.index, ScrollStrategy::Nearest); + window.refresh(); + } + } + + impl Render for TestView { + fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { + div() + .id("list-example") + .track_focus(&self.focus_handle) + .on_action(cx.listener(Self::select_next)) + .on_action(cx.listener(Self::select_previous)) + .size_full() + .child( + uniform_list( + "entries", + self.length, + cx.processor(|this, range: Range, _window, _cx| { + this.visible_range = range.clone(); + range + .map(|ix| div().id(ix).h(px(20.0)).child(format!("Item {ix}"))) + .collect() + }), + ) + .track_scroll(self.scroll_handle.clone()) + .h(px(200.0)), + ) + } + } + + let (view, cx) = cx.add_window_view(|window, cx| { + let focus_handle = cx.focus_handle(); + window.focus(&focus_handle); + TestView { + scroll_handle: UniformListScrollHandle::new(), + index: 0, + focus_handle, + length: 47, + visible_range: 0..0, + } + }); + + // 10 out of 47 items are visible + + // First 9 times selecting next item does not scroll + for ix in 1..10 { + cx.dispatch_action(SelectNext); + view.read_with(cx, |view, _| { + assert_eq!(view.index, ix); + assert_eq!(view.visible_range, 0..10); + }) + } + + // Now each time the list scrolls down by 1 + for ix in 
10..47 { + cx.dispatch_action(SelectNext); + view.read_with(cx, |view, _| { + assert_eq!(view.index, ix); + assert_eq!(view.visible_range, ix - 9..ix + 1); + }) + } + + // After the last item we move back to the start + cx.dispatch_action(SelectNext); + view.read_with(cx, |view, _| { + assert_eq!(view.index, 0); + assert_eq!(view.visible_range, 0..10); + }); + + // Return to the last element + cx.dispatch_action(SelectPrev); + view.read_with(cx, |view, _| { + assert_eq!(view.index, 46); + assert_eq!(view.visible_range, 37..47); + }); + + // First 9 times selecting previous does not scroll + for ix in (37..46).rev() { + cx.dispatch_action(SelectPrev); + view.read_with(cx, |view, _| { + assert_eq!(view.index, ix); + assert_eq!(view.visible_range, 37..47); + }) + } + + // Now each time the list scrolls up by 1 + for ix in (0..37).rev() { + cx.dispatch_action(SelectPrev); + view.read_with(cx, |view, _| { + assert_eq!(view.index, ix); + assert_eq!(view.visible_range, ix..ix + 10); + }) + } + } +} diff --git a/crates/picker/src/picker.rs b/crates/picker/src/picker.rs index 6027ae5cd5e77db938116568ac7001548e97bde9..1a2c6509f24843210014c8c868f7eec6c7918d91 100644 --- a/crates/picker/src/picker.rs +++ b/crates/picker/src/picker.rs @@ -709,7 +709,7 @@ impl Picker { match &mut self.element_container { ElementContainer::List(state) => state.scroll_to_reveal_item(ix), ElementContainer::UniformList(scroll_handle) => { - scroll_handle.scroll_to_item(ix, ScrollStrategy::Top) + scroll_handle.scroll_to_item(ix, ScrollStrategy::Nearest) } } } From cfbde9183306e766b59687430b311c7564679f82 Mon Sep 17 00:00:00 2001 From: Marco Mihai Condrache <52580954+marcocondrache@users.noreply.github.com> Date: Wed, 12 Nov 2025 16:38:06 +0100 Subject: [PATCH 0052/1030] terminal: Add setting for scroll multiplier (#39463) Closes #5130 Release Notes: - Added setting option for scroll multiplier of the terminal --------- Signed-off-by: Marco Mihai Condrache <52580954+marcocondrache@users.noreply.github.com> Co-authored-by: MrSubidubi --- assets/settings/default.json | 2 ++ .../settings/src/settings_content/terminal.rs | 4 ++++ crates/settings/src/vscode_import.rs | 1 + crates/settings_ui/src/page_data.rs | 18 ++++++++++++++++ crates/terminal/src/terminal.rs | 19 +++++++---------- crates/terminal/src/terminal_settings.rs | 3 +++ crates/terminal_view/src/terminal_view.rs | 7 ++++++- docs/src/configuring-zed.md | 21 +++++++++++++++++++ 8 files changed, 63 insertions(+), 12 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index 70011f3209c7f64fd4e86d3acbb62a9ff2d5a487..f1b8d9e76bc600de6fd41834c08f40a9b2d51b42 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1550,6 +1550,8 @@ // Default: 10_000, maximum: 100_000 (all bigger values set will be treated as 100_000), 0 disables the scrolling. // Existing terminals will not pick up this change until they are recreated. "max_scroll_history_lines": 10000, + // The multiplier for scrolling speed in the terminal. + "scroll_multiplier": 1.0, // The minimum APCA perceptual contrast between foreground and background colors. // APCA (Accessible Perceptual Contrast Algorithm) is more accurate than WCAG 2.x, // especially for dark mode. Values range from 0 to 106. 
diff --git a/crates/settings/src/settings_content/terminal.rs b/crates/settings/src/settings_content/terminal.rs index 2413eb96a0d01feb96c8b7322131f1c5f52af91b..723156bc3ad2d5d07866f40836f10ec9f3e79087 100644 --- a/crates/settings/src/settings_content/terminal.rs +++ b/crates/settings/src/settings_content/terminal.rs @@ -116,6 +116,10 @@ pub struct TerminalSettingsContent { /// /// Default: 10_000 pub max_scroll_history_lines: Option, + /// The multiplier for scrolling with the mouse wheel. + /// + /// Default: 1.0 + pub scroll_multiplier: Option, /// Toolbar related settings pub toolbar: Option, /// Scrollbar-related settings diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 31f1ab82b50b5fca32203c770cd41795e1cf92c3..0de37b5daecadb6d8da42d553bffa30d1ffeb1a7 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -737,6 +737,7 @@ impl VsCodeSettings { option_as_meta: self.read_bool("terminal.integrated.macOptionIsMeta"), project: self.project_terminal_settings_content(), scrollbar: None, + scroll_multiplier: None, toolbar: None, }) } diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index 973f40a20a5cc6052f30ba2ff17a5116c96eeb2b..611ce10a75e5e2e52c28b88d6583108a006e63b3 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -5168,6 +5168,24 @@ pub(crate) fn settings_data(cx: &App) -> Vec { metadata: None, files: USER, }), + SettingsPageItem::SettingItem(SettingItem { + title: "Scroll Multiplier", + description: "The multiplier for scrolling in the terminal with the mouse wheel", + field: Box::new(SettingField { + json_path: Some("terminal.scroll_multiplier"), + pick: |settings_content| { + settings_content.terminal.as_ref()?.scroll_multiplier.as_ref() + }, + write: |settings_content, value| { + settings_content + .terminal + .get_or_insert_default() + .scroll_multiplier = value; + }, + }), + metadata: None, + files: USER, + }), SettingsPageItem::SectionHeader("Toolbar"), SettingsPageItem::SettingItem(SettingItem { title: "Breadcrumbs", diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 59c71474d47b9c79d33b28bd7cbbc1a187b4ebfd..3c71a7f0e1a483f1e27fe52170bbabbe6129b974 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -108,13 +108,6 @@ actions!( ] ); -///Scrolling is unbearably sluggish by default. Alacritty supports a configurable -///Scroll multiplier that is set to 3 by default. This will be removed when I -///Implement scroll bars. -#[cfg(target_os = "macos")] -const SCROLL_MULTIPLIER: f32 = 4.; -#[cfg(not(target_os = "macos"))] -const SCROLL_MULTIPLIER: f32 = 1.; const DEBUG_TERMINAL_WIDTH: Pixels = px(500.); const DEBUG_TERMINAL_HEIGHT: Pixels = px(30.); const DEBUG_CELL_WIDTH: Pixels = px(5.); @@ -1890,10 +1883,11 @@ impl Terminal { } ///Scroll the terminal - pub fn scroll_wheel(&mut self, e: &ScrollWheelEvent) { + pub fn scroll_wheel(&mut self, e: &ScrollWheelEvent, scroll_multiplier: f32) { let mouse_mode = self.mouse_mode(e.shift); + let scroll_multiplier = if mouse_mode { 1. 
} else { scroll_multiplier }; - if let Some(scroll_lines) = self.determine_scroll_lines(e, mouse_mode) { + if let Some(scroll_lines) = self.determine_scroll_lines(e, scroll_multiplier) { if mouse_mode { let point = grid_point( e.position - self.last_content.terminal_bounds.bounds.origin, @@ -1926,8 +1920,11 @@ impl Terminal { self.word_from_position(window.mouse_position()); } - fn determine_scroll_lines(&mut self, e: &ScrollWheelEvent, mouse_mode: bool) -> Option { - let scroll_multiplier = if mouse_mode { 1. } else { SCROLL_MULTIPLIER }; + fn determine_scroll_lines( + &mut self, + e: &ScrollWheelEvent, + scroll_multiplier: f32, + ) -> Option { let line_height = self.last_content.terminal_bounds.line_height; match e.touch_phase { /* Reset scroll state on started */ diff --git a/crates/terminal/src/terminal_settings.rs b/crates/terminal/src/terminal_settings.rs index 01def426d887309f657efeef1172facec3e16b42..0c6f03832c939a1d0ad4431932d9ce4ea3d7f57f 100644 --- a/crates/terminal/src/terminal_settings.rs +++ b/crates/terminal/src/terminal_settings.rs @@ -7,6 +7,7 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; pub use settings::AlternateScroll; + use settings::{ RegisterSetting, ShowScrollbar, TerminalBlink, TerminalDockPosition, TerminalLineHeight, VenvSettings, WorkingDirectory, merge_from::MergeFrom, @@ -42,6 +43,7 @@ pub struct TerminalSettings { pub default_height: Pixels, pub detect_venv: VenvSettings, pub max_scroll_history_lines: Option, + pub scroll_multiplier: f32, pub toolbar: Toolbar, pub scrollbar: ScrollbarSettings, pub minimum_contrast: f32, @@ -105,6 +107,7 @@ impl settings::Settings for TerminalSettings { default_width: px(user_content.default_width.unwrap()), default_height: px(user_content.default_height.unwrap()), detect_venv: project_content.detect_venv.unwrap(), + scroll_multiplier: user_content.scroll_multiplier.unwrap(), max_scroll_history_lines: user_content.max_scroll_history_lines, toolbar: Toolbar { breadcrumbs: user_content.toolbar.unwrap().breadcrumbs.unwrap(), diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 2bdce16125e3a70eaa94779cf0297c62e87f9cac..43714a5cfeee690644e9b772d89c12bcbd909964 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -519,7 +519,12 @@ impl TerminalView { return; } } - self.terminal.update(cx, |term, _| term.scroll_wheel(event)); + self.terminal.update(cx, |term, cx| { + term.scroll_wheel( + event, + TerminalSettings::get_global(cx).scroll_multiplier.max(0.01), + ) + }); } fn scroll_line_up(&mut self, _: &ScrollLineUp, _: &mut Window, cx: &mut Context) { diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 6841c9a3cb0364d8eab63a9319df2e6a38d5612e..43c36767f0ebb526ec6f12649d0d03b027eab636 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -3586,6 +3586,7 @@ List of `integer` column numbers "option_as_meta": false, "button": true, "shell": "system", + "scroll_multiplier": 3.0, "toolbar": { "breadcrumbs": false }, @@ -3998,6 +3999,26 @@ Disable with: } ``` +### Terminal: Scroll Multiplier + +- Description: The multiplier for scrolling speed in the terminal when using mouse wheel or trackpad. +- Setting: `scroll_multiplier` +- Default: `1.0` + +**Options** + +Positive floating point values. Values less than or equal to 0 will be clamped to a minimum of 0.01. 
+ +**Example** + +```json +{ + "terminal": { + "scroll_multiplier": 5.0 + } +} +``` + ## Terminal: Toolbar - Description: Whether or not to show various elements in the terminal toolbar. From ab62739605d0f0d5b0c2bbbb4670f708dd454278 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Wed, 12 Nov 2025 10:38:16 -0500 Subject: [PATCH 0053/1030] collab: Remove unused methods from `User` model (#42536) This PR removes some unused methods from the `User` model. Release Notes: - N/A --- crates/collab/src/db/tables/user.rs | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/crates/collab/src/db/tables/user.rs b/crates/collab/src/db/tables/user.rs index 8e8c03fafc92127f8754f473e04dfab39592ea14..3f753954ebb1cff78318de8d9be4786a4d5d0efb 100644 --- a/crates/collab/src/db/tables/user.rs +++ b/crates/collab/src/db/tables/user.rs @@ -39,25 +39,6 @@ pub enum Relation { Contributor, } -impl Model { - /// Returns the timestamp of when the user's account was created. - /// - /// This will be the earlier of the `created_at` and `github_user_created_at` timestamps. - pub fn account_created_at(&self) -> NaiveDateTime { - let mut account_created_at = self.created_at; - if let Some(github_created_at) = self.github_user_created_at { - account_created_at = account_created_at.min(github_created_at); - } - - account_created_at - } - - /// Returns the age of the user's account. - pub fn account_age(&self) -> chrono::Duration { - chrono::Utc::now().naive_utc() - self.account_created_at() - } -} - impl Related for Entity { fn to() -> RelationDef { Relation::AccessToken.def() From e79188261ba42c9261d91ffb9afd9859d7f932fb Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 12 Nov 2025 17:26:53 +0100 Subject: [PATCH 0054/1030] fs: Fix wrong watcher trace log on Linux (#42544) Follow-up to #40200 Release Notes: - N/A --- crates/fs/src/fs_watcher.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/fs/src/fs_watcher.rs b/crates/fs/src/fs_watcher.rs index 32be1112d0b235281d33dd14534ebb87d8a3bc55..18d5dbeeb9e82948aaa503e7268d39c5d1852a2b 100644 --- a/crates/fs/src/fs_watcher.rs +++ b/crates/fs/src/fs_watcher.rs @@ -72,8 +72,8 @@ impl Watcher for FsWatcher { } #[cfg(target_os = "linux")] { - log::trace!("path to watch is already watched: {path:?}"); if self.registrations.lock().contains_key(path) { + log::trace!("path to watch is already watched: {path:?}"); return Ok(()); } } From ab352f669e0336aba01631c9784b848b34a04102 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 12 Nov 2025 11:55:25 -0500 Subject: [PATCH 0055/1030] Gracefully handle `@mention`-ing large files with no outlines (#42543) Closes #32098 Release Notes: - In the Agent panel, when `@mention`-ing large files with no outline, their first 1KB is now added to context --- crates/agent/src/outline.rs | 78 +++++++++++++++++++++++ crates/agent_ui/src/acp/message_editor.rs | 36 +++++++---- crates/agent_ui/src/context.rs | 14 ++-- 3 files changed, 107 insertions(+), 21 deletions(-) diff --git a/crates/agent/src/outline.rs b/crates/agent/src/outline.rs index 262fa8d3d139a5c8f5900d0dd55348f9dc716167..0de035c34bf285d41ff20676f037abf2464213a1 100644 --- a/crates/agent/src/outline.rs +++ b/crates/agent/src/outline.rs @@ -44,6 +44,25 @@ pub async fn get_buffer_content_or_outline( .collect::>() })?; + // If no outline exists, fall back to first 1KB so the agent has some context + if outline_items.is_empty() { + let text = buffer.read_with(cx, |buffer, _| { + let snapshot = buffer.snapshot(); + let len = 
snapshot.len().min(1024); + let content = snapshot.text_for_range(0..len).collect::(); + if let Some(path) = path { + format!("# First 1KB of {path} (file too large to show full content, and no outline available)\n\n{content}") + } else { + format!("# First 1KB of file (file too large to show full content, and no outline available)\n\n{content}") + } + })?; + + return Ok(BufferContent { + text, + is_outline: false, + }); + } + let outline_text = render_outline(outline_items, None, 0, usize::MAX).await?; let text = if let Some(path) = path { @@ -140,3 +159,62 @@ fn render_entries( entries_rendered } + +#[cfg(test)] +mod tests { + use super::*; + use fs::FakeFs; + use gpui::TestAppContext; + use project::Project; + use settings::SettingsStore; + + #[gpui::test] + async fn test_large_file_fallback_to_subset(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings = SettingsStore::test(cx); + cx.set_global(settings); + }); + + let fs = FakeFs::new(cx.executor()); + let project = Project::test(fs, [], cx).await; + + let content = "A".repeat(100 * 1024); // 100KB + let content_len = content.len(); + let buffer = project + .update(cx, |project, cx| project.create_buffer(true, cx)) + .await + .expect("failed to create buffer"); + + buffer.update(cx, |buffer, cx| buffer.set_text(content, cx)); + + let result = cx + .spawn(|cx| async move { get_buffer_content_or_outline(buffer, None, &cx).await }) + .await + .unwrap(); + + // Should contain some of the actual file content + assert!( + result.text.contains("AAAAAAAAAA"), + "Result did not contain content subset" + ); + + // Should be marked as not an outline (it's truncated content) + assert!( + !result.is_outline, + "Large file without outline should not be marked as outline" + ); + + // Should be reasonably sized (much smaller than original) + assert!( + result.text.len() < 50 * 1024, + "Result size {} should be smaller than 50KB", + result.text.len() + ); + + // Should be significantly smaller than the original content + assert!( + result.text.len() < content_len / 10, + "Result should be much smaller than original content" + ); + } +} diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index b7037a6413d93fb4ee538af7062049df9f58e818..9835dc929bd86085b481cbdb5e2ee667591c6e73 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -2671,13 +2671,14 @@ mod tests { } #[gpui::test] - async fn test_large_file_mention_uses_outline(cx: &mut TestAppContext) { + async fn test_large_file_mention_fallback(cx: &mut TestAppContext) { init_test(cx); let fs = FakeFs::new(cx.executor()); // Create a large file that exceeds AUTO_OUTLINE_SIZE - const LINE: &str = "fn example_function() { /* some code */ }\n"; + // Using plain text without a configured language, so no outline is available + const LINE: &str = "This is a line of text in the file\n"; let large_content = LINE.repeat(2 * (outline::AUTO_OUTLINE_SIZE / LINE.len())); assert!(large_content.len() > outline::AUTO_OUTLINE_SIZE); @@ -2688,8 +2689,8 @@ mod tests { fs.insert_tree( "/project", json!({ - "large_file.rs": large_content.clone(), - "small_file.rs": small_content, + "large_file.txt": large_content.clone(), + "small_file.txt": small_content, }), ) .await; @@ -2735,7 +2736,7 @@ mod tests { let large_file_abs_path = project.read_with(cx, |project, cx| { let worktree = project.worktrees(cx).next().unwrap(); let worktree_root = worktree.read(cx).abs_path(); - worktree_root.join("large_file.rs") + 
worktree_root.join("large_file.txt") }); let large_file_task = message_editor.update(cx, |editor, cx| { editor.confirm_mention_for_file(large_file_abs_path, cx) @@ -2744,11 +2745,20 @@ mod tests { let large_file_mention = large_file_task.await.unwrap(); match large_file_mention { Mention::Text { content, .. } => { - // Should contain outline header for large files - assert!(content.contains("File outline for")); - assert!(content.contains("file too large to show full content")); - // Should not contain the full repeated content - assert!(!content.contains(&LINE.repeat(100))); + // Should contain some of the content but not all of it + assert!( + content.contains(LINE), + "Should contain some of the file content" + ); + assert!( + !content.contains(&LINE.repeat(100)), + "Should not contain the full file" + ); + // Should be much smaller than original + assert!( + content.len() < large_content.len() / 10, + "Should be significantly truncated" + ); } _ => panic!("Expected Text mention for large file"), } @@ -2758,7 +2768,7 @@ mod tests { let small_file_abs_path = project.read_with(cx, |project, cx| { let worktree = project.worktrees(cx).next().unwrap(); let worktree_root = worktree.read(cx).abs_path(); - worktree_root.join("small_file.rs") + worktree_root.join("small_file.txt") }); let small_file_task = message_editor.update(cx, |editor, cx| { editor.confirm_mention_for_file(small_file_abs_path, cx) @@ -2767,10 +2777,8 @@ mod tests { let small_file_mention = small_file_task.await.unwrap(); match small_file_mention { Mention::Text { content, .. } => { - // Should contain the actual content + // Should contain the full actual content assert_eq!(content, small_content); - // Should not contain outline header - assert!(!content.contains("File outline for")); } _ => panic!("Expected Text mention for small file"), } diff --git a/crates/agent_ui/src/context.rs b/crates/agent_ui/src/context.rs index 0bbf4d45ee56bf8220987f52fd7a1f6aa0a73055..7f497f9cab9eae7ca9fa2a573100ab2993546228 100644 --- a/crates/agent_ui/src/context.rs +++ b/crates/agent_ui/src/context.rs @@ -1089,7 +1089,7 @@ mod tests { } #[gpui::test] - async fn test_large_file_uses_outline(cx: &mut TestAppContext) { + async fn test_large_file_uses_fallback(cx: &mut TestAppContext) { init_test_settings(cx); // Create a large file that exceeds AUTO_OUTLINE_SIZE @@ -1101,16 +1101,16 @@ mod tests { let file_context = load_context_for("file.txt", large_content, cx).await; + // Should contain some of the actual file content assert!( - file_context - .text - .contains(&format!("# File outline for {}", path!("test/file.txt"))), - "Large files should not get an outline" + file_context.text.contains(LINE), + "Should contain some of the file content" ); + // Should be much smaller than original assert!( - file_context.text.len() < content_len, - "Outline should be smaller than original content" + file_context.text.len() < content_len / 10, + "Should be significantly smaller than original content" ); } From c8930e07a3677d8abe01576b1650da99f15fe6a5 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 12 Nov 2025 10:29:31 -0700 Subject: [PATCH 0056/1030] Allow multiple parked threads in tests (#42551) Closes #ISSUE Release Notes: - N/A Co-Authored-By: Piotr --- crates/gpui/src/executor.rs | 6 ++--- crates/gpui/src/platform/test/dispatcher.rs | 26 ++++++++------------- 2 files changed, 12 insertions(+), 20 deletions(-) diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 
feeac47e01b3ba4a680b4a159ba0f6d09f66375f..c0aa978c8eb0b217aa1cf7cd734664dc0736c355 100644
--- a/crates/gpui/src/executor.rs
+++ b/crates/gpui/src/executor.rs
@@ -327,10 +327,8 @@ impl BackgroundExecutor {
                             "parked with nothing left to run{waiting_message}{backtrace_message}",
                         )
                     }
-                    dispatcher.set_unparker(unparker.clone());
-                    parker.park_timeout(
-                        test_should_end_by.saturating_duration_since(Instant::now()),
-                    );
+                    dispatcher.push_unparker(unparker.clone());
+                    parker.park_timeout(Duration::from_millis(1));
                     if Instant::now() > test_should_end_by {
                         panic!("test timed out after {duration:?} with allow_parking")
                     }
diff --git a/crates/gpui/src/platform/test/dispatcher.rs b/crates/gpui/src/platform/test/dispatcher.rs
index fc01d112d9d4198d0f06c370e5feb1193b29c677..538aacda83a095449193db6aab63f3a06189ef7a 100644
--- a/crates/gpui/src/platform/test/dispatcher.rs
+++ b/crates/gpui/src/platform/test/dispatcher.rs
@@ -38,7 +38,7 @@ struct TestDispatcherState {
     waiting_backtrace: Option,
     deprioritized_task_labels: HashSet,
     block_on_ticks: RangeInclusive,
-    last_parked: Option,
+    unparkers: Vec,
 }
 
 impl TestDispatcher {
@@ -58,7 +58,7 @@ impl TestDispatcher {
             waiting_backtrace: None,
             deprioritized_task_labels: Default::default(),
             block_on_ticks: 0..=1000,
-            last_parked: None,
+            unparkers: Default::default(),
         };
 
         TestDispatcher {
@@ -245,20 +245,14 @@ impl TestDispatcher {
         let block_on_ticks = lock.block_on_ticks.clone();
         lock.random.random_range(block_on_ticks)
     }
-    pub fn unpark_last(&self) {
-        self.state
-            .lock()
-            .last_parked
-            .take()
-            .as_ref()
-            .map(Unparker::unpark);
+
+    pub fn unpark_all(&self) {
+        self.state.lock().unparkers.retain(|parker| parker.unpark());
     }
 
-    pub fn set_unparker(&self, unparker: Unparker) {
-        let last = { self.state.lock().last_parked.replace(unparker) };
-        if let Some(last) = last {
-            last.unpark();
-        }
+    pub fn push_unparker(&self, unparker: Unparker) {
+        let mut state = self.state.lock();
+        state.unparkers.push(unparker);
     }
 }
 
@@ -299,7 +293,7 @@ impl PlatformDispatcher for TestDispatcher {
                 state.background.push(runnable);
             }
         }
-        self.unpark_last();
+        self.unpark_all();
     }
 
     fn dispatch_on_main_thread(&self, runnable: RunnableVariant) {
@@ -309,7 +303,7 @@ impl PlatformDispatcher for TestDispatcher {
             .entry(self.id)
             .or_default()
             .push_back(runnable);
-        self.unpark_last();
+        self.unpark_all();
     }
 
     fn dispatch_after(&self, duration: std::time::Duration, runnable: RunnableVariant) {

From 6c0069ca983128bb879c186297fdc721920ef522 Mon Sep 17 00:00:00 2001
From: Ben Kunkle
Date: Wed, 12 Nov 2025 09:52:11 -0800
Subject: [PATCH 0057/1030] zeta2: Improve error reporting and eval purity (#42470)

Closes #ISSUE

Improves error reporting for various failure modes of zeta2, including
failing to parse the `<old_text>`/`<new_text>` pattern, and the contents of
`<old_text>` failing to match.

Additionally, makes it so that evals are checked out into a worktree with
the _repo_ name instead of the _example_ name, in order to make sure that
the eval name has no influence on the model's prediction. The repo name
worktrees are still namespaced by the example name like
`{example_name}/{repo_name}` to ensure evals pointing to the same repo do
not conflict.

Release Notes:

- N/A *or* Added/Fixed/Improved ...
--------- Co-authored-by: Agus --- Cargo.lock | 1 + crates/zeta2/Cargo.toml | 3 +- crates/zeta2/src/retrieval_search.rs | 42 +++++++++++---------- crates/zeta2/src/xml_edits.rs | 56 ++++++++++++++++++++++++++-- crates/zeta_cli/src/example.rs | 16 +++++--- 5 files changed, 89 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 25bb2b4063ce437f5de1411a04d129a8de06aff0..d2c799dc41d03e8ed961f5d854ac74797efd01ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21719,6 +21719,7 @@ dependencies = [ "serde_json", "settings", "smol", + "strsim", "thiserror 2.0.17", "util", "uuid", diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml index 1cb3a866065748f8e39dee7a980b99ea0b6c63fa..0360a74e65a109a0c95ea4787a0df1c61b375615 100644 --- a/crates/zeta2/Cargo.toml +++ b/crates/zeta2/Cargo.toml @@ -37,11 +37,13 @@ release_channel.workspace = true serde.workspace = true serde_json.workspace = true smol.workspace = true +strsim.workspace = true thiserror.workspace = true util.workspace = true uuid.workspace = true workspace.workspace = true worktree.workspace = true +pretty_assertions.workspace = true [dev-dependencies] clock = { workspace = true, features = ["test-support"] } @@ -51,7 +53,6 @@ lsp.workspace = true indoc.workspace = true language = { workspace = true, features = ["test-support"] } language_model = { workspace = true, features = ["test-support"] } -pretty_assertions.workspace = true project = { workspace = true, features = ["test-support"] } settings = { workspace = true, features = ["test-support"] } zlog.workspace = true diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta2/src/retrieval_search.rs index f735f44cad9623711e5ed9a1293a74e34e084888..fe28976bb27e27cd6355d3efa13e0a1bf26d5962 100644 --- a/crates/zeta2/src/retrieval_search.rs +++ b/crates/zeta2/src/retrieval_search.rs @@ -81,25 +81,7 @@ pub async fn run_retrieval_searches( for (buffer, ranges) in results.iter_mut() { if let Some(snapshot) = snapshots.get(&buffer.entity_id()) { - ranges.sort_unstable_by(|a, b| { - a.start - .cmp(&b.start, snapshot) - .then(b.end.cmp(&b.end, snapshot)) - }); - - let mut index = 1; - while index < ranges.len() { - if ranges[index - 1] - .end - .cmp(&ranges[index].start, snapshot) - .is_gt() - { - let removed = ranges.remove(index); - ranges[index - 1].end = removed.end; - } else { - index += 1; - } - } + merge_anchor_ranges(ranges, snapshot); } } @@ -108,6 +90,28 @@ pub async fn run_retrieval_searches( .await } +fn merge_anchor_ranges(ranges: &mut Vec>, snapshot: &BufferSnapshot) { + ranges.sort_unstable_by(|a, b| { + a.start + .cmp(&b.start, snapshot) + .then(b.end.cmp(&b.end, snapshot)) + }); + + let mut index = 1; + while index < ranges.len() { + if ranges[index - 1] + .end + .cmp(&ranges[index].start, snapshot) + .is_ge() + { + let removed = ranges.remove(index); + ranges[index - 1].end = removed.end; + } else { + index += 1; + } + } +} + const MAX_EXCERPT_LEN: usize = 768; const MAX_RESULTS_LEN: usize = MAX_EXCERPT_LEN * 5; diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta2/src/xml_edits.rs index e8bcc4b1ba7eb2d00cd73b0b2e8d1638a5b00e32..6c9b5a97f6398cc00eaca08f9af6c4c9de991785 100644 --- a/crates/zeta2/src/xml_edits.rs +++ b/crates/zeta2/src/xml_edits.rs @@ -5,6 +5,15 @@ use std::path::Path; use std::sync::Arc; pub async fn parse_xml_edits<'a>( + input: &'a str, + get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range])> + Send, +) -> Result<(&'a BufferSnapshot, Vec<(Range, Arc)>)> { + parse_xml_edits_inner(input, get_buffer) + .await + 
.with_context(|| format!("Failed to parse XML edits:\n{input}")) +} + +async fn parse_xml_edits_inner<'a>( mut input: &'a str, get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range])> + Send, ) -> Result<(&'a BufferSnapshot, Vec<(Range, Arc)>)> { @@ -56,13 +65,29 @@ fn resolve_new_text_old_text_in_buffer( let range = range.to_offset(buffer); let text = buffer.text_for_range(range.clone()).collect::(); for (match_offset, _) in text.match_indices(old_text) { - if offset.is_some() { - anyhow::bail!("old_text is not unique enough:\n{}", old_text); + if let Some(offset) = offset { + let offset_match_point = buffer.offset_to_point(offset); + let second_match_point = buffer.offset_to_point(range.start + match_offset); + anyhow::bail!( + "old_text is not unique enough:\n{}\nFound at {:?} and {:?}", + old_text, + offset_match_point, + second_match_point + ); } offset = Some(range.start + match_offset); } } - offset.ok_or_else(|| anyhow!("Failed to match old_text:\n{}", old_text)) + offset.ok_or_else(|| { + #[cfg(debug_assertions)] + if let Some(closest_match) = closest_old_text_match(buffer, old_text) { + log::info!( + "Closest `old_text` match: {}", + pretty_assertions::StrComparison::new(old_text, &closest_match) + ) + } + anyhow!("Failed to match old_text:\n{}", old_text) + }) }?; let edits_within_hunk = language::text_diff(&old_text, &new_text); @@ -77,6 +102,31 @@ fn resolve_new_text_old_text_in_buffer( })) } +#[cfg(debug_assertions)] +fn closest_old_text_match(buffer: &TextBufferSnapshot, old_text: &str) -> Option { + let buffer_text = buffer.text(); + let mut cursor = 0; + let len = old_text.len(); + + let mut min_score = usize::MAX; + let mut min_start = 0; + + while cursor + len <= buffer_text.len() { + let candidate = &buffer_text[cursor..cursor + len]; + let score = strsim::levenshtein(candidate, old_text); + if score < min_score { + min_score = score; + min_start = cursor; + } + cursor += 1; + } + if min_score != usize::MAX { + Some(buffer_text[min_start..min_start + len].to_string()) + } else { + None + } +} + struct ParsedTag<'a> { attributes: &'a str, body: &'a str, diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index a470effa575f5e8ece3c59781dc09d9d1c5e822e..20176fbb5d73de83b90b8edb2831104ecddc8ef0 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -315,9 +315,6 @@ impl NamedExample { let (repo_owner, repo_name) = self.repo_name()?; let file_name = self.file_name(); - fs::create_dir_all(&*REPOS_DIR)?; - fs::create_dir_all(&*WORKTREES_DIR)?; - let repo_dir = REPOS_DIR.join(repo_owner.as_ref()).join(repo_name.as_ref()); let repo_lock = lock_repo(&repo_dir).await; @@ -332,7 +329,14 @@ impl NamedExample { } // Resolve the example to a revision, fetching it if needed. - let revision = run_git(&repo_dir, &["rev-parse", &self.example.revision]).await; + let revision = run_git( + &repo_dir, + &[ + "rev-parse", + &format!("{}^{{commit}}", self.example.revision), + ], + ) + .await; let revision = if let Ok(revision) = revision { revision } else { @@ -349,7 +353,7 @@ impl NamedExample { }; // Create the worktree for this example if needed. 
- let worktree_path = WORKTREES_DIR.join(&file_name); + let worktree_path = WORKTREES_DIR.join(&file_name).join(repo_name.as_ref()); if worktree_path.is_dir() { run_git(&worktree_path, &["clean", "--force", "-d"]).await?; run_git(&worktree_path, &["reset", "--hard", "HEAD"]).await?; @@ -477,7 +481,7 @@ impl NamedExample { let mut matches = text.match_indices(&cursor_excerpt); let Some((excerpt_offset, _)) = matches.next() else { anyhow::bail!( - "Cursor excerpt did not exist in buffer.\nExcerpt:\n\n{cursor_excerpt}\nBuffer text:\n{text}\n" + "\nExcerpt:\n\n{cursor_excerpt}\nBuffer text:\n{text}\n.Cursor excerpt did not exist in buffer." ); }; assert!(matches.next().is_none()); From 6501b0c311a1f8420c1ec1ed7b9f3186bfab6435 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Wed, 12 Nov 2025 10:16:13 -0800 Subject: [PATCH 0058/1030] zeta eval: Improve determinism and debugging ergonomics (#42478) - Improves the determinism of the search step for better cache reusability - Adds a `--cache force` mode that refuses to make any requests or searches that aren't cached - The structure of the `zeta-*` directories under `target` has been rethought for convenience Release Notes: - N/A --------- Co-authored-by: Agus --- .../src/retrieval_prompt.rs | 2 +- crates/zeta2/Cargo.toml | 2 +- crates/zeta2/src/retrieval_search.rs | 95 ++++++++++- crates/zeta2/src/xml_edits.rs | 39 ++++- crates/zeta2/src/zeta2.rs | 138 ++++++++++----- crates/zeta_cli/Cargo.toml | 2 +- crates/zeta_cli/src/evaluate.rs | 39 +++-- crates/zeta_cli/src/example.rs | 2 +- crates/zeta_cli/src/main.rs | 2 + crates/zeta_cli/src/paths.rs | 52 ++++-- crates/zeta_cli/src/predict.rs | 159 ++++++++++++------ 11 files changed, 395 insertions(+), 137 deletions(-) diff --git a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs index 7fbc3834dfd0f4bbfc4085d696b7fbf755e6dd3d..a11c56da41384257b8331a31161224c9e25d0894 100644 --- a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs @@ -44,7 +44,7 @@ pub struct SearchToolInput { } /// Search for relevant code by path, syntax hierarchy, and content. -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Hash)] pub struct SearchToolQuery { /// 1. A glob pattern to match file paths in the codebase to search in. 
pub glob: String, diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml index 0360a74e65a109a0c95ea4787a0df1c61b375615..1eef507e6def3d80560ff1515623d0c42687d74a 100644 --- a/crates/zeta2/Cargo.toml +++ b/crates/zeta2/Cargo.toml @@ -12,7 +12,7 @@ workspace = true path = "src/zeta2.rs" [features] -llm-response-cache = [] +eval-support = [] [dependencies] anyhow.workspace = true diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta2/src/retrieval_search.rs index fe28976bb27e27cd6355d3efa13e0a1bf26d5962..76501fb1e5c73a22ff8eebc5c29d117d45389beb 100644 --- a/crates/zeta2/src/retrieval_search.rs +++ b/crates/zeta2/src/retrieval_search.rs @@ -1,5 +1,3 @@ -use std::ops::Range; - use anyhow::Result; use cloud_zeta2_prompt::retrieval_prompt::SearchToolQuery; use collections::HashMap; @@ -14,17 +12,76 @@ use project::{ search::{SearchQuery, SearchResult}, }; use smol::channel; +use std::ops::Range; use util::{ ResultExt as _, paths::{PathMatcher, PathStyle}, }; use workspace::item::Settings as _; +#[cfg(feature = "eval-support")] +type CachedSearchResults = std::collections::BTreeMap>>; + pub async fn run_retrieval_searches( - project: Entity, queries: Vec, + project: Entity, + #[cfg(feature = "eval-support")] eval_cache: Option>, cx: &mut AsyncApp, ) -> Result, Vec>>> { + #[cfg(feature = "eval-support")] + let cache = if let Some(eval_cache) = eval_cache { + use crate::EvalCacheEntryKind; + use anyhow::Context; + use collections::FxHasher; + use std::hash::{Hash, Hasher}; + + let mut hasher = FxHasher::default(); + project.read_with(cx, |project, cx| { + let mut worktrees = project.worktrees(cx); + let Some(worktree) = worktrees.next() else { + panic!("Expected a single worktree in eval project. Found none."); + }; + assert!( + worktrees.next().is_none(), + "Expected a single worktree in eval project. Found more than one." + ); + worktree.read(cx).abs_path().hash(&mut hasher); + })?; + + queries.hash(&mut hasher); + let key = (EvalCacheEntryKind::Search, hasher.finish()); + + if let Some(cached_results) = eval_cache.read(key) { + let file_results = serde_json::from_str::(&cached_results) + .context("Failed to deserialize cached search results")?; + let mut results = HashMap::default(); + + for (path, ranges) in file_results { + let buffer = project + .update(cx, |project, cx| { + let project_path = project.find_project_path(path, cx).unwrap(); + project.open_buffer(project_path, cx) + })? 
+ .await?; + let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?; + let mut ranges = ranges + .into_iter() + .map(|range| { + snapshot.anchor_before(range.start)..snapshot.anchor_after(range.end) + }) + .collect(); + merge_anchor_ranges(&mut ranges, &snapshot); + results.insert(buffer, ranges); + } + + return Ok(results); + } + + Some((eval_cache, serde_json::to_string_pretty(&queries)?, key)) + } else { + None + }; + let (exclude_matcher, path_style) = project.update(cx, |project, cx| { let global_settings = WorktreeSettings::get_global(cx); let exclude_patterns = global_settings @@ -58,6 +115,8 @@ pub async fn run_retrieval_searches( } drop(results_tx); + #[cfg(feature = "eval-support")] + let cache = cache.clone(); cx.background_spawn(async move { let mut results: HashMap, Vec>> = HashMap::default(); let mut snapshots = HashMap::default(); @@ -79,6 +138,29 @@ pub async fn run_retrieval_searches( } } + #[cfg(feature = "eval-support")] + if let Some((cache, queries, key)) = cache { + let cached_results: CachedSearchResults = results + .iter() + .filter_map(|(buffer, ranges)| { + let snapshot = snapshots.get(&buffer.entity_id())?; + let path = snapshot.file().map(|f| f.path()); + let mut ranges = ranges + .iter() + .map(|range| range.to_offset(&snapshot)) + .collect::>(); + ranges.sort_unstable_by_key(|range| (range.start, range.end)); + + Some((path?.as_std_path().to_path_buf(), ranges)) + }) + .collect(); + cache.write( + key, + &queries, + &serde_json::to_string_pretty(&cached_results)?, + ); + } + for (buffer, ranges) in results.iter_mut() { if let Some(snapshot) = snapshots.get(&buffer.entity_id()) { merge_anchor_ranges(ranges, snapshot); @@ -489,9 +571,10 @@ mod tests { expected_output: &str, cx: &mut TestAppContext, ) { - let results = run_retrieval_searches(project.clone(), vec![query], &mut cx.to_async()) - .await - .unwrap(); + let results = + run_retrieval_searches(vec![query], project.clone(), None, &mut cx.to_async()) + .await + .unwrap(); let mut results = results.into_iter().collect::>(); results.sort_by_key(|results| { diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta2/src/xml_edits.rs index 6c9b5a97f6398cc00eaca08f9af6c4c9de991785..97087ec65e06a1a2f418ca0c4ebba41a19b1af84 100644 --- a/crates/zeta2/src/xml_edits.rs +++ b/crates/zeta2/src/xml_edits.rs @@ -105,21 +105,58 @@ fn resolve_new_text_old_text_in_buffer( #[cfg(debug_assertions)] fn closest_old_text_match(buffer: &TextBufferSnapshot, old_text: &str) -> Option { let buffer_text = buffer.text(); - let mut cursor = 0; let len = old_text.len(); + if len == 0 || buffer_text.len() < len { + return None; + } + let mut min_score = usize::MAX; let mut min_start = 0; + let old_text_bytes = old_text.as_bytes(); + let old_alpha_count = old_text_bytes + .iter() + .filter(|&&b| b.is_ascii_alphanumeric()) + .count(); + + let old_line_count = old_text.lines().count(); + + let mut cursor = 0; + while cursor + len <= buffer_text.len() { let candidate = &buffer_text[cursor..cursor + len]; + let candidate_bytes = candidate.as_bytes(); + + if usize::abs_diff(candidate.lines().count(), old_line_count) > 4 { + cursor += 1; + continue; + } + + let candidate_alpha_count = candidate_bytes + .iter() + .filter(|&&b| b.is_ascii_alphanumeric()) + .count(); + + // If alphanumeric character count differs by more than 30%, skip + if usize::abs_diff(old_alpha_count, candidate_alpha_count) * 10 > old_alpha_count * 3 { + cursor += 1; + continue; + } + let score = strsim::levenshtein(candidate, old_text); if score < min_score { 
min_score = score; min_start = cursor; + + if min_score <= len / 10 { + break; + } } + cursor += 1; } + if min_score != usize::MAX { Some(buffer_text[min_start..min_start + len].to_string()) } else { diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 6139c9c75e16f8805e6529dc1700eef1beacd713..d7bff2b51a69a031d2f24b0b357b9748dd5a473b 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -132,15 +132,8 @@ pub struct Zeta { options: ZetaOptions, update_required: bool, debug_tx: Option>, - #[cfg(feature = "llm-response-cache")] - llm_response_cache: Option>, -} - -#[cfg(feature = "llm-response-cache")] -pub trait LlmResponseCache: Send + Sync { - fn get_key(&self, url: &gpui::http_client::Url, body: &str) -> u64; - fn read_response(&self, key: u64) -> Option; - fn write_response(&self, key: u64, value: &str); + #[cfg(feature = "eval-support")] + eval_cache: Option>, } #[derive(Debug, Clone, PartialEq)] @@ -369,14 +362,14 @@ impl Zeta { ), update_required: false, debug_tx: None, - #[cfg(feature = "llm-response-cache")] - llm_response_cache: None, + #[cfg(feature = "eval-support")] + eval_cache: None, } } - #[cfg(feature = "llm-response-cache")] - pub fn with_llm_response_cache(&mut self, cache: Arc) { - self.llm_response_cache = Some(cache); + #[cfg(feature = "eval-support")] + pub fn with_eval_cache(&mut self, cache: Arc) { + self.eval_cache = Some(cache); } pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver { @@ -736,9 +729,19 @@ impl Zeta { // TODO data collection let can_collect_data = cx.is_staff(); - let mut included_files = project_state + let empty_context_files = HashMap::default(); + let context_files = project_state .and_then(|project_state| project_state.context.as_ref()) - .unwrap_or(&HashMap::default()) + .unwrap_or(&empty_context_files); + + #[cfg(feature = "eval-support")] + let parsed_fut = futures::future::join_all( + context_files + .keys() + .map(|buffer| buffer.read(cx).parsing_idle()), + ); + + let mut included_files = context_files .iter() .filter_map(|(buffer_entity, ranges)| { let buffer = buffer_entity.read(cx); @@ -751,12 +754,19 @@ impl Zeta { }) .collect::>(); - #[cfg(feature = "llm-response-cache")] - let llm_response_cache = self.llm_response_cache.clone(); + included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| { + (path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len())) + }); + + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); let request_task = cx.background_spawn({ let active_buffer = active_buffer.clone(); async move { + #[cfg(feature = "eval-support")] + parsed_fut.await; + let index_state = if let Some(index_state) = index_state { Some(index_state.lock_owned().await) } else { @@ -819,17 +829,17 @@ impl Zeta { let included_files = included_files .iter() - .map(|(_, buffer, path, ranges)| { + .map(|(_, snapshot, path, ranges)| { let excerpts = merge_excerpts( - &buffer, + &snapshot, ranges.iter().map(|range| { - let point_range = range.to_point(&buffer); + let point_range = range.to_point(&snapshot); Line(point_range.start.row)..Line(point_range.end.row) }), ); predict_edits_v3::IncludedFile { path: path.clone(), - max_row: Line(buffer.max_point().row), + max_row: Line(snapshot.max_point().row), excerpts, } }) @@ -948,8 +958,10 @@ impl Zeta { client, llm_token, app_version, - #[cfg(feature = "llm-response-cache")] - llm_response_cache, + #[cfg(feature = "eval-support")] + eval_cache, + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Prediction, ) .await; let 
request_time = chrono::Utc::now() - before_request; @@ -1049,9 +1061,8 @@ impl Zeta { client: Arc, llm_token: LlmApiToken, app_version: SemanticVersion, - #[cfg(feature = "llm-response-cache")] llm_response_cache: Option< - Arc, - >, + #[cfg(feature = "eval-support")] eval_cache: Option>, + #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, ) -> Result<(open_ai::Response, Option)> { let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() { http_client::Url::parse(&predict_edits_url)? @@ -1061,16 +1072,23 @@ impl Zeta { .build_zed_llm_url("/predict_edits/raw", &[])? }; - #[cfg(feature = "llm-response-cache")] - let cache_key = if let Some(cache) = llm_response_cache { - let request_json = serde_json::to_string(&request)?; - let key = cache.get_key(&url, &request_json); + #[cfg(feature = "eval-support")] + let cache_key = if let Some(cache) = eval_cache { + use collections::FxHasher; + use std::hash::{Hash, Hasher}; + + let mut hasher = FxHasher::default(); + url.hash(&mut hasher); + let request_str = serde_json::to_string_pretty(&request)?; + request_str.hash(&mut hasher); + let hash = hasher.finish(); - if let Some(response_str) = cache.read_response(key) { + let key = (eval_cache_kind, hash); + if let Some(response_str) = cache.read(key) { return Ok((serde_json::from_str(&response_str)?, None)); } - Some((cache, key)) + Some((cache, request_str, key)) } else { None }; @@ -1088,9 +1106,9 @@ impl Zeta { ) .await?; - #[cfg(feature = "llm-response-cache")] - if let Some((cache, key)) = cache_key { - cache.write_response(key, &serde_json::to_string(&response)?); + #[cfg(feature = "eval-support")] + if let Some((cache, request, key)) = cache_key { + cache.write(key, &request, &serde_json::to_string_pretty(&response)?); } Ok((response, usage)) @@ -1361,8 +1379,8 @@ impl Zeta { reasoning_effort: None, }; - #[cfg(feature = "llm-response-cache")] - let llm_response_cache = self.llm_response_cache.clone(); + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); cx.spawn(async move |this, cx| { log::trace!("Sending search planning request"); @@ -1371,8 +1389,10 @@ impl Zeta { client, llm_token, app_version, - #[cfg(feature = "llm-response-cache")] - llm_response_cache, + #[cfg(feature = "eval-support")] + eval_cache.clone(), + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Context, ) .await; let mut response = Self::handle_api_response(&this, response, cx)?; @@ -1421,8 +1441,14 @@ impl Zeta { log::trace!("Running retrieval search: {queries:#?}"); - let related_excerpts_result = - retrieval_search::run_retrieval_searches(project.clone(), queries, cx).await; + let related_excerpts_result = retrieval_search::run_retrieval_searches( + queries, + project.clone(), + #[cfg(feature = "eval-support")] + eval_cache, + cx, + ) + .await; log::trace!("Search queries executed"); @@ -1772,6 +1798,34 @@ fn add_signature( Some(signature_index) } +#[cfg(feature = "eval-support")] +pub type EvalCacheKey = (EvalCacheEntryKind, u64); + +#[cfg(feature = "eval-support")] +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum EvalCacheEntryKind { + Context, + Search, + Prediction, +} + +#[cfg(feature = "eval-support")] +impl std::fmt::Display for EvalCacheEntryKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + EvalCacheEntryKind::Search => write!(f, "search"), + EvalCacheEntryKind::Context => write!(f, "context"), + EvalCacheEntryKind::Prediction => write!(f, "prediction"), + } + } +} + +#[cfg(feature = "eval-support")] 
+pub trait EvalCache: Send + Sync { + fn read(&self, key: EvalCacheKey) -> Option; + fn write(&self, key: EvalCacheKey, input: &str, value: &str); +} + #[cfg(test)] mod tests { use std::{path::Path, sync::Arc}; diff --git a/crates/zeta_cli/Cargo.toml b/crates/zeta_cli/Cargo.toml index 2e62f2a4462e31b7632aa5e825ea76a4b7df5fc8..e18cf54787ca98e2be60db4977dd2de18e9c09e2 100644 --- a/crates/zeta_cli/Cargo.toml +++ b/crates/zeta_cli/Cargo.toml @@ -54,7 +54,7 @@ toml.workspace = true util.workspace = true watch.workspace = true zeta.workspace = true -zeta2 = { workspace = true, features = ["llm-response-cache"] } +zeta2 = { workspace = true, features = ["eval-support"] } zlog.workspace = true [dev-dependencies] diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index b5c23af24845a90d153943f6ee2ccd29bbfaf6a7..0359ccf0fea3179dd480645ad7031b61fc3a357c 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -14,18 +14,19 @@ use crate::{ PromptFormat, example::{Example, NamedExample}, headless::ZetaCliAppState, - predict::{PredictionDetails, zeta2_predict}, + paths::print_run_data_dir, + predict::{CacheMode, PredictionDetails, zeta2_predict}, }; #[derive(Debug, Args)] pub struct EvaluateArguments { example_paths: Vec, - #[clap(long)] - skip_cache: bool, #[arg(long, value_enum, default_value_t = PromptFormat::default())] prompt_format: PromptFormat, #[arg(long)] use_expected_context: bool, + #[clap(long, value_enum, default_value_t = CacheMode::default())] + cache: CacheMode, } pub async fn run_evaluate( @@ -39,43 +40,49 @@ pub async fn run_evaluate( cx.spawn(async move |cx| { run_evaluate_one( &path, - args.skip_cache, args.prompt_format, args.use_expected_context, + args.cache, app_state.clone(), cx, ) .await }) }); - let all_results = futures::future::try_join_all(all_tasks).await.unwrap(); + let all_results = futures::future::try_join_all(all_tasks).await; + + if let Ok(all_results) = &all_results { + let aggregated_result = EvaluationResult { + context: Scores::aggregate(all_results.iter().map(|r| &r.context)), + edit_prediction: Scores::aggregate(all_results.iter().map(|r| &r.edit_prediction)), + }; + + if example_len > 1 { + println!("\n{}", "-".repeat(80)); + println!("\n## TOTAL SCORES"); + println!("{}", aggregated_result.to_markdown()); + } + } - let aggregated_result = EvaluationResult { - context: Scores::aggregate(all_results.iter().map(|r| &r.context)), - edit_prediction: Scores::aggregate(all_results.iter().map(|r| &r.edit_prediction)), - }; + print_run_data_dir(); - if example_len > 1 { - println!("\n{}", "-".repeat(80)); - println!("# TOTAL SCORES:"); - println!("{}", aggregated_result.to_markdown()); - } + all_results.unwrap(); } pub async fn run_evaluate_one( example_path: &Path, - skip_cache: bool, prompt_format: PromptFormat, use_expected_context: bool, + cache_mode: CacheMode, app_state: Arc, cx: &mut AsyncApp, ) -> Result { let example = NamedExample::load(&example_path).unwrap(); let predictions = zeta2_predict( example.clone(), - skip_cache, prompt_format, use_expected_context, + cache_mode, &app_state, cx, ) diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 20176fbb5d73de83b90b8edb2831104ecddc8ef0..3e55fb0b62e0191fa5abf1014a71bc7f613fc0c9 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -398,7 +398,7 @@ impl NamedExample { Ok(worktree_path) } - fn file_name(&self) -> String { + pub fn file_name(&self) -> String { self.name .chars() .map(|c| { diff 
--git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 82760d6061d9b96a2da74bf5cb24e43d9ecdba60..1dd246e612979e7a4a77c74926be1a5cab72dbc6 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -54,6 +54,7 @@ enum Command { #[arg(long, value_enum, default_value_t = ExampleFormat::Md)] output_format: ExampleFormat, }, + Clean, } #[derive(Subcommand, Debug)] @@ -470,6 +471,7 @@ fn main() { let example = NamedExample::load(path).unwrap(); example.write(output_format, io::stdout()).unwrap(); } + Command::Clean => std::fs::remove_dir_all(&*crate::paths::TARGET_ZETA_DIR).unwrap(), }; let _ = cx.update(|cx| cx.quit()); diff --git a/crates/zeta_cli/src/paths.rs b/crates/zeta_cli/src/paths.rs index fc7f8b3afc3dbcd724649749a58b76dbab275750..73d541c6a0409deab5baac1714feded986fb94c1 100644 --- a/crates/zeta_cli/src/paths.rs +++ b/crates/zeta_cli/src/paths.rs @@ -1,16 +1,40 @@ use std::{env, path::PathBuf, sync::LazyLock}; -static TARGET_DIR: LazyLock = LazyLock::new(|| env::current_dir().unwrap().join("target")); -pub static CACHE_DIR: LazyLock = - LazyLock::new(|| TARGET_DIR.join("zeta-llm-response-cache")); -pub static REPOS_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-repos")); -pub static WORKTREES_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-worktrees")); -pub static LOGS_DIR: LazyLock = LazyLock::new(|| TARGET_DIR.join("zeta-logs")); -pub static LOGS_SEARCH_PROMPT: LazyLock = - LazyLock::new(|| LOGS_DIR.join("search_prompt.md")); -pub static LOGS_SEARCH_QUERIES: LazyLock = - LazyLock::new(|| LOGS_DIR.join("search_queries.json")); -pub static LOGS_PREDICTION_PROMPT: LazyLock = - LazyLock::new(|| LOGS_DIR.join("prediction_prompt.md")); -pub static LOGS_PREDICTION_RESPONSE: LazyLock = - LazyLock::new(|| LOGS_DIR.join("prediction_response.md")); +pub static TARGET_ZETA_DIR: LazyLock = + LazyLock::new(|| env::current_dir().unwrap().join("target/zeta")); +pub static CACHE_DIR: LazyLock = LazyLock::new(|| TARGET_ZETA_DIR.join("cache")); +pub static REPOS_DIR: LazyLock = LazyLock::new(|| TARGET_ZETA_DIR.join("repos")); +pub static WORKTREES_DIR: LazyLock = LazyLock::new(|| TARGET_ZETA_DIR.join("worktrees")); +pub static RUN_DIR: LazyLock = LazyLock::new(|| { + TARGET_ZETA_DIR + .join("runs") + .join(chrono::Local::now().format("%d-%m-%y-%H_%M_%S").to_string()) +}); +pub static LATEST_EXAMPLE_RUN_DIR: LazyLock = + LazyLock::new(|| TARGET_ZETA_DIR.join("latest")); + +pub fn print_run_data_dir() { + println!("\n## Run Data\n"); + + let current_dir = std::env::current_dir().unwrap(); + for file in std::fs::read_dir(&*RUN_DIR).unwrap() { + let file = file.unwrap(); + if file.file_type().unwrap().is_dir() { + for file in std::fs::read_dir(file.path()).unwrap() { + let path = file.unwrap().path(); + let path = path.strip_prefix(¤t_dir).unwrap_or(&path); + println!( + "- {}/\x1b[34m{}\x1b[0m", + path.parent().unwrap().display(), + path.file_name().unwrap().display(), + ); + } + } else { + let path = file.path(); + println!( + "- {} ", + path.strip_prefix(¤t_dir).unwrap_or(&path).display() + ); + } + } +} diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 32f2f564fc53df987579bf2946eb5765519157c6..82108df076c025089f5e374f447a3136fdb0c563 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -1,20 +1,15 @@ use crate::PromptFormat; use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample}; use crate::headless::ZetaCliAppState; -use crate::paths::{ - CACHE_DIR, LOGS_DIR, 
LOGS_PREDICTION_PROMPT, LOGS_PREDICTION_RESPONSE, LOGS_SEARCH_PROMPT, - LOGS_SEARCH_QUERIES, -}; +use crate::paths::{CACHE_DIR, LATEST_EXAMPLE_RUN_DIR, RUN_DIR, print_run_data_dir}; use ::serde::Serialize; -use anyhow::{Result, anyhow}; -use clap::Args; -use collections::HashMap; -use gpui::http_client::Url; -use language::{Anchor, Buffer, Point}; -// use cloud_llm_client::predict_edits_v3::PromptFormat; +use anyhow::{Context, Result, anyhow}; +use clap::{Args, ValueEnum}; use cloud_zeta2_prompt::{CURSOR_MARKER, write_codeblock}; +use collections::HashMap; use futures::StreamExt as _; use gpui::{AppContext, AsyncApp, Entity}; +use language::{Anchor, Buffer, Point}; use project::Project; use serde::Deserialize; use std::cell::Cell; @@ -25,7 +20,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; -use zeta2::LlmResponseCache; +use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey}; #[derive(Debug, Args)] pub struct PredictArguments { @@ -36,8 +31,31 @@ pub struct PredictArguments { #[clap(long, short, value_enum, default_value_t = PredictionsOutputFormat::Md)] format: PredictionsOutputFormat, example_path: PathBuf, - #[clap(long)] - skip_cache: bool, + #[clap(long, value_enum, default_value_t = CacheMode::default())] + cache: CacheMode, +} + +#[derive(Debug, ValueEnum, Default, Clone, Copy)] +pub enum CacheMode { + /// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint. + #[default] + #[value(alias = "request")] + Requests, + /// Ignore existing cache entries for both LLM and search. + Skip, + /// Use cached LLM responses AND search results for full determinism. Fails if they haven't been cached yet. + /// Useful for reproducing results and fixing bugs outside of search queries + Force, +} + +impl CacheMode { + fn use_cached_llm_responses(&self) -> bool { + matches!(self, CacheMode::Requests | CacheMode::Force) + } + + fn use_cached_search_results(&self) -> bool { + matches!(self, CacheMode::Force) + } } #[derive(clap::ValueEnum, Debug, Clone)] @@ -55,9 +73,9 @@ pub async fn run_zeta2_predict( let example = NamedExample::load(args.example_path).unwrap(); let result = zeta2_predict( example, - args.skip_cache, args.prompt_format, args.use_expected_context, + args.cache, &app_state, cx, ) @@ -65,14 +83,7 @@ pub async fn run_zeta2_predict( .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); - println!("## Logs\n"); - println!("Search prompt: {}", LOGS_SEARCH_PROMPT.display()); - println!("Search queries: {}", LOGS_SEARCH_QUERIES.display()); - println!("Prediction prompt: {}", LOGS_PREDICTION_PROMPT.display()); - println!( - "Prediction response: {}", - LOGS_PREDICTION_RESPONSE.display() - ); + print_run_data_dir(); } thread_local! { @@ -81,13 +92,12 @@ thread_local! 
{ pub async fn zeta2_predict( example: NamedExample, - skip_cache: bool, prompt_format: PromptFormat, use_expected_context: bool, + cache_mode: CacheMode, app_state: &Arc, cx: &mut AsyncApp, ) -> Result { - fs::create_dir_all(&*LOGS_DIR)?; let worktree_path = example.setup_worktree().await?; if !AUTHENTICATED.get() { @@ -126,8 +136,25 @@ pub async fn zeta2_predict( let zeta = cx.update(|cx| zeta2::Zeta::global(&app_state.client, &app_state.user_store, cx))?; + let example_run_dir = RUN_DIR.join(&example.file_name()); + fs::create_dir_all(&example_run_dir)?; + if LATEST_EXAMPLE_RUN_DIR.exists() { + fs::remove_file(&*LATEST_EXAMPLE_RUN_DIR)?; + } + + #[cfg(unix)] + std::os::unix::fs::symlink(&example_run_dir, &*LATEST_EXAMPLE_RUN_DIR) + .context("creating latest link")?; + + #[cfg(windows)] + std::os::windows::fs::symlink_dir(&example_run_dir, &*LATEST_EXAMPLE_RUN_DIR) + .context("creating latest link")?; + zeta.update(cx, |zeta, _cx| { - zeta.with_llm_response_cache(Arc::new(Cache { skip_cache })); + zeta.with_eval_cache(Arc::new(RunCache { + example_run_dir: example_run_dir.clone(), + cache_mode, + })); })?; cx.subscribe(&buffer_store, { @@ -159,12 +186,15 @@ pub async fn zeta2_predict( match event { zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { start_time = Some(info.timestamp); - fs::write(&*LOGS_SEARCH_PROMPT, &info.search_prompt)?; + fs::write( + example_run_dir.join("search_prompt.md"), + &info.search_prompt, + )?; } zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { search_queries_generated_at = Some(info.timestamp); fs::write( - &*LOGS_SEARCH_QUERIES, + example_run_dir.join("search_queries.json"), serde_json::to_string_pretty(&info.search_queries).unwrap(), )?; } @@ -176,7 +206,7 @@ pub async fn zeta2_predict( let prediction_started_at = Instant::now(); start_time.get_or_insert(prediction_started_at); fs::write( - &*LOGS_PREDICTION_PROMPT, + example_run_dir.join("prediction_prompt.md"), &request.local_prompt.unwrap_or_default(), )?; @@ -210,7 +240,7 @@ pub async fn zeta2_predict( let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?; let response = zeta2::text_from_response(response).unwrap_or_default(); let prediction_finished_at = Instant::now(); - fs::write(&*LOGS_PREDICTION_RESPONSE, &response)?; + fs::write(example_run_dir.join("prediction_response.md"), &response)?; let mut result = result.lock().unwrap(); @@ -328,48 +358,69 @@ async fn resolve_context_entry( Ok((buffer, ranges)) } -struct Cache { - skip_cache: bool, +struct RunCache { + cache_mode: CacheMode, + example_run_dir: PathBuf, } -impl Cache { - fn path(key: u64) -> PathBuf { - CACHE_DIR.join(format!("{key:x}.json")) +impl RunCache { + fn output_cache_path((kind, key): &EvalCacheKey) -> PathBuf { + CACHE_DIR.join(format!("{kind}_out_{key:x}.json",)) } -} -impl LlmResponseCache for Cache { - fn get_key(&self, url: &Url, body: &str) -> u64 { - use collections::FxHasher; - use std::hash::{Hash, Hasher}; + fn input_cache_path((kind, key): &EvalCacheKey) -> PathBuf { + CACHE_DIR.join(format!("{kind}_in_{key:x}.json",)) + } - let mut hasher = FxHasher::default(); - url.hash(&mut hasher); - body.hash(&mut hasher); - hasher.finish() + fn link_to_run(&self, key: &EvalCacheKey) { + let output_link_path = self.example_run_dir.join(format!("{}_out.json", key.0)); + fs::hard_link(Self::output_cache_path(key), &output_link_path).unwrap(); + + let input_link_path = self.example_run_dir.join(format!("{}_in.json", key.0)); + fs::hard_link(Self::input_cache_path(key), &input_link_path).unwrap(); } +} 
+ +impl EvalCache for RunCache { + fn read(&self, key: EvalCacheKey) -> Option { + let path = RunCache::output_cache_path(&key); - fn read_response(&self, key: u64) -> Option { - let path = Cache::path(key); if path.exists() { - if self.skip_cache { - log::info!("Skipping existing cached LLM response: {}", path.display()); - None - } else { - log::info!("Using LLM response from cache: {}", path.display()); + let use_cache = match key.0 { + EvalCacheEntryKind::Search => self.cache_mode.use_cached_search_results(), + EvalCacheEntryKind::Context | EvalCacheEntryKind::Prediction => { + self.cache_mode.use_cached_llm_responses() + } + }; + if use_cache { + log::info!("Using cache entry: {}", path.display()); + self.link_to_run(&key); Some(fs::read_to_string(path).unwrap()) + } else { + log::info!("Skipping cached entry: {}", path.display()); + None } + } else if matches!(self.cache_mode, CacheMode::Force) { + panic!( + "No cached entry found for {:?}. Run without `--cache force` at least once.", + key.0 + ); } else { None } } - fn write_response(&self, key: u64, value: &str) { + fn write(&self, key: EvalCacheKey, input: &str, output: &str) { fs::create_dir_all(&*CACHE_DIR).unwrap(); - let path = Cache::path(key); - log::info!("Writing LLM response to cache: {}", path.display()); - fs::write(path, value).unwrap(); + let input_path = RunCache::input_cache_path(&key); + fs::write(&input_path, input).unwrap(); + + let output_path = RunCache::output_cache_path(&key); + log::info!("Writing cache entry: {}", output_path.display()); + fs::write(&output_path, output).unwrap(); + + self.link_to_run(&key); } } From e8daab15ab06a04aee8e2137ebf4da1e5127901e Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Wed, 12 Nov 2025 19:18:10 +0100 Subject: [PATCH 0059/1030] debugger: Fix prevent creating breakpoints inside breakpoint editor (#42475) Closes #38057 This PR fixes that you can no longer create breakpoints inside the breakpoint editor in code called `BreakpointPromptEditor`. As you can see, inside the after video, there is no breakpoint editor created anymore. 
**Before**

https://github.com/user-attachments/assets/c4e02684-ac40-4176-bd19-f8f08e831dde

**After**

https://github.com/user-attachments/assets/f5b1176f-9545-4629-be12-05c64697a3de

Release Notes:

- Debugger: Prevent breakpoints from being created inside the breakpoint editor
---
 crates/editor/src/editor.rs | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs
index d4647300996ecfb14dbc470ef8d9cc8a5db3d1dd..cff337714b9619b54469e8915bfb36ff7a69111e 100644
--- a/crates/editor/src/editor.rs
+++ b/crates/editor/src/editor.rs
@@ -11060,6 +11060,10 @@ impl Editor {
         window: &mut Window,
         cx: &mut Context,
     ) {
+        if self.breakpoint_store.is_none() {
+            return;
+        }
+
         for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
             let breakpoint = breakpoint.unwrap_or_else(|| Breakpoint {
                 message: None,
@@ -11119,6 +11123,10 @@ impl Editor {
         window: &mut Window,
         cx: &mut Context,
     ) {
+        if self.breakpoint_store.is_none() {
+            return;
+        }
+
         for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
             let Some(breakpoint) = breakpoint.filter(|breakpoint| breakpoint.is_disabled()) else {
                 continue;
@@ -11138,6 +11146,10 @@ impl Editor {
         window: &mut Window,
         cx: &mut Context,
     ) {
+        if self.breakpoint_store.is_none() {
+            return;
+        }
+
         for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
             let Some(breakpoint) = breakpoint.filter(|breakpoint| breakpoint.is_enabled()) else {
                 continue;
@@ -11157,6 +11169,10 @@ impl Editor {
         window: &mut Window,
         cx: &mut Context,
     ) {
+        if self.breakpoint_store.is_none() {
+            return;
+        }
+
         for (anchor, breakpoint) in self.breakpoints_at_cursors(window, cx) {
             if let Some(breakpoint) = breakpoint {
                 self.edit_breakpoint_at_anchor(

From 4adec27a3d963fe155ea3cf927fab71be317b5a2 Mon Sep 17 00:00:00 2001
From: KyleBarton
Date: Wed, 12 Nov 2025 10:32:46 -0800
Subject: [PATCH 0060/1030] Implement pretty TypeScript errors (#42494)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #7844

This change uses tree-sitter highlights as a method of showing typescript
errors prettily, keeping regex as simple as possible:

(screenshot: "Screenshot 2025-11-11 at 3 40 24 PM" showing a pretty-rendered TypeScript error)

It covers three main areas:

1. Diagnostics

Diagnostics are now rendered with language-aware Markdown by providing the
project's language registry.

2. Vtsls

The LSP provider for typescript now implements the
`diagnostic_message_to_markdown` function in the `LspAdapter` trait, so as
to provide Diagnostics with \`\`\`typescript...\`\`\`-style code blocks for
any selection of typescript longer than one word. In the single-word case,
it simply wraps with \`\` (a sketch of this wrapping rule follows after
point 3).

3. Typescript's `highlights.scm`

`vtsls` doesn't provide strictly valid typescript in much of its messaging.
Rather, it returns a message with snippets of typescript values which are
invalid. Tree-sitter was not properly highlighting these snippets because it
was expecting key-value formats. For instance:

```
type foo = { foo: string; bar: string; baz: number[] }
```

is valid, whereas simply

```
{ foo: string; bar: string; baz: number[] }
```

is not. Therefore, highlights.scm needed to be adjusted in order to
pattern-match on literal values that might be returned from the vtsls
diagnostics messages. This was done by a) identifying arrow functions on
their own, and b) augmenting the `statement_block` pattern matching in order
to match on values which were clearly object literals.
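As a minimal sketch of the wrapping rule described in point 2 above (an editorial illustration only, with a made-up function name; the real logic lives in the `diagnostic_message_to_markdown` implementation for vtsls in this patch):

```rust
/// Hypothetical helper illustrating the rule from point 2: snippets longer
/// than one word become fenced typescript code blocks, single words become
/// inline code. This is only a sketch, not the vtsls.rs implementation.
fn wrap_typescript_snippet(snippet: &str) -> String {
    let fence = "`".repeat(3);
    if snippet.split_whitespace().count() > 1 {
        // Multi-word snippet: fence it so tree-sitter highlighting kicks in.
        format!("{fence}typescript\n{snippet}\n{fence}")
    } else {
        // Single word: plain inline code is enough.
        format!("`{snippet}`")
    }
}

fn main() {
    println!("{}", wrap_typescript_snippet("{ foo: string; bar: string }"));
    println!("{}", wrap_typescript_snippet("string"));
}
```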
This approach may not be exhaustive - I'm happy to work on any additional cases we might identify from `vtsls` here - but hopefully demonstrates an extensible approach to making these messages look nice, without taking on the technical burden of extensive regex. Release Notes: - Show pretty TypeScript errors with language-aware Markdown. --- crates/diagnostics/src/buffer_diagnostics.rs | 5 ++ crates/diagnostics/src/diagnostic_renderer.rs | 35 +++++++++-- crates/diagnostics/src/diagnostics.rs | 6 +- crates/editor/src/editor.rs | 21 +++++-- crates/editor/src/hover_popover.rs | 13 +++- .../languages/src/typescript/highlights.scm | 32 ++++++++++ crates/languages/src/vtsls.rs | 59 ++++++++++++++++++- 7 files changed, 159 insertions(+), 12 deletions(-) diff --git a/crates/diagnostics/src/buffer_diagnostics.rs b/crates/diagnostics/src/buffer_diagnostics.rs index 01626ddfd2a3f1a4773b2e88a9b8ff001b46680a..ed079c34864100238fd459cb2ec116bf21827fdd 100644 --- a/crates/diagnostics/src/buffer_diagnostics.rs +++ b/crates/diagnostics/src/buffer_diagnostics.rs @@ -370,11 +370,16 @@ impl BufferDiagnosticsEditor { continue; } + let languages = buffer_diagnostics_editor + .read_with(cx, |b, cx| b.project.read(cx).languages().clone()) + .ok(); + let diagnostic_blocks = cx.update(|_window, cx| { DiagnosticRenderer::diagnostic_blocks_for_group( group, buffer_snapshot.remote_id(), Some(Arc::new(buffer_diagnostics_editor.clone())), + languages, cx, ) })?; diff --git a/crates/diagnostics/src/diagnostic_renderer.rs b/crates/diagnostics/src/diagnostic_renderer.rs index 6204bf4b52ddb903773beac28627d53c3cce7765..2636b1aadc9708ff6832a5baa212277672dd305f 100644 --- a/crates/diagnostics/src/diagnostic_renderer.rs +++ b/crates/diagnostics/src/diagnostic_renderer.rs @@ -6,7 +6,7 @@ use editor::{ hover_popover::diagnostics_markdown_style, }; use gpui::{AppContext, Entity, Focusable, WeakEntity}; -use language::{BufferId, Diagnostic, DiagnosticEntryRef}; +use language::{BufferId, Diagnostic, DiagnosticEntryRef, LanguageRegistry}; use lsp::DiagnosticSeverity; use markdown::{Markdown, MarkdownElement}; use settings::Settings; @@ -27,6 +27,7 @@ impl DiagnosticRenderer { diagnostic_group: Vec>, buffer_id: BufferId, diagnostics_editor: Option>, + language_registry: Option>, cx: &mut App, ) -> Vec { let Some(primary_ix) = diagnostic_group @@ -75,11 +76,14 @@ impl DiagnosticRenderer { )) } } + results.push(DiagnosticBlock { initial_range: primary.range.clone(), severity: primary.diagnostic.severity, diagnostics_editor: diagnostics_editor.clone(), - markdown: cx.new(|cx| Markdown::new(markdown.into(), None, None, cx)), + markdown: cx.new(|cx| { + Markdown::new(markdown.into(), language_registry.clone(), None, cx) + }), }); } else { if entry.range.start.row.abs_diff(primary.range.start.row) >= 5 { @@ -91,7 +95,9 @@ impl DiagnosticRenderer { initial_range: entry.range.clone(), severity: entry.diagnostic.severity, diagnostics_editor: diagnostics_editor.clone(), - markdown: cx.new(|cx| Markdown::new(markdown.into(), None, None, cx)), + markdown: cx.new(|cx| { + Markdown::new(markdown.into(), language_registry.clone(), None, cx) + }), }); } } @@ -118,9 +124,16 @@ impl editor::DiagnosticRenderer for DiagnosticRenderer { buffer_id: BufferId, snapshot: EditorSnapshot, editor: WeakEntity, + language_registry: Option>, cx: &mut App, ) -> Vec> { - let blocks = Self::diagnostic_blocks_for_group(diagnostic_group, buffer_id, None, cx); + let blocks = Self::diagnostic_blocks_for_group( + diagnostic_group, + buffer_id, + None, + language_registry, + 
cx, + ); blocks .into_iter() @@ -146,9 +159,16 @@ impl editor::DiagnosticRenderer for DiagnosticRenderer { diagnostic_group: Vec>, range: Range, buffer_id: BufferId, + language_registry: Option>, cx: &mut App, ) -> Option> { - let blocks = Self::diagnostic_blocks_for_group(diagnostic_group, buffer_id, None, cx); + let blocks = Self::diagnostic_blocks_for_group( + diagnostic_group, + buffer_id, + None, + language_registry, + cx, + ); blocks .into_iter() .find_map(|block| (block.initial_range == range).then(|| block.markdown)) @@ -206,6 +226,11 @@ impl DiagnosticBlock { self.markdown.clone(), diagnostics_markdown_style(bcx.window, cx), ) + .code_block_renderer(markdown::CodeBlockRenderer::Default { + copy_button: false, + copy_button_on_hover: false, + border: false, + }) .on_url_click({ move |link, window, cx| { editor diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 344ce652969e9a6d54a22769741616def48ab3b1..92a4ba097f21d1f5894235bb2356c7ded9413359 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -73,7 +73,7 @@ pub fn init(cx: &mut App) { } pub(crate) struct ProjectDiagnosticsEditor { - project: Entity, + pub project: Entity, workspace: WeakEntity, focus_handle: FocusHandle, editor: Entity, @@ -545,11 +545,15 @@ impl ProjectDiagnosticsEditor { if group_severity.is_none_or(|s| s > max_severity) { continue; } + let languages = this + .read_with(cx, |t, cx| t.project.read(cx).languages().clone()) + .ok(); let more = cx.update(|_, cx| { crate::diagnostic_renderer::DiagnosticRenderer::diagnostic_blocks_for_group( group, buffer_snapshot.remote_id(), Some(diagnostics_toolbar_editor.clone()), + languages, cx, ) })?; diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index cff337714b9619b54469e8915bfb36ff7a69111e..04d8794169ddcc0410f122c0d56124c7e5bcc254 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -117,8 +117,9 @@ use language::{ AutoindentMode, BlockCommentConfig, BracketMatch, BracketPair, Buffer, BufferRow, BufferSnapshot, Capability, CharClassifier, CharKind, CharScopeContext, CodeLabel, CursorShape, DiagnosticEntryRef, DiffOptions, EditPredictionsMode, EditPreview, HighlightedText, IndentKind, - IndentSize, Language, OffsetRangeExt, OutlineItem, Point, Runnable, RunnableRange, Selection, - SelectionGoal, TextObject, TransactionId, TreeSitterOptions, WordsQuery, + IndentSize, Language, LanguageRegistry, OffsetRangeExt, OutlineItem, Point, Runnable, + RunnableRange, Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions, + WordsQuery, language_settings::{ self, LspInsertMode, RewrapBehavior, WordsCompletionMode, all_language_settings, language_settings, @@ -371,6 +372,7 @@ pub trait DiagnosticRenderer { buffer_id: BufferId, snapshot: EditorSnapshot, editor: WeakEntity, + language_registry: Option>, cx: &mut App, ) -> Vec>; @@ -379,6 +381,7 @@ pub trait DiagnosticRenderer { diagnostic_group: Vec>, range: Range, buffer_id: BufferId, + language_registry: Option>, cx: &mut App, ) -> Option>; @@ -17947,8 +17950,18 @@ impl Editor { .diagnostic_group(buffer_id, diagnostic.diagnostic.group_id) .collect::>(); - let blocks = - renderer.render_group(diagnostic_group, buffer_id, snapshot, cx.weak_entity(), cx); + let language_registry = self + .project() + .map(|project| project.read(cx).languages().clone()); + + let blocks = renderer.render_group( + diagnostic_group, + buffer_id, + snapshot, + cx.weak_entity(), + language_registry, + cx, + ); 
let blocks = self.display_map.update(cx, |display_map, cx| { display_map.insert_blocks(blocks, cx).into_iter().collect() diff --git a/crates/editor/src/hover_popover.rs b/crates/editor/src/hover_popover.rs index 1da3361f53853a5ea5a9d532b9ee2c05d6010a5d..721fce34c8c030322207cd74a69a266119596086 100644 --- a/crates/editor/src/hover_popover.rs +++ b/crates/editor/src/hover_popover.rs @@ -341,7 +341,13 @@ fn show_hover( renderer .as_ref() .and_then(|renderer| { - renderer.render_hover(group, point_range, buffer_id, cx) + renderer.render_hover( + group, + point_range, + buffer_id, + language_registry.clone(), + cx, + ) }) .context("no rendered diagnostic") })??; @@ -986,6 +992,11 @@ impl DiagnosticPopover { self.markdown.clone(), diagnostics_markdown_style(window, cx), ) + .code_block_renderer(markdown::CodeBlockRenderer::Default { + copy_button: false, + copy_button_on_hover: false, + border: false, + }) .on_url_click( move |link, window, cx| { if let Some(renderer) = GlobalDiagnosticRenderer::global(cx) diff --git a/crates/languages/src/typescript/highlights.scm b/crates/languages/src/typescript/highlights.scm index 6474ba2a05af330b1a7bd2da8ed3411b9132fe22..14f6e2203476e5483af5bc21b69b6c62019c6b82 100644 --- a/crates/languages/src/typescript/highlights.scm +++ b/crates/languages/src/typescript/highlights.scm @@ -9,6 +9,36 @@ (type_identifier) @type (predefined_type) @type.builtin +;; Highlights object literals by hijacking the statement_block pattern, but only if +;; the statement block follows an object literal pattern +((statement_block + (labeled_statement + ;; highlight the label like a property name + label: (statement_identifier) @property.name + body: [ + ;; match a terminating expression statement + (expression_statement + ;; single identifier - treat as a type name + [(identifier) @type.name + ;; object - treat as a property - type pair + (object + (pair + key: (_) @property.name + value: (_) @type.name)) + ;; subscript_expression - treat as an array declaration + (subscript_expression + object: (_) @type.name + index: (_) + ) + ;; templated string - treat each identifier contained as a type name + (template_string + (template_substitution + (identifier) @type.name)) + ]) + ;; match a nested statement block + (statement_block) @nested + ]))) + (import_specifier "type" name: (identifier) @type @@ -79,6 +109,8 @@ left: (identifier) @function right: [(function_expression) (arrow_function)]) +(arrow_function) @function + ; Literals (this) @variable.special diff --git a/crates/languages/src/vtsls.rs b/crates/languages/src/vtsls.rs index 0766be24bab6a220748523c1107d40f5a58f03ae..fa1f47ff792265bd433ee82831dbd43f7500b289 100644 --- a/crates/languages/src/vtsls.rs +++ b/crates/languages/src/vtsls.rs @@ -6,11 +6,12 @@ use language::{LanguageName, LspAdapter, LspAdapterDelegate, LspInstaller, Toolc use lsp::{CodeActionKind, LanguageServerBinary, LanguageServerName}; use node_runtime::{NodeRuntime, VersionStrategy}; use project::{Fs, lsp_store::language_server_settings}; +use regex::Regex; use serde_json::Value; use std::{ ffi::OsString, path::{Path, PathBuf}, - sync::Arc, + sync::{Arc, LazyLock}, }; use util::{ResultExt, maybe, merge_json_value_into}; @@ -56,6 +57,20 @@ impl VtslsLspAdapter { None } } + + pub fn enhance_diagnostic_message(message: &str) -> Option { + static SINGLE_WORD_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"'([^\s']*)'").expect("Failed to create REGEX")); + + static MULTI_WORD_REGEX: LazyLock = + LazyLock::new(|| Regex::new(r"'([^']+\s+[^']*)'").expect("Failed to 
create REGEX")); + + let first = SINGLE_WORD_REGEX.replace_all(message, "`$1`").to_string(); + let second = MULTI_WORD_REGEX + .replace_all(&first, "\n```typescript\n$1\n```\n") + .to_string(); + Some(second) + } } pub struct TypeScriptVersions { @@ -274,6 +289,10 @@ impl LspAdapter for VtslsLspAdapter { Ok(default_workspace_configuration) } + fn diagnostic_message_to_markdown(&self, message: &str) -> Option { + VtslsLspAdapter::enhance_diagnostic_message(message) + } + fn language_ids(&self) -> HashMap { HashMap::from_iter([ (LanguageName::new("TypeScript"), "typescript".into()), @@ -302,3 +321,41 @@ async fn get_cached_ts_server_binary( .await .log_err() } + +#[cfg(test)] +mod tests { + use crate::vtsls::VtslsLspAdapter; + + #[test] + fn test_diagnostic_message_to_markdown() { + // Leaves simple messages unchanged + let message = "The expected type comes from the return type of this signature."; + + let expected = "The expected type comes from the return type of this signature."; + + assert_eq!( + VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"), + expected + ); + + // Parses both multi-word and single-word correctly + let message = "Property 'baz' is missing in type '{ foo: string; bar: string; }' but required in type 'User'."; + + let expected = "Property `baz` is missing in type \n```typescript\n{ foo: string; bar: string; }\n```\n but required in type `User`."; + + assert_eq!( + VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"), + expected + ); + + // Parses multi-and-single word in any order, and ignores existing newlines + let message = "Type '() => { foo: string; bar: string; }' is not assignable to type 'GetUserFunction'.\n Property 'baz' is missing in type '{ foo: string; bar: string; }' but required in type 'User'."; + + let expected = "Type \n```typescript\n() => { foo: string; bar: string; }\n```\n is not assignable to type `GetUserFunction`.\n Property `baz` is missing in type \n```typescript\n{ foo: string; bar: string; }\n```\n but required in type `User`."; + + assert_eq!( + VtslsLspAdapter::enhance_diagnostic_message(message).expect("Should be some"), + expected + ); + } +} From 1c625f87835a2b12126ef538671ef72780b026b7 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Wed, 12 Nov 2025 10:33:02 -0800 Subject: [PATCH 0061/1030] Fix JSON Schema documentation for `code_actions_on_format` (#42128) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/settings/src/settings_content/language.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index a0a8aff3ae82a9001eb52367ab315912b5aac609..fc11dd4956a50906951af8fa43a7dacc61568f70 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -334,7 +334,7 @@ pub struct LanguageSettingsContent { /// /// Default: true pub use_on_type_format: Option, - /// Which code actions to run on save after the formatter. + /// Which code actions to run on save before the formatter. /// These are not run if formatting is off. /// /// Default: {} (or {"source.organizeImports": true} for Go). 
From cb4067723b42d3f84470e006a40e3f1b6ed4607e Mon Sep 17 00:00:00 2001 From: Konstantinos Lyrakis <24938740+lyrakisk@users.noreply.github.com> Date: Wed, 12 Nov 2025 20:07:34 +0100 Subject: [PATCH 0062/1030] Fix typo (#42559) Fixed a typo in the docs Release Notes: - N/A --- docs/src/remote-development.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/remote-development.md b/docs/src/remote-development.md index 057a3d2e0814e083a3ecbbeafd987762cd825388..f046fa44334554230d19f885e6e38ab0274f2b44 100644 --- a/docs/src/remote-development.md +++ b/docs/src/remote-development.md @@ -206,7 +206,7 @@ If you are struggling with connection issues, you should be able to see more inf ## Supported SSH Options -Under the hood, Zed shells out to the `ssh` binary to connect to the remote server. We create one SSH control master per project, and use then use that to multiplex SSH connections for the Zed protocol itself, any terminals you open and tasks you run. We read settings from your SSH config file, but if you want to specify additional options to the SSH control master you can configure Zed to set them. +Under the hood, Zed shells out to the `ssh` binary to connect to the remote server. We create one SSH control master per project, and then use that to multiplex SSH connections for the Zed protocol itself, any terminals you open and tasks you run. We read settings from your SSH config file, but if you want to specify additional options to the SSH control master you can configure Zed to set them. When typing in the "Connect New Server" dialog, you can use bash-style quoting to pass options containing a space. Once you have created a server it will be added to the `"ssh_connections": []` array in your settings file. You can edit the settings file directly to make changes to SSH connections. From b403c199dfd4491e1c1bcec2b4fed257f5e0e51a Mon Sep 17 00:00:00 2001 From: KyleBarton Date: Wed, 12 Nov 2025 11:59:10 -0800 Subject: [PATCH 0063/1030] Add additional comment for context in Typescript highlights (#42564) This adds additional comments which were left out from #42494 by accident. Namely, it describes why we have additional custom highlighting in `highlights.scm` for the Typescript grammar.
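As a rough illustration (the snippet below is not part of this change; the type names simply echo the diagnostic strings used in the vtsls tests earlier in this series), code like the following produces diagnostics whose quoted multi-word types the adapter wraps in fenced TypeScript blocks, which is why the grammar needs to highlight fragments that never add up to a complete program:

```typescript
// Hypothetical example; identifiers are illustrative, not taken from this patch.
type User = { foo: string; bar: string; baz: string };
type GetUserFunction = () => User;

// Intentionally fails to typecheck: the returned object is missing `baz`, so the
// language server reports something like:
//   Type '() => { foo: string; bar: string; }' is not assignable to type 'GetUserFunction'.
//   Property 'baz' is missing in type '{ foo: string; bar: string; }' but required in type 'User'.
// Those quoted fragments are the "snippets" that end up inside fenced code blocks and
// need highlighting even though they are not valid TypeScript on their own.
const getUser: GetUserFunction = () => {
  return { foo: "a", bar: "b" };
};
```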
Release Notes: - N/A --- crates/languages/src/typescript/highlights.scm | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/languages/src/typescript/highlights.scm b/crates/languages/src/typescript/highlights.scm index 14f6e2203476e5483af5bc21b69b6c62019c6b82..5e8d55581e3ae86c85ca2b845e8a07caa6444c1d 100644 --- a/crates/languages/src/typescript/highlights.scm +++ b/crates/languages/src/typescript/highlights.scm @@ -9,6 +9,9 @@ (type_identifier) @type (predefined_type) @type.builtin +;; Enables ts-pretty-errors +;; The Lsp returns "snippets" of typescript, which are not valid typescript in totality, +;; but should still be highlighted ;; Highlights object literals by hijacking the statement_block pattern, but only if ;; the statement block follows an object literal pattern ((statement_block From 73e5df6445e22e42182509af88956fa346308056 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 12 Nov 2025 21:05:40 +0100 Subject: [PATCH 0064/1030] ci: Install pre-built cargo nextest instead of rolling our own (#42556) Closes #ISSUE Release Notes: - N/A --- .github/workflows/release.yml | 9 +-------- .github/workflows/release_nightly.yml | 3 --- .github/workflows/run_cron_unit_evals.yml | 3 +-- .github/workflows/run_tests.yml | 9 +-------- .github/workflows/run_unit_evals.yml | 3 +-- tooling/xtask/src/tasks/workflows/run_agent_evals.rs | 2 +- tooling/xtask/src/tasks/workflows/run_tests.rs | 4 +++- tooling/xtask/src/tasks/workflows/steps.rs | 4 ++-- 8 files changed, 10 insertions(+), 27 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 25f8b00910f5d64e9319eb40943ae1b5b89d8f28..88c719d60027aa6a684666cbc1f7430c9f471502 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,9 +29,6 @@ jobs: - name: steps::clippy run: ./script/clippy shell: bash -euxo pipefail {0} - - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} @@ -78,8 +75,7 @@ jobs: run: ./script/clippy shell: bash -euxo pipefail {0} - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} + uses: taiki-e/install-action@nextest - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} @@ -112,9 +108,6 @@ jobs: - name: steps::clippy run: ./script/clippy.ps1 shell: pwsh - - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: pwsh - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index 431308bd1cfdf6f4385a8f462edcab8c5769ba5f..c5d498fe49c350dcb651f1037d9c470e921f2cb7 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -47,9 +47,6 @@ jobs: - name: steps::clippy run: ./script/clippy.ps1 shell: pwsh - - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: pwsh - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh diff --git a/.github/workflows/run_cron_unit_evals.yml b/.github/workflows/run_cron_unit_evals.yml index a692a6707e3d223e7cea039419dc933f68e82896..e7dcb04f77f3507aa861aee30e152850cb36b600 100644 --- 
a/.github/workflows/run_cron_unit_evals.yml +++ b/.github/workflows/run_cron_unit_evals.yml @@ -37,8 +37,7 @@ jobs: run: ./script/download-wasi-sdk shell: bash -euxo pipefail {0} - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} + uses: taiki-e/install-action@nextest - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index a7d0a145b6d26d964020f48c321556032ae567ed..2322f5d7659366ec312bc76b7501afdab86ff5dc 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -113,9 +113,6 @@ jobs: - name: steps::clippy run: ./script/clippy.ps1 shell: pwsh - - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: pwsh - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh @@ -164,8 +161,7 @@ jobs: run: ./script/clippy shell: bash -euxo pipefail {0} - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} + uses: taiki-e/install-action@nextest - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} @@ -200,9 +196,6 @@ jobs: - name: steps::clippy run: ./script/clippy shell: bash -euxo pipefail {0} - - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} diff --git a/.github/workflows/run_unit_evals.yml b/.github/workflows/run_unit_evals.yml index b4da67e25c48909d5f01cdd6e8c2cbab9d0b8c67..d4d4529a30e1abfc4be06e7a39640d09f6b06a63 100644 --- a/.github/workflows/run_unit_evals.yml +++ b/.github/workflows/run_unit_evals.yml @@ -46,8 +46,7 @@ jobs: run: ./script/download-wasi-sdk shell: bash -euxo pipefail {0} - name: steps::cargo_install_nextest - run: cargo install cargo-nextest --locked - shell: bash -euxo pipefail {0} + uses: taiki-e/install-action@nextest - name: steps::clear_target_dir_if_large run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index e3f001f8c2b722584a8cb117ad6980c71df95854..fd9f696dc9537ec21388dc2a287eb52b6334ce70 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -126,7 +126,7 @@ fn unit_evals(commit: Option<&Input>) -> Job { .add_step(steps::setup_cargo_config(Platform::Linux)) .add_step(steps::cache_rust_dependencies_namespace()) .map(steps::install_linux_dependencies) - .add_step(steps::cargo_install_nextest(Platform::Linux)) + .add_step(steps::cargo_install_nextest()) .add_step(steps::clear_target_dir_if_large(Platform::Linux)) .add_step(match commit { Some(commit) => script_step.add_env(("UNIT_EVAL_COMMIT", commit)), diff --git a/tooling/xtask/src/tasks/workflows/run_tests.rs b/tooling/xtask/src/tasks/workflows/run_tests.rs index 2c2cd306cd26ea3695f6f94db3e571cffe427b0c..f8212fed243c1c3bccffe7240ee152fa203e14d2 100644 --- a/tooling/xtask/src/tasks/workflows/run_tests.rs +++ b/tooling/xtask/src/tasks/workflows/run_tests.rs @@ -321,7 +321,9 @@ pub(crate) fn run_platform_tests(platform: Platform) -> NamedJob { ) .add_step(steps::setup_node()) 
.add_step(steps::clippy(platform)) - .add_step(steps::cargo_install_nextest(platform)) + .when(platform == Platform::Linux, |job| { + job.add_step(steps::cargo_install_nextest()) + }) .add_step(steps::clear_target_dir_if_large(platform)) .add_step(steps::cargo_nextest(platform)) .add_step(steps::cleanup_cargo_config(platform)), diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs index 3ca3610a5eca612d98154495e9ad6daf03d09997..969dd35db07dc6ed315e0fa3e3ae2b69934e8b95 100644 --- a/tooling/xtask/src/tasks/workflows/steps.rs +++ b/tooling/xtask/src/tasks/workflows/steps.rs @@ -48,8 +48,8 @@ pub fn cargo_fmt() -> Step { named::bash("cargo fmt --all -- --check") } -pub fn cargo_install_nextest(platform: Platform) -> Step { - named::run(platform, "cargo install cargo-nextest --locked") +pub fn cargo_install_nextest() -> Step { + named::uses("taiki-e", "install-action", "nextest") } pub fn cargo_nextest(platform: Platform) -> Step { From 6b239c3a9a496bf622e801a3a4a3a77f73add2c1 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 12 Nov 2025 21:27:04 +0100 Subject: [PATCH 0065/1030] Bump Rust to 1.91.1 (#42561) Release Notes: - N/A --------- Co-authored-by: Julia Ryan --- Dockerfile-collab | 2 +- crates/call/src/call_impl/room.rs | 8 ++----- crates/editor/src/editor.rs | 9 ++----- crates/gpui/src/interactive.rs | 18 ++++---------- crates/gpui/src/platform.rs | 18 ++++---------- crates/gpui/src/tab_stop.rs | 2 +- .../src/settings_content/language_model.rs | 9 ++----- crates/text/src/selection.rs | 14 +++++------ crates/vim/src/state.rs | 9 ++----- crates/workspace/src/pane.rs | 9 ++----- flake.lock | 24 +++++++++---------- rust-toolchain.toml | 2 +- 12 files changed, 39 insertions(+), 85 deletions(-) diff --git a/Dockerfile-collab b/Dockerfile-collab index a85fe93f198475534cb7396abe594f9d02eeb57b..4bc369140e79219b9719b570659e9edd27453260 100644 --- a/Dockerfile-collab +++ b/Dockerfile-collab @@ -1,6 +1,6 @@ # syntax = docker/dockerfile:1.2 -FROM rust:1.90-bookworm as builder +FROM rust:1.91.1-bookworm as builder WORKDIR app COPY . . diff --git a/crates/call/src/call_impl/room.rs b/crates/call/src/call_impl/room.rs index e659d1cf05b228423796d4c48906d568d71770d9..2a540619d4576ec7fcf711b288ecc12bf89fd20c 100644 --- a/crates/call/src/call_impl/room.rs +++ b/crates/call/src/call_impl/room.rs @@ -1683,7 +1683,9 @@ impl LiveKitRoom { } } +#[derive(Default)] enum LocalTrack { + #[default] None, Pending { publish_id: usize, @@ -1694,12 +1696,6 @@ enum LocalTrack { }, } -impl Default for LocalTrack { - fn default() -> Self { - Self::None - } -} - #[derive(Copy, Clone, PartialEq, Eq)] pub enum RoomStatus { Online, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 04d8794169ddcc0410f122c0d56124c7e5bcc254..2da80a405a0db357712039f06d10c9e6b33e05c8 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1300,8 +1300,9 @@ struct SelectionHistoryEntry { add_selections_state: Option, } -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq)] enum SelectionHistoryMode { + #[default] Normal, Undoing, Redoing, @@ -1314,12 +1315,6 @@ struct HoveredCursor { selection_id: usize, } -impl Default for SelectionHistoryMode { - fn default() -> Self { - Self::Normal - } -} - #[derive(Debug)] /// SelectionEffects controls the side-effects of updating the selection. 
/// diff --git a/crates/gpui/src/interactive.rs b/crates/gpui/src/interactive.rs index 8659433278d13c84a11312127639f700bfbb9cdc..9e18b7990d9a30f2fd5e50010766d5f14a33e4e2 100644 --- a/crates/gpui/src/interactive.rs +++ b/crates/gpui/src/interactive.rs @@ -305,9 +305,10 @@ pub enum KeyboardButton { } /// An enum representing the mouse button that was pressed. -#[derive(Hash, PartialEq, Eq, Copy, Clone, Debug)] +#[derive(Hash, Default, PartialEq, Eq, Copy, Clone, Debug)] pub enum MouseButton { /// The left mouse button. + #[default] Left, /// The right mouse button. @@ -333,28 +334,17 @@ impl MouseButton { } } -impl Default for MouseButton { - fn default() -> Self { - Self::Left - } -} - /// A navigation direction, such as back or forward. -#[derive(Hash, PartialEq, Eq, Copy, Clone, Debug)] +#[derive(Hash, Default, PartialEq, Eq, Copy, Clone, Debug)] pub enum NavigationDirection { /// The back button. + #[default] Back, /// The forward button. Forward, } -impl Default for NavigationDirection { - fn default() -> Self { - Self::Back - } -} - /// A mouse move event from the platform. #[derive(Clone, Debug, Default)] pub struct MouseMoveEvent { diff --git a/crates/gpui/src/platform.rs b/crates/gpui/src/platform.rs index 04ae4480faf08015f6e4b6e62e7210b55997e3d4..e50f407dc313038032c433aa0243d3c7791c5c1f 100644 --- a/crates/gpui/src/platform.rs +++ b/crates/gpui/src/platform.rs @@ -1346,11 +1346,12 @@ pub enum WindowKind { /// /// On macOS, this corresponds to named [`NSAppearance`](https://developer.apple.com/documentation/appkit/nsappearance) /// values. -#[derive(Copy, Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] pub enum WindowAppearance { /// A light appearance. /// /// On macOS, this corresponds to the `aqua` appearance. + #[default] Light, /// A light appearance with vibrant colors. @@ -1369,12 +1370,6 @@ pub enum WindowAppearance { VibrantDark, } -impl Default for WindowAppearance { - fn default() -> Self { - Self::Light - } -} - /// The appearance of the background of the window itself, when there is /// no content or the content is transparent. 
#[derive(Copy, Clone, Debug, Default, PartialEq)] @@ -1475,9 +1470,10 @@ impl From<&str> for PromptButton { } /// The style of the cursor (pointer) -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] +#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash, Serialize, Deserialize, JsonSchema)] pub enum CursorStyle { /// The default cursor + #[default] Arrow, /// A text input cursor @@ -1564,12 +1560,6 @@ pub enum CursorStyle { None, } -impl Default for CursorStyle { - fn default() -> Self { - Self::Arrow - } -} - /// A clipboard item that should be copied to the clipboard #[derive(Clone, Debug, Eq, PartialEq)] pub struct ClipboardItem { diff --git a/crates/gpui/src/tab_stop.rs b/crates/gpui/src/tab_stop.rs index 8a95a3975af736d544e01cbf6e212994b8e7e8c6..a2050059634d202490e7156830fbe579d48c47ab 100644 --- a/crates/gpui/src/tab_stop.rs +++ b/crates/gpui/src/tab_stop.rs @@ -320,7 +320,7 @@ mod tests { let focus_map = Arc::new(FocusMap::default()); let mut tab_index_map = TabStopMap::default(); - let focus_handles = vec![ + let focus_handles = [ FocusHandle::new(&focus_map).tab_stop(true).tab_index(0), FocusHandle::new(&focus_map).tab_stop(true).tab_index(1), FocusHandle::new(&focus_map).tab_stop(true).tab_index(1), diff --git a/crates/settings/src/settings_content/language_model.rs b/crates/settings/src/settings_content/language_model.rs index a0aa57a970c2483e4d9c617506d7b869c223cdf0..50ad812142e1544d2fa7947d4c6a845c6d459090 100644 --- a/crates/settings/src/settings_content/language_model.rs +++ b/crates/settings/src/settings_content/language_model.rs @@ -388,19 +388,14 @@ pub struct OpenRouterProvider { sort: Option, } -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] +#[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "lowercase")] pub enum DataCollection { + #[default] Allow, Disallow, } -impl Default for DataCollection { - fn default() -> Self { - Self::Allow - } -} - fn default_true() -> bool { true } diff --git a/crates/text/src/selection.rs b/crates/text/src/selection.rs index e690792d0c903031f8fc7e8bf81215bf0db0e336..349d557fab8c980901149698223cae78739797d8 100644 --- a/crates/text/src/selection.rs +++ b/crates/text/src/selection.rs @@ -2,11 +2,15 @@ use crate::{Anchor, BufferSnapshot, TextDimension}; use std::cmp::Ordering; use std::ops::Range; -#[derive(Copy, Clone, Debug, PartialEq)] +#[derive(Default, Copy, Clone, Debug, PartialEq)] pub enum SelectionGoal { + #[default] None, HorizontalPosition(f64), - HorizontalRange { start: f64, end: f64 }, + HorizontalRange { + start: f64, + end: f64, + }, WrappedHorizontalPosition((u32, f32)), } @@ -19,12 +23,6 @@ pub struct Selection { pub goal: SelectionGoal, } -impl Default for SelectionGoal { - fn default() -> Self { - Self::None - } -} - impl Selection { /// A place where the selection had stopped at. 
pub fn head(&self) -> T { diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index 3f4fc99584f96754afc5342d299a502eb9a3dbad..8a7b85349273176f67e8eed8b6939ef047f83b4c 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -38,8 +38,9 @@ use util::rel_path::RelPath; use workspace::searchable::Direction; use workspace::{Workspace, WorkspaceDb, WorkspaceId}; -#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Copy, Default, Debug, PartialEq, Serialize, Deserialize)] pub enum Mode { + #[default] Normal, Insert, Replace, @@ -78,12 +79,6 @@ impl Mode { } } -impl Default for Mode { - fn default() -> Self { - Self::Normal - } -} - #[derive(Clone, Debug, PartialEq)] pub enum Operator { Change, diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index d85662733d52390db820957818901fa2e2cfd2a2..3965aeb4211cf8834bbcf4e52709f2abf09343fd 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -422,8 +422,9 @@ struct NavHistoryState { next_timestamp: Arc, } -#[derive(Debug, Copy, Clone)] +#[derive(Debug, Default, Copy, Clone)] pub enum NavigationMode { + #[default] Normal, GoingBack, GoingForward, @@ -432,12 +433,6 @@ pub enum NavigationMode { Disabled, } -impl Default for NavigationMode { - fn default() -> Self { - Self::Normal - } -} - pub struct NavigationEntry { pub item: Arc, pub data: Option>, diff --git a/flake.lock b/flake.lock index ced528afa60cb567b291c6962c80e503845b08fc..3074b947ef51c387b5d20aba85478636f48de557 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "crane": { "locked": { - "lastModified": 1758215636, - "narHash": "sha256-8nkzkPbdxze8CxWhKWlcLbJEU1vfLM/nVqRlTy17V54=", + "lastModified": 1762538466, + "narHash": "sha256-8zrIPl6J+wLm9MH5ksHcW7BUHo7jSNOu0/hA0ohOOaM=", "owner": "ipetkov", "repo": "crane", - "rev": "a669fe77a8b0cd6f11419d89ea45a16691ca5121", + "rev": "0cea393fffb39575c46b7a0318386467272182fe", "type": "github" }, "original": { @@ -17,11 +17,11 @@ }, "flake-compat": { "locked": { - "lastModified": 1747046372, - "narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=", + "lastModified": 1761588595, + "narHash": "sha256-XKUZz9zewJNUj46b4AJdiRZJAvSZ0Dqj2BNfXvFlJC4=", "owner": "edolstra", "repo": "flake-compat", - "rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885", + "rev": "f387cd2afec9419c8ee37694406ca490c3f34ee5", "type": "github" }, "original": { @@ -33,10 +33,10 @@ "nixpkgs": { "locked": { "lastModified": 315532800, - "narHash": "sha256-YPoFUJMpbuPvIS4FJBn2Sv/iWsui9S26gu2ufFWEY0g=", - "rev": "a1f79a1770d05af18111fbbe2a3ab2c42c0f6cd0", + "narHash": "sha256-5CwQ80ucRHiqVbMEEbTFnjz70/axSJ0aliyzSaFSkmY=", + "rev": "f6b44b2401525650256b977063dbcf830f762369", "type": "tarball", - "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre864673.a1f79a1770d0/nixexprs.tar.xz" + "url": "https://releases.nixos.org/nixpkgs/nixpkgs-25.11pre891648.f6b44b240152/nixexprs.tar.xz" }, "original": { "type": "tarball", @@ -58,11 +58,11 @@ ] }, "locked": { - "lastModified": 1758508617, - "narHash": "sha256-kx2uELmVnAbiekj/YFfWR26OXqXedImkhe2ocnbumTA=", + "lastModified": 1762915112, + "narHash": "sha256-d9j1g8nKmYDHy+/bIOPQTh9IwjRliqaTM0QLHMV92Ic=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "d2bac276ac7e669a1f09c48614538a37e3eb6d0f", + "rev": "aa1e85921cfa04de7b6914982a94621fbec5cc02", "type": "github" }, "original": { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 
6ef0865182f1d5ef79cebdcbeacd52d5f71b72ae..59765d94abe9c04e6668203de31b598dd6b34dc7 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.90" +channel = "1.91.1" profile = "minimal" components = [ "rustfmt", "clippy" ] targets = [ From fd837b348f3f96eadde3939c1267811f189d2ca2 Mon Sep 17 00:00:00 2001 From: Lucas Parry Date: Thu, 13 Nov 2025 07:34:40 +1100 Subject: [PATCH 0066/1030] project_panel: Make natural sort ordering consistent with other apps (#41080) The existing sorting approach when faced with `Dir1`, `dir2`, `Dir3`, would only get as far as comparing the stems without numbers (`dir` and `Dir`), and then the lowercase-first tie breaker in that function would determine that `dir2` should come first, resulting in an undesirable order of `dir2`, `Dir1`, `Dir3`. This patch defers tie-breaking until it's determined that there's no other difference in the strings outside of case to order on, at which point we tie-break to provide a stable sort. Natural number sorting is still preserved, and mixing different cases alphabetically (as opposed to all lowercase alpha, followed by all uppercase alpha) is preserved. Closes #41080 Release Notes: - Fixed: ProjectPanel sorting bug Screenshots: Before / After (images omitted). I'm having trouble reasoning through what was previously going wrong with `docs` in the before screenshot, but it also seems to now appear alphabetically where you'd expect it with this patch --------- Co-authored-by: Smit Barmase --- crates/util/src/paths.rs | 78 ++++++++++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 22 deletions(-) diff --git a/crates/util/src/paths.rs b/crates/util/src/paths.rs index 015711fa2a17a12f7e47e37c5bee3a6941a29691..d4081d2edc113b58795845680feb00f703622364 100644 --- a/crates/util/src/paths.rs +++ b/crates/util/src/paths.rs @@ -800,22 +800,6 @@ impl Default for PathMatcher { } } -/// Custom character comparison that prioritizes lowercase for same letters -fn compare_chars(a: char, b: char) -> Ordering { - // First compare case-insensitive - match a.to_ascii_lowercase().cmp(&b.to_ascii_lowercase()) { - Ordering::Equal => { - // If same letter, prioritize lowercase (lowercase < uppercase) - match (a.is_ascii_lowercase(), b.is_ascii_lowercase()) { - (true, false) => Ordering::Less, // lowercase comes first - (false, true) => Ordering::Greater, // uppercase comes after - _ => Ordering::Equal, // both same case or both non-ascii - } - } - other => other, - } -} - /// Compares two sequences of consecutive digits for natural sorting. /// /// This function is a core component of natural sorting that handles numeric comparison @@ -916,21 +900,25 @@ where /// * Numbers are compared by numeric value, not character by character /// * Leading zeros affect ordering when numeric values are equal /// * Can handle numbers larger than u128::MAX (falls back to string comparison) +/// * When strings are equal case-insensitively, lowercase is prioritized (lowercase < uppercase) /// /// # Algorithm /// /// The function works by: -/// 1. Processing strings character by character +/// 1. Processing strings character by character in a case-insensitive manner /// 2. When encountering digits, treating consecutive digits as a single number /// 3. Comparing numbers by their numeric value rather than lexicographically -/// 4. For non-numeric characters, using case-sensitive comparison with lowercase priority +/// 4. For non-numeric characters, using case-insensitive comparison +/// 5.
If everything is equal case-insensitively, using case-sensitive comparison as final tie-breaker pub fn natural_sort(a: &str, b: &str) -> Ordering { let mut a_iter = a.chars().peekable(); let mut b_iter = b.chars().peekable(); loop { match (a_iter.peek(), b_iter.peek()) { - (None, None) => return Ordering::Equal, + (None, None) => { + return b.cmp(a); + } (None, _) => return Ordering::Less, (_, None) => return Ordering::Greater, (Some(&a_char), Some(&b_char)) => { @@ -940,7 +928,10 @@ pub fn natural_sort(a: &str, b: &str) -> Ordering { ordering => return ordering, } } else { - match compare_chars(a_char, b_char) { + match a_char + .to_ascii_lowercase() + .cmp(&b_char.to_ascii_lowercase()) + { Ordering::Equal => { a_iter.next(); b_iter.next(); @@ -952,6 +943,7 @@ pub fn natural_sort(a: &str, b: &str) -> Ordering { } } } + pub fn compare_rel_paths( (path_a, a_is_file): (&RelPath, bool), (path_b, b_is_file): (&RelPath, bool), @@ -1246,6 +1238,33 @@ mod tests { ); } + #[perf] + fn compare_paths_mixed_case_numeric_ordering() { + let mut entries = [ + (Path::new(".config"), false), + (Path::new("Dir1"), false), + (Path::new("dir01"), false), + (Path::new("dir2"), false), + (Path::new("Dir02"), false), + (Path::new("dir10"), false), + (Path::new("Dir10"), false), + ]; + + entries.sort_by(|&a, &b| compare_paths(a, b)); + + let ordered: Vec<&str> = entries + .iter() + .map(|(path, _)| path.to_str().unwrap()) + .collect(); + + assert_eq!( + ordered, + vec![ + ".config", "Dir1", "dir01", "dir2", "Dir02", "dir10", "Dir10" + ] + ); + } + #[perf] fn path_with_position_parse_posix_path() { // Test POSIX filename edge cases @@ -1917,10 +1936,25 @@ mod tests { ), Ordering::Less ); + } - // Mixed case with numbers - assert_eq!(natural_sort("File1", "file2"), Ordering::Greater); + #[perf] + fn test_natural_sort_case_sensitive() { + // Numerically smaller values come first. + assert_eq!(natural_sort("File1", "file2"), Ordering::Less); assert_eq!(natural_sort("file1", "File2"), Ordering::Less); + + // Numerically equal values: the case-insensitive comparison decides first. + // Case-sensitive comparison only occurs when both are equal case-insensitively. + assert_eq!(natural_sort("Dir1", "dir01"), Ordering::Less); + assert_eq!(natural_sort("dir2", "Dir02"), Ordering::Less); + assert_eq!(natural_sort("dir2", "dir02"), Ordering::Less); + + // Numerically equal and case-insensitively equal: + // the lexicographically smaller (case-sensitive) one wins. 
+ assert_eq!(natural_sort("dir1", "Dir1"), Ordering::Less); + assert_eq!(natural_sort("dir02", "Dir02"), Ordering::Less); + assert_eq!(natural_sort("dir10", "Dir10"), Ordering::Less); } #[perf] From 8fd8b989a64e1d042f55359627eb83a6c3aef8db Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 12 Nov 2025 13:41:20 -0700 Subject: [PATCH 0067/1030] Use powershell for winget job steps (#42565) Co-Authored-By: Claude Release Notes: - N/A --- .github/workflows/after_release.yml | 14 +++++++------- .../xtask/src/tasks/workflows/after_release.rs | 16 ++++++++-------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/workflows/after_release.yml b/.github/workflows/after_release.yml index cf77b429353e59697858434c3904a68b91ecc63a..2279b3647b646b30a7e7badfb235c2a41fe83ad2 100644 --- a/.github/workflows/after_release.yml +++ b/.github/workflows/after_release.yml @@ -56,14 +56,14 @@ jobs: - id: set-package-name name: after_release::publish_winget::set_package_name run: | - if [ "${{ github.event.release.prerelease }}" == "true" ]; then - PACKAGE_NAME=ZedIndustries.Zed.Preview - else - PACKAGE_NAME=ZedIndustries.Zed - fi + if ("${{ github.event.release.prerelease }}" -eq "true") { + $PACKAGE_NAME = "ZedIndustries.Zed.Preview" + } else { + $PACKAGE_NAME = "ZedIndustries.Zed" + } - echo "PACKAGE_NAME=$PACKAGE_NAME" >> "$GITHUB_OUTPUT" - shell: bash -euxo pipefail {0} + echo "PACKAGE_NAME=$PACKAGE_NAME" >> $env:GITHUB_OUTPUT + shell: pwsh - name: after_release::publish_winget::winget_releaser uses: vedantmgoyal9/winget-releaser@19e706d4c9121098010096f9c495a70a7518b30f with: diff --git a/tooling/xtask/src/tasks/workflows/after_release.rs b/tooling/xtask/src/tasks/workflows/after_release.rs index 0d45210a4b24df3d50f775cd4bd07f0b55a8a270..bdf4afedb2c5a8a92fed5cb19e2c3e2469f97a65 100644 --- a/tooling/xtask/src/tasks/workflows/after_release.rs +++ b/tooling/xtask/src/tasks/workflows/after_release.rs @@ -98,14 +98,14 @@ fn post_to_discord(deps: &[&NamedJob]) -> NamedJob { fn publish_winget() -> NamedJob { fn set_package_name() -> (Step, StepOutput) { - let step = named::bash(indoc::indoc! {r#" - if [ "${{ github.event.release.prerelease }}" == "true" ]; then - PACKAGE_NAME=ZedIndustries.Zed.Preview - else - PACKAGE_NAME=ZedIndustries.Zed - fi - - echo "PACKAGE_NAME=$PACKAGE_NAME" >> "$GITHUB_OUTPUT" + let step = named::pwsh(indoc::indoc! {r#" + if ("${{ github.event.release.prerelease }}" -eq "true") { + $PACKAGE_NAME = "ZedIndustries.Zed.Preview" + } else { + $PACKAGE_NAME = "ZedIndustries.Zed" + } + + echo "PACKAGE_NAME=$PACKAGE_NAME" >> $env:GITHUB_OUTPUT "#}) .id("set-package-name"); From 0c1ca2a45a2d8136c5944d7833eb114d9b30aad5 Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Wed, 12 Nov 2025 16:08:41 -0500 Subject: [PATCH 0068/1030] Improve `pane: reopen closed item` to not reopen closed tabs (#42568) Closes #42134 Release Notes: - Improved `pane: reopen closed item` to not reopen closed tabs. 
--- crates/workspace/src/pane.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 3965aeb4211cf8834bbcf4e52709f2abf09343fd..1f950460299443187457275185d3a28763b11166 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -4116,6 +4116,7 @@ impl NavHistory { is_preview, }); } + NavigationMode::ClosingItem if is_preview => return, NavigationMode::ClosingItem => { if state.closed_stack.len() >= MAX_NAVIGATION_HISTORY_LEN { state.closed_stack.pop_front(); From f2a1eb99632ebe9763cf550c61ac4428f526496c Mon Sep 17 00:00:00 2001 From: Michael Sloan Date: Wed, 12 Nov 2025 14:58:12 -0700 Subject: [PATCH 0069/1030] Make check-licenses script check that AGPL crates are not included in release binaries (#42571) See discussion in #24657. Recalled that I had a stashed change for this, so polished it up Release Notes: - N/A --- script/check-licenses | 27 +++++++++++++++++++++++---- script/licenses/zed-licenses.toml | 11 +++++++++++ 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/script/check-licenses b/script/check-licenses index a58806f37973162c0632fa7854409a9cc9041f01..0363f31970f936e26c61f734bf88ed5f60dc0393 100755 --- a/script/check-licenses +++ b/script/check-licenses @@ -2,14 +2,16 @@ set -euo pipefail +AGPL_CRATES=("collab") +RELEASE_CRATES=("cli" "remote_server" "zed") + check_license () { local dir="$1" local allowed_licenses=() - local agpl_crates=("crates/collab") local is_agpl=false - for agpl_crate in "${agpl_crates[@]}"; do - if [[ "$dir" == "$agpl_crate" ]]; then + for agpl_crate in "${AGPL_CRATES[@]}"; do + if [[ "$dir" == "crates/$agpl_crate" ]]; then is_agpl=true break fi @@ -30,7 +32,7 @@ check_license () { fi done - if [[ "$dir" == "crates/collab" ]]; then + if [[ "$is_agpl" == true ]]; then echo "Error: $dir does not contain a LICENSE-AGPL symlink" else echo "Error: $dir does not contain a LICENSE-GPL or LICENSE-APACHE symlink" @@ -41,3 +43,20 @@ check_license () { git ls-files "**/*/Cargo.toml" | while read -r cargo_toml; do check_license "$(dirname "$cargo_toml")" done + + +# Make sure the AGPL server crates are included in the release tarball. +for release_crate in "${RELEASE_CRATES[@]}"; do + tree_output=$(cargo tree --package "$release_crate") + for agpl_crate in "${AGPL_CRATES[@]}"; do + # Look for lines that contain the crate name followed by " v" (version) + # This matches patterns like "├── collab v0.44.0" + if echo "$tree_output" | grep -E "(^|[^a-zA-Z_])${agpl_crate} v" > /dev/null; then + echo "Error: crate '${agpl_crate}' is AGPL and is a dependency of crate '${release_crate}'." >&2 + echo "AGPL licensed code should not be used in the release distribution, only in servers." >&2 + exit 1 + fi + done +done + +echo "check-licenses succeeded" diff --git a/script/licenses/zed-licenses.toml b/script/licenses/zed-licenses.toml index 4f7281a050863b26c6e012acbf116cecadcb4269..572dd5c14aebcdea3544ac15b751be4c212ecf52 100644 --- a/script/licenses/zed-licenses.toml +++ b/script/licenses/zed-licenses.toml @@ -1,5 +1,16 @@ no-clearly-defined = true private = { ignore = true } +# Licenses allowed in Zed's dependencies. AGPL should not be added to +# this list as use of AGPL software is sometimes disallowed. 
When +# adding to this list, please check the following open source license +# policies: +# +# * https://opensource.google/documentation/reference/thirdparty/licenses +# +# The Zed project does have AGPL crates, but these are only involved +# in servers and are not built into the binaries in the release +# tarball. `script/check-licenses` checks that AGPL crates are not +# involved in release binaries. accepted = [ "Apache-2.0", "MIT", From b0700a46250ea12d1855d63e4cc04b65db3948ab Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 12 Nov 2025 18:58:22 -0300 Subject: [PATCH 0070/1030] zeta eval: `--repeat` flag (#42569) Adds a `--repeat` flag to the zeta eval that runs each example as many times as specified. Also makes the output nicer in a few ways. Release Notes: - N/A --------- Co-authored-by: Ben Kunkle Co-authored-by: Michael --- crates/zeta2/src/xml_edits.rs | 4 +- crates/zeta2/src/zeta2.rs | 31 +++-- crates/zeta_cli/src/evaluate.rs | 195 ++++++++++++++++++++++++-------- crates/zeta_cli/src/example.rs | 84 +++++++++++++- crates/zeta_cli/src/main.rs | 26 +++-- crates/zeta_cli/src/paths.rs | 29 +++-- crates/zeta_cli/src/predict.rs | 122 +++++++++----------- 7 files changed, 344 insertions(+), 147 deletions(-) diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta2/src/xml_edits.rs index 97087ec65e06a1a2f418ca0c4ebba41a19b1af84..d1eea285d6861dc4cbe6fe65a133453d5b06adaf 100644 --- a/crates/zeta2/src/xml_edits.rs +++ b/crates/zeta2/src/xml_edits.rs @@ -79,7 +79,7 @@ fn resolve_new_text_old_text_in_buffer( } } offset.ok_or_else(|| { - #[cfg(debug_assertions)] + #[cfg(any(debug_assertions, feature = "eval-support"))] if let Some(closest_match) = closest_old_text_match(buffer, old_text) { log::info!( "Closest `old_text` match: {}", @@ -102,7 +102,7 @@ fn resolve_new_text_old_text_in_buffer( })) } -#[cfg(debug_assertions)] +#[cfg(any(debug_assertions, feature = "eval-support"))] fn closest_old_text_match(buffer: &TextBufferSnapshot, old_text: &str) -> Option { let buffer_text = buffer.text(); let len = old_text.len(); diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index d7bff2b51a69a031d2f24b0b357b9748dd5a473b..b32e902b71a1b4a20e5f935eea854ecf115ae0f1 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -208,7 +208,7 @@ pub struct ZetaSearchQueryDebugInfo { pub type RequestDebugInfo = predict_edits_v3::DebugInfo; struct ZetaProject { - syntax_index: Entity, + syntax_index: Option>, events: VecDeque, registered_buffers: HashMap, current_prediction: Option, @@ -445,9 +445,13 @@ impl Zeta { self.projects .entry(project.entity_id()) .or_insert_with(|| ZetaProject { - syntax_index: cx.new(|cx| { - SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) - }), + syntax_index: if let ContextMode::Syntax(_) = &self.options.context { + Some(cx.new(|cx| { + SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) + })) + } else { + None + }, events: VecDeque::new(), registered_buffers: HashMap::default(), current_prediction: None, @@ -685,10 +689,11 @@ impl Zeta { ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); - let index_state = project_state.map(|state| { + let index_state = project_state.and_then(|state| { state .syntax_index - .read_with(cx, |index, _cx| index.state().clone()) + .as_ref() + .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) }); let options = self.options.clone(); let active_snapshot = active_buffer.read(cx).snapshot(); @@ -1555,10 +1560,11 @@ impl 
Zeta { ) -> Task> { let project_state = self.projects.get(&project.entity_id()); - let index_state = project_state.map(|state| { + let index_state = project_state.and_then(|state| { state .syntax_index - .read_with(cx, |index, _cx| index.state().clone()) + .as_ref() + .map(|index| index.read_with(cx, |index, _cx| index.state().clone())) }); let options = self.options.clone(); let snapshot = buffer.read(cx).snapshot(); @@ -1628,10 +1634,11 @@ impl Zeta { cx: &mut App, ) -> Task> { let zeta_project = self.get_or_init_zeta_project(project, cx); - zeta_project - .syntax_index - .read(cx) - .wait_for_initial_file_indexing(cx) + if let Some(syntax_index) = &zeta_project.syntax_index { + syntax_index.read(cx).wait_for_initial_file_indexing(cx) + } else { + Task::ready(Ok(())) + } } } diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 0359ccf0fea3179dd480645ad7031b61fc3a357c..6644ecbb5a72b4d7218a9d33bcc1f9f602c3f65d 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,14 +1,16 @@ use std::{ - io::IsTerminal, - path::{Path, PathBuf}, + io::{IsTerminal, Write}, + path::PathBuf, sync::Arc, }; use anyhow::Result; use clap::Args; use collections::HashSet; -use gpui::AsyncApp; -use zeta2::udiff::DiffLine; +use gpui::{AsyncApp, Entity}; +use project::Project; +use util::ResultExt as _; +use zeta2::{Zeta, udiff::DiffLine}; use crate::{ PromptFormat, @@ -27,6 +29,8 @@ pub struct EvaluateArguments { use_expected_context: bool, #[clap(long, value_enum, default_value_t = CacheMode::default())] cache: CacheMode, + #[clap(short, long, default_value_t = 1, alias = "repeat")] + repetitions: u16, } pub async fn run_evaluate( @@ -34,75 +38,169 @@ pub async fn run_evaluate( app_state: &Arc, cx: &mut AsyncApp, ) { - let example_len = args.example_paths.len(); + if args.example_paths.is_empty() { + eprintln!("No examples provided"); + return; + } let all_tasks = args.example_paths.into_iter().map(|path| { let app_state = app_state.clone(); + let example = NamedExample::load(&path).unwrap(); + cx.spawn(async move |cx| { - run_evaluate_one( - &path, - args.prompt_format, - args.use_expected_context, - args.cache, - app_state.clone(), - cx, - ) - .await + let (project, zetas, _edited_buffers) = example + .setup_project(&app_state, args.repetitions, cx) + .await + .unwrap(); + + let tasks = zetas.into_iter().enumerate().map(|(repetition_ix, zeta)| { + let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); + + let example = example.clone(); + let project = project.clone(); + + cx.spawn(async move |cx| { + let name = example.name.clone(); + run_evaluate_one( + example, + repetition_ix, + project, + zeta, + args.prompt_format, + args.use_expected_context, + args.cache, + cx, + ) + .await + .map_err(|err| (err, name, repetition_ix)) + }) + }); + futures::future::join_all(tasks).await }) }); - let all_results = futures::future::try_join_all(all_tasks).await; - - if let Ok(all_results) = &all_results { - let aggregated_result = EvaluationResult { - context: Scores::aggregate(all_results.iter().map(|r| &r.context)), - edit_prediction: Scores::aggregate(all_results.iter().map(|r| &r.edit_prediction)), - }; - - if example_len > 1 { - println!("\n{}", "-".repeat(80)); - println!("\n## TOTAL SCORES"); - println!("{}", aggregated_result.to_markdown()); + let all_results = futures::future::join_all(all_tasks).await; + + write_aggregated_scores(&mut std::io::stdout(), &all_results).unwrap(); + if let Some(mut output_file) = + 
std::fs::File::create(crate::paths::RUN_DIR.join("aggregated_results.md")).log_err() + { + write_aggregated_scores(&mut output_file, &all_results).log_err(); + }; + print_run_data_dir(args.repetitions == 1); +} + +fn write_aggregated_scores( + w: &mut impl std::io::Write, + all_results: &Vec)>>>, +) -> Result<()> { + let mut successful = Vec::new(); + let mut failed_count = 0; + writeln!(w, "## Errors\n")?; + for result in all_results.iter().flatten() { + match result { + Ok(eval_result) => successful.push(eval_result), + Err((err, name, repetition_ix)) => { + failed_count += 1; + let err = err + .to_string() + .replace("", "\n```"); + writeln!( + w, + "### ERROR {name}{}\n\n{err}\n", + repetition_ix + .map(|ix| format!(" [RUN {ix:03}]")) + .unwrap_or_default() + )?; + } } } - - print_run_data_dir(); - - all_results.unwrap(); + let aggregated_result = EvaluationResult { + context: Scores::aggregate(successful.iter().map(|r| &r.context)), + edit_prediction: Scores::aggregate(successful.iter().map(|r| &r.edit_prediction)), + }; + + writeln!(w, "\n{}", "-".repeat(80))?; + writeln!(w, "\n## TOTAL SCORES")?; + writeln!(w, "\n### Success Rate")?; + writeln!( + w, + "\nCongratulations! {}/{} ({:.2}%) of runs weren't outright failures 🎉", + successful.len(), + successful.len() + failed_count, + (successful.len() as f64 / (successful.len() + failed_count) as f64) * 100.0 + )?; + writeln!(w, "{}", aggregated_result)?; + + Ok(()) } pub async fn run_evaluate_one( - example_path: &Path, + example: NamedExample, + repetition_ix: Option, + project: Entity, + zeta: Entity, prompt_format: PromptFormat, use_expected_context: bool, cache_mode: CacheMode, - app_state: Arc, cx: &mut AsyncApp, ) -> Result { - let example = NamedExample::load(&example_path).unwrap(); - let predictions = zeta2_predict( + let predict_result = zeta2_predict( example.clone(), + project, + zeta, + repetition_ix, prompt_format, use_expected_context, cache_mode, - &app_state, cx, ) - .await - .unwrap(); + .await?; + + let evaluation_result = evaluate(&example.example, &predict_result); + + if repetition_ix.is_none() { + write_eval_result( + &example, + &predict_result, + &evaluation_result, + &mut std::io::stdout(), + )?; + } - let evaluation_result = evaluate(&example.example, &predictions); + if let Some(mut results_file) = + std::fs::File::create(predict_result.run_example_dir.join("results.md")).log_err() + { + write_eval_result( + &example, + &predict_result, + &evaluation_result, + &mut results_file, + ) + .log_err(); + } + + anyhow::Ok(evaluation_result) +} - println!( +fn write_eval_result( + example: &NamedExample, + predictions: &PredictionDetails, + evaluation_result: &EvaluationResult, + out: &mut impl Write, +) -> Result<()> { + writeln!( + out, "## Expected edit prediction:\n\n```diff\n{}\n```\n", compare_diffs(&example.example.expected_patch, &predictions.diff) - ); - println!( + )?; + writeln!( + out, "## Actual edit prediction:\n\n```diff\n{}\n```\n", compare_diffs(&predictions.diff, &example.example.expected_patch) - ); - - println!("{}", evaluation_result.to_markdown()); + )?; + writeln!(out, "{}", evaluation_result)?; - anyhow::Ok(evaluation_result) + anyhow::Ok(()) } #[derive(Debug, Default)] @@ -194,9 +292,10 @@ False Negatives : {}", } } -impl EvaluationResult { - pub fn to_markdown(&self) -> String { - format!( +impl std::fmt::Display for EvaluationResult { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, r#" ### Context Scores {} diff --git a/crates/zeta_cli/src/example.rs 
b/crates/zeta_cli/src/example.rs index 3e55fb0b62e0191fa5abf1014a71bc7f613fc0c9..5732e1efcb0e05f5a4c3122130ee40cad5a1d4f6 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -6,9 +6,10 @@ use std::{ io::Write, mem, path::{Path, PathBuf}, - sync::Arc, + sync::{Arc, OnceLock}, }; +use crate::headless::ZetaCliAppState; use anyhow::{Context as _, Result, anyhow}; use clap::ValueEnum; use cloud_zeta2_prompt::CURSOR_MARKER; @@ -18,13 +19,14 @@ use futures::{ AsyncWriteExt as _, lock::{Mutex, OwnedMutexGuard}, }; -use gpui::{AsyncApp, Entity, http_client::Url}; +use futures::{FutureExt as _, future::Shared}; +use gpui::{AppContext as _, AsyncApp, Entity, Task, http_client::Url}; use language::{Anchor, Buffer}; use project::{Project, ProjectPath}; use pulldown_cmark::CowStr; use serde::{Deserialize, Serialize}; use util::{paths::PathStyle, rel_path::RelPath}; -use zeta2::udiff::OpenedBuffers; +use zeta2::{Zeta, udiff::OpenedBuffers}; use crate::paths::{REPOS_DIR, WORKTREES_DIR}; @@ -311,6 +313,82 @@ impl NamedExample { } } + pub async fn setup_project<'a>( + &'a self, + app_state: &Arc, + repetitions: u16, + cx: &mut AsyncApp, + ) -> Result<(Entity, Vec>, OpenedBuffers<'a>)> { + let worktree_path = self.setup_worktree().await?; + + static AUTHENTICATED: OnceLock>> = OnceLock::new(); + + AUTHENTICATED + .get_or_init(|| { + let client = app_state.client.clone(); + cx.spawn(async move |cx| { + client + .sign_in_with_optional_connect(true, cx) + .await + .unwrap(); + }) + .shared() + }) + .clone() + .await; + + let project = cx.update(|cx| { + Project::local( + app_state.client.clone(), + app_state.node_runtime.clone(), + app_state.user_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + None, + cx, + ) + })?; + + let worktree = project + .update(cx, |project, cx| { + project.create_worktree(&worktree_path, true, cx) + })? + .await?; + worktree + .read_with(cx, |worktree, _cx| { + worktree.as_local().unwrap().scan_complete() + })? + .await; + + let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; + + let zetas = (0..repetitions) + .map(|_| { + let zeta = cx.new(|cx| { + zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) + })?; + + cx.subscribe(&buffer_store, { + let project = project.clone(); + let zeta = zeta.clone(); + move |_, event, cx| match event { + project::buffer_store::BufferStoreEvent::BufferAdded(buffer) => { + zeta.update(cx, |zeta, cx| zeta.register_buffer(&buffer, &project, cx)); + } + _ => {} + } + })? 
+ .detach(); + + anyhow::Ok(zeta) + }) + .collect::>>()?; + + let edited_buffers = self.apply_edit_history(&project, cx).await?; + + anyhow::Ok((project, zetas, edited_buffers)) + } + pub async fn setup_worktree(&self) -> Result { let (repo_owner, repo_name) = self.repo_name()?; let file_name = self.file_name(); diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 1dd246e612979e7a4a77c74926be1a5cab72dbc6..f75b4a7e25020395f24d2638af88d4ba8b390e77 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -35,8 +35,10 @@ use crate::util::{open_buffer, open_buffer_with_language_server}; #[derive(Parser, Debug)] #[command(name = "zeta")] struct ZetaCliArgs { + #[arg(long, default_value_t = false)] + printenv: bool, #[command(subcommand)] - command: Command, + command: Option, } #[derive(Subcommand, Debug)] @@ -413,14 +415,22 @@ fn main() { let app_state = Arc::new(headless::init(cx)); cx.spawn(async move |cx| { match args.command { - Command::Zeta1 { + None => { + if args.printenv { + ::util::shell_env::print_env(); + return; + } else { + panic!("Expected a command"); + } + } + Some(Command::Zeta1 { command: Zeta1Command::Context { context_args }, - } => { + }) => { let context = zeta1_context(context_args, &app_state, cx).await.unwrap(); let result = serde_json::to_string_pretty(&context.body).unwrap(); println!("{}", result); } - Command::Zeta2 { command } => match command { + Some(Command::Zeta2 { command }) => match command { Zeta2Command::Predict(arguments) => { run_zeta2_predict(arguments, &app_state, cx).await; } @@ -464,14 +474,16 @@ fn main() { println!("{}", result.unwrap()); } }, - Command::ConvertExample { + Some(Command::ConvertExample { path, output_format, - } => { + }) => { let example = NamedExample::load(path).unwrap(); example.write(output_format, io::stdout()).unwrap(); } - Command::Clean => std::fs::remove_dir_all(&*crate::paths::TARGET_ZETA_DIR).unwrap(), + Some(Command::Clean) => { + std::fs::remove_dir_all(&*crate::paths::TARGET_ZETA_DIR).unwrap() + } }; let _ = cx.update(|cx| cx.quit()); diff --git a/crates/zeta_cli/src/paths.rs b/crates/zeta_cli/src/paths.rs index 73d541c6a0409deab5baac1714feded986fb94c1..15c4941f3dacce0b9a06c15daee431014b12944d 100644 --- a/crates/zeta_cli/src/paths.rs +++ b/crates/zeta_cli/src/paths.rs @@ -13,28 +13,41 @@ pub static RUN_DIR: LazyLock = LazyLock::new(|| { pub static LATEST_EXAMPLE_RUN_DIR: LazyLock = LazyLock::new(|| TARGET_ZETA_DIR.join("latest")); -pub fn print_run_data_dir() { +pub fn print_run_data_dir(deep: bool) { println!("\n## Run Data\n"); + let mut files = Vec::new(); let current_dir = std::env::current_dir().unwrap(); for file in std::fs::read_dir(&*RUN_DIR).unwrap() { let file = file.unwrap(); - if file.file_type().unwrap().is_dir() { + if file.file_type().unwrap().is_dir() && deep { for file in std::fs::read_dir(file.path()).unwrap() { let path = file.unwrap().path(); let path = path.strip_prefix(¤t_dir).unwrap_or(&path); - println!( + files.push(format!( "- {}/\x1b[34m{}\x1b[0m", path.parent().unwrap().display(), path.file_name().unwrap().display(), - ); + )); } } else { let path = file.path(); - println!( - "- {} ", - path.strip_prefix(¤t_dir).unwrap_or(&path).display() - ); + let path = path.strip_prefix(¤t_dir).unwrap_or(&path); + files.push(format!( + "- {}/\x1b[34m{}\x1b[0m", + path.parent().unwrap().display(), + path.file_name().unwrap().display(), + )); } } + files.sort(); + + for file in files { + println!("{}", file); + } + + println!( + "\n💡 Tip of the day: {} always 
points to the latest run\n", + LATEST_EXAMPLE_RUN_DIR.display() + ); } diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 82108df076c025089f5e374f447a3136fdb0c563..0cfc7421547b1b00bc552f157ae22b2a8afad541 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -12,7 +12,6 @@ use gpui::{AppContext, AsyncApp, Entity}; use language::{Anchor, Buffer, Point}; use project::Project; use serde::Deserialize; -use std::cell::Cell; use std::fs; use std::io::Write; use std::ops::Range; @@ -20,7 +19,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; -use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey}; +use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; #[derive(Debug, Args)] pub struct PredictArguments { @@ -35,10 +34,12 @@ pub struct PredictArguments { cache: CacheMode, } -#[derive(Debug, ValueEnum, Default, Clone, Copy)] +#[derive(Debug, ValueEnum, Default, Clone, Copy, PartialEq)] pub enum CacheMode { - /// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint. + /// Use cached LLM requests and responses, except when multiple repetitions are requested #[default] + Auto, + /// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint. #[value(alias = "request")] Requests, /// Ignore existing cache entries for both LLM and search. @@ -50,12 +51,22 @@ pub enum CacheMode { impl CacheMode { fn use_cached_llm_responses(&self) -> bool { + self.assert_not_auto(); matches!(self, CacheMode::Requests | CacheMode::Force) } fn use_cached_search_results(&self) -> bool { + self.assert_not_auto(); matches!(self, CacheMode::Force) } + + fn assert_not_auto(&self) { + assert_ne!( + *self, + CacheMode::Auto, + "Cache mode should not be auto at this point!" + ); + } } #[derive(clap::ValueEnum, Debug, Clone)] @@ -71,72 +82,49 @@ pub async fn run_zeta2_predict( cx: &mut AsyncApp, ) { let example = NamedExample::load(args.example_path).unwrap(); + let (project, mut zetas, _edited_buffers) = + example.setup_project(app_state, 1, cx).await.unwrap(); let result = zeta2_predict( example, + project, + zetas.remove(0), + None, args.prompt_format, args.use_expected_context, args.cache, - &app_state, cx, ) .await .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); - print_run_data_dir(); -} - -thread_local! 
{ - static AUTHENTICATED: Cell = const { Cell::new(false) }; + print_run_data_dir(true); } pub async fn zeta2_predict( example: NamedExample, + project: Entity, + zeta: Entity, + repetition_ix: Option, prompt_format: PromptFormat, use_expected_context: bool, - cache_mode: CacheMode, - app_state: &Arc, + mut cache_mode: CacheMode, cx: &mut AsyncApp, ) -> Result { - let worktree_path = example.setup_worktree().await?; - - if !AUTHENTICATED.get() { - AUTHENTICATED.set(true); - - app_state - .client - .sign_in_with_optional_connect(true, cx) - .await?; + if repetition_ix.is_some() { + if cache_mode != CacheMode::Auto && cache_mode != CacheMode::Skip { + panic!("Repetitions are not supported in Auto cache mode"); + } else { + cache_mode = CacheMode::Skip; + } + } else if cache_mode == CacheMode::Auto { + cache_mode = CacheMode::Requests; } - let project = cx.update(|cx| { - Project::local( - app_state.client.clone(), - app_state.node_runtime.clone(), - app_state.user_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - None, - cx, - ) - })?; - - let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; - - let worktree = project - .update(cx, |project, cx| { - project.create_worktree(&worktree_path, true, cx) - })? - .await?; - worktree - .read_with(cx, |worktree, _cx| { - worktree.as_local().unwrap().scan_complete() - })? - .await; - - let zeta = cx.update(|cx| zeta2::Zeta::global(&app_state.client, &app_state.user_store, cx))?; - - let example_run_dir = RUN_DIR.join(&example.file_name()); + let mut example_run_dir = RUN_DIR.join(&example.file_name()); + if let Some(repetition_ix) = repetition_ix { + example_run_dir = example_run_dir.join(format!("{:03}", repetition_ix)); + } fs::create_dir_all(&example_run_dir)?; if LATEST_EXAMPLE_RUN_DIR.exists() { fs::remove_file(&*LATEST_EXAMPLE_RUN_DIR)?; @@ -157,23 +145,9 @@ pub async fn zeta2_predict( })); })?; - cx.subscribe(&buffer_store, { - let project = project.clone(); - move |_, event, cx| match event { - project::buffer_store::BufferStoreEvent::BufferAdded(buffer) => { - zeta2::Zeta::try_global(cx) - .unwrap() - .update(cx, |zeta, cx| zeta.register_buffer(&buffer, &project, cx)); - } - _ => {} - } - })? 
- .detach(); - - let _edited_buffers = example.apply_edit_history(&project, cx).await?; let (cursor_buffer, cursor_anchor) = example.cursor_position(&project, cx).await?; - let result = Arc::new(Mutex::new(PredictionDetails::default())); + let result = Arc::new(Mutex::new(PredictionDetails::new(example_run_dir.clone()))); let mut debug_rx = zeta.update(cx, |zeta, _| zeta.debug_info())?; let debug_task = cx.background_spawn({ @@ -397,7 +371,7 @@ impl EvalCache for RunCache { self.link_to_run(&key); Some(fs::read_to_string(path).unwrap()) } else { - log::info!("Skipping cached entry: {}", path.display()); + log::trace!("Skipping cached entry: {}", path.display()); None } } else if matches!(self.cache_mode, CacheMode::Force) { @@ -417,14 +391,14 @@ impl EvalCache for RunCache { fs::write(&input_path, input).unwrap(); let output_path = RunCache::output_cache_path(&key); - log::info!("Writing cache entry: {}", output_path.display()); + log::trace!("Writing cache entry: {}", output_path.display()); fs::write(&output_path, output).unwrap(); self.link_to_run(&key); } } -#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] pub struct PredictionDetails { pub diff: String, pub excerpts: Vec, @@ -433,9 +407,23 @@ pub struct PredictionDetails { pub running_search_time: Option, pub prediction_time: Duration, pub total_time: Duration, + pub run_example_dir: PathBuf, } impl PredictionDetails { + pub fn new(run_example_dir: PathBuf) -> Self { + Self { + diff: Default::default(), + excerpts: Default::default(), + excerpts_text: Default::default(), + planning_search_time: Default::default(), + running_search_time: Default::default(), + prediction_time: Default::default(), + total_time: Default::default(), + run_example_dir, + } + } + pub fn write(&self, format: PredictionsOutputFormat, mut out: impl Write) -> Result<()> { let formatted = match format { PredictionsOutputFormat::Md => self.to_markdown(), From ede3b1dae686482c352916aadbd2b8eba4f7ad9d Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Wed, 12 Nov 2025 17:04:38 -0500 Subject: [PATCH 0071/1030] Allow running concurrent unit evals (#42578) Right now only one unit eval GitHub Action can be run at a time. This permits them to run concurrently. 
Release Notes: - N/A --- .github/workflows/run_unit_evals.yml | 2 +- tooling/xtask/src/tasks/workflows/run_agent_evals.rs | 2 +- tooling/xtask/src/tasks/workflows/vars.rs | 6 ++++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_unit_evals.yml b/.github/workflows/run_unit_evals.yml index d4d4529a30e1abfc4be06e7a39640d09f6b06a63..8f64a5c8bcfd07d56279438795f817bfaa1e2e28 100644 --- a/.github/workflows/run_unit_evals.yml +++ b/.github/workflows/run_unit_evals.yml @@ -65,5 +65,5 @@ jobs: rm -rf ./../.cargo shell: bash -euxo pipefail {0} concurrency: - group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.run_id }} cancel-in-progress: true diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index fd9f696dc9537ec21388dc2a287eb52b6334ce70..ec9b70a2db9049b62676b43d614818374e0930a1 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -41,7 +41,7 @@ pub(crate) fn run_unit_evals() -> Workflow { .add_input(model_name.name, model_name.input()) .add_input(commit_sha.name, commit_sha.input()), )) - .concurrency(vars::one_workflow_per_non_main_branch()) + .concurrency(vars::allow_concurrent_runs()) .add_env(("CARGO_TERM_COLOR", "always")) .add_env(("CARGO_INCREMENTAL", 0)) .add_env(("RUST_BACKTRACE", 1)) diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index 6493bd0ab62699454af6c100d526143c54f8774f..e5cc3bc222fab974412f294439648c0d23da50eb 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -81,6 +81,12 @@ pub(crate) fn one_workflow_per_non_main_branch() -> Concurrency { .cancel_in_progress(true) } +pub(crate) fn allow_concurrent_runs() -> Concurrency { + Concurrency::default() + .group("${{ github.workflow }}-${{ github.ref_name }}-${{ github.run_id }}") + .cancel_in_progress(true) +} + // Represents a pattern to check for changed files and corresponding output variable pub(crate) struct PathCondition { pub name: &'static str, From c9e231043ac6a597cb7e5210ad6fafb25aeb9048 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 12 Nov 2025 16:41:04 -0800 Subject: [PATCH 0072/1030] Report discarded zeta predictions and indicate whether they were shown (#42403) Release Notes: - N/A --------- Co-authored-by: Michael Sloan Co-authored-by: Ben Kunkle Co-authored-by: Agus Zubiaga --- .../cloud_llm_client/src/cloud_llm_client.rs | 14 ++ crates/edit_prediction/src/edit_prediction.rs | 6 + crates/editor/src/editor.rs | 7 + crates/zeta/src/zeta.rs | 135 +++++++++++++++++- 4 files changed, 158 insertions(+), 4 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index ff8275fe40eae6945691a7b8d315414617be0235..241e760887cdf0c4455f6769c79a813de0626028 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -58,6 +58,9 @@ pub const SERVER_SUPPORTS_STATUS_MESSAGES_HEADER_NAME: &str = /// The name of the header used by the client to indicate that it supports receiving xAI models. pub const CLIENT_SUPPORTS_X_AI_HEADER_NAME: &str = "x-zed-client-supports-x-ai"; +/// The maximum number of edit predictions that can be rejected per request. 
+pub const MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST: usize = 100; + #[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum UsageLimit { @@ -192,6 +195,17 @@ pub struct AcceptEditPredictionBody { pub request_id: String, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RejectEditPredictionsBody { + pub rejections: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct EditPredictionRejection { + pub request_id: String, + pub was_shown: bool, +} + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum CompletionMode { diff --git a/crates/edit_prediction/src/edit_prediction.rs b/crates/edit_prediction/src/edit_prediction.rs index c9bb0672a0c9cb7c56c3c703b0e10594d56cc0c1..aebfa5e5229ef1fec50f2d9cf74e354878ddc1c5 100644 --- a/crates/edit_prediction/src/edit_prediction.rs +++ b/crates/edit_prediction/src/edit_prediction.rs @@ -104,6 +104,7 @@ pub trait EditPredictionProvider: 'static + Sized { ); fn accept(&mut self, cx: &mut Context); fn discard(&mut self, cx: &mut Context); + fn did_show(&mut self, _cx: &mut Context) {} fn suggest( &mut self, buffer: &Entity, @@ -142,6 +143,7 @@ pub trait EditPredictionProviderHandle { direction: Direction, cx: &mut App, ); + fn did_show(&self, cx: &mut App); fn accept(&self, cx: &mut App); fn discard(&self, cx: &mut App); fn suggest( @@ -233,6 +235,10 @@ where self.update(cx, |this, cx| this.discard(cx)) } + fn did_show(&self, cx: &mut App) { + self.update(cx, |this, cx| this.did_show(cx)) + } + fn suggest( &self, buffer: &Entity, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 2da80a405a0db357712039f06d10c9e6b33e05c8..057d0b223bb43b41c863316010648005b3675119 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -7865,6 +7865,10 @@ impl Editor { self.edit_prediction_preview, EditPredictionPreview::Inactive { .. 
} ) { + if let Some(provider) = self.edit_prediction_provider.as_ref() { + provider.provider.did_show(cx) + } + self.edit_prediction_preview = EditPredictionPreview::Active { previous_scroll_position: None, since: Instant::now(), @@ -8044,6 +8048,9 @@ impl Editor { && !self.edit_predictions_hidden_for_vim_mode; if show_completions_in_buffer { + if let Some(provider) = &self.edit_prediction_provider { + provider.provider.did_show(cx); + } if edits .iter() .all(|(range, _)| range.to_offset(&multibuffer).is_empty()) diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 708a53ff47bd2c60e6b9620e8bed30b16419ba14..577ca77c13c0b9f8e0eff578c20d0a933c858bce 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -8,7 +8,9 @@ mod rate_completion_modal; pub(crate) use completion_diff_element::*; use db::kvp::{Dismissable, KEY_VALUE_STORE}; +use db::smol::stream::StreamExt as _; use edit_prediction::DataCollectionState; +use futures::channel::mpsc; pub use init::*; use license_detection::LicenseDetectionWatcher; pub use rate_completion_modal::*; @@ -17,8 +19,10 @@ use anyhow::{Context as _, Result, anyhow}; use arrayvec::ArrayVec; use client::{Client, EditPredictionUsage, UserStore}; use cloud_llm_client::{ - AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, MINIMUM_REQUIRED_VERSION_HEADER_NAME, - PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, ZED_VERSION_HEADER_NAME, + AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejection, + MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, MINIMUM_REQUIRED_VERSION_HEADER_NAME, + PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, RejectEditPredictionsBody, + ZED_VERSION_HEADER_NAME, }; use collections::{HashMap, HashSet, VecDeque}; use futures::AsyncReadExt; @@ -171,12 +175,15 @@ pub struct Zeta { shown_completions: VecDeque, rated_completions: HashSet, data_collection_choice: DataCollectionChoice, + discarded_completions: Vec, llm_token: LlmApiToken, _llm_token_subscription: Subscription, /// Whether an update to a newer version of Zed is required to continue using Zeta. update_required: bool, user_store: Entity, license_detection_watchers: HashMap>, + discard_completions_debounce_task: Option>, + discard_completions_tx: mpsc::UnboundedSender<()>, } struct ZetaProject { @@ -226,11 +233,25 @@ impl Zeta { fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx); let data_collection_choice = Self::load_data_collection_choice(); + let (reject_tx, mut reject_rx) = mpsc::unbounded(); + cx.spawn(async move |this, cx| { + while let Some(()) = reject_rx.next().await { + this.update(cx, |this, cx| this.reject_edit_predictions(cx))? 
+ .await + .log_err(); + } + anyhow::Ok(()) + }) + .detach(); + Self { projects: HashMap::default(), client, shown_completions: VecDeque::new(), rated_completions: HashSet::default(), + discarded_completions: Vec::new(), + discard_completions_debounce_task: None, + discard_completions_tx: reject_tx, data_collection_choice, llm_token: LlmApiToken::default(), _llm_token_subscription: cx.subscribe( @@ -692,6 +713,75 @@ impl Zeta { }) } + fn reject_edit_predictions(&mut self, cx: &mut Context) -> Task> { + let client = self.client.clone(); + let llm_token = self.llm_token.clone(); + let app_version = AppVersion::global(cx); + let last_rejection = self.discarded_completions.last().cloned(); + let body = serde_json::to_string(&RejectEditPredictionsBody { + rejections: self.discarded_completions.clone(), + }) + .ok(); + + let Some(last_rejection) = last_rejection else { + return Task::ready(anyhow::Ok(())); + }; + + cx.spawn(async move |this, cx| { + let http_client = client.http_client(); + let mut response = llm_token_retry(&llm_token, &client, |token| { + let request_builder = http_client::Request::builder().method(Method::POST); + let request_builder = request_builder.uri( + http_client + .build_zed_llm_url("/predict_edits/reject", &[])? + .as_ref(), + ); + Ok(request_builder + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", token)) + .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) + .body( + body.as_ref() + .context("failed to serialize body")? + .clone() + .into(), + )?) + }) + .await?; + + if let Some(minimum_required_version) = response + .headers() + .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) + .and_then(|version| SemanticVersion::from_str(version.to_str().ok()?).ok()) + && app_version < minimum_required_version + { + return Err(anyhow!(ZedUpdateRequiredError { + minimum_version: minimum_required_version + })); + } + + if response.status().is_success() { + this.update(cx, |this, _| { + if let Some(ix) = this + .discarded_completions + .iter() + .position(|rejection| rejection.request_id == last_rejection.request_id) + { + this.discarded_completions.drain(..ix + 1); + } + }) + } else { + let mut body = String::new(); + response.body_mut().read_to_string(&mut body).await?; + Err(anyhow!( + "error rejecting edit predictions.\nStatus: {:?}\nBody: {}", + response.status(), + body + )) + } + }) + } + fn process_completion_response( prediction_response: PredictEditsResponse, buffer: Entity, @@ -995,6 +1085,31 @@ impl Zeta { ) }); } + + fn discard_completion( + &mut self, + completion_id: EditPredictionId, + was_shown: bool, + cx: &mut Context, + ) { + self.discarded_completions.push(EditPredictionRejection { + request_id: completion_id.to_string(), + was_shown, + }); + + let reached_request_limit = + self.discarded_completions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; + let discard_completions_tx = self.discard_completions_tx.clone(); + self.discard_completions_debounce_task = Some(cx.spawn(async move |_this, cx| { + const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); + if !reached_request_limit { + cx.background_executor() + .timer(DISCARD_COMPLETIONS_DEBOUNCE) + .await; + } + discard_completions_tx.unbounded_send(()).log_err(); + })); + } } pub struct PerformPredictEditsParams { @@ -1167,6 +1282,7 @@ impl Event { struct CurrentEditPrediction { buffer_id: EntityId, completion: EditPrediction, + was_shown: bool, } impl CurrentEditPrediction { @@ -1414,6 +1530,7 @@ impl edit_prediction::EditPredictionProvider 
for ZetaEditPredictionProvider { c.map(|completion| CurrentEditPrediction { buffer_id: buffer.entity_id(), completion, + was_shown: false, }) }) } @@ -1505,9 +1622,19 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { self.pending_completions.clear(); } - fn discard(&mut self, _cx: &mut Context) { + fn discard(&mut self, cx: &mut Context) { self.pending_completions.clear(); - self.current_completion.take(); + if let Some(completion) = self.current_completion.take() { + self.zeta.update(cx, |zeta, cx| { + zeta.discard_completion(completion.completion.id, completion.was_shown, cx); + }); + } + } + + fn did_show(&mut self, _cx: &mut Context) { + if let Some(current_completion) = self.current_completion.as_mut() { + current_completion.was_shown = true; + } } fn suggest( From 28adedf1fa568e9fa4d1aa9f59c3a3e8cc96907c Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Wed, 12 Nov 2025 20:03:59 -0800 Subject: [PATCH 0073/1030] Disable env clearing for npm subcommands (#42587) Fixes #39448 Several node version managers such as [volta](https://volta.sh) use thin wrappers that locate the "real" node/npm binary with an env var that points at their install root. When it finds this, it prepends the correct directory to PATH, otherwise it'll check a hardcoded default location and prepend that to PATH if it exists. We were clearing env for npm subcommands, which meant that volta and co. failed to locate the install root, and because they were installed via scoop they don't use the default install path either so it simply doesn't prepend anything to PATH (winget on the other hand installs volta to the right place, which is why it worked when using that instead of scoop to install volta @IllusionaryX). So volta's npm wrapper executes a subcommand `npm`, but when that doesn't prepend a different directory to PATH the first `npm` found in PATH is that same wrapper itself, which horrifyingly causes itself to re-exec continuously. I think they might have some logic to try to prevent this using, you'll never guess, another env var that they set whenever a volta wrapper execs something. Of course since we clear the env that var also fails to propagate. Removing env clearing (but keeping the prepending of npm path from your settings) fixes these issues. 
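For illustration, the pattern the fix relies on looks roughly like this (a minimal standalone sketch, not the actual `node_runtime` code; the helper name and node directory below are made up): inherit the parent environment and only prepend the managed node directory to `PATH`, instead of wiping everything with `env_clear()`.

```
// Sketch only: spawn npm while inheriting the parent environment.
// Wrapper shims like volta keep seeing their own env vars (install root,
// re-exec guards), so they can resolve the real npm and avoid looping.
use std::{env, path::PathBuf, process::Command};

fn npm_command(node_bin_dir: PathBuf, subcommand: &str) -> Command {
    // Prepend the managed node directory to the *inherited* PATH.
    let mut paths = vec![node_bin_dir];
    if let Some(existing) = env::var_os("PATH") {
        paths.extend(env::split_paths(&existing));
    }
    let path = env::join_paths(paths).expect("PATH entries should not contain separators");

    let mut command = Command::new("npm");
    // Note: no `env_clear()` here; only PATH is overridden.
    command.env("PATH", path).arg(subcommand);
    command
}

fn main() -> std::io::Result<()> {
    let status = npm_command(PathBuf::from("/opt/node/bin"), "--version").status()?;
    println!("npm exited with {status}");
    Ok(())
}
```

Because the child process still sees the wrapper's own environment variables, volta and friends can locate their install root and put the real npm first on PATH themselves.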
Release Notes: - Fixed issues with scoop installations of mise/volta Co-authored-by: John Tur --- crates/node_runtime/src/node_runtime.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/node_runtime/src/node_runtime.rs b/crates/node_runtime/src/node_runtime.rs index c82fc178edfde742d5dedd79b4787a6b5f229d31..6e705289a3fd9e814574198004b8213e5147b7ca 100644 --- a/crates/node_runtime/src/node_runtime.rs +++ b/crates/node_runtime/src/node_runtime.rs @@ -557,7 +557,6 @@ impl NodeRuntimeTrait for ManagedNodeRuntime { let node_ca_certs = env::var(NODE_CA_CERTS_ENV_VAR).unwrap_or_else(|_| String::new()); let mut command = util::command::new_smol_command(node_binary); - command.env_clear(); command.env("PATH", env_path); command.env(NODE_CA_CERTS_ENV_VAR, node_ca_certs); command.arg(npm_file).arg(subcommand); From b32559f07d5d473dcac9ebd714786a95c76ffdd6 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 12 Nov 2025 21:50:15 -0700 Subject: [PATCH 0074/1030] Avoid re-creating releases when re-running workflows (#42573) Closes #ISSUE Release Notes: - N/A --- script/create-draft-release | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/create-draft-release b/script/create-draft-release index 95b1a1450a8b2ba65e512b317bfe13c11556dd9c..d50ebf2e5a19a57a652e24ec66de893bde97f47c 100755 --- a/script/create-draft-release +++ b/script/create-draft-release @@ -5,4 +5,5 @@ if [[ "$GITHUB_REF_NAME" == *"-pre" ]]; then preview="-p" fi -gh release create -t "$GITHUB_REF_NAME" -d "$GITHUB_REF_NAME" -F "$1" $preview +gh release view "$GITHUB_REF_NAME" ||\ + gh release create -t "$GITHUB_REF_NAME" -d "$GITHUB_REF_NAME" -F "$1" $preview From 1936f16c621a77834c4e582e7dd2f7b2176037fc Mon Sep 17 00:00:00 2001 From: Sean Hagstrom Date: Wed, 12 Nov 2025 21:58:13 -0800 Subject: [PATCH 0075/1030] editor: Use a single newline between each copied line from a multi-cursor selection (#41204) Closes #40923 Release Notes: - Fixed the amount of newlines between copied lines from a multi-cursor selection of multiple full-line copies. 
--- https://github.com/user-attachments/assets/ab7474d6-0e49-4c29-9700-7692cd019cef --- crates/editor/src/editor.rs | 14 ++++++-- crates/editor/src/editor_tests.rs | 54 +++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 3 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 057d0b223bb43b41c863316010648005b3675119..0d3325ff8212b6ae9dcc5a9c34dd13e4c5324178 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -12585,6 +12585,7 @@ impl Editor { { let max_point = buffer.max_point(); let mut is_first = true; + let mut prev_selection_was_entire_line = false; for selection in &mut selections { let is_entire_line = (selection.is_empty() && cut_no_selection_line) || self.selections.line_mode(); @@ -12599,9 +12600,10 @@ impl Editor { } if is_first { is_first = false; - } else { + } else if !prev_selection_was_entire_line { text += "\n"; } + prev_selection_was_entire_line = is_entire_line; let mut len = 0; for chunk in buffer.text_for_range(selection.start..selection.end) { text.push_str(chunk); @@ -12684,6 +12686,7 @@ impl Editor { { let max_point = buffer.max_point(); let mut is_first = true; + let mut prev_selection_was_entire_line = false; for selection in &selections { let mut start = selection.start; let mut end = selection.end; @@ -12742,9 +12745,10 @@ impl Editor { for trimmed_range in trimmed_selections { if is_first { is_first = false; - } else { + } else if !prev_selection_was_entire_line { text += "\n"; } + prev_selection_was_entire_line = is_entire_line; let mut len = 0; for chunk in buffer.text_for_range(trimmed_range.start..trimmed_range.end) { text.push_str(chunk); @@ -12818,7 +12822,11 @@ impl Editor { let end_offset = start_offset + clipboard_selection.len; to_insert = &clipboard_text[start_offset..end_offset]; entire_line = clipboard_selection.is_entire_line; - start_offset = end_offset + 1; + start_offset = if entire_line { + end_offset + } else { + end_offset + 1 + }; original_indent_column = Some(clipboard_selection.first_line_indent); } else { to_insert = &*clipboard_text; diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 36d7023db33587e43260640782f47522dbb41c6b..0b485d4e1adac071a82d1ad8bde53f07d14f1434 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -27375,6 +27375,60 @@ async fn test_copy_line_without_trailing_newline(cx: &mut TestAppContext) { cx.assert_editor_state("line1\nline2\nˇ"); } +#[gpui::test] +async fn test_multi_selection_copy_with_newline_between_copied_lines(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + + let mut cx = EditorTestContext::new(cx).await; + + cx.set_state("ˇline1\nˇline2\nˇline3\n"); + + cx.update_editor(|e, window, cx| e.copy(&Copy, window, cx)); + + let clipboard_text = cx + .read_from_clipboard() + .and_then(|item| item.text().as_deref().map(str::to_string)); + + assert_eq!( + clipboard_text, + Some("line1\nline2\nline3\n".to_string()), + "Copying multiple lines should include a single newline between lines" + ); + + cx.set_state("lineA\nˇ"); + + cx.update_editor(|e, window, cx| e.paste(&Paste, window, cx)); + + cx.assert_editor_state("lineA\nline1\nline2\nline3\nˇ"); +} + +#[gpui::test] +async fn test_multi_selection_cut_with_newline_between_copied_lines(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + + let mut cx = EditorTestContext::new(cx).await; + + cx.set_state("ˇline1\nˇline2\nˇline3\n"); + + cx.update_editor(|e, window, cx| e.cut(&Cut, window, cx)); + + let 
clipboard_text = cx + .read_from_clipboard() + .and_then(|item| item.text().as_deref().map(str::to_string)); + + assert_eq!( + clipboard_text, + Some("line1\nline2\nline3\n".to_string()), + "Copying multiple lines should include a single newline between lines" + ); + + cx.set_state("lineA\nˇ"); + + cx.update_editor(|e, window, cx| e.paste(&Paste, window, cx)); + + cx.assert_editor_state("lineA\nline1\nline2\nline3\nˇ"); +} + #[gpui::test] async fn test_end_of_editor_context(cx: &mut TestAppContext) { init_test(cx, |_| {}); From 045ac6d1b6ee94c8084f8887ad3df543bd749b7d Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 12 Nov 2025 23:11:09 -0700 Subject: [PATCH 0076/1030] Release failure visibility (#42572) Closes #ISSUE Release Notes: - N/A --- .github/workflows/after_release.yml | 16 ++++++++++++++++ .github/workflows/release.yml | 14 ++++++++++++++ .github/workflows/release_nightly.yml | 18 ++++++++++++++++++ .../xtask/src/tasks/workflows/after_release.rs | 10 +++++++++- tooling/xtask/src/tasks/workflows/release.rs | 16 ++++++++++++++++ .../src/tasks/workflows/release_nightly.rs | 4 +++- tooling/xtask/src/tasks/workflows/vars.rs | 1 + 7 files changed, 77 insertions(+), 2 deletions(-) diff --git a/.github/workflows/after_release.yml b/.github/workflows/after_release.yml index 2279b3647b646b30a7e7badfb235c2a41fe83ad2..f9412672b3dd0fc5e029b3e0dc0c7a93d8582aa2 100644 --- a/.github/workflows/after_release.yml +++ b/.github/workflows/after_release.yml @@ -86,3 +86,19 @@ jobs: SENTRY_ORG: zed-dev SENTRY_PROJECT: zed SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }} + notify_on_failure: + needs: + - rebuild_releases_page + - post_to_discord + - publish_winget + - create_sentry_release + if: failure() + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: release::notify_on_failure::notify_slack + run: |- + curl -X POST -H 'Content-type: application/json'\ + --data '{"text":"${{ github.workflow }} failed: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' "$SLACK_WEBHOOK" + shell: bash -euxo pipefail {0} + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_WORKFLOW_FAILURES }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 88c719d60027aa6a684666cbc1f7430c9f471502..d591ebc509fb9347daf41614c7b53f09a5ec0312 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -477,6 +477,20 @@ jobs: shell: bash -euxo pipefail {0} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + notify_on_failure: + needs: + - upload_release_assets + - auto_release_preview + if: failure() + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: release::notify_on_failure::notify_slack + run: |- + curl -X POST -H 'Content-type: application/json'\ + --data '{"text":"${{ github.workflow }} failed: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' "$SLACK_WEBHOOK" + shell: bash -euxo pipefail {0} + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_WORKFLOW_FAILURES }} concurrency: group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} cancel-in-progress: true diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index c5d498fe49c350dcb651f1037d9c470e921f2cb7..0604aad15531fc7f013b4790ba1e0efa9c21eb52 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -490,3 +490,21 @@ jobs: SENTRY_PROJECT: zed SENTRY_AUTH_TOKEN: ${{ secrets.SENTRY_AUTH_TOKEN }} 
timeout-minutes: 60 + notify_on_failure: + needs: + - bundle_linux_aarch64 + - bundle_linux_x86_64 + - bundle_mac_aarch64 + - bundle_mac_x86_64 + - bundle_windows_aarch64 + - bundle_windows_x86_64 + if: failure() + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: release::notify_on_failure::notify_slack + run: |- + curl -X POST -H 'Content-type: application/json'\ + --data '{"text":"${{ github.workflow }} failed: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}' "$SLACK_WEBHOOK" + shell: bash -euxo pipefail {0} + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK_WORKFLOW_FAILURES }} diff --git a/tooling/xtask/src/tasks/workflows/after_release.rs b/tooling/xtask/src/tasks/workflows/after_release.rs index bdf4afedb2c5a8a92fed5cb19e2c3e2469f97a65..d626faf52098a41c531eab0f13f4d727ea7a7cf9 100644 --- a/tooling/xtask/src/tasks/workflows/after_release.rs +++ b/tooling/xtask/src/tasks/workflows/after_release.rs @@ -1,7 +1,8 @@ use gh_workflow::*; use crate::tasks::workflows::{ - release, runners, + release::{self, notify_on_failure}, + runners, steps::{NamedJob, checkout_repo, dependant_job, named}, vars::{self, StepOutput}, }; @@ -11,6 +12,12 @@ pub fn after_release() -> Workflow { let post_to_discord = post_to_discord(&[&refresh_zed_dev]); let publish_winget = publish_winget(); let create_sentry_release = create_sentry_release(); + let notify_on_failure = notify_on_failure(&[ + &refresh_zed_dev, + &post_to_discord, + &publish_winget, + &create_sentry_release, + ]); named::workflow() .on(Event::default().release(Release::default().types(vec![ReleaseType::Published]))) @@ -18,6 +25,7 @@ pub fn after_release() -> Workflow { .add_job(post_to_discord.name, post_to_discord.job) .add_job(publish_winget.name, publish_winget.job) .add_job(create_sentry_release.name, create_sentry_release.job) + .add_job(notify_on_failure.name, notify_on_failure.job) } fn rebuild_releases_page() -> NamedJob { diff --git a/tooling/xtask/src/tasks/workflows/release.rs b/tooling/xtask/src/tasks/workflows/release.rs index a38c70bfe97470f409b5f8cc3a2b042721928722..e06a71340192c036d442d65d9572e52ed2983cae 100644 --- a/tooling/xtask/src/tasks/workflows/release.rs +++ b/tooling/xtask/src/tasks/workflows/release.rs @@ -28,6 +28,7 @@ pub(crate) fn release() -> Workflow { let upload_release_assets = upload_release_assets(&[&create_draft_release], &bundle); let auto_release_preview = auto_release_preview(&[&upload_release_assets]); + let notify_on_failure = notify_on_failure(&[&upload_release_assets, &auto_release_preview]); named::workflow() .on(Event::default().push(Push::default().tags(vec!["v*".to_string()]))) @@ -47,6 +48,7 @@ pub(crate) fn release() -> Workflow { }) .add_job(upload_release_assets.name, upload_release_assets.job) .add_job(auto_release_preview.name, auto_release_preview.job) + .add_job(notify_on_failure.name, notify_on_failure.job) } pub(crate) struct ReleaseBundleJobs { @@ -177,3 +179,17 @@ fn create_draft_release() -> NamedJob { .add_step(create_release()), ) } + +pub(crate) fn notify_on_failure(deps: &[&NamedJob]) -> NamedJob { + fn notify_slack() -> Step { + named::bash( + "curl -X POST -H 'Content-type: application/json'\\\n --data '{\"text\":\"${{ github.workflow }} failed: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}\"}' \"$SLACK_WEBHOOK\"" + ).add_env(("SLACK_WEBHOOK", vars::SLACK_WEBHOOK_WORKFLOW_FAILURES)) + } + + let job = dependant_job(deps) + .runs_on(runners::LINUX_SMALL) + .cond(Expression::new("failure()")) + 
.add_step(notify_slack()); + named::job(job) +} diff --git a/tooling/xtask/src/tasks/workflows/release_nightly.rs b/tooling/xtask/src/tasks/workflows/release_nightly.rs index f557a26f670320a452d83cbc7d48cbabf115b8df..c916013c0bc6f5ad89d56f382f3484097edcbed3 100644 --- a/tooling/xtask/src/tasks/workflows/release_nightly.rs +++ b/tooling/xtask/src/tasks/workflows/release_nightly.rs @@ -1,7 +1,7 @@ use crate::tasks::workflows::{ nix_build::build_nix, release::{ - ReleaseBundleJobs, create_sentry_release, download_workflow_artifacts, + ReleaseBundleJobs, create_sentry_release, download_workflow_artifacts, notify_on_failure, prep_release_artifacts, }, run_bundling::{bundle_linux, bundle_mac, bundle_windows}, @@ -44,6 +44,7 @@ pub fn release_nightly() -> Workflow { &[&style, &tests], ); let update_nightly_tag = update_nightly_tag_job(&bundle); + let notify_on_failure = notify_on_failure(&bundle.jobs()); named::workflow() .on(Event::default() @@ -63,6 +64,7 @@ pub fn release_nightly() -> Workflow { .add_job(nix_linux_x86.name, nix_linux_x86.job) .add_job(nix_mac_arm.name, nix_mac_arm.job) .add_job(update_nightly_tag.name, update_nightly_tag.job) + .add_job(notify_on_failure.name, notify_on_failure.job) } fn check_style() -> NamedJob { diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index e5cc3bc222fab974412f294439648c0d23da50eb..1b3a0ec72ab7ad9a3a6c4446f5e08743d0212a2b 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -42,6 +42,7 @@ secret!(ZED_ZIPPY_APP_PRIVATE_KEY); secret!(DISCORD_WEBHOOK_RELEASE_NOTES); secret!(WINGET_TOKEN); secret!(VERCEL_TOKEN); +secret!(SLACK_WEBHOOK_WORKFLOW_FAILURES); // todo(ci) make these secrets too... var!(AZURE_SIGNING_ACCOUNT_NAME); From 1fc0642de1f8f8f53ee5b74e7e5be44d932ad315 Mon Sep 17 00:00:00 2001 From: AidanV <84053180+AidanV@users.noreply.github.com> Date: Wed, 12 Nov 2025 22:46:14 -0800 Subject: [PATCH 0077/1030] vim: Make each vim repeat its own transaction (#41735) Release Notes: - Pressing `u` after multiple `.` in rapid succession will now only undo the latest repeat instead of all repeats. 
--------- Co-authored-by: Conrad Irwin --- crates/vim/src/normal/repeat.rs | 19 ++++++++++++++++++- crates/vim/src/test.rs | 16 ++++++++++++++++ .../test_data/test_repeat_grouping_41735.json | 10 ++++++++++ 3 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 crates/vim/test_data/test_repeat_grouping_41735.json diff --git a/crates/vim/src/normal/repeat.rs b/crates/vim/src/normal/repeat.rs index 2d7927480869f7a14cff7e2051ec421268df1d97..e0b515595db013b23730c535df64123aa4dd6707 100644 --- a/crates/vim/src/normal/repeat.rs +++ b/crates/vim/src/normal/repeat.rs @@ -110,7 +110,24 @@ impl Replayer { } lock.running = true; let this = self.clone(); - window.defer(cx, move |window, cx| this.next(window, cx)) + window.defer(cx, move |window, cx| { + this.next(window, cx); + let Some(Some(workspace)) = window.root::() else { + return; + }; + let Some(editor) = workspace + .read(cx) + .active_item(cx) + .and_then(|item| item.act_as::(cx)) + else { + return; + }; + editor.update(cx, |editor, cx| { + editor + .buffer() + .update(cx, |multi, cx| multi.finalize_last_transaction(cx)) + }); + }) } pub fn stop(self) { diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs index cb02a3ab0fafdeec254e8b3722bdd877fbeda0e2..d6aa116e8ddb12c0f3aff15fbe971b701fe90ab7 100644 --- a/crates/vim/src/test.rs +++ b/crates/vim/src/test.rs @@ -2365,3 +2365,19 @@ async fn test_wrap_selections_in_tag_line_mode(cx: &mut gpui::TestAppContext) { Mode::VisualLine, ); } + +#[gpui::test] +async fn test_repeat_grouping_41735(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new(cx).await; + + // typically transaction gropuing is disabled in tests, but here we need to test it. + cx.update_buffer(|buffer, _cx| buffer.set_group_interval(Duration::from_millis(300))); + + cx.set_shared_state("ˇ").await; + + cx.simulate_shared_keystrokes("i a escape").await; + cx.simulate_shared_keystrokes(". . .").await; + cx.shared_state().await.assert_eq("ˇaaaa"); + cx.simulate_shared_keystrokes("u").await; + cx.shared_state().await.assert_eq("ˇaaa"); +} diff --git a/crates/vim/test_data/test_repeat_grouping_41735.json b/crates/vim/test_data/test_repeat_grouping_41735.json new file mode 100644 index 0000000000000000000000000000000000000000..6523be6e4bebad7162a15da1af3455394abdfe12 --- /dev/null +++ b/crates/vim/test_data/test_repeat_grouping_41735.json @@ -0,0 +1,10 @@ +{"Put":{"state":"ˇ"}} +{"Key":"i"} +{"Key":"a"} +{"Key":"escape"} +{"Key":"."} +{"Key":"."} +{"Key":"."} +{"Get":{"state":"ˇaaaa","mode":"Normal"}} +{"Key":"u"} +{"Get":{"state":"ˇaaa","mode":"Normal"}} From b92b28314f53a08fa05374e0c7dd35de88a25dd0 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 13 Nov 2025 09:11:18 +0100 Subject: [PATCH 0078/1030] Replace {floor/ceil}_char_boundary polyfills with std (#42599) Release Notes: - N/A *or* Added/Fixed/Improved ... 
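For context on the std methods this switches to: `str::floor_char_boundary` rounds a byte index down to the nearest UTF-8 character boundary and `str::ceil_char_boundary` rounds it up, which is the behavior the removed polyfills emulated. A small illustration (not code from this diff; the sample string is arbitrary):

```
fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3, so byte index 2 is not a char boundary
    assert!(!s.is_char_boundary(2));

    // Round down / up to the nearest valid boundary.
    assert_eq!(s.floor_char_boundary(2), 1);
    assert_eq!(s.ceil_char_boundary(2), 3);

    // An index past the end clamps to the string length.
    assert_eq!(s.floor_char_boundary(s.len() + 10), s.len());

    println!("boundary checks passed");
}
```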
--- crates/editor/src/display_map/inlay_map.rs | 33 +-------- crates/rope/src/chunk.rs | 28 +------ crates/rope/src/rope.rs | 85 ++-------------------- crates/util/src/util.rs | 6 ++ 4 files changed, 18 insertions(+), 134 deletions(-) diff --git a/crates/editor/src/display_map/inlay_map.rs b/crates/editor/src/display_map/inlay_map.rs index 697cf1f68ceac4f6a777a3ec401658394f646af5..f3f3a3eee8ea6d1f95261ae4d313afb6f4d497e3 100644 --- a/crates/editor/src/display_map/inlay_map.rs +++ b/crates/editor/src/display_map/inlay_map.rs @@ -248,10 +248,8 @@ impl<'a> Iterator for InlayChunks<'a> { // Determine split index handling edge cases let split_index = if desired_bytes >= chunk.text.len() { chunk.text.len() - } else if chunk.text.is_char_boundary(desired_bytes) { - desired_bytes } else { - find_next_utf8_boundary(chunk.text, desired_bytes) + chunk.text.ceil_char_boundary(desired_bytes) }; let (prefix, suffix) = chunk.text.split_at(split_index); @@ -373,10 +371,8 @@ impl<'a> Iterator for InlayChunks<'a> { .next() .map(|c| c.len_utf8()) .unwrap_or(1) - } else if inlay_chunk.is_char_boundary(next_inlay_highlight_endpoint) { - next_inlay_highlight_endpoint } else { - find_next_utf8_boundary(inlay_chunk, next_inlay_highlight_endpoint) + inlay_chunk.ceil_char_boundary(next_inlay_highlight_endpoint) }; let (chunk, remainder) = inlay_chunk.split_at(split_index); @@ -1146,31 +1142,6 @@ fn push_isomorphic(sum_tree: &mut SumTree, summary: TextSummary) { } } -/// Given a byte index that is NOT a UTF-8 boundary, find the next one. -/// Assumes: 0 < byte_index < text.len() and !text.is_char_boundary(byte_index) -#[inline(always)] -fn find_next_utf8_boundary(text: &str, byte_index: usize) -> usize { - let bytes = text.as_bytes(); - let mut idx = byte_index + 1; - - // Scan forward until we find a boundary - while idx < text.len() { - if is_utf8_char_boundary(bytes[idx]) { - return idx; - } - idx += 1; - } - - // Hit the end, return the full length - text.len() -} - -// Private helper function taken from Rust's core::num module (which is both Apache2 and MIT licensed) -const fn is_utf8_char_boundary(byte: u8) -> bool { - // This is bit magic equivalent to: b < 128 || b >= 192 - (byte as i8) >= -0x40 -} - #[cfg(test)] mod tests { use super::*; diff --git a/crates/rope/src/chunk.rs b/crates/rope/src/chunk.rs index 4c1e4cd68560f15274722ff1d8249205300c4e68..7ada5c2052481408bc5af56740f8e35916623f14 100644 --- a/crates/rope/src/chunk.rs +++ b/crates/rope/src/chunk.rs @@ -110,18 +110,12 @@ impl Chunk { } pub fn floor_char_boundary(&self, index: usize) -> usize { - #[inline] - pub(crate) const fn is_utf8_char_boundary(u8: u8) -> bool { - // This is bit magic equivalent to: b < 128 || b >= 192 - (u8 as i8) >= -0x40 - } - if index >= self.text.len() { self.text.len() } else { let mut i = index; while i > 0 { - if is_utf8_char_boundary(self.text.as_bytes()[i]) { + if util::is_utf8_char_boundary(self.text.as_bytes()[i]) { break; } i -= 1; @@ -423,25 +417,7 @@ impl<'a> ChunkSlice<'a> { } pub fn floor_char_boundary(&self, index: usize) -> usize { - #[inline] - pub(crate) const fn is_utf8_char_boundary(u8: u8) -> bool { - // This is bit magic equivalent to: b < 128 || b >= 192 - (u8 as i8) >= -0x40 - } - - if index >= self.text.len() { - self.text.len() - } else { - let mut i = index; - while i > 0 { - if is_utf8_char_boundary(self.text.as_bytes()[i]) { - break; - } - i -= 1; - } - - i - } + self.text.floor_char_boundary(index) } #[inline(always)] diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index 
394e6ef0ca589d19ffcf7cf07a92bcd15c8e4a18..a5699554a32b552e395001ded24512e10d645d4b 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -74,29 +74,9 @@ impl Rope { if index >= self.len() { self.len() } else { - #[inline] - pub(crate) const fn is_utf8_char_boundary(u8: u8) -> bool { - // This is bit magic equivalent to: b < 128 || b >= 192 - (u8 as i8) >= -0x40 - } - let (start, _, item) = self.chunks.find::((), &index, Bias::Left); let chunk_offset = index - start; - let lower_idx = item.map(|chunk| { - let lower_bound = chunk_offset.saturating_sub(3); - chunk - .text - .as_bytes() - .get(lower_bound..=chunk_offset) - .map(|it| { - let new_idx = it - .iter() - .rposition(|&b| is_utf8_char_boundary(b)) - .unwrap_or(0); - lower_bound + new_idx - }) - .unwrap_or(chunk.text.len()) - }); + let lower_idx = item.map(|chunk| chunk.text.floor_char_boundary(chunk_offset)); lower_idx.map_or_else(|| self.len(), |idx| start + idx) } } @@ -105,22 +85,9 @@ impl Rope { if index > self.len() { self.len() } else { - #[inline] - pub(crate) const fn is_utf8_char_boundary(u8: u8) -> bool { - // This is bit magic equivalent to: b < 128 || b >= 192 - (u8 as i8) >= -0x40 - } - let (start, _, item) = self.chunks.find::((), &index, Bias::Left); let chunk_offset = index - start; - let upper_idx = item.map(|chunk| { - let upper_bound = Ord::min(chunk_offset + 4, chunk.text.len()); - chunk.text.as_bytes()[chunk_offset..upper_bound] - .iter() - .position(|&b| is_utf8_char_boundary(b)) - .map_or(upper_bound, |pos| pos + chunk_offset) - }); - + let upper_idx = item.map(|chunk| chunk.text.ceil_char_boundary(chunk_offset)); upper_idx.map_or_else(|| self.len(), |idx| start + idx) } } @@ -2186,79 +2153,43 @@ mod tests { #[test] fn test_floor_char_boundary() { - // polyfill of str::floor_char_boundary - fn floor_char_boundary(str: &str, index: usize) -> usize { - if index >= str.len() { - str.len() - } else { - let lower_bound = index.saturating_sub(3); - let new_index = str.as_bytes()[lower_bound..=index] - .iter() - .rposition(|b| (*b as i8) >= -0x40); - - lower_bound + new_index.unwrap() - } - } - let fixture = "地"; let rope = Rope::from("地"); for b in 0..=fixture.len() { - assert_eq!( - rope.floor_char_boundary(b), - floor_char_boundary(&fixture, b) - ); + assert_eq!(rope.floor_char_boundary(b), fixture.floor_char_boundary(b)); } let fixture = ""; let rope = Rope::from(""); for b in 0..=fixture.len() { - assert_eq!( - rope.floor_char_boundary(b), - floor_char_boundary(&fixture, b) - ); + assert_eq!(rope.floor_char_boundary(b), fixture.floor_char_boundary(b)); } let fixture = "🔴🟠🟡🟢🔵🟣⚫️⚪️🟤\n🏳️‍⚧️🏁🏳️‍🌈🏴‍☠️⛳️📬📭🏴🏳️🚩"; let rope = Rope::from("🔴🟠🟡🟢🔵🟣⚫️⚪️🟤\n🏳️‍⚧️🏁🏳️‍🌈🏴‍☠️⛳️📬📭🏴🏳️🚩"); for b in 0..=fixture.len() { - assert_eq!( - rope.floor_char_boundary(b), - floor_char_boundary(&fixture, b) - ); + assert_eq!(rope.floor_char_boundary(b), fixture.floor_char_boundary(b)); } } #[test] fn test_ceil_char_boundary() { - // polyfill of str::ceil_char_boundary - fn ceil_char_boundary(str: &str, index: usize) -> usize { - if index > str.len() { - str.len() - } else { - let upper_bound = Ord::min(index + 4, str.len()); - str.as_bytes()[index..upper_bound] - .iter() - .position(|b| (*b as i8) >= -0x40) - .map_or(upper_bound, |pos| pos + index) - } - } - let fixture = "地"; let rope = Rope::from("地"); for b in 0..=fixture.len() { - assert_eq!(rope.ceil_char_boundary(b), ceil_char_boundary(&fixture, b)); + assert_eq!(rope.ceil_char_boundary(b), fixture.ceil_char_boundary(b)); } let fixture = ""; let rope = Rope::from(""); for b 
in 0..=fixture.len() { - assert_eq!(rope.ceil_char_boundary(b), ceil_char_boundary(&fixture, b)); + assert_eq!(rope.ceil_char_boundary(b), fixture.ceil_char_boundary(b)); } let fixture = "🔴🟠🟡🟢🔵🟣⚫️⚪️🟤\n🏳️‍⚧️🏁🏳️‍🌈🏴‍☠️⛳️📬📭🏴🏳️🚩"; let rope = Rope::from("🔴🟠🟡🟢🔵🟣⚫️⚪️🟤\n🏳️‍⚧️🏁🏳️‍🌈🏴‍☠️⛳️📬📭🏴🏳️🚩"); for b in 0..=fixture.len() { - assert_eq!(rope.ceil_char_boundary(b), ceil_char_boundary(&fixture, b)); + assert_eq!(rope.ceil_char_boundary(b), fixture.ceil_char_boundary(b)); } } diff --git a/crates/util/src/util.rs b/crates/util/src/util.rs index 211b972e69deb9edf5c045a8fc2d52f5b8115bb2..169da43b5282456ab4b056149bcaf3dbda5b4534 100644 --- a/crates/util/src/util.rs +++ b/crates/util/src/util.rs @@ -51,6 +51,12 @@ macro_rules! debug_panic { }; } +#[inline] +pub const fn is_utf8_char_boundary(u8: u8) -> bool { + // This is bit magic equivalent to: b < 128 || b >= 192 + (u8 as i8) >= -0x40 +} + pub fn truncate(s: &str, max_chars: usize) -> &str { match s.char_indices().nth(max_chars) { None => s, From 7eac6d242ced63b2c839d653af60fcf902f44c01 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 13 Nov 2025 10:13:54 +0100 Subject: [PATCH 0079/1030] diagnostics: Workaround weird panic in `update_path_excerpts` (#42602) Fixes ZED-36P Patching this over for now until I can figure out the cause of this Release Notes: - Fixed panic in diagnostics pane --- crates/multi_buffer/src/path_key.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 09d098ea0727906500b37acd8f694f569fea75e2..56f28d26642439f7869bad714a8b3191dd4edbec 100644 --- a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -410,6 +410,8 @@ impl MultiBuffer { } self.insert_excerpts_with_ids_after(insert_after, buffer, to_insert, cx); + // todo(lw): There is a logic bug somewhere that causes the to_remove vector to be not ordered correctly + to_remove.sort_by_cached_key(|&id| snapshot.excerpt_locator_for_id(id)); self.remove_excerpts(to_remove, cx); if excerpt_ids.is_empty() { self.excerpts_by_path.remove(&path); From dea3c8c94966dc3aba93b7a93277a518e65ef777 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 13 Nov 2025 10:53:31 +0100 Subject: [PATCH 0080/1030] remote: More nushell fixes (#42608) Closes https://github.com/zed-industries/zed/issues/42594 Release Notes: - Fixed remote server installation failing with nutshell --- crates/remote/src/transport/ssh.rs | 13 +++++++------ crates/remote/src/transport/wsl.rs | 12 +++++++++--- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index ec020cba0b321ea3cb5929a3fa17cb6c425b1ef7..049ec3575a4b99438587dab2d048503259eb1618 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -487,7 +487,9 @@ impl SshRemoteConnection { drop(askpass); let ssh_shell = socket.shell().await; + log::info!("Remote shell discovered: {}", ssh_shell); let ssh_platform = socket.platform(ShellKind::new(&ssh_shell, false)).await?; + log::info!("Remote platform discovered: {}", ssh_shell); let ssh_path_style = match ssh_platform.os { "windows" => PathStyle::Windows, _ => PathStyle::Posix, @@ -622,7 +624,7 @@ impl SshRemoteConnection { } Err(e) => { log::error!( - "Failed to download binary on server, attempting to upload server: {e:#}", + "Failed to download binary on server, attempting to download locally and then upload it the server: {e:#}", ) } } @@ -688,6 +690,7 @@ impl SshRemoteConnection { return 
Err(e); } + log::info!("curl is not available, trying wget"); match self .socket .run_command( @@ -973,13 +976,11 @@ impl SshSocket { args: &[impl AsRef], allow_pseudo_tty: bool, ) -> Result { - let output = self - .ssh_command(shell_kind, program, args, allow_pseudo_tty) - .output() - .await?; + let mut command = self.ssh_command(shell_kind, program, args, allow_pseudo_tty); + let output = command.output().await?; anyhow::ensure!( output.status.success(), - "failed to run command: {}", + "failed to run command {command:?}: {}", String::from_utf8_lossy(&output.stderr) ); Ok(String::from_utf8_lossy(&output.stdout).to_string()) diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index 3e14fcfc8b7b8a7f74b1de2a10107a143461108b..d9a7c7ea36f2c620bf0ba01d7735537b09883f08 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -84,12 +84,15 @@ impl WslRemoteConnection { .detect_shell() .await .context("failed detecting shell")?; + log::info!("Remote shell discovered: {}", this.shell); this.shell_kind = ShellKind::new(&this.shell, false); this.can_exec = this.detect_can_exec().await; + log::info!("Remote can exec: {}", this.can_exec); this.platform = this .detect_platform() .await .context("failed detecting platform")?; + log::info!("Remote platform discovered: {}", this.shell); this.remote_binary_path = Some( this.ensure_server_binary(&delegate, release_channel, version, commit, cx) .await @@ -178,7 +181,8 @@ impl WslRemoteConnection { if let Some(parent) = dst_path.parent() { let parent = parent.display(PathStyle::Posix); - self.run_wsl_command("mkdir", &["-p", &parent]) + let mkdir = self.shell_kind.prepend_command_prefix("mkdir"); + self.run_wsl_command(&mkdir, &["-p", &parent]) .await .map_err(|e| anyhow!("Failed to create directory: {}", e))?; } @@ -244,7 +248,8 @@ impl WslRemoteConnection { if let Some(parent) = dst_path.parent() { let parent = parent.display(PathStyle::Posix); - self.run_wsl_command("mkdir", &["-p", &parent]) + let mkdir = self.shell_kind.prepend_command_prefix("mkdir"); + self.run_wsl_command(&mkdir, &["-p", &parent]) .await .map_err(|e| anyhow!("Failed to create directory when uploading file: {}", e))?; } @@ -259,8 +264,9 @@ impl WslRemoteConnection { ); let src_path_in_wsl = self.windows_path_to_wsl_path(src_path).await?; + let cp = self.shell_kind.prepend_command_prefix("cp"); self.run_wsl_command( - "cp", + &cp, &["-f", &src_path_in_wsl, &dst_path.display(PathStyle::Posix)], ) .await From 99016e3a856abdbf508b3d81b93c5a2b23e69907 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 13 Nov 2025 12:35:16 +0200 Subject: [PATCH 0081/1030] Update outdated dependencies (#42611) New rustc starts to output a few warnings, fix them by updating the corresponding packages.
Incompatibility notes ``` The following warnings were discovered during the build. These warnings are an indication that the packages contain code that will become an error in a future release of Rust. These warnings typically cover changes to close soundness problems, unintended or undocumented behavior, or critical problems that cannot be fixed in a backwards-compatible fashion, and are not expected to be in wide use. Each warning should contain a link for more information on what the warning means and how to resolve it. To solve this problem, you can try the following approaches: - Some affected dependencies have newer versions available. You may want to consider updating them to a newer version to see if the issue has been fixed. num-bigint-dig v0.8.4 has the following newer versions available: 0.8.5, 0.9.0, 0.9.1 - If the issue is not solved by updating the dependencies, a fix has to be implemented by those dependencies. You can help with that by notifying the maintainers of this problem (e.g. by creating a bug report) or by proposing a fix to the maintainers (e.g. by creating a pull request): - num-bigint-dig@0.8.4 - Repository: https://github.com/dignifiedquire/num-bigint - Detailed warning command: `cargo report future-incompatibilities --id 1 --package num-bigint-dig@0.8.4` - If waiting for an upstream fix is not an option, you can use the `[patch]` section in `Cargo.toml` to use your own version of the dependency. For more information, see: https://doc.rust-lang.org/cargo/reference/overriding-dependencies.html#the-patch-section The package `num-bigint-dig v0.8.4` currently triggers the following future incompatibility lints: > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/biguint.rs:490:22 > | > 490 | BigUint::new(vec![1]) > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! > = note: for more information, see issue #120192 > > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/biguint.rs:2005:9 > | > 2005 | vec![0] > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! > = note: for more information, see issue #120192 > > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/biguint.rs:2027:16 > | > 2027 | return vec![b'0']; > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! > = note: for more information, see issue #120192 > > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/biguint.rs:2313:13 > | > 2313 | vec![0] > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
> = note: for more information, see issue #120192 > > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/prime.rs:138:22 > | > 138 | let mut moduli = vec![BigUint::zero(); prime_limit]; > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! > = note: for more information, see issue #120192 > > warning: macro `vec` is private > --> /Users/someonetoignore/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/num-bigint-dig-0.8.4/src/bigrand.rs:319:25 > | > 319 | let mut bytes = vec![0u8; bytes_len]; > | ^^^ > | > = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! > = note: for more information, see issue #120192 > ```
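For reference, the `[patch]` escape hatch that the Cargo report mentions would look roughly like the sketch below if waiting for an upstream release had not been an option. This is illustrative only — this change simply bumps the packages in `Cargo.lock` — and the git source shown is just the repository cited in the warning above:

```toml
# Hypothetical workspace Cargo.toml override — not part of this change.
# Redirects num-bigint-dig to the upstream repository named in the warning.
[patch.crates-io]
num-bigint-dig = { git = "https://github.com/dignifiedquire/num-bigint" }
```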
Release Notes: - N/A --- Cargo.lock | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2c799dc41d03e8ed961f5d854ac74797efd01ae..2c02183244f6651a6aad7e79aca2c218838def56 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10519,11 +10519,10 @@ dependencies = [ [[package]] name = "num-bigint-dig" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +checksum = "e661dda6640fad38e827a6d4a310ff4763082116fe217f279885c97f511bb0b7" dependencies = [ - "byteorder", "lazy_static", "libm", "num-integer", @@ -14385,9 +14384,9 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +checksum = "40a0376c50d0358279d9d643e4bf7b7be212f1f4ff1da9070a7b54d22ef75c88" dependencies = [ "const-oid", "digest", From 34a7cfb2e5e65969df3a70249ad20f9c020cbc6f Mon Sep 17 00:00:00 2001 From: mikeHag <36011050+mikeHag@users.noreply.github.com> Date: Thu, 13 Nov 2025 06:31:23 -0500 Subject: [PATCH 0082/1030] Update cargo.rs to allow debugging of integration test annotated with the ignore attribute (#42574) Address #40429 If an integration test is annotated with the ignore attribute, allow the "debug: Test" option of the debug scenario or Code Action to run with "--include-ignored" Closes #40429 Release Notes: - N/A --- crates/project/src/debugger/locators/cargo.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/project/src/debugger/locators/cargo.rs b/crates/project/src/debugger/locators/cargo.rs index 662b9ca7efcd53b8792127e531a9baba24967ea1..72aafff51609c8e55ac15f671a7b7f553b34d44f 100644 --- a/crates/project/src/debugger/locators/cargo.rs +++ b/crates/project/src/debugger/locators/cargo.rs @@ -148,6 +148,8 @@ impl DapLocator for CargoLocator { .first() .is_some_and(|arg| arg == "test" || arg == "t"); + let is_ignored = build_config.args.contains(&"--include-ignored".to_owned()); + let executables = output .lines() .filter(|line| !line.trim().is_empty()) @@ -205,6 +207,9 @@ impl DapLocator for CargoLocator { let mut args: Vec<_> = test_name.into_iter().collect(); if is_test { args.push("--nocapture".to_owned()); + if is_ignored { + args.push("--include-ignored".to_owned()); + } } Ok(DebugRequest::Launch(task::LaunchRequest { From b9ce52dc95697fea479c678a518f6e1d84f9cc66 Mon Sep 17 00:00:00 2001 From: "R.Amogh" Date: Thu, 13 Nov 2025 17:11:38 +0530 Subject: [PATCH 0083/1030] agent_ui: Fix scrolling in context server configuration modal (#42502) ## Summary Fixes #42342 When installing a dev extension with long installation instructions, the configuration modal would overflow and users couldn't scroll to see the full content or interact with buttons at the bottom. ## Solution This PR adds a `ScrollHandle` to the `ConfigureContextServerModal` and passes it to the `Modal` component, enabling the built-in modal scrolling capability. This ensures all content remains accessible regardless of length. 
## Changes - Added `ScrollHandle` import to the ui imports - Added `scroll_handle: ScrollHandle` field to `ConfigureContextServerModal` struct - Initialize `scroll_handle` with `ScrollHandle::new()` when creating the modal - Pass the scroll handle to `Modal::new()` instead of `None` ## Testing - Built the changes locally - Tested with extensions that have long installation instructions - Verified scrolling works and all content is accessible - Confirmed no regression for extensions with short descriptions Release Notes: - Fixed scrolling issue in extension configuration modal when installation instructions overflow the viewport --------- Co-authored-by: Finn Evers --- .../configure_context_server_modal.rs | 41 ++++++++++++++----- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index 88896f51086dc5f7d3eddb2fffef2fa3a7039c79..ed1e8afd1b3b3220d31119f7292b6b0934cd2ba7 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -7,8 +7,8 @@ use anyhow::{Context as _, Result}; use context_server::{ContextServerCommand, ContextServerId}; use editor::{Editor, EditorElement, EditorStyle}; use gpui::{ - AsyncWindowContext, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, Task, - TextStyle, TextStyleRefinement, UnderlineStyle, WeakEntity, prelude::*, + AsyncWindowContext, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, ScrollHandle, + Task, TextStyle, TextStyleRefinement, UnderlineStyle, WeakEntity, prelude::*, }; use language::{Language, LanguageRegistry}; use markdown::{Markdown, MarkdownElement, MarkdownStyle}; @@ -23,7 +23,8 @@ use project::{ use settings::{Settings as _, update_settings_file}; use theme::ThemeSettings; use ui::{ - CommonAnimationExt, KeyBinding, Modal, ModalFooter, ModalHeader, Section, Tooltip, prelude::*, + CommonAnimationExt, KeyBinding, Modal, ModalFooter, ModalHeader, Section, Tooltip, + WithScrollbar, prelude::*, }; use util::ResultExt as _; use workspace::{ModalView, Workspace}; @@ -252,6 +253,7 @@ pub struct ConfigureContextServerModal { source: ConfigurationSource, state: State, original_server_id: Option, + scroll_handle: ScrollHandle, } impl ConfigureContextServerModal { @@ -361,6 +363,7 @@ impl ConfigureContextServerModal { window, cx, ), + scroll_handle: ScrollHandle::new(), }) }) }) @@ -680,6 +683,7 @@ impl ConfigureContextServerModal { impl Render for ConfigureContextServerModal { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { + let scroll_handle = self.scroll_handle.clone(); div() .elevation_3(cx) .w(rems(34.)) @@ -699,14 +703,29 @@ impl Render for ConfigureContextServerModal { Modal::new("configure-context-server", None) .header(self.render_modal_header()) .section( - Section::new() - .child(self.render_modal_description(window, cx)) - .child(self.render_modal_content(cx)) - .child(match &self.state { - State::Idle => div(), - State::Waiting => Self::render_waiting_for_context_server(), - State::Error(error) => Self::render_modal_error(error.clone()), - }), + Section::new().child( + div() + .size_full() + .child( + div() + .id("modal-content") + .max_h(vh(0.7, window)) + .overflow_y_scroll() + .track_scroll(&scroll_handle) + .child(self.render_modal_description(window, cx)) + .child(self.render_modal_content(cx)) + .child(match 
&self.state { + State::Idle => div(), + State::Waiting => { + Self::render_waiting_for_context_server() + } + State::Error(error) => { + Self::render_modal_error(error.clone()) + } + }), + ) + .vertical_scrollbar_for(scroll_handle, window, cx), + ), ) .footer(self.render_modal_footer(cx)), ) From b5633f5bc79f537e1d2fc007f96b13e4e9784483 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Thu, 13 Nov 2025 17:14:33 +0530 Subject: [PATCH 0084/1030] editor: Improve multi-buffer header filename click to jump to the latest selection from that buffer - take 2 (#42613) Relands https://github.com/zed-industries/zed/pull/42480 Release Notes: - Clicking the multi-buffer header file name or the "Open file" button now jumps to the most recent selection in that buffer, if one exists. --------- Co-authored-by: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> --- crates/editor/src/element.rs | 102 ++++++++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 13 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 3080e0048230e35f6cd28c553a5ccc054a292e60..16f8ef4fca612528467612c4d12fe3bc659d8e04 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -3664,6 +3664,7 @@ impl EditorElement { row_block_types: &mut HashMap, selections: &[Selection], selected_buffer_ids: &Vec, + latest_selection_anchors: &HashMap, is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -3739,7 +3740,13 @@ impl EditorElement { let selected = selected_buffer_ids.contains(&first_excerpt.buffer_id); let result = v_flex().id(block_id).w_full().pr(editor_margins.right); - let jump_data = header_jump_data(snapshot, block_row_start, *height, first_excerpt); + let jump_data = header_jump_data( + snapshot, + block_row_start, + *height, + first_excerpt, + latest_selection_anchors, + ); result .child(self.render_buffer_header( first_excerpt, @@ -3774,7 +3781,13 @@ impl EditorElement { Block::BufferHeader { excerpt, height } => { let mut result = v_flex().id(block_id).w_full(); - let jump_data = header_jump_data(snapshot, block_row_start, *height, excerpt); + let jump_data = header_jump_data( + snapshot, + block_row_start, + *height, + excerpt, + latest_selection_anchors, + ); if sticky_header_excerpt_id != Some(excerpt.id) { let selected = selected_buffer_ids.contains(&excerpt.buffer_id); @@ -4236,6 +4249,7 @@ impl EditorElement { line_layouts: &mut [LineWithInvisibles], selections: &[Selection], selected_buffer_ids: &Vec, + latest_selection_anchors: &HashMap, is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, window: &mut Window, @@ -4279,6 +4293,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4336,6 +4351,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4391,6 +4407,7 @@ impl EditorElement { &mut row_block_types, selections, selected_buffer_ids, + latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -4473,6 +4490,7 @@ impl EditorElement { hitbox: &Hitbox, selected_buffer_ids: &Vec, blocks: &[BlockLayout], + latest_selection_anchors: &HashMap, window: &mut Window, cx: &mut App, ) -> AnyElement { @@ -4481,6 +4499,7 @@ impl EditorElement { DisplayRow(scroll_position.y as u32), FILE_HEADER_HEIGHT + 
MULTI_BUFFER_EXCERPT_HEADER_HEIGHT, excerpt, + latest_selection_anchors, ); let editor_bg_color = cx.theme().colors().editor_background; @@ -7783,15 +7802,46 @@ fn header_jump_data( snapshot: &EditorSnapshot, block_row_start: DisplayRow, height: u32, - for_excerpt: &ExcerptInfo, + first_excerpt: &ExcerptInfo, + latest_selection_anchors: &HashMap, ) -> JumpData { - let range = &for_excerpt.range; - let buffer = &for_excerpt.buffer; - let jump_anchor = range.primary.start; + let jump_target = if let Some(anchor) = latest_selection_anchors.get(&first_excerpt.buffer_id) + && let Some(range) = snapshot.context_range_for_excerpt(anchor.excerpt_id) + { + JumpTargetInExcerptInput { + id: anchor.excerpt_id, + buffer: &first_excerpt.buffer, + excerpt_start_anchor: range.start, + jump_anchor: anchor.text_anchor, + } + } else { + JumpTargetInExcerptInput { + id: first_excerpt.id, + buffer: &first_excerpt.buffer, + excerpt_start_anchor: first_excerpt.range.context.start, + jump_anchor: first_excerpt.range.primary.start, + } + }; + header_jump_data_inner(snapshot, block_row_start, height, &jump_target) +} + +struct JumpTargetInExcerptInput<'a> { + id: ExcerptId, + buffer: &'a language::BufferSnapshot, + excerpt_start_anchor: text::Anchor, + jump_anchor: text::Anchor, +} - let excerpt_start = range.context.start; - let jump_position = language::ToPoint::to_point(&jump_anchor, buffer); - let rows_from_excerpt_start = if jump_anchor == excerpt_start { +fn header_jump_data_inner( + snapshot: &EditorSnapshot, + block_row_start: DisplayRow, + height: u32, + for_excerpt: &JumpTargetInExcerptInput, +) -> JumpData { + let buffer = &for_excerpt.buffer; + let jump_position = language::ToPoint::to_point(&for_excerpt.jump_anchor, buffer); + let excerpt_start = for_excerpt.excerpt_start_anchor; + let rows_from_excerpt_start = if for_excerpt.jump_anchor == excerpt_start { 0 } else { let excerpt_start_point = language::ToPoint::to_point(&excerpt_start, buffer); @@ -7808,7 +7858,7 @@ fn header_jump_data( JumpData::MultiBufferPoint { excerpt_id: for_excerpt.id, - anchor: jump_anchor, + anchor: for_excerpt.jump_anchor, position: jump_position, line_offset_from_top, } @@ -9125,15 +9175,18 @@ impl Element for EditorElement { cx, ); - let (local_selections, selected_buffer_ids): ( + let (local_selections, selected_buffer_ids, latest_selection_anchors): ( Vec>, Vec, + HashMap, ) = self .editor_with_selections(cx) .map(|editor| { editor.update(cx, |editor, cx| { let all_selections = editor.selections.all::(&snapshot.display_snapshot); + let all_anchor_selections = + editor.selections.all_anchors(&snapshot.display_snapshot); let selected_buffer_ids = if editor.buffer_kind(cx) == ItemBufferKind::Singleton { Vec::new() @@ -9162,10 +9215,31 @@ impl Element for EditorElement { selections .extend(editor.selections.pending(&snapshot.display_snapshot)); - (selections, selected_buffer_ids) + let mut anchors_by_buffer: HashMap = + HashMap::default(); + for selection in all_anchor_selections.iter() { + let head = selection.head(); + if let Some(buffer_id) = head.buffer_id { + anchors_by_buffer + .entry(buffer_id) + .and_modify(|(latest_id, latest_anchor)| { + if selection.id > *latest_id { + *latest_id = selection.id; + *latest_anchor = head; + } + }) + .or_insert((selection.id, head)); + } + } + let latest_selection_anchors = anchors_by_buffer + .into_iter() + .map(|(buffer_id, (_, anchor))| (buffer_id, anchor)) + .collect(); + + (selections, selected_buffer_ids, latest_selection_anchors) }) }) - .unwrap_or_default(); + 
.unwrap_or_else(|| (Vec::new(), Vec::new(), HashMap::default())); let (selections, mut active_rows, newest_selection_head) = self .layout_selections( @@ -9396,6 +9470,7 @@ impl Element for EditorElement { &mut line_layouts, &local_selections, &selected_buffer_ids, + &latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, window, @@ -9429,6 +9504,7 @@ impl Element for EditorElement { &hitbox, &selected_buffer_ids, &blocks, + &latest_selection_anchors, window, cx, ) From 27f700e2b2afce4c150f641dc0f101922e198c42 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 13 Nov 2025 14:37:47 +0100 Subject: [PATCH 0085/1030] askpass: Quote paths in generated askpass script (#42622) Closes https://github.com/zed-industries/zed/issues/42618 Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/askpass/src/askpass.rs | 56 +++++++++++++++++++++--------- crates/remote/src/transport/ssh.rs | 2 +- 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index 81cdd355bf7173b3954a8c2731a0728d354253ba..0974409477d452958df13893e316845a919723c5 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -205,13 +205,9 @@ impl PasswordProxy { } else { ShellKind::Posix }; - let askpass_program = ASKPASS_PROGRAM - .get_or_init(|| current_exec) - .try_shell_safe(shell_kind) - .context("Failed to shell-escape Askpass program path.")? - .to_string(); + let askpass_program = ASKPASS_PROGRAM.get_or_init(|| current_exec); // Create an askpass script that communicates back to this process. - let askpass_script = generate_askpass_script(&askpass_program, &askpass_socket); + let askpass_script = generate_askpass_script(shell_kind, askpass_program, &askpass_socket)?; let _task = executor.spawn(async move { maybe!(async move { let listener = @@ -334,23 +330,51 @@ pub fn set_askpass_program(path: std::path::PathBuf) { #[inline] #[cfg(not(target_os = "windows"))] -fn generate_askpass_script(askpass_program: &str, askpass_socket: &std::path::Path) -> String { - format!( +fn generate_askpass_script( + shell_kind: ShellKind, + askpass_program: &std::path::Path, + askpass_socket: &std::path::Path, +) -> Result { + let askpass_program = shell_kind.prepend_command_prefix( + askpass_program + .to_str() + .context("Askpass program is on a non-utf8 path")?, + ); + let askpass_program = shell_kind + .try_quote_prefix_aware(&askpass_program) + .context("Failed to shell-escape Askpass program path")?; + let askpass_socket = askpass_socket + .try_shell_safe(shell_kind) + .context("Failed to shell-escape Askpass socket path")?; + let print_args = "printf '%s\\0' \"$@\""; + let shebang = "#!/bin/sh"; + Ok(format!( "{shebang}\n{print_args} | {askpass_program} --askpass={askpass_socket} 2> /dev/null \n", - askpass_socket = askpass_socket.display(), - print_args = "printf '%s\\0' \"$@\"", - shebang = "#!/bin/sh", - ) + )) } #[inline] #[cfg(target_os = "windows")] -fn generate_askpass_script(askpass_program: &str, askpass_socket: &std::path::Path) -> String { - format!( +fn generate_askpass_script( + shell_kind: ShellKind, + askpass_program: &std::path::Path, + askpass_socket: &std::path::Path, +) -> Result { + let askpass_program = shell_kind.prepend_command_prefix( + askpass_program + .to_str() + .context("Askpass program is on a non-utf8 path")?, + ); + let askpass_program = shell_kind + .try_quote_prefix_aware(&askpass_program) + .context("Failed to shell-escape Askpass program path")?; + let askpass_socket = askpass_socket + 
.try_shell_safe(shell_kind) + .context("Failed to shell-escape Askpass socket path")?; + Ok(format!( r#" $ErrorActionPreference = 'Stop'; ($args -join [char]0) | & {askpass_program} --askpass={askpass_socket} 2> $null "#, - askpass_socket = askpass_socket.display(), - ) + )) } diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index 049ec3575a4b99438587dab2d048503259eb1618..cf8e6f3e9cc9599aa7d2d05ea204c550892ac4c4 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -113,7 +113,7 @@ impl MasterProcess { .args(additional_args) .args(args); - master_process.arg(format!("ControlPath={}", socket_path.display())); + master_process.arg(format!("ControlPath='{}'", socket_path.display())); let process = master_process.arg(&url).spawn()?; From 430b55405a7cde4da0f91777580b81f952b32278 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 13 Nov 2025 14:56:40 +0100 Subject: [PATCH 0086/1030] search: New recent old search implementation (#40835) This is an in-progress work on changing how task scheduler affects performance of project search. Instead of relying on tasks being executed at a discretion of the task scheduler, we want to experiment with having a set of "agents" that prioritize driving in-progress project search matches to completion over pushing the whole thing to completion. This should hopefully significantly improve throughput & latency of project search. This PR has been reverted previously in #40831. Release Notes: - Improved project search performance in local projects. --------- Co-authored-by: Smit Barmase Co-authored-by: Smit Barmase --- Cargo.lock | 17 + Cargo.toml | 1 + crates/project/src/buffer_store.rs | 71 +- crates/project/src/project.rs | 235 ++---- crates/project/src/project_search.rs | 790 +++++++++++++++++++ crates/project/src/worktree_store.rs | 151 +--- crates/project_benchmarks/Cargo.toml | 21 + crates/project_benchmarks/LICENSE-GPL | 1 + crates/project_benchmarks/src/main.rs | 135 ++++ crates/remote_server/Cargo.toml | 2 +- crates/remote_server/src/headless_project.rs | 12 +- 11 files changed, 1047 insertions(+), 389 deletions(-) create mode 100644 crates/project/src/project_search.rs create mode 100644 crates/project_benchmarks/Cargo.toml create mode 120000 crates/project_benchmarks/LICENSE-GPL create mode 100644 crates/project_benchmarks/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index 2c02183244f6651a6aad7e79aca2c218838def56..7d26e24a8d46081157165dff92a9cd820e615054 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13064,6 +13064,23 @@ dependencies = [ "zlog", ] +[[package]] +name = "project_benchmarks" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "client", + "futures 0.3.31", + "gpui", + "http_client", + "language", + "node_runtime", + "project", + "settings", + "watch", +] + [[package]] name = "project_panel" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index c6471cd043152ca9c52cee671e3d494ab15be81a..328f5b8db5e870df4f1954ccbdb713173a520f8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,6 +127,7 @@ members = [ "crates/picker", "crates/prettier", "crates/project", + "crates/project_benchmarks", "crates/project_panel", "crates/project_symbols", "crates/prompt_store", diff --git a/crates/project/src/buffer_store.rs b/crates/project/src/buffer_store.rs index 39e302a2d9b1ae92cce9691c957cb9fcfbf26d7d..daafa014b28f62b04ece0f391c714d6ea699326c 100644 --- a/crates/project/src/buffer_store.rs +++ 
b/crates/project/src/buffer_store.rs @@ -1,14 +1,12 @@ use crate::{ - ProjectItem as _, ProjectPath, + ProjectPath, lsp_store::OpenLspBufferHandle, - search::SearchQuery, worktree_store::{WorktreeStore, WorktreeStoreEvent}, }; use anyhow::{Context as _, Result, anyhow}; use client::Client; use collections::{HashMap, HashSet, hash_map}; -use fs::Fs; -use futures::{Future, FutureExt as _, StreamExt, channel::oneshot, future::Shared}; +use futures::{Future, FutureExt as _, channel::oneshot, future::Shared}; use gpui::{ App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Subscription, Task, WeakEntity, }; @@ -23,8 +21,8 @@ use rpc::{ AnyProtoClient, ErrorCode, ErrorExt as _, TypedEnvelope, proto::{self}, }; -use smol::channel::Receiver; -use std::{io, pin::pin, sync::Arc, time::Instant}; + +use std::{io, sync::Arc, time::Instant}; use text::{BufferId, ReplicaId}; use util::{ResultExt as _, TryFutureExt, debug_panic, maybe, paths::PathStyle, rel_path::RelPath}; use worktree::{File, PathChange, ProjectEntryId, Worktree, WorktreeId}; @@ -978,6 +976,10 @@ impl BufferStore { .filter_map(|buffer| buffer.upgrade()) } + pub(crate) fn is_searchable(&self, id: &BufferId) -> bool { + !self.non_searchable_buffers.contains(&id) + } + pub fn loading_buffers( &self, ) -> impl Iterator>>)> { @@ -1102,63 +1104,6 @@ impl BufferStore { Some(()) } - pub fn find_search_candidates( - &mut self, - query: &SearchQuery, - mut limit: usize, - fs: Arc, - cx: &mut Context, - ) -> Receiver> { - let (tx, rx) = smol::channel::unbounded(); - let mut open_buffers = HashSet::default(); - let mut unnamed_buffers = Vec::new(); - for handle in self.buffers() { - let buffer = handle.read(cx); - if self.non_searchable_buffers.contains(&buffer.remote_id()) { - continue; - } else if let Some(entry_id) = buffer.entry_id(cx) { - open_buffers.insert(entry_id); - } else { - limit = limit.saturating_sub(1); - unnamed_buffers.push(handle) - }; - } - - const MAX_CONCURRENT_BUFFER_OPENS: usize = 64; - let project_paths_rx = self - .worktree_store - .update(cx, |worktree_store, cx| { - worktree_store.find_search_candidates(query.clone(), limit, open_buffers, fs, cx) - }) - .chunks(MAX_CONCURRENT_BUFFER_OPENS); - - cx.spawn(async move |this, cx| { - for buffer in unnamed_buffers { - tx.send(buffer).await.ok(); - } - - let mut project_paths_rx = pin!(project_paths_rx); - while let Some(project_paths) = project_paths_rx.next().await { - let buffers = this.update(cx, |this, cx| { - project_paths - .into_iter() - .map(|project_path| this.open_buffer(project_path, cx)) - .collect::>() - })?; - for buffer_task in buffers { - if let Some(buffer) = buffer_task.await.log_err() - && tx.send(buffer).await.is_err() - { - return anyhow::Ok(()); - } - } - } - anyhow::Ok(()) - }) - .detach(); - rx - } - fn on_buffer_event( &mut self, buffer: Entity, diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 25afd501d6c66d699a9238314f6a3d6886b8baa1..3f325aba2b18efb4f36faef4e0a655f716a860bd 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -11,6 +11,7 @@ pub mod lsp_command; pub mod lsp_store; mod manifest_tree; pub mod prettier_store; +mod project_search; pub mod project_settings; pub mod search; mod task_inventory; @@ -37,6 +38,7 @@ use dap::inline_value::{InlineValueLocation, VariableLookupKind, VariableScope}; use crate::{ git_store::GitStore, lsp_store::{SymbolLocation, log_store::LogKind}, + project_search::SearchResultsHandle, }; pub use agent_server_store::{AgentServerStore, 
AgentServersUpdated, ExternalAgentServerName}; pub use git_store::{ @@ -44,6 +46,7 @@ pub use git_store::{ git_traversal::{ChildEntriesGitIter, GitEntry, GitEntryRef, GitTraversal}, }; pub use manifest_tree::ManifestTree; +pub use project_search::Search; use anyhow::{Context as _, Result, anyhow}; use buffer_store::{BufferStore, BufferStoreEvent}; @@ -108,7 +111,7 @@ use snippet_provider::SnippetProvider; use std::{ borrow::Cow, collections::BTreeMap, - ops::Range, + ops::{Not as _, Range}, path::{Path, PathBuf}, pin::pin, str, @@ -122,7 +125,7 @@ use text::{Anchor, BufferId, OffsetRangeExt, Point, Rope}; use toolchain_store::EmptyToolchainStore; use util::{ ResultExt as _, maybe, - paths::{PathStyle, SanitizedPath, compare_paths, is_absolute}, + paths::{PathStyle, SanitizedPath, is_absolute}, rel_path::RelPath, }; use worktree::{CreatedEntry, Snapshot, Traversal}; @@ -149,8 +152,6 @@ pub use lsp_store::{ }; pub use toolchain_store::{ToolchainStore, Toolchains}; const MAX_PROJECT_SEARCH_HISTORY_SIZE: usize = 500; -const MAX_SEARCH_RESULT_FILES: usize = 5_000; -const MAX_SEARCH_RESULT_RANGES: usize = 10_000; pub trait ProjectItem: 'static { fn try_open( @@ -3998,179 +3999,44 @@ impl Project { }) } - pub fn search(&mut self, query: SearchQuery, cx: &mut Context) -> Receiver { - let (result_tx, result_rx) = smol::channel::unbounded(); - - let matching_buffers_rx = if query.is_opened_only() { - self.sort_search_candidates(&query, cx) - } else { - self.find_search_candidate_buffers(&query, MAX_SEARCH_RESULT_FILES + 1, cx) - }; - - cx.spawn(async move |_, cx| { - let mut range_count = 0; - let mut buffer_count = 0; - let mut limit_reached = false; - let query = Arc::new(query); - let chunks = matching_buffers_rx.ready_chunks(64); - - // Now that we know what paths match the query, we will load at most - // 64 buffers at a time to avoid overwhelming the main thread. For each - // opened buffer, we will spawn a background task that retrieves all the - // ranges in the buffer matched by the query. 
- let mut chunks = pin!(chunks); - 'outer: while let Some(matching_buffer_chunk) = chunks.next().await { - let mut chunk_results = Vec::with_capacity(matching_buffer_chunk.len()); - for buffer in matching_buffer_chunk { - let query = query.clone(); - let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?; - chunk_results.push(cx.background_spawn(async move { - let ranges = query - .search(&snapshot, None) - .await - .iter() - .map(|range| { - snapshot.anchor_before(range.start) - ..snapshot.anchor_after(range.end) - }) - .collect::>(); - anyhow::Ok((buffer, ranges)) - })); - } - - let chunk_results = futures::future::join_all(chunk_results).await; - for result in chunk_results { - if let Some((buffer, ranges)) = result.log_err() { - range_count += ranges.len(); - buffer_count += 1; - result_tx - .send(SearchResult::Buffer { buffer, ranges }) - .await?; - if buffer_count > MAX_SEARCH_RESULT_FILES - || range_count > MAX_SEARCH_RESULT_RANGES - { - limit_reached = true; - break 'outer; - } - } - } - } - - if limit_reached { - result_tx.send(SearchResult::LimitReached).await?; - } - - anyhow::Ok(()) - }) - .detach(); - - result_rx - } - - pub fn find_search_candidate_buffers( - &mut self, - query: &SearchQuery, - limit: usize, - cx: &mut Context, - ) -> Receiver> { - if self.is_local() { - let fs = self.fs.clone(); - self.buffer_store.update(cx, |buffer_store, cx| { - buffer_store.find_search_candidates(query, limit, fs, cx) - }) - } else { - self.find_search_candidates_remote(query, limit, cx) - } - } - - fn sort_search_candidates( - &mut self, - search_query: &SearchQuery, - cx: &mut Context, - ) -> Receiver> { - let worktree_store = self.worktree_store.read(cx); - let mut buffers = search_query - .buffers() - .into_iter() - .flatten() - .filter(|buffer| { - let b = buffer.read(cx); - if let Some(file) = b.file() { - if !search_query.match_path(file.path().as_std_path()) { - return false; - } - if let Some(entry) = b - .entry_id(cx) - .and_then(|entry_id| worktree_store.entry_for_id(entry_id, cx)) - && entry.is_ignored - && !search_query.include_ignored() - { - return false; - } - } - true - }) - .collect::>(); - let (tx, rx) = smol::channel::unbounded(); - buffers.sort_by(|a, b| match (a.read(cx).file(), b.read(cx).file()) { - (None, None) => a.read(cx).remote_id().cmp(&b.read(cx).remote_id()), - (None, Some(_)) => std::cmp::Ordering::Less, - (Some(_), None) => std::cmp::Ordering::Greater, - (Some(a), Some(b)) => compare_paths( - (a.path().as_std_path(), true), - (b.path().as_std_path(), true), - ), - }); - for buffer in buffers { - tx.send_blocking(buffer.clone()).unwrap() - } - - rx - } - - fn find_search_candidates_remote( - &mut self, - query: &SearchQuery, - limit: usize, - cx: &mut Context, - ) -> Receiver> { - let (tx, rx) = smol::channel::unbounded(); - - let (client, remote_id): (AnyProtoClient, _) = if let Some(ssh_client) = &self.remote_client - { - (ssh_client.read(cx).proto_client(), 0) + fn search_impl(&mut self, query: SearchQuery, cx: &mut Context) -> SearchResultsHandle { + let client: Option<(AnyProtoClient, _)> = if let Some(ssh_client) = &self.remote_client { + Some((ssh_client.read(cx).proto_client(), 0)) } else if let Some(remote_id) = self.remote_id() { - (self.collab_client.clone().into(), remote_id) + self.is_local() + .not() + .then(|| (self.collab_client.clone().into(), remote_id)) } else { - return rx; + None }; - - let request = client.request(proto::FindSearchCandidates { - project_id: remote_id, - query: Some(query.to_proto()), - limit: limit as _, 
- }); - let guard = self.retain_remotely_created_models(cx); - - cx.spawn(async move |project, cx| { - let response = request.await?; - for buffer_id in response.buffer_ids { - let buffer_id = BufferId::new(buffer_id)?; - let buffer = project - .update(cx, |project, cx| { - project.buffer_store.update(cx, |buffer_store, cx| { - buffer_store.wait_for_remote_buffer(buffer_id, cx) - }) - })? - .await?; - let _ = tx.send(buffer).await; + let searcher = if query.is_opened_only() { + project_search::Search::open_buffers_only( + self.buffer_store.clone(), + self.worktree_store.clone(), + project_search::Search::MAX_SEARCH_RESULT_FILES + 1, + ) + } else { + match client { + Some((client, remote_id)) => project_search::Search::remote( + self.buffer_store.clone(), + self.worktree_store.clone(), + project_search::Search::MAX_SEARCH_RESULT_FILES + 1, + (client, remote_id, self.remotely_created_models.clone()), + ), + None => project_search::Search::local( + self.fs.clone(), + self.buffer_store.clone(), + self.worktree_store.clone(), + project_search::Search::MAX_SEARCH_RESULT_FILES + 1, + cx, + ), } + }; + searcher.into_handle(query, cx) + } - drop(guard); - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - rx + pub fn search(&mut self, query: SearchQuery, cx: &mut Context) -> Receiver { + self.search_impl(query, cx).results(cx) } pub fn request_lsp( @@ -4895,18 +4761,31 @@ impl Project { fn retain_remotely_created_models( &mut self, cx: &mut Context, + ) -> RemotelyCreatedModelGuard { + Self::retain_remotely_created_models_impl( + &self.remotely_created_models, + &self.buffer_store, + &self.worktree_store, + cx, + ) + } + + fn retain_remotely_created_models_impl( + models: &Arc>, + buffer_store: &Entity, + worktree_store: &Entity, + cx: &mut App, ) -> RemotelyCreatedModelGuard { { - let mut remotely_create_models = self.remotely_created_models.lock(); + let mut remotely_create_models = models.lock(); if remotely_create_models.retain_count == 0 { - remotely_create_models.buffers = self.buffer_store.read(cx).buffers().collect(); - remotely_create_models.worktrees = - self.worktree_store.read(cx).worktrees().collect(); + remotely_create_models.buffers = buffer_store.read(cx).buffers().collect(); + remotely_create_models.worktrees = worktree_store.read(cx).worktrees().collect(); } remotely_create_models.retain_count += 1; } RemotelyCreatedModelGuard { - remote_models: Arc::downgrade(&self.remotely_created_models), + remote_models: Arc::downgrade(&models), } } @@ -4976,7 +4855,7 @@ impl Project { let query = SearchQuery::from_proto(message.query.context("missing query field")?, path_style)?; let results = this.update(&mut cx, |this, cx| { - this.find_search_candidate_buffers(&query, message.limit as _, cx) + this.search_impl(query, cx).matching_buffers(cx) })?; let mut response = proto::FindSearchCandidatesResponse { diff --git a/crates/project/src/project_search.rs b/crates/project/src/project_search.rs new file mode 100644 index 0000000000000000000000000000000000000000..e638240ebba2be8fbc35b04a0653ab32519497dd --- /dev/null +++ b/crates/project/src/project_search.rs @@ -0,0 +1,790 @@ +use std::{ + io::{BufRead, BufReader}, + ops::Range, + path::Path, + pin::pin, + sync::Arc, +}; + +use anyhow::Context; +use collections::HashSet; +use fs::Fs; +use futures::{SinkExt, StreamExt, select_biased, stream::FuturesOrdered}; +use gpui::{App, AppContext, AsyncApp, Entity, Task}; +use language::{Buffer, BufferSnapshot}; +use parking_lot::Mutex; +use postage::oneshot; +use rpc::{AnyProtoClient, proto}; +use smol::{ + 
channel::{Receiver, Sender, bounded, unbounded}, + future::FutureExt, +}; + +use text::BufferId; +use util::{ResultExt, maybe, paths::compare_rel_paths}; +use worktree::{Entry, ProjectEntryId, Snapshot, Worktree}; + +use crate::{ + Project, ProjectItem, ProjectPath, RemotelyCreatedModels, + buffer_store::BufferStore, + search::{SearchQuery, SearchResult}, + worktree_store::WorktreeStore, +}; + +pub struct Search { + buffer_store: Entity, + worktree_store: Entity, + limit: usize, + kind: SearchKind, +} + +/// Represents search setup, before it is actually kicked off with Search::into_results +enum SearchKind { + /// Search for candidates by inspecting file contents on file system, avoiding loading the buffer unless we know that a given file contains a match. + Local { + fs: Arc, + worktrees: Vec>, + }, + /// Query remote host for candidates. As of writing, the host runs a local search in "buffers with matches only" mode. + Remote { + client: AnyProtoClient, + remote_id: u64, + models: Arc>, + }, + /// Run search against a known set of candidates. Even when working with a remote host, this won't round-trip to host. + OpenBuffersOnly, +} + +/// Represents results of project search and allows one to either obtain match positions OR +/// just the handles to buffers that may match the search. Grabbing the handles is cheaper than obtaining full match positions, because in that case we'll look for +/// at most one match in each file. +#[must_use] +pub struct SearchResultsHandle { + results: Receiver, + matching_buffers: Receiver>, + trigger_search: Box Task<()> + Send + Sync>, +} + +impl SearchResultsHandle { + pub fn results(self, cx: &mut App) -> Receiver { + (self.trigger_search)(cx).detach(); + self.results + } + pub fn matching_buffers(self, cx: &mut App) -> Receiver> { + (self.trigger_search)(cx).detach(); + self.matching_buffers + } +} + +#[derive(Clone)] +enum FindSearchCandidates { + Local { + fs: Arc, + /// Start off with all paths in project and filter them based on: + /// - Include filters + /// - Exclude filters + /// - Only open buffers + /// - Scan ignored files + /// Put another way: filter out files that can't match (without looking at file contents) + input_paths_rx: Receiver, + /// After that, if the buffer is not yet loaded, we'll figure out if it contains at least one match + /// based on disk contents of a buffer. This step is not performed for buffers we already have in memory. + confirm_contents_will_match_tx: Sender, + confirm_contents_will_match_rx: Receiver, + /// Of those that contain at least one match (or are already in memory), look for rest of matches (and figure out their ranges). + /// But wait - first, we need to go back to the main thread to open a buffer (& create an entity for it). 
+ get_buffer_for_full_scan_tx: Sender, + }, + Remote, + OpenBuffersOnly, +} + +impl Search { + pub fn local( + fs: Arc, + buffer_store: Entity, + worktree_store: Entity, + limit: usize, + cx: &mut App, + ) -> Self { + let worktrees = worktree_store.read(cx).visible_worktrees(cx).collect(); + Self { + kind: SearchKind::Local { fs, worktrees }, + buffer_store, + worktree_store, + limit, + } + } + + pub(crate) fn remote( + buffer_store: Entity, + worktree_store: Entity, + limit: usize, + client_state: (AnyProtoClient, u64, Arc>), + ) -> Self { + Self { + kind: SearchKind::Remote { + client: client_state.0, + remote_id: client_state.1, + models: client_state.2, + }, + buffer_store, + worktree_store, + limit, + } + } + pub(crate) fn open_buffers_only( + buffer_store: Entity, + worktree_store: Entity, + limit: usize, + ) -> Self { + Self { + kind: SearchKind::OpenBuffersOnly, + buffer_store, + worktree_store, + limit, + } + } + + pub(crate) const MAX_SEARCH_RESULT_FILES: usize = 5_000; + pub(crate) const MAX_SEARCH_RESULT_RANGES: usize = 10_000; + /// Prepares a project search run. The resulting [`SearchResultsHandle`] has to be used to specify whether you're interested in matching buffers + /// or full search results. + pub fn into_handle(mut self, query: SearchQuery, cx: &mut App) -> SearchResultsHandle { + let mut open_buffers = HashSet::default(); + let mut unnamed_buffers = Vec::new(); + const MAX_CONCURRENT_BUFFER_OPENS: usize = 64; + let buffers = self.buffer_store.read(cx); + for handle in buffers.buffers() { + let buffer = handle.read(cx); + if !buffers.is_searchable(&buffer.remote_id()) { + continue; + } else if let Some(entry_id) = buffer.entry_id(cx) { + open_buffers.insert(entry_id); + } else { + self.limit = self.limit.saturating_sub(1); + unnamed_buffers.push(handle) + }; + } + let executor = cx.background_executor().clone(); + let (tx, rx) = unbounded(); + let (grab_buffer_snapshot_tx, grab_buffer_snapshot_rx) = unbounded(); + let matching_buffers = grab_buffer_snapshot_rx.clone(); + let trigger_search = Box::new(move |cx: &mut App| { + cx.spawn(async move |cx| { + for buffer in unnamed_buffers { + _ = grab_buffer_snapshot_tx.send(buffer).await; + } + + let (find_all_matches_tx, find_all_matches_rx) = + bounded(MAX_CONCURRENT_BUFFER_OPENS); + + let (candidate_searcher, tasks) = match self.kind { + SearchKind::OpenBuffersOnly => { + let Ok(open_buffers) = cx.update(|cx| self.all_loaded_buffers(&query, cx)) + else { + return; + }; + let fill_requests = cx + .background_spawn(async move { + for buffer in open_buffers { + if let Err(_) = grab_buffer_snapshot_tx.send(buffer).await { + return; + } + } + }) + .boxed_local(); + (FindSearchCandidates::OpenBuffersOnly, vec![fill_requests]) + } + SearchKind::Local { + fs, + ref mut worktrees, + } => { + let (get_buffer_for_full_scan_tx, get_buffer_for_full_scan_rx) = + unbounded(); + let (confirm_contents_will_match_tx, confirm_contents_will_match_rx) = + bounded(64); + let (sorted_search_results_tx, sorted_search_results_rx) = unbounded(); + + let (input_paths_tx, input_paths_rx) = unbounded(); + + let tasks = vec![ + cx.spawn(Self::provide_search_paths( + std::mem::take(worktrees), + query.include_ignored(), + input_paths_tx, + sorted_search_results_tx, + )) + .boxed_local(), + Self::open_buffers( + &self.buffer_store, + get_buffer_for_full_scan_rx, + grab_buffer_snapshot_tx, + cx.clone(), + ) + .boxed_local(), + cx.background_spawn(Self::maintain_sorted_search_results( + sorted_search_results_rx, + get_buffer_for_full_scan_tx.clone(), + 
self.limit, + )) + .boxed_local(), + ]; + ( + FindSearchCandidates::Local { + fs, + get_buffer_for_full_scan_tx, + confirm_contents_will_match_tx, + confirm_contents_will_match_rx, + input_paths_rx, + }, + tasks, + ) + } + SearchKind::Remote { + client, + remote_id, + models, + } => { + let request = client.request(proto::FindSearchCandidates { + project_id: remote_id, + query: Some(query.to_proto()), + limit: self.limit as _, + }); + let Ok(guard) = cx.update(|cx| { + Project::retain_remotely_created_models_impl( + &models, + &self.buffer_store, + &self.worktree_store, + cx, + ) + }) else { + return; + }; + let buffer_store = self.buffer_store.downgrade(); + let issue_remote_buffers_request = cx + .spawn(async move |cx| { + let _ = maybe!(async move { + let response = request.await?; + log::error!( + "Received {} match candidates for a project search", + response.buffer_ids.len() + ); + for buffer_id in response.buffer_ids { + let buffer_id = BufferId::new(buffer_id)?; + let buffer = buffer_store + .update(cx, |buffer_store, cx| { + buffer_store.wait_for_remote_buffer(buffer_id, cx) + })? + .await?; + let _ = grab_buffer_snapshot_tx.send(buffer).await; + } + + drop(guard); + anyhow::Ok(()) + }) + .await + .log_err(); + }) + .boxed_local(); + ( + FindSearchCandidates::Remote, + vec![issue_remote_buffers_request], + ) + } + }; + + let should_find_all_matches = !tx.is_closed(); + + let worker_pool = executor.scoped(|scope| { + let num_cpus = executor.num_cpus(); + + assert!(num_cpus > 0); + for _ in 0..executor.num_cpus() - 1 { + let worker = Worker { + query: &query, + open_buffers: &open_buffers, + candidates: candidate_searcher.clone(), + find_all_matches_rx: find_all_matches_rx.clone(), + }; + scope.spawn(worker.run()); + } + + drop(find_all_matches_rx); + drop(candidate_searcher); + }); + + let (sorted_matches_tx, sorted_matches_rx) = unbounded(); + // The caller of `into_handle` decides whether they're interested in all matches (files that matched + all matching ranges) or + // just the files. *They are using the same stream as the guts of the project search do*. + // This means that we cannot grab values off of that stream unless it's strictly needed for making a progress in project search. + // + // Grabbing buffer snapshots is only necessary when we're looking for all matches. If the caller decided that they're not interested + // in all matches, running that task unconditionally would hinder caller's ability to observe all matching file paths. 
+ let buffer_snapshots = if should_find_all_matches { + Some( + Self::grab_buffer_snapshots( + grab_buffer_snapshot_rx, + find_all_matches_tx, + sorted_matches_tx, + cx.clone(), + ) + .boxed_local(), + ) + } else { + drop(find_all_matches_tx); + + None + }; + let ensure_matches_are_reported_in_order = if should_find_all_matches { + Some( + Self::ensure_matched_ranges_are_reported_in_order(sorted_matches_rx, tx) + .boxed_local(), + ) + } else { + drop(tx); + None + }; + + futures::future::join_all( + [worker_pool.boxed_local()] + .into_iter() + .chain(buffer_snapshots) + .chain(ensure_matches_are_reported_in_order) + .chain(tasks), + ) + .await; + }) + }); + + SearchResultsHandle { + results: rx, + matching_buffers, + trigger_search, + } + } + + fn provide_search_paths( + worktrees: Vec>, + include_ignored: bool, + tx: Sender, + results: Sender>, + ) -> impl AsyncFnOnce(&mut AsyncApp) { + async move |cx| { + _ = maybe!(async move { + for worktree in worktrees { + let (mut snapshot, worktree_settings) = worktree + .read_with(cx, |this, _| { + Some((this.snapshot(), this.as_local()?.settings())) + })? + .context("The worktree is not local")?; + if include_ignored { + // Pre-fetch all of the ignored directories as they're going to be searched. + let mut entries_to_refresh = vec![]; + for entry in snapshot.entries(include_ignored, 0) { + if entry.is_ignored && entry.kind.is_unloaded() { + if !worktree_settings.is_path_excluded(&entry.path) { + entries_to_refresh.push(entry.path.clone()); + } + } + } + let barrier = worktree.update(cx, |this, _| { + let local = this.as_local_mut()?; + let barrier = entries_to_refresh + .into_iter() + .map(|path| local.add_path_prefix_to_scan(path).into_future()) + .collect::>(); + Some(barrier) + })?; + if let Some(barriers) = barrier { + futures::future::join_all(barriers).await; + } + snapshot = worktree.read_with(cx, |this, _| this.snapshot())?; + } + cx.background_executor() + .scoped(|scope| { + scope.spawn(async { + for entry in snapshot.files(include_ignored, 0) { + let (should_scan_tx, should_scan_rx) = oneshot::channel(); + let Ok(_) = tx + .send(InputPath { + entry: entry.clone(), + snapshot: snapshot.clone(), + should_scan_tx, + }) + .await + else { + return; + }; + if results.send(should_scan_rx).await.is_err() { + return; + }; + } + }) + }) + .await; + } + anyhow::Ok(()) + }) + .await; + } + } + + async fn maintain_sorted_search_results( + rx: Receiver>, + paths_for_full_scan: Sender, + limit: usize, + ) { + let mut rx = pin!(rx); + let mut matched = 0; + while let Some(mut next_path_result) = rx.next().await { + let Some(successful_path) = next_path_result.next().await else { + // This file did not produce a match, hence skip it. + continue; + }; + if paths_for_full_scan.send(successful_path).await.is_err() { + return; + }; + matched += 1; + if matched >= limit { + break; + } + } + } + + /// Background workers cannot open buffers by themselves, hence main thread will do it on their behalf. 
+ async fn open_buffers( + buffer_store: &Entity, + rx: Receiver, + find_all_matches_tx: Sender>, + mut cx: AsyncApp, + ) { + let mut rx = pin!(rx.ready_chunks(64)); + _ = maybe!(async move { + while let Some(requested_paths) = rx.next().await { + let mut buffers = buffer_store.update(&mut cx, |this, cx| { + requested_paths + .into_iter() + .map(|path| this.open_buffer(path, cx)) + .collect::>() + })?; + + while let Some(buffer) = buffers.next().await { + if let Some(buffer) = buffer.log_err() { + find_all_matches_tx.send(buffer).await?; + } + } + } + Result::<_, anyhow::Error>::Ok(()) + }) + .await; + } + + async fn grab_buffer_snapshots( + rx: Receiver>, + find_all_matches_tx: Sender<( + Entity, + BufferSnapshot, + oneshot::Sender<(Entity, Vec>)>, + )>, + results: Sender, Vec>)>>, + mut cx: AsyncApp, + ) { + _ = maybe!(async move { + while let Ok(buffer) = rx.recv().await { + let snapshot = buffer.read_with(&mut cx, |this, _| this.snapshot())?; + let (tx, rx) = oneshot::channel(); + find_all_matches_tx.send((buffer, snapshot, tx)).await?; + results.send(rx).await?; + } + debug_assert!(rx.is_empty()); + Result::<_, anyhow::Error>::Ok(()) + }) + .await; + } + + async fn ensure_matched_ranges_are_reported_in_order( + rx: Receiver, Vec>)>>, + tx: Sender, + ) { + use postage::stream::Stream; + _ = maybe!(async move { + let mut matched_buffers = 0; + let mut matches = 0; + while let Ok(mut next_buffer_matches) = rx.recv().await { + let Some((buffer, ranges)) = next_buffer_matches.recv().await else { + continue; + }; + + if matched_buffers > Search::MAX_SEARCH_RESULT_FILES + || matches > Search::MAX_SEARCH_RESULT_RANGES + { + _ = tx.send(SearchResult::LimitReached).await; + break; + } + matched_buffers += 1; + matches += ranges.len(); + + _ = tx.send(SearchResult::Buffer { buffer, ranges }).await?; + } + anyhow::Ok(()) + }) + .await; + } + + fn all_loaded_buffers(&self, search_query: &SearchQuery, cx: &App) -> Vec> { + let worktree_store = self.worktree_store.read(cx); + let mut buffers = search_query + .buffers() + .into_iter() + .flatten() + .filter(|buffer| { + let b = buffer.read(cx); + if let Some(file) = b.file() { + if !search_query.match_path(file.path().as_std_path()) { + return false; + } + if !search_query.include_ignored() + && let Some(entry) = b + .entry_id(cx) + .and_then(|entry_id| worktree_store.entry_for_id(entry_id, cx)) + && entry.is_ignored + { + return false; + } + } + true + }) + .cloned() + .collect::>(); + buffers.sort_by(|a, b| { + let a = a.read(cx); + let b = b.read(cx); + match (a.file(), b.file()) { + (None, None) => a.remote_id().cmp(&b.remote_id()), + (None, Some(_)) => std::cmp::Ordering::Less, + (Some(_), None) => std::cmp::Ordering::Greater, + (Some(a), Some(b)) => compare_rel_paths((a.path(), true), (b.path(), true)), + } + }); + + buffers + } +} + +struct Worker<'search> { + query: &'search SearchQuery, + open_buffers: &'search HashSet, + candidates: FindSearchCandidates, + /// Ok, we're back in background: run full scan & find all matches in a given buffer snapshot. + /// Then, when you're done, share them via the channel you were given. 
+ find_all_matches_rx: Receiver<( + Entity, + BufferSnapshot, + oneshot::Sender<(Entity, Vec>)>, + )>, +} + +impl Worker<'_> { + async fn run(self) { + let ( + input_paths_rx, + confirm_contents_will_match_rx, + mut confirm_contents_will_match_tx, + mut get_buffer_for_full_scan_tx, + fs, + ) = match self.candidates { + FindSearchCandidates::Local { + fs, + input_paths_rx, + confirm_contents_will_match_rx, + confirm_contents_will_match_tx, + get_buffer_for_full_scan_tx, + } => ( + input_paths_rx, + confirm_contents_will_match_rx, + confirm_contents_will_match_tx, + get_buffer_for_full_scan_tx, + Some(fs), + ), + FindSearchCandidates::Remote | FindSearchCandidates::OpenBuffersOnly => ( + unbounded().1, + unbounded().1, + unbounded().0, + unbounded().0, + None, + ), + }; + // WorkerA: grabs a request for "find all matches in file/a" <- takes 5 minutes + // right after: WorkerB: grabs a request for "find all matches in file/b" <- takes 5 seconds + let mut find_all_matches = pin!(self.find_all_matches_rx.fuse()); + let mut find_first_match = pin!(confirm_contents_will_match_rx.fuse()); + let mut scan_path = pin!(input_paths_rx.fuse()); + + loop { + let handler = RequestHandler { + query: self.query, + open_entries: &self.open_buffers, + fs: fs.as_deref(), + confirm_contents_will_match_tx: &confirm_contents_will_match_tx, + get_buffer_for_full_scan_tx: &get_buffer_for_full_scan_tx, + }; + // Whenever we notice that some step of a pipeline is closed, we don't want to close subsequent + // steps straight away. Another worker might be about to produce a value that will + // be pushed there, thus we'll replace current worker's pipe with a dummy one. + // That way, we'll only ever close a next-stage channel when ALL workers do so. + select_biased! { + find_all_matches = find_all_matches.next() => { + let Some(matches) = find_all_matches else { + continue; + }; + handler.handle_find_all_matches(matches).await; + }, + find_first_match = find_first_match.next() => { + if let Some(buffer_with_at_least_one_match) = find_first_match { + handler.handle_find_first_match(buffer_with_at_least_one_match).await; + } else { + get_buffer_for_full_scan_tx = bounded(1).0; + } + + }, + scan_path = scan_path.next() => { + if let Some(path_to_scan) = scan_path { + handler.handle_scan_path(path_to_scan).await; + } else { + // If we're the last worker to notice that this is not producing values, close the upstream. 
+ confirm_contents_will_match_tx = bounded(1).0; + } + + } + complete => { + break + }, + + } + } + } +} + +struct RequestHandler<'worker> { + query: &'worker SearchQuery, + fs: Option<&'worker dyn Fs>, + open_entries: &'worker HashSet, + confirm_contents_will_match_tx: &'worker Sender, + get_buffer_for_full_scan_tx: &'worker Sender, +} + +impl RequestHandler<'_> { + async fn handle_find_all_matches( + &self, + (buffer, snapshot, mut report_matches): ( + Entity, + BufferSnapshot, + oneshot::Sender<(Entity, Vec>)>, + ), + ) { + let ranges = self + .query + .search(&snapshot, None) + .await + .iter() + .map(|range| snapshot.anchor_before(range.start)..snapshot.anchor_after(range.end)) + .collect::>(); + + _ = report_matches.send((buffer, ranges)).await; + } + + async fn handle_find_first_match(&self, mut entry: MatchingEntry) { + _=maybe!(async move { + let abs_path = entry.worktree_root.join(entry.path.path.as_std_path()); + let Some(file) = self.fs.context("Trying to query filesystem in remote project search")?.open_sync(&abs_path).await.log_err() else { + return anyhow::Ok(()); + }; + + let mut file = BufReader::new(file); + let file_start = file.fill_buf()?; + + if let Err(Some(starting_position)) = + std::str::from_utf8(file_start).map_err(|e| e.error_len()) + { + // Before attempting to match the file content, throw away files that have invalid UTF-8 sequences early on; + // That way we can still match files in a streaming fashion without having look at "obviously binary" files. + log::debug!( + "Invalid UTF-8 sequence in file {abs_path:?} at byte position {starting_position}" + ); + return Ok(()); + } + + if self.query.detect(file).unwrap_or(false) { + // Yes, we should scan the whole file. + entry.should_scan_tx.send(entry.path).await?; + } + Ok(()) + }).await; + } + + async fn handle_scan_path(&self, req: InputPath) { + _ = maybe!(async move { + let InputPath { + entry, + + snapshot, + should_scan_tx, + } = req; + + if entry.is_fifo || !entry.is_file() { + return Ok(()); + } + + if self.query.filters_path() { + let matched_path = if self.query.match_full_paths() { + let mut full_path = snapshot.root_name().as_std_path().to_owned(); + full_path.push(entry.path.as_std_path()); + self.query.match_path(&full_path) + } else { + self.query.match_path(entry.path.as_std_path()) + }; + if !matched_path { + return Ok(()); + } + } + + if self.open_entries.contains(&entry.id) { + // The buffer is already in memory and that's the version we want to scan; + // hence skip the dilly-dally and look for all matches straight away. 
+ self.get_buffer_for_full_scan_tx + .send(ProjectPath { + worktree_id: snapshot.id(), + path: entry.path.clone(), + }) + .await?; + } else { + self.confirm_contents_will_match_tx + .send(MatchingEntry { + should_scan_tx: should_scan_tx, + worktree_root: snapshot.abs_path().clone(), + path: ProjectPath { + worktree_id: snapshot.id(), + path: entry.path.clone(), + }, + }) + .await?; + } + + anyhow::Ok(()) + }) + .await; + } +} + +struct InputPath { + entry: Entry, + snapshot: Snapshot, + should_scan_tx: oneshot::Sender, +} + +struct MatchingEntry { + worktree_root: Arc, + path: ProjectPath, + should_scan_tx: oneshot::Sender, +} diff --git a/crates/project/src/worktree_store.rs b/crates/project/src/worktree_store.rs index e6da207dadbde3ebc725fbb84ed19b3b35414f87..676c96f4331d73b87d4bc16766a5f6c4d6194864 100644 --- a/crates/project/src/worktree_store.rs +++ b/crates/project/src/worktree_store.rs @@ -8,10 +8,7 @@ use std::{ use anyhow::{Context as _, Result, anyhow, bail}; use collections::{HashMap, HashSet}; use fs::{Fs, copy_recursive}; -use futures::{ - FutureExt, SinkExt, - future::{BoxFuture, Shared}, -}; +use futures::{FutureExt, SinkExt, future::Shared}; use gpui::{ App, AppContext as _, AsyncApp, Context, Entity, EntityId, EventEmitter, Task, WeakEntity, }; @@ -999,148 +996,14 @@ impl WorktreeStore { matching_paths_rx } - fn scan_ignored_dir<'a>( - fs: &'a Arc, - snapshot: &'a worktree::Snapshot, - path: &'a RelPath, - query: &'a SearchQuery, - filter_tx: &'a Sender, - output_tx: &'a Sender>, - ) -> BoxFuture<'a, Result<()>> { - async move { - let abs_path = snapshot.absolutize(path); - let Some(mut files) = fs - .read_dir(&abs_path) - .await - .with_context(|| format!("listing ignored path {abs_path:?}")) - .log_err() - else { - return Ok(()); - }; - - let mut results = Vec::new(); - - while let Some(Ok(file)) = files.next().await { - let Some(metadata) = fs - .metadata(&file) - .await - .with_context(|| format!("fetching fs metadata for {abs_path:?}")) - .log_err() - .flatten() - else { - continue; - }; - if metadata.is_symlink || metadata.is_fifo { - continue; - } - let relative_path = file.strip_prefix(snapshot.abs_path())?; - let relative_path = RelPath::new(&relative_path, snapshot.path_style()) - .context("getting relative path")?; - results.push((relative_path.into_arc(), !metadata.is_dir)) - } - results.sort_by(|(a_path, _), (b_path, _)| a_path.cmp(b_path)); - for (path, is_file) in results { - if is_file { - if query.filters_path() { - let matched_path = if query.match_full_paths() { - let mut full_path = snapshot.root_name().as_std_path().to_owned(); - full_path.push(path.as_std_path()); - query.match_path(&full_path) - } else { - query.match_path(&path.as_std_path()) - }; - if !matched_path { - continue; - } - } - let (tx, rx) = oneshot::channel(); - output_tx.send(rx).await?; - filter_tx - .send(MatchingEntry { - respond: tx, - worktree_root: snapshot.abs_path().clone(), - path: ProjectPath { - worktree_id: snapshot.id(), - path: path.into_arc(), - }, - }) - .await?; - } else { - Self::scan_ignored_dir(fs, snapshot, &path, query, filter_tx, output_tx) - .await?; - } - } - Ok(()) - } - .boxed() - } - async fn find_candidate_paths( - fs: Arc, - snapshots: Vec<(worktree::Snapshot, WorktreeSettings)>, - open_entries: HashSet, - query: SearchQuery, - filter_tx: Sender, - output_tx: Sender>, + _: Arc, + _: Vec<(worktree::Snapshot, WorktreeSettings)>, + _: HashSet, + _: SearchQuery, + _: Sender, + _: Sender>, ) -> Result<()> { - for (snapshot, settings) in snapshots { - for entry 
in snapshot.entries(query.include_ignored(), 0) { - if entry.is_dir() && entry.is_ignored { - if !settings.is_path_excluded(&entry.path) { - Self::scan_ignored_dir( - &fs, - &snapshot, - &entry.path, - &query, - &filter_tx, - &output_tx, - ) - .await?; - } - continue; - } - - if entry.is_fifo || !entry.is_file() { - continue; - } - - if query.filters_path() { - let matched_path = if query.match_full_paths() { - let mut full_path = snapshot.root_name().as_std_path().to_owned(); - full_path.push(entry.path.as_std_path()); - query.match_path(&full_path) - } else { - query.match_path(entry.path.as_std_path()) - }; - if !matched_path { - continue; - } - } - - let (mut tx, rx) = oneshot::channel(); - - if open_entries.contains(&entry.id) { - tx.send(ProjectPath { - worktree_id: snapshot.id(), - path: entry.path.clone(), - }) - .await?; - } else { - filter_tx - .send(MatchingEntry { - respond: tx, - worktree_root: snapshot.abs_path().clone(), - path: ProjectPath { - worktree_id: snapshot.id(), - path: entry.path.clone(), - }, - }) - .await?; - } - - output_tx.send(rx).await?; - } - } Ok(()) } diff --git a/crates/project_benchmarks/Cargo.toml b/crates/project_benchmarks/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..1171d468c649bdd9f76a44b3ef0155dc652c6034 --- /dev/null +++ b/crates/project_benchmarks/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "project_benchmarks" +version = "0.1.0" +publish.workspace = true +edition.workspace = true + +[dependencies] +anyhow.workspace = true +clap.workspace = true +client.workspace = true +futures.workspace = true +gpui = { workspace = true, features = ["windows-manifest"] } +http_client = { workspace = true, features = ["test-support"]} +language.workspace = true +node_runtime.workspace = true +project.workspace = true +settings.workspace = true +watch.workspace = true + +[lints] +workspace = true diff --git a/crates/project_benchmarks/LICENSE-GPL b/crates/project_benchmarks/LICENSE-GPL new file mode 120000 index 0000000000000000000000000000000000000000..89e542f750cd3860a0598eff0dc34b56d7336dc4 --- /dev/null +++ b/crates/project_benchmarks/LICENSE-GPL @@ -0,0 +1 @@ +../../LICENSE-GPL \ No newline at end of file diff --git a/crates/project_benchmarks/src/main.rs b/crates/project_benchmarks/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..738d0d0f2240f566f77f98a07df4a9ac587e10b4 --- /dev/null +++ b/crates/project_benchmarks/src/main.rs @@ -0,0 +1,135 @@ +use std::sync::Arc; + +use clap::Parser; +use client::{Client, UserStore}; +use gpui::{AppContext as _, Application}; +use http_client::FakeHttpClient; +use language::LanguageRegistry; +use node_runtime::NodeRuntime; +use project::{ + Project, RealFs, + search::{SearchQuery, SearchResult}, +}; + +#[derive(Parser)] +struct Args { + /// List of worktrees to run the search against. + worktrees: Vec, + #[clap(short)] + query: String, + /// Treat query as a regex. + #[clap(short, long)] + regex: bool, + /// Matches have to be standalone words. + #[clap(long)] + whole_word: bool, + /// Make matching case-sensitive. + #[clap(long, default_value_t = false)] + case_sensitive: bool, + /// Include gitignored files in the search. 
+ #[clap(long)] + include_ignored: bool, +} + +fn main() -> Result<(), anyhow::Error> { + let args = Args::parse(); + let query = if args.regex { + SearchQuery::regex( + args.query, + args.whole_word, + args.case_sensitive, + args.include_ignored, + false, + Default::default(), + Default::default(), + false, + None, + ) + } else { + SearchQuery::text( + args.query, + args.whole_word, + args.case_sensitive, + args.include_ignored, + Default::default(), + Default::default(), + false, + None, + ) + }?; + Application::headless().run(|cx| { + settings::init(cx); + let client = Client::production(cx); + let http_client = FakeHttpClient::with_200_response(); + let (_, rx) = watch::channel(None); + let node = NodeRuntime::new(http_client, None, rx); + let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); + let registry = Arc::new(LanguageRegistry::new(cx.background_executor().clone())); + let fs = Arc::new(RealFs::new(None, cx.background_executor().clone())); + let project = Project::local( + client, + node, + user_store, + registry, + fs, + Some(Default::default()), + cx, + ); + + project.clone().update(cx, move |_, cx| { + cx.spawn(async move |_, cx| { + println!("Loading worktrees"); + let worktrees = project.update(cx, |this, cx| { + args.worktrees + .into_iter() + .map(|worktree| this.find_or_create_worktree(worktree, true, cx)) + .collect::>() + })?; + + let worktrees = futures::future::join_all(worktrees) + .await + .into_iter() + .collect::, anyhow::Error>>()?; + + for (worktree, _) in &worktrees { + worktree + .update(cx, |this, _| this.as_local().unwrap().scan_complete())? + .await; + } + println!("Worktrees loaded"); + + println!("Starting a project search"); + let timer = std::time::Instant::now(); + let mut first_match = None; + let matches = project + .update(cx, |this, cx| this.search(query, cx)) + .unwrap(); + let mut matched_files = 0; + let mut matched_chunks = 0; + while let Ok(match_result) = matches.recv().await { + if first_match.is_none() { + let time = timer.elapsed(); + first_match = Some(time); + println!("First match found after {time:?}"); + } + if let SearchResult::Buffer { ranges, .. } = match_result { + matched_files += 1; + matched_chunks += ranges.len(); + } else { + break; + } + } + let elapsed = timer.elapsed(); + println!( + "Finished project search after {elapsed:?}. 
Matched {matched_files} files and {matched_chunks} excerpts" + ); + drop(project); + cx.update(|cx| cx.quit())?; + + anyhow::Ok(()) + }) + .detach(); + }); + }); + Ok(()) +} diff --git a/crates/remote_server/Cargo.toml b/crates/remote_server/Cargo.toml index f03851b9558d85514adde2afc10ad3f4cee77863..50e9fd73cb7d1a9b7eeb6b2bf5bf77320fa7a169 100644 --- a/crates/remote_server/Cargo.toml +++ b/crates/remote_server/Cargo.toml @@ -77,7 +77,7 @@ minidumper.workspace = true [dev-dependencies] action_log.workspace = true -agent.workspace = true +agent = { workspace = true, features = ["test-support"] } client = { workspace = true, features = ["test-support"] } clock = { workspace = true, features = ["test-support"] } collections.workspace = true diff --git a/crates/remote_server/src/headless_project.rs b/crates/remote_server/src/headless_project.rs index 31d55b50eb04d0d0955af34873a432b3e5304fcf..6d64202a038145fc32dc5e5896484e23f03dacef 100644 --- a/crates/remote_server/src/headless_project.rs +++ b/crates/remote_server/src/headless_project.rs @@ -710,9 +710,15 @@ impl HeadlessProject { PathStyle::local(), )?; let results = this.update(&mut cx, |this, cx| { - this.buffer_store.update(cx, |buffer_store, cx| { - buffer_store.find_search_candidates(&query, message.limit as _, this.fs.clone(), cx) - }) + project::Search::local( + this.fs.clone(), + this.buffer_store.clone(), + this.worktree_store.clone(), + message.limit as _, + cx, + ) + .into_handle(query, cx) + .matching_buffers(cx) })?; let mut response = proto::FindSearchCandidatesResponse { From f318bb5fd7790fe926bb8b972256037b7a90924d Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Thu, 13 Nov 2025 15:12:17 +0100 Subject: [PATCH 0087/1030] markdown: Add support for `HTML` href elements (#42265) This PR adds support for `HTML` href elements. It also refactored the way we stored the regions, this was done because otherwise I had to add 2 extra arguments to each `HTML` parser method. It's now also more inline with how we have done it for the highlights. **Small note**: the markdown parser only supports HTML href tags inside a paragraph tag. So adding them as a root node will result in just showing the inner text. This is a limitation of the markdown parser we use itself. **Before** Screenshot 2025-11-08 at 15 40 28 **After** Screenshot 2025-11-08 at 15 29 55 **Example code** ```markdown

<p>
asd <a href="https://example.com">Link Text</a> more text
</p>

<a href="https://example.com">Link Text</a>

[Duck Duck Go](https://duckduckgo.com) ``` **TODO**: - [x] Add tests cc @bennetbo Release Notes: - Markdown Preview: Add support for `HTML` href elements. --------- Co-authored-by: Bennet Bo Fenner --- .../markdown_preview/src/markdown_elements.rs | 4 +- .../markdown_preview/src/markdown_parser.rs | 208 ++++++++++++------ .../markdown_preview/src/markdown_renderer.rs | 47 ++-- 3 files changed, 169 insertions(+), 90 deletions(-) diff --git a/crates/markdown_preview/src/markdown_elements.rs b/crates/markdown_preview/src/markdown_elements.rs index 0a5e138e432cc66ddb0cb2a7231cffd2fd54a074..23e0a69b6addef4a963b81a67da198a7e2e1796f 100644 --- a/crates/markdown_preview/src/markdown_elements.rs +++ b/crates/markdown_preview/src/markdown_elements.rs @@ -171,10 +171,8 @@ pub struct ParsedMarkdownText { pub contents: SharedString, /// The list of highlights contained in the Markdown document. pub highlights: Vec<(Range, MarkdownHighlight)>, - /// The regions of the various ranges in the Markdown document. - pub region_ranges: Vec>, /// The regions of the Markdown document. - pub regions: Vec, + pub regions: Vec<(Range, ParsedRegion)>, } /// A run of highlighted Markdown text. diff --git a/crates/markdown_preview/src/markdown_parser.rs b/crates/markdown_preview/src/markdown_parser.rs index e76f5182b047c9079750aa2eab53d83a48e139e6..7b3886d10f5c8977f8766bddc39fb81f6d8f316f 100644 --- a/crates/markdown_preview/src/markdown_parser.rs +++ b/crates/markdown_preview/src/markdown_parser.rs @@ -245,8 +245,7 @@ impl<'a> MarkdownParser<'a> { let mut strikethrough_depth = 0; let mut link: Option = None; let mut image: Option = None; - let mut region_ranges: Vec> = vec![]; - let mut regions: Vec = vec![]; + let mut regions: Vec<(Range, ParsedRegion)> = vec![]; let mut highlights: Vec<(Range, MarkdownHighlight)> = vec![]; let mut link_urls: Vec = vec![]; let mut link_ranges: Vec> = vec![]; @@ -291,11 +290,13 @@ impl<'a> MarkdownParser<'a> { } let last_run_len = if let Some(link) = link.clone() { - region_ranges.push(prev_len..text.len()); - regions.push(ParsedRegion { - code: false, - link: Some(link), - }); + regions.push(( + prev_len..text.len(), + ParsedRegion { + code: false, + link: Some(link), + }, + )); style.link = true; prev_len } else { @@ -325,13 +326,16 @@ impl<'a> MarkdownParser<'a> { ..style }), )); - region_ranges.push(range.clone()); - regions.push(ParsedRegion { - code: false, - link: Some(Link::Web { - url: link.as_str().to_string(), - }), - }); + + regions.push(( + range.clone(), + ParsedRegion { + code: false, + link: Some(Link::Web { + url: link.as_str().to_string(), + }), + }, + )); last_link_len = end; } last_link_len @@ -356,21 +360,24 @@ impl<'a> MarkdownParser<'a> { } Event::Code(t) => { text.push_str(t.as_ref()); - region_ranges.push(prev_len..text.len()); + let range = prev_len..text.len(); if link.is_some() { highlights.push(( - prev_len..text.len(), + range.clone(), MarkdownHighlight::Style(MarkdownHighlightStyle { link: true, ..Default::default() }), )); } - regions.push(ParsedRegion { - code: true, - link: link.clone(), - }); + regions.push(( + range, + ParsedRegion { + code: true, + link: link.clone(), + }, + )); } Event::Start(tag) => match tag { Tag::Emphasis => italic_depth += 1, @@ -388,7 +395,6 @@ impl<'a> MarkdownParser<'a> { source_range: source_range.clone(), contents: mem::take(&mut text).into(), highlights: mem::take(&mut highlights), - region_ranges: mem::take(&mut region_ranges), regions: mem::take(&mut regions), }); markdown_text_like.push(parsed_regions); @@ -416,7 
+422,6 @@ impl<'a> MarkdownParser<'a> { if !text.is_empty() { image.set_alt_text(std::mem::take(&mut text).into()); mem::take(&mut highlights); - mem::take(&mut region_ranges); mem::take(&mut regions); } markdown_text_like.push(MarkdownParagraphChunk::Image(image)); @@ -443,7 +448,6 @@ impl<'a> MarkdownParser<'a> { contents: text.into(), highlights, regions, - region_ranges, })); } markdown_text_like @@ -869,7 +873,6 @@ impl<'a> MarkdownParser<'a> { MarkdownParagraphChunk::Text(ParsedMarkdownText { source_range, regions: Vec::default(), - region_ranges: Vec::default(), highlights: Vec::default(), contents: contents.borrow().to_string().into(), }), @@ -891,7 +894,13 @@ impl<'a> MarkdownParser<'a> { } } else if local_name!("p") == name.local { let mut paragraph = MarkdownParagraph::new(); - self.parse_paragraph(source_range, node, &mut paragraph, &mut styles); + self.parse_paragraph( + source_range, + node, + &mut paragraph, + &mut styles, + &mut Vec::new(), + ); if !paragraph.is_empty() { elements.push(ParsedMarkdownElement::Paragraph(paragraph)); @@ -906,7 +915,13 @@ impl<'a> MarkdownParser<'a> { | local_name!("h6") ) { let mut paragraph = MarkdownParagraph::new(); - self.consume_paragraph(source_range.clone(), node, &mut paragraph, &mut styles); + self.consume_paragraph( + source_range.clone(), + node, + &mut paragraph, + &mut styles, + &mut Vec::new(), + ); if !paragraph.is_empty() { elements.push(ParsedMarkdownElement::Heading(ParsedMarkdownHeading { @@ -954,15 +969,15 @@ impl<'a> MarkdownParser<'a> { node: &Rc, paragraph: &mut MarkdownParagraph, highlights: &mut Vec, + regions: &mut Vec<(Range, ParsedRegion)>, ) { - fn add_highlight_range( - text: &String, - start: usize, - highlights: Vec, - ) -> Vec<(Range, MarkdownHighlight)> { - highlights + fn items_with_range( + range: Range, + items: impl IntoIterator, + ) -> Vec<(Range, T)> { + items .into_iter() - .map(|style| (start..text.len(), style)) + .map(|item| (range.clone(), item)) .collect() } @@ -976,22 +991,30 @@ impl<'a> MarkdownParser<'a> { }) { let mut new_text = text.contents.to_string(); new_text.push_str(&contents.borrow()); - let highlights = add_highlight_range( - &new_text, - text.contents.len(), - std::mem::take(highlights), - ); + text.highlights.extend(items_with_range( + text.contents.len()..new_text.len(), + std::mem::take(highlights), + )); + text.regions.extend(items_with_range( + text.contents.len()..new_text.len(), + std::mem::take(regions) + .into_iter() + .map(|(_, region)| region), + )); text.contents = SharedString::from(new_text); - text.highlights.extend(highlights); } else { let contents = contents.borrow().to_string(); paragraph.push(MarkdownParagraphChunk::Text(ParsedMarkdownText { source_range, - highlights: add_highlight_range(&contents, 0, std::mem::take(highlights)), - regions: Vec::default(), + highlights: items_with_range(0..contents.len(), std::mem::take(highlights)), + regions: items_with_range( + 0..contents.len(), + std::mem::take(regions) + .into_iter() + .map(|(_, region)| region), + ), contents: contents.into(), - region_ranges: Vec::default(), })); } } @@ -1006,37 +1029,57 @@ impl<'a> MarkdownParser<'a> { ..Default::default() })); - self.consume_paragraph(source_range, node, paragraph, highlights); + self.consume_paragraph(source_range, node, paragraph, highlights, regions); } else if local_name!("i") == name.local { highlights.push(MarkdownHighlight::Style(MarkdownHighlightStyle { italic: true, ..Default::default() })); - self.consume_paragraph(source_range, node, paragraph, highlights); + 
self.consume_paragraph(source_range, node, paragraph, highlights, regions); } else if local_name!("em") == name.local { highlights.push(MarkdownHighlight::Style(MarkdownHighlightStyle { oblique: true, ..Default::default() })); - self.consume_paragraph(source_range, node, paragraph, highlights); + self.consume_paragraph(source_range, node, paragraph, highlights, regions); } else if local_name!("del") == name.local { highlights.push(MarkdownHighlight::Style(MarkdownHighlightStyle { strikethrough: true, ..Default::default() })); - self.consume_paragraph(source_range, node, paragraph, highlights); + self.consume_paragraph(source_range, node, paragraph, highlights, regions); } else if local_name!("ins") == name.local { highlights.push(MarkdownHighlight::Style(MarkdownHighlightStyle { underline: true, ..Default::default() })); - self.consume_paragraph(source_range, node, paragraph, highlights); + self.consume_paragraph(source_range, node, paragraph, highlights, regions); + } else if local_name!("a") == name.local { + if let Some(url) = Self::attr_value(attrs, local_name!("href")) + && let Some(link) = + Link::identify(self.file_location_directory.clone(), url) + { + highlights.push(MarkdownHighlight::Style(MarkdownHighlightStyle { + link: true, + ..Default::default() + })); + + regions.push(( + source_range.clone(), + ParsedRegion { + code: false, + link: Some(link), + }, + )); + } + + self.consume_paragraph(source_range, node, paragraph, highlights, regions); } else { - self.consume_paragraph(source_range, node, paragraph, highlights); + self.consume_paragraph(source_range, node, paragraph, highlights, regions); } } _ => {} @@ -1049,9 +1092,10 @@ impl<'a> MarkdownParser<'a> { node: &Rc, paragraph: &mut MarkdownParagraph, highlights: &mut Vec, + regions: &mut Vec<(Range, ParsedRegion)>, ) { for node in node.children.borrow().iter() { - self.parse_paragraph(source_range.clone(), node, paragraph, highlights); + self.parse_paragraph(source_range.clone(), node, paragraph, highlights, regions); } } @@ -1096,7 +1140,13 @@ impl<'a> MarkdownParser<'a> { } let mut children = MarkdownParagraph::new(); - self.consume_paragraph(source_range, node, &mut children, &mut Vec::new()); + self.consume_paragraph( + source_range, + node, + &mut children, + &mut Vec::new(), + &mut Vec::new(), + ); let is_header = matches!(name.local, local_name!("th")); @@ -1374,6 +1424,7 @@ impl<'a> MarkdownParser<'a> { node, &mut paragraph, &mut Vec::new(), + &mut Vec::new(), ); caption = Some(paragraph); } @@ -1494,7 +1545,6 @@ mod tests { source_range: 0..35, contents: "Some bostrikethroughld text".into(), highlights: Vec::new(), - region_ranges: Vec::new(), regions: Vec::new(), } )]) @@ -1618,6 +1668,51 @@ mod tests { ); } + #[gpui::test] + async fn test_html_href_element() { + let parsed = + parse("

<p>Some text <a href=\"https://example.com\">link</a> more text</p>
").await; + + assert_eq!(1, parsed.children.len()); + let chunks = if let ParsedMarkdownElement::Paragraph(chunks) = &parsed.children[0] { + chunks + } else { + panic!("Expected a paragraph"); + }; + + assert_eq!(1, chunks.len()); + let text = if let MarkdownParagraphChunk::Text(text) = &chunks[0] { + text + } else { + panic!("Expected a paragraph"); + }; + + assert_eq!(0..65, text.source_range); + assert_eq!("Some text link more text", text.contents.as_str(),); + assert_eq!( + vec![( + 10..14, + MarkdownHighlight::Style(MarkdownHighlightStyle { + link: true, + ..Default::default() + },), + )], + text.highlights + ); + assert_eq!( + vec![( + 10..14, + ParsedRegion { + code: false, + link: Some(Link::Web { + url: "https://example.com".into() + }) + } + )], + text.regions + ) + } + #[gpui::test] async fn test_text_with_inline_html() { let parsed = parse("This is a paragraph with an inline HTML tag.").await; @@ -1768,7 +1863,6 @@ mod tests { source_range: 0..81, contents: " Lorem Ipsum ".into(), highlights: Vec::new(), - region_ranges: Vec::new(), regions: Vec::new(), }), MarkdownParagraphChunk::Image(Image { @@ -2029,7 +2123,6 @@ mod tests { source_range: 0..71, contents: "Some text".into(), highlights: Default::default(), - region_ranges: Default::default(), regions: Default::default() }), MarkdownParagraphChunk::Image(Image { @@ -2045,7 +2138,6 @@ mod tests { source_range: 0..71, contents: " some more text".into(), highlights: Default::default(), - region_ranges: Default::default(), regions: Default::default() }), ])] @@ -2221,7 +2313,6 @@ mod tests { source_range: 0..280, contents: "My Table".into(), highlights: Default::default(), - region_ranges: Default::default(), regions: Default::default() })]), vec![], @@ -2385,7 +2476,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -2396,7 +2486,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -2407,7 +2496,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -2418,7 +2506,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -2429,7 +2516,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -2440,7 +2526,6 @@ mod tests { source_range: 0..96, contents: "Heading".into(), highlights: Vec::default(), - region_ranges: Vec::default(), regions: Vec::default() })], }), @@ -3040,7 +3125,6 @@ fn main() { fn text(contents: &str, source_range: Range) -> MarkdownParagraph { vec![MarkdownParagraphChunk::Text(ParsedMarkdownText { highlights: Vec::new(), - region_ranges: Vec::new(), regions: Vec::new(), source_range, contents: contents.to_string().into(), diff --git a/crates/markdown_preview/src/markdown_renderer.rs b/crates/markdown_preview/src/markdown_renderer.rs index 4a8c69e997f3db8881c0d47cb2e62d8edbeda526..b229705692c0fade2b35b4dd9f66a27e2aba57bc 100644 --- a/crates/markdown_preview/src/markdown_renderer.rs +++ b/crates/markdown_preview/src/markdown_renderer.rs @@ -679,33 +679,31 @@ fn render_markdown_text(parsed_new: &MarkdownParagraph, cx: &mut RenderContext) .to_highlight_style(&syntax_theme) .map(|style| 
(range.clone(), style)) }), - parsed.regions.iter().zip(&parsed.region_ranges).filter_map( - |(region, range)| { - if region.code { - Some(( - range.clone(), - HighlightStyle { - background_color: Some(code_span_bg_color), - ..Default::default() - }, - )) - } else if region.link.is_some() { - Some(( - range.clone(), - HighlightStyle { - color: Some(link_color), - ..Default::default() - }, - )) - } else { - None - } - }, - ), + parsed.regions.iter().filter_map(|(range, region)| { + if region.code { + Some(( + range.clone(), + HighlightStyle { + background_color: Some(code_span_bg_color), + ..Default::default() + }, + )) + } else if region.link.is_some() { + Some(( + range.clone(), + HighlightStyle { + color: Some(link_color), + ..Default::default() + }, + )) + } else { + None + } + }), ); let mut links = Vec::new(); let mut link_ranges = Vec::new(); - for (range, region) in parsed.region_ranges.iter().zip(&parsed.regions) { + for (range, region) in parsed.regions.iter() { if let Some(link) = region.link.clone() { links.push(link); link_ranges.push(range.clone()); @@ -927,7 +925,6 @@ mod tests { source_range: 0..text.len(), contents: SharedString::new(text), highlights: Default::default(), - region_ranges: Default::default(), regions: Default::default(), }) } From 92e64f9cf0e0ee11b631a7e5e5b9aa18fa2f4e75 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Thu, 13 Nov 2025 19:44:18 +0530 Subject: [PATCH 0088/1030] settings: Add tilde expansion support for LSP binary path (#41715) Closes #38227 Release Notes: - Added tilde expansion support for LSP binary path in `settings.json` --- crates/project/src/project_tests.rs | 61 +++++++++++++++++++++++++++++ crates/worktree/src/worktree.rs | 15 +++++-- 2 files changed, 73 insertions(+), 3 deletions(-) diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index 02c0e42c10f06006fa5b61a549684e2bb336f509..eb953cb32515ff2fa4a20785ca69d2ad857ae4ae 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -1292,6 +1292,67 @@ async fn test_language_server_relative_path(cx: &mut gpui::TestAppContext) { assert_eq!(lsp_path.to_string_lossy(), path!("lsp_on_path.exe")); } +#[gpui::test] +async fn test_language_server_tilde_path(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let settings_json_contents = json!({ + "languages": { + "Rust": { + "language_servers": ["tilde_lsp"] + } + }, + "lsp": { + "tilde_lsp": { + "binary": { + "path": "~/.local/bin/rust-analyzer", + } + } + }, + }); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/root"), + json!({ + ".zed": { + "settings.json": settings_json_contents.to_string(), + }, + "src": { + "main.rs": "fn main() {}", + } + }), + ) + .await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let language_registry = project.read_with(cx, |project, _| project.languages().clone()); + language_registry.add(rust_lang()); + + let mut tilde_lsp = language_registry.register_fake_lsp( + "Rust", + FakeLspAdapter { + name: "tilde_lsp", + ..Default::default() + }, + ); + cx.run_until_parked(); + + project + .update(cx, |project, cx| { + project.open_local_buffer_with_lsp(path!("/root/src/main.rs"), cx) + }) + .await + .unwrap(); + + let lsp_path = tilde_lsp.next().await.unwrap().binary.path; + let expected_path = paths::home_dir().join(".local/bin/rust-analyzer"); + assert_eq!( + lsp_path, expected_path, + "Tilde path should expand to home directory" + ); +} + #[gpui::test] async fn 
test_reporting_fs_changes_to_language_servers(cx: &mut gpui::TestAppContext) { init_test(cx); diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 7b412e187f0d2cab5c34800309525a16201a83c0..1e8c1648dca98b267146211a9b36fb78f743fb82 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -2386,16 +2386,25 @@ impl Snapshot { /// Resolves a path to an executable using the following heuristics: /// - /// 1. If the path is relative and contains more than one component, + /// 1. If the path starts with `~`, it is expanded to the user's home directory. + /// 2. If the path is relative and contains more than one component, /// it is joined to the worktree root path. - /// 2. If the path is relative and exists in the worktree + /// 3. If the path is relative and exists in the worktree /// (even if falls under an exclusion filter), /// it is joined to the worktree root path. - /// 3. Otherwise the path is returned unmodified. + /// 4. Otherwise the path is returned unmodified. /// /// Relative paths that do not exist in the worktree may /// still be found using the `PATH` environment variable. pub fn resolve_executable_path(&self, path: PathBuf) -> PathBuf { + if let Some(path_str) = path.to_str() { + if let Some(remaining_path) = path_str.strip_prefix("~/") { + return home_dir().join(remaining_path); + } else if path_str == "~" { + return home_dir().to_path_buf(); + } + } + if let Ok(rel_path) = RelPath::new(&path, self.path_style) && (path.components().count() > 1 || self.entry_for_path(&rel_path).is_some()) { From fb90b12073aabd8c753e048d0fe0a185a1de2b2c Mon Sep 17 00:00:00 2001 From: Tim McLean Date: Thu, 13 Nov 2025 09:15:46 -0500 Subject: [PATCH 0089/1030] Add retry support for OpenAI-compatible LLM providers (#37891) Automatically retry the agent's LLM completion requests when the provider returns 429 Too Many Requests. Uses the Retry-After header to determine the retry delay if it is available. Many providers are frequently overloaded or have low rate limits. These providers are essentially unusable without automatic retries. Tested with Cerebras configured via openai_compatible. 
Related: #31531 Release Notes: - Added automatic retries for OpenAI-compatible LLM providers --------- Co-authored-by: Bennet Bo Fenner --- Cargo.lock | 2 + crates/language_model/Cargo.toml | 1 + crates/language_model/src/language_model.rs | 21 ++++++ .../language_models/src/provider/open_ai.rs | 13 ++-- .../src/provider/open_ai_compatible.rs | 19 ++++-- crates/language_models/src/provider/vercel.rs | 14 ++-- crates/language_models/src/provider/x_ai.rs | 25 ++++--- crates/open_ai/Cargo.toml | 1 + crates/open_ai/src/open_ai.rs | 66 +++++++++++-------- 9 files changed, 115 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d26e24a8d46081157165dff92a9cd820e615054..d11ca902d1edbdc838071bfdb7df10dea88f9c81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8872,6 +8872,7 @@ dependencies = [ "icons", "image", "log", + "open_ai", "open_router", "parking_lot", "proto", @@ -11025,6 +11026,7 @@ dependencies = [ "serde_json", "settings", "strum 0.27.2", + "thiserror 2.0.17", ] [[package]] diff --git a/crates/language_model/Cargo.toml b/crates/language_model/Cargo.toml index 4d40a063b604b405f7bcb29a3457956e1dd5541d..7c6470f4fa0c1eac847c1194e967b451093a76ad 100644 --- a/crates/language_model/Cargo.toml +++ b/crates/language_model/Cargo.toml @@ -29,6 +29,7 @@ http_client.workspace = true icons.workspace = true image.workspace = true log.workspace = true +open_ai = { workspace = true, features = ["schemars"] } open_router.workspace = true parking_lot.workspace = true proto.workspace = true diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index 94f6ec33f15062dd53b4122ca9d9dcac3fbff83d..4f0eed34331980ec0fd499c6a77e49e94b524fe0 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -345,6 +345,27 @@ impl From for LanguageModelCompletionError { } } +impl From for LanguageModelCompletionError { + fn from(error: open_ai::RequestError) -> Self { + match error { + open_ai::RequestError::HttpResponseError { + provider, + status_code, + body, + headers, + } => { + let retry_after = headers + .get(http::header::RETRY_AFTER) + .and_then(|val| val.to_str().ok()?.parse::().ok()) + .map(Duration::from_secs); + + Self::from_http_status(provider.into(), status_code, body, retry_after) + } + open_ai::RequestError::Other(e) => Self::Other(e), + } + } +} + impl From for LanguageModelCompletionError { fn from(error: OpenRouterError) -> Self { let provider = LanguageModelProviderName::new("OpenRouter"); diff --git a/crates/language_models/src/provider/open_ai.rs b/crates/language_models/src/provider/open_ai.rs index cabd78c35be58667fd799fe34de07e1d1bfa5808..792d280950ceafa24cdf5e4104b80dd49bd45f3f 100644 --- a/crates/language_models/src/provider/open_ai.rs +++ b/crates/language_models/src/provider/open_ai.rs @@ -226,12 +226,17 @@ impl OpenAiLanguageModel { }; let future = self.request_limiter.stream(async move { + let provider = PROVIDER_NAME; let Some(api_key) = api_key else { - return Err(LanguageModelCompletionError::NoApiKey { - provider: PROVIDER_NAME, - }); + return Err(LanguageModelCompletionError::NoApiKey { provider }); }; - let request = stream_completion(http_client.as_ref(), &api_url, &api_key, request); + let request = stream_completion( + http_client.as_ref(), + provider.0.as_str(), + &api_url, + &api_key, + request, + ); let response = request.await?; Ok(response) }); diff --git a/crates/language_models/src/provider/open_ai_compatible.rs 
b/crates/language_models/src/provider/open_ai_compatible.rs index 4ed0de851244d65b0f838c582ccdffe763d6775f..a30c8bfa5d3a728d6dd388f8e768cd470ee9736d 100644 --- a/crates/language_models/src/provider/open_ai_compatible.rs +++ b/crates/language_models/src/provider/open_ai_compatible.rs @@ -205,8 +205,13 @@ impl OpenAiCompatibleLanguageModel { &self, request: open_ai::Request, cx: &AsyncApp, - ) -> BoxFuture<'static, Result>>> - { + ) -> BoxFuture< + 'static, + Result< + futures::stream::BoxStream<'static, Result>, + LanguageModelCompletionError, + >, + > { let http_client = self.http_client.clone(); let Ok((api_key, api_url)) = self.state.read_with(cx, |state, _cx| { @@ -216,7 +221,7 @@ impl OpenAiCompatibleLanguageModel { state.settings.api_url.clone(), ) }) else { - return future::ready(Err(anyhow!("App state dropped"))).boxed(); + return future::ready(Err(anyhow!("App state dropped").into())).boxed(); }; let provider = self.provider_name.clone(); @@ -224,7 +229,13 @@ impl OpenAiCompatibleLanguageModel { let Some(api_key) = api_key else { return Err(LanguageModelCompletionError::NoApiKey { provider }); }; - let request = stream_completion(http_client.as_ref(), &api_url, &api_key, request); + let request = stream_completion( + http_client.as_ref(), + provider.0.as_str(), + &api_url, + &api_key, + request, + ); let response = request.await?; Ok(response) }); diff --git a/crates/language_models/src/provider/vercel.rs b/crates/language_models/src/provider/vercel.rs index 20db24274aae0249efcfc897cb1bdfdcce8f1220..061dc1799922c03952b1a96e2785425f61bcf00b 100644 --- a/crates/language_models/src/provider/vercel.rs +++ b/crates/language_models/src/provider/vercel.rs @@ -220,13 +220,17 @@ impl VercelLanguageModel { }; let future = self.request_limiter.stream(async move { + let provider = PROVIDER_NAME; let Some(api_key) = api_key else { - return Err(LanguageModelCompletionError::NoApiKey { - provider: PROVIDER_NAME, - }); + return Err(LanguageModelCompletionError::NoApiKey { provider }); }; - let request = - open_ai::stream_completion(http_client.as_ref(), &api_url, &api_key, request); + let request = open_ai::stream_completion( + http_client.as_ref(), + provider.0.as_str(), + &api_url, + &api_key, + request, + ); let response = request.await?; Ok(response) }); diff --git a/crates/language_models/src/provider/x_ai.rs b/crates/language_models/src/provider/x_ai.rs index e7ee71ba86e202fe17d567923f4b04d3c886ae08..cc54dfa0dd8a3f2ca6ab2b769a779afa8e73988b 100644 --- a/crates/language_models/src/provider/x_ai.rs +++ b/crates/language_models/src/provider/x_ai.rs @@ -211,25 +211,34 @@ impl XAiLanguageModel { &self, request: open_ai::Request, cx: &AsyncApp, - ) -> BoxFuture<'static, Result>>> - { + ) -> BoxFuture< + 'static, + Result< + futures::stream::BoxStream<'static, Result>, + LanguageModelCompletionError, + >, + > { let http_client = self.http_client.clone(); let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| { let api_url = XAiLanguageModelProvider::api_url(cx); (state.api_key_state.key(&api_url), api_url) }) else { - return future::ready(Err(anyhow!("App state dropped"))).boxed(); + return future::ready(Err(anyhow!("App state dropped").into())).boxed(); }; let future = self.request_limiter.stream(async move { + let provider = PROVIDER_NAME; let Some(api_key) = api_key else { - return Err(LanguageModelCompletionError::NoApiKey { - provider: PROVIDER_NAME, - }); + return Err(LanguageModelCompletionError::NoApiKey { provider }); }; - let request = - 
open_ai::stream_completion(http_client.as_ref(), &api_url, &api_key, request); + let request = open_ai::stream_completion( + http_client.as_ref(), + provider.0.as_str(), + &api_url, + &api_key, + request, + ); let response = request.await?; Ok(response) }); diff --git a/crates/open_ai/Cargo.toml b/crates/open_ai/Cargo.toml index 49284eff79c11414c0811abd107f7c16ca701179..037ca14437cd13a6fc4bfe76dafb113c6a9f1482 100644 --- a/crates/open_ai/Cargo.toml +++ b/crates/open_ai/Cargo.toml @@ -25,3 +25,4 @@ serde.workspace = true serde_json.workspace = true settings.workspace = true strum.workspace = true +thiserror.workspace = true diff --git a/crates/open_ai/src/open_ai.rs b/crates/open_ai/src/open_ai.rs index e1f58fe95a487f5be650d758df32b8097ee578e4..aaeee01c9c74f8592ccfffa01893f9333f120e89 100644 --- a/crates/open_ai/src/open_ai.rs +++ b/crates/open_ai/src/open_ai.rs @@ -1,11 +1,15 @@ use anyhow::{Context as _, Result, anyhow}; use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream}; -use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest}; +use http_client::{ + AsyncBody, HttpClient, Method, Request as HttpRequest, StatusCode, + http::{HeaderMap, HeaderValue}, +}; use serde::{Deserialize, Serialize}; use serde_json::Value; pub use settings::OpenAiReasoningEffort as ReasoningEffort; use std::{convert::TryFrom, future::Future}; use strum::EnumIter; +use thiserror::Error; pub const OPEN_AI_API_URL: &str = "https://api.openai.com/v1"; @@ -441,8 +445,21 @@ pub struct ChoiceDelta { pub finish_reason: Option, } +#[derive(Error, Debug)] +pub enum RequestError { + #[error("HTTP response error from {provider}'s API: status {status_code} - {body:?}")] + HttpResponseError { + provider: String, + status_code: StatusCode, + body: String, + headers: HeaderMap, + }, + #[error(transparent)] + Other(#[from] anyhow::Error), +} + #[derive(Serialize, Deserialize, Debug)] -pub struct OpenAiError { +pub struct ResponseStreamError { message: String, } @@ -450,7 +467,7 @@ pub struct OpenAiError { #[serde(untagged)] pub enum ResponseStreamResult { Ok(ResponseStreamEvent), - Err { error: OpenAiError }, + Err { error: ResponseStreamError }, } #[derive(Serialize, Deserialize, Debug)] @@ -461,10 +478,11 @@ pub struct ResponseStreamEvent { pub async fn stream_completion( client: &dyn HttpClient, + provider_name: &str, api_url: &str, api_key: &str, request: Request, -) -> Result>> { +) -> Result>, RequestError> { let uri = format!("{api_url}/chat/completions"); let request_builder = HttpRequest::builder() .method(Method::POST) @@ -472,7 +490,12 @@ pub async fn stream_completion( .header("Content-Type", "application/json") .header("Authorization", format!("Bearer {}", api_key.trim())); - let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?; + let request = request_builder + .body(AsyncBody::from( + serde_json::to_string(&request).map_err(|e| RequestError::Other(e.into()))?, + )) + .map_err(|e| RequestError::Other(e.into()))?; + let mut response = client.send(request).await?; if response.status().is_success() { let reader = BufReader::new(response.into_body()); @@ -508,27 +531,18 @@ pub async fn stream_completion( .boxed()) } else { let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - - #[derive(Deserialize)] - struct OpenAiResponse { - error: OpenAiError, - } - - match serde_json::from_str::(&body) { - Ok(response) if !response.error.message.is_empty() => Err(anyhow!( - "API request to {} failed: {}", - 
api_url, - response.error.message, - )), - - _ => anyhow::bail!( - "API request to {} failed with status {}: {}", - api_url, - response.status(), - body, - ), - } + response + .body_mut() + .read_to_string(&mut body) + .await + .map_err(|e| RequestError::Other(e.into()))?; + + Err(RequestError::HttpResponseError { + provider: provider_name.to_owned(), + status_code: response.status(), + body, + headers: response.headers().clone(), + }) } } From 8467a1b08b644e131e2afbb11953d4fd65c0e104 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Thu, 13 Nov 2025 11:47:48 -0300 Subject: [PATCH 0090/1030] zeta eval: Improve output (#42629) Hides the aggregated scores if only one example/repetition ran. It also fixes an issue with the expected context scoring. Release Notes: - N/A --- crates/zeta_cli/src/evaluate.rs | 56 ++++++++++++++++++++------------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 6644ecbb5a72b4d7218a9d33bcc1f9f602c3f65d..4f8e984a7de36a96c4e8ad3ac7e5d9e9bfda244b 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -94,11 +94,15 @@ fn write_aggregated_scores( ) -> Result<()> { let mut successful = Vec::new(); let mut failed_count = 0; - writeln!(w, "## Errors\n")?; + for result in all_results.iter().flatten() { match result { Ok(eval_result) => successful.push(eval_result), Err((err, name, repetition_ix)) => { + if failed_count == 0 { + writeln!(w, "## Errors\n")?; + } + failed_count += 1; let err = err .to_string() @@ -114,22 +118,28 @@ fn write_aggregated_scores( } } } - let aggregated_result = EvaluationResult { - context: Scores::aggregate(successful.iter().map(|r| &r.context)), - edit_prediction: Scores::aggregate(successful.iter().map(|r| &r.edit_prediction)), - }; - writeln!(w, "\n{}", "-".repeat(80))?; - writeln!(w, "\n## TOTAL SCORES")?; - writeln!(w, "\n### Success Rate")?; - writeln!( - w, - "\nCongratulations! {}/{} ({:.2}%) of runs weren't outright failures 🎉", - successful.len(), - successful.len() + failed_count, - (successful.len() as f64 / (successful.len() + failed_count) as f64) * 100.0 - )?; - writeln!(w, "{}", aggregated_result)?; + if successful.len() > 1 { + let aggregated_result = EvaluationResult { + context: Scores::aggregate(successful.iter().map(|r| &r.context)), + edit_prediction: Scores::aggregate(successful.iter().map(|r| &r.edit_prediction)), + }; + + writeln!(w, "\n{}", "-".repeat(80))?; + writeln!(w, "\n## TOTAL SCORES")?; + writeln!(w, "\n### Success Rate")?; + writeln!(w, "{}", aggregated_result)?; + } + + if successful.len() + failed_count > 1 { + writeln!( + w, + "\nCongratulations! 
{}/{} ({:.2}%) of runs weren't outright failures 🎉", + successful.len(), + successful.len() + failed_count, + (successful.len() as f64 / (successful.len() + failed_count) as f64) * 100.0 + )?; + } Ok(()) } @@ -326,7 +336,7 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResul let mut false_positive_lines = actual_context_lines.clone(); for entry in &example.expected_context { - let mut best_alternative_score = Scores::default(); + let mut best_alternative_score: Option = None; for alternative in &entry.alternatives { let expected: HashSet<_> = alternative @@ -344,13 +354,17 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResul false_positive_lines.retain(|line| !actual_context_lines.contains(line)); - if scores.recall() > best_alternative_score.recall() { - best_alternative_score = scores; + if best_alternative_score + .as_ref() + .is_none_or(|best| scores.recall() > best.recall()) + { + best_alternative_score = Some(scores); } } - eval_result.context.false_negatives += best_alternative_score.false_negatives; - eval_result.context.true_positives += best_alternative_score.true_positives; + let best_alternative = best_alternative_score.unwrap_or_default(); + eval_result.context.false_negatives += best_alternative.false_negatives; + eval_result.context.true_positives += best_alternative.true_positives; } eval_result.context.false_positives = false_positive_lines.len(); From 55e4258147b232ebb927762495e62d53d00b526c Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Thu, 13 Nov 2025 17:09:16 +0200 Subject: [PATCH 0091/1030] agent: Workaround for Sonnet inserting tag (#42634) Release Notes: - N/A --- crates/agent/src/edit_agent/edit_parser.rs | 28 +++++++++++++++------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/crates/agent/src/edit_agent/edit_parser.rs b/crates/agent/src/edit_agent/edit_parser.rs index 425bf93efff115d4daef380e3f82abcdb8c0746f..c1aa61e18d4a450bc1caef939a53f65aed529a82 100644 --- a/crates/agent/src/edit_agent/edit_parser.rs +++ b/crates/agent/src/edit_agent/edit_parser.rs @@ -15,12 +15,14 @@ const SEPARATOR_MARKER: &str = "======="; const REPLACE_MARKER: &str = ">>>>>>> REPLACE"; const SONNET_PARAMETER_INVOKE_1: &str = "\n"; const SONNET_PARAMETER_INVOKE_2: &str = ""; -const END_TAGS: [&str; 5] = [ +const SONNET_PARAMETER_INVOKE_3: &str = ""; +const END_TAGS: [&str; 6] = [ OLD_TEXT_END_TAG, NEW_TEXT_END_TAG, EDITS_END_TAG, - SONNET_PARAMETER_INVOKE_1, // Remove this after switching to streaming tool call + SONNET_PARAMETER_INVOKE_1, // Remove these after switching to streaming tool call SONNET_PARAMETER_INVOKE_2, + SONNET_PARAMETER_INVOKE_3, ]; #[derive(Debug)] @@ -567,21 +569,29 @@ mod tests { parse_random_chunks( indoc! 
{" some textupdated text + more textupd "}, &mut parser, &mut rng ), - vec![Edit { - old_text: "some text".to_string(), - new_text: "updated text".to_string(), - line_hint: None, - },] + vec![ + Edit { + old_text: "some text".to_string(), + new_text: "updated text".to_string(), + line_hint: None, + }, + Edit { + old_text: "more text".to_string(), + new_text: "upd".to_string(), + line_hint: None, + }, + ] ); assert_eq!( parser.finish(), EditParserMetrics { - tags: 2, - mismatched_tags: 1 + tags: 4, + mismatched_tags: 2 } ); } From b500470391cbd2c109cab6b184ffdea909ac9f16 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 13 Nov 2025 10:10:02 -0500 Subject: [PATCH 0092/1030] Disabled agent commands (#42579) Closes #31346 Release Notes: - Agent commands no longer show up in the command palette when `agent` is disabled. Same for edit predictions. --- crates/agent_ui/src/agent_ui.rs | 220 +++++++++++++++++++++++++++----- 1 file changed, 189 insertions(+), 31 deletions(-) diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index 781374f117d24b2265a16a5aa9260690850d10d4..da1543a2790599fbe590f4e29d3594588bd2f351 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -30,7 +30,10 @@ use command_palette_hooks::CommandPaletteFilter; use feature_flags::FeatureFlagAppExt as _; use fs::Fs; use gpui::{Action, App, Entity, SharedString, actions}; -use language::LanguageRegistry; +use language::{ + LanguageRegistry, + language_settings::{AllLanguageSettings, EditPredictionProvider}, +}; use language_model::{ ConfiguredModel, LanguageModel, LanguageModelId, LanguageModelProviderId, LanguageModelRegistry, }; @@ -286,7 +289,25 @@ pub fn init( fn update_command_palette_filter(cx: &mut App) { let disable_ai = DisableAiSettings::get_global(cx).disable_ai; + let agent_enabled = AgentSettings::get_global(cx).enabled; + let edit_prediction_provider = AllLanguageSettings::get_global(cx) + .edit_predictions + .provider; + CommandPaletteFilter::update_global(cx, |filter, _| { + use editor::actions::{ + AcceptEditPrediction, AcceptPartialEditPrediction, NextEditPrediction, + PreviousEditPrediction, ShowEditPrediction, ToggleEditPrediction, + }; + let edit_prediction_actions = [ + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ]; + if disable_ai { filter.hide_namespace("agent"); filter.hide_namespace("assistant"); @@ -295,42 +316,45 @@ fn update_command_palette_filter(cx: &mut App) { filter.hide_namespace("zed_predict_onboarding"); filter.hide_namespace("edit_prediction"); - use editor::actions::{ - AcceptEditPrediction, AcceptPartialEditPrediction, NextEditPrediction, - PreviousEditPrediction, ShowEditPrediction, ToggleEditPrediction, - }; - let edit_prediction_actions = [ - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - ]; filter.hide_action_types(&edit_prediction_actions); filter.hide_action_types(&[TypeId::of::()]); } else { - filter.show_namespace("agent"); + if agent_enabled { + filter.show_namespace("agent"); + } else { + filter.hide_namespace("agent"); + } + filter.show_namespace("assistant"); - filter.show_namespace("copilot"); - filter.show_namespace("zed_predict_onboarding"); - filter.show_namespace("edit_prediction"); - - use editor::actions::{ - AcceptEditPrediction, AcceptPartialEditPrediction, NextEditPrediction, - PreviousEditPrediction, ShowEditPrediction, ToggleEditPrediction, - }; - let edit_prediction_actions = [ - 
TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - ]; - filter.show_action_types(edit_prediction_actions.iter()); + match edit_prediction_provider { + EditPredictionProvider::None => { + filter.hide_namespace("edit_prediction"); + filter.hide_namespace("copilot"); + filter.hide_namespace("supermaven"); + filter.hide_action_types(&edit_prediction_actions); + } + EditPredictionProvider::Copilot => { + filter.show_namespace("edit_prediction"); + filter.show_namespace("copilot"); + filter.hide_namespace("supermaven"); + filter.show_action_types(edit_prediction_actions.iter()); + } + EditPredictionProvider::Supermaven => { + filter.show_namespace("edit_prediction"); + filter.hide_namespace("copilot"); + filter.show_namespace("supermaven"); + filter.show_action_types(edit_prediction_actions.iter()); + } + EditPredictionProvider::Zed | EditPredictionProvider::Codestral => { + filter.show_namespace("edit_prediction"); + filter.hide_namespace("copilot"); + filter.hide_namespace("supermaven"); + filter.show_action_types(edit_prediction_actions.iter()); + } + } + filter.show_namespace("zed_predict_onboarding"); filter.show_action_types(&[TypeId::of::()]); } }); @@ -420,3 +444,137 @@ fn register_slash_commands(cx: &mut App) { }) .detach(); } + +#[cfg(test)] +mod tests { + use super::*; + use agent_settings::{AgentProfileId, AgentSettings, CompletionMode}; + use command_palette_hooks::CommandPaletteFilter; + use editor::actions::AcceptEditPrediction; + use gpui::{BorrowAppContext, TestAppContext, px}; + use project::DisableAiSettings; + use settings::{ + DefaultAgentView, DockPosition, NotifyWhenAgentWaiting, Settings, SettingsStore, + }; + + #[gpui::test] + fn test_agent_command_palette_visibility(cx: &mut TestAppContext) { + // Init settings + cx.update(|cx| { + let store = SettingsStore::test(cx); + cx.set_global(store); + command_palette_hooks::init(cx); + AgentSettings::register(cx); + DisableAiSettings::register(cx); + AllLanguageSettings::register(cx); + }); + + let agent_settings = AgentSettings { + enabled: true, + button: true, + dock: DockPosition::Right, + default_width: px(300.), + default_height: px(600.), + default_model: None, + inline_assistant_model: None, + commit_message_model: None, + thread_summary_model: None, + inline_alternatives: vec![], + default_profile: AgentProfileId::default(), + default_view: DefaultAgentView::Thread, + profiles: Default::default(), + always_allow_tool_actions: false, + notify_when_agent_waiting: NotifyWhenAgentWaiting::default(), + play_sound_when_agent_done: false, + single_file_review: false, + model_parameters: vec![], + preferred_completion_mode: CompletionMode::Normal, + enable_feedback: false, + expand_edit_card: true, + expand_terminal_card: true, + use_modifier_to_send: true, + message_editor_min_lines: 1, + }; + + cx.update(|cx| { + AgentSettings::override_global(agent_settings.clone(), cx); + DisableAiSettings::override_global(DisableAiSettings { disable_ai: false }, cx); + + // Initial update + update_command_palette_filter(cx); + }); + + // Assert visible + cx.update(|cx| { + let filter = CommandPaletteFilter::try_global(cx).unwrap(); + assert!( + !filter.is_hidden(&NewThread), + "NewThread should be visible by default" + ); + }); + + // Disable agent + cx.update(|cx| { + let mut new_settings = agent_settings.clone(); + new_settings.enabled = false; + AgentSettings::override_global(new_settings, cx); + + // Trigger update + update_command_palette_filter(cx); + }); + + // Assert hidden 
+ cx.update(|cx| { + let filter = CommandPaletteFilter::try_global(cx).unwrap(); + assert!( + filter.is_hidden(&NewThread), + "NewThread should be hidden when agent is disabled" + ); + }); + + // Test EditPredictionProvider + // Enable EditPredictionProvider::Copilot + cx.update(|cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |s| { + s.project + .all_languages + .features + .get_or_insert(Default::default()) + .edit_prediction_provider = Some(EditPredictionProvider::Copilot); + }); + }); + update_command_palette_filter(cx); + }); + + cx.update(|cx| { + let filter = CommandPaletteFilter::try_global(cx).unwrap(); + assert!( + !filter.is_hidden(&AcceptEditPrediction), + "EditPrediction should be visible when provider is Copilot" + ); + }); + + // Disable EditPredictionProvider (None) + cx.update(|cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |s| { + s.project + .all_languages + .features + .get_or_insert(Default::default()) + .edit_prediction_provider = Some(EditPredictionProvider::None); + }); + }); + update_command_palette_filter(cx); + }); + + cx.update(|cx| { + let filter = CommandPaletteFilter::try_global(cx).unwrap(); + assert!( + filter.is_hidden(&AcceptEditPrediction), + "EditPrediction should be hidden when provider is None" + ); + }); + } +} From eadc2301e08da63fd961797663fbd70be22c35df Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 13 Nov 2025 10:21:53 -0500 Subject: [PATCH 0093/1030] Fetch the unit eval commit before checking it out (#42636) Release Notes: - N/A --- script/run-unit-evals | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/run-unit-evals b/script/run-unit-evals index c5178add7a1e4c76151b3907771abe81ba46aaaf..adbf96553691d24a4f7253806bcb38f5ef78ab2a 100755 --- a/script/run-unit-evals +++ b/script/run-unit-evals @@ -3,7 +3,7 @@ set -euxo pipefail if [ -n "${UNIT_EVAL_COMMIT:-}" ]; then - git checkout "$UNIT_EVAL_COMMIT" + git fetch origin "$UNIT_EVAL_COMMIT" && git checkout "$UNIT_EVAL_COMMIT" fi GPUI_TEST_TIMEOUT=1500 cargo nextest run --workspace --no-fail-fast --features unit-eval --no-capture -E 'test(::eval_)' From c2c5fceb5bd9aacf243749fbe962c40c501919b9 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Thu, 13 Nov 2025 12:43:22 -0300 Subject: [PATCH 0094/1030] zeta eval: Allow no headings under "Expected Context" (#42638) Release Notes: - N/A --- crates/zeta_cli/src/example.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 5732e1efcb0e05f5a4c3122130ee40cad5a1d4f6..300e453af93bd3c69a47f5e155e274431aa01c92 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -259,6 +259,11 @@ impl NamedExample { if !text.ends_with('\n') { text.push('\n'); } + + if named.example.expected_context.is_empty() { + named.example.expected_context.push(Default::default()); + } + let alternatives = &mut named .example .expected_context From ec1664f61a34038e7198e00f8d7b780a8ca85114 Mon Sep 17 00:00:00 2001 From: kitt <11167504+kitt-cat@users.noreply.github.com> Date: Thu, 13 Nov 2025 07:46:51 -0800 Subject: [PATCH 0095/1030] zed: Enable line wrapping for cli help (#42496) This enables clap's [wrap-help] feature and sets max_term_width to wrap after 100 columns (the value clap is planning to default to in clap-v5). 
This commit also adds blank lines which cause clap to split longer doc comments into separate help (displayed for `-h`) and long_help (displayed for `--help`) messages, as per [doc-processing]. [wrap-help]: https://docs.rs/clap/4.5.49/clap/_features/index.html#optional-features [doc-processing]: https://docs.rs/clap/4.5.49/clap/_derive/index.html#pre-processing ![before: some lines of help text stretch across the whole screen. after: all lines are wrapped at 100 columns, and some manual linebreaks are preserved where it makes sense (in particular, when listing the user-data-dir locations on each platform)](https://github.com/user-attachments/assets/359067b4-5ffb-4fe3-80bd-5e1062986417) Release Notes: - N/A --- Cargo.toml | 2 +- crates/zed/src/main.rs | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 328f5b8db5e870df4f1954ccbdb713173a520f8b..bd9f57049af3a3add6a52a74aa518e0dc7ec0dbe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -485,7 +485,7 @@ cfg-if = "1.0.3" chrono = { version = "0.4", features = ["serde"] } ciborium = "0.2" circular-buffer = "1.0" -clap = { version = "4.4", features = ["derive"] } +clap = { version = "4.4", features = ["derive", "wrap_help"] } cocoa = "=0.26.0" cocoa-foundation = "=0.2.0" convert_case = "0.8.0" diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index a2e0c449e982594d7197da355ff4720c4da87163..01318a7636bb42916f115ad55339ff4df0937e83 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -1248,7 +1248,7 @@ pub fn stdout_is_a_pty() -> bool { } #[derive(Parser, Debug)] -#[command(name = "zed", disable_version_flag = true)] +#[command(name = "zed", disable_version_flag = true, max_term_width = 100)] struct Args { /// A sequence of space-separated paths or urls that you want to open. /// @@ -1263,11 +1263,12 @@ struct Args { diff: Vec, /// Sets a custom directory for all user data (e.g., database, extensions, logs). + /// /// This overrides the default platform-specific data directory location. /// On macOS, the default is `~/Library/Application Support/Zed`. /// On Linux/FreeBSD, the default is `$XDG_DATA_HOME/zed`. /// On Windows, the default is `%LOCALAPPDATA%\Zed`. - #[arg(long, value_name = "DIR")] + #[arg(long, value_name = "DIR", verbatim_doc_comment)] user_data_dir: Option, /// The username and WSL distribution to use when opening paths. If not specified, @@ -1287,8 +1288,11 @@ struct Args { #[arg(long)] dev_server_token: Option, - /// Prints system specs. Useful for submitting issues on GitHub when encountering a bug - /// that prevents Zed from starting, so you can't run `zed: copy system specs to clipboard` + /// Prints system specs. + /// + /// Useful for submitting issues on GitHub when encountering a bug that + /// prevents Zed from starting, so you can't run `zed: copy system specs to + /// clipboard` #[arg(long)] system_specs: bool, From b6972d70a5f282fa13d6353d835a2ab680706c32 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Thu, 13 Nov 2025 21:18:05 +0530 Subject: [PATCH 0096/1030] editor: Fix panic when calculating jump data for buffer header (#42639) Just on nightly. Release Notes: - N/A *or* Added/Fixed/Improved ... 
Co-authored-by: Lukas Wirth --- crates/editor/src/element.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 16f8ef4fca612528467612c4d12fe3bc659d8e04..785fd9de00888a7f658785e689df34bd2cffdf8d 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -7799,18 +7799,21 @@ fn file_status_label_color(file_status: Option) -> Color { } fn header_jump_data( - snapshot: &EditorSnapshot, + editor_snapshot: &EditorSnapshot, block_row_start: DisplayRow, height: u32, first_excerpt: &ExcerptInfo, latest_selection_anchors: &HashMap, ) -> JumpData { let jump_target = if let Some(anchor) = latest_selection_anchors.get(&first_excerpt.buffer_id) - && let Some(range) = snapshot.context_range_for_excerpt(anchor.excerpt_id) + && let Some(range) = editor_snapshot.context_range_for_excerpt(anchor.excerpt_id) + && let Some(buffer) = editor_snapshot + .buffer_snapshot() + .buffer_for_excerpt(anchor.excerpt_id) { JumpTargetInExcerptInput { id: anchor.excerpt_id, - buffer: &first_excerpt.buffer, + buffer, excerpt_start_anchor: range.start, jump_anchor: anchor.text_anchor, } @@ -7822,7 +7825,7 @@ fn header_jump_data( jump_anchor: first_excerpt.range.primary.start, } }; - header_jump_data_inner(snapshot, block_row_start, height, &jump_target) + header_jump_data_inner(editor_snapshot, block_row_start, height, &jump_target) } struct JumpTargetInExcerptInput<'a> { From b709996ec6e57d7593f409fe519695df7d5e2608 Mon Sep 17 00:00:00 2001 From: Dino Date: Thu, 13 Nov 2025 15:57:26 +0000 Subject: [PATCH 0097/1030] editor: Fix pane's tab buttons flicker on right-click (#42549) Whenever right-click was used on the editor, the pane's tab buttons would flicker, which was confirmed to happen because of the following check: ``` self.focus_handle.contains_focused(window, cx) || self .active_item() .is_some_and(|item| { item.item_focus_handle(cx).contains_focused(window, cx) }) ``` This check was returning `false` right after right-clicking but returning `true` right after. When digging into it a little bit more, this appears to be happening because the editor's `MouseContextMenu` relies on `ContextMenu` which is rendered in a deferred fashion but `MouseContextMenu` updates the window's focus to it instantaneously. Since the `ContextMenu` is rendered in a deferred fashion, its focus handle is not yet a descendant of the editor (pane's active item) focus handle, so the `contains_focused(window, cx)` call would return `false`, with it returning `true` after the menu was rendered. This commit updates the `MouseContextMenu::new` function to leverage `cx.on_next_frame` and ensure that the focus is only moved to the `ContextMenu` 2 frames later, ensuring that by the time the focus is moved, the `ContextMenu`'s focus handle is a descendant of the editor's. 
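To make the two-frame hop described above concrete, here is a small self-contained model of the idea. The `FrameScheduler` type is invented for this sketch and is not gpui's API; it only mimics the property that a callback registered via `on_next_frame` during frame N runs in frame N + 1:

```rust
use std::cell::RefCell;
use std::rc::Rc;

/// Toy stand-in for a UI frame loop: callbacks registered during frame N run
/// at the start of frame N + 1.
#[derive(Default)]
struct FrameScheduler {
    next_frame: RefCell<Vec<Box<dyn FnOnce(&FrameScheduler)>>>,
}

impl FrameScheduler {
    fn on_next_frame(&self, f: impl FnOnce(&FrameScheduler) + 'static) {
        self.next_frame.borrow_mut().push(Box::new(f));
    }

    fn run_frame(&self) {
        // Drain the queue first so callbacks registered *during* this frame
        // are deferred to the following one.
        let callbacks = std::mem::take(&mut *self.next_frame.borrow_mut());
        for callback in callbacks {
            callback(self);
        }
    }
}

fn main() {
    let focused = Rc::new(RefCell::new("editor"));
    let scheduler = FrameScheduler::default();

    // Frame 1 draws the deferred context menu; only on frame 2 is its focus
    // handle a child of the editor's, so we hop two frames before focusing it.
    let focused_clone = focused.clone();
    scheduler.on_next_frame(move |scheduler| {
        scheduler.on_next_frame(move |_| {
            *focused_clone.borrow_mut() = "context menu";
        });
    });

    scheduler.run_frame(); // frame 1: menu gets drawn, focus unchanged
    assert_eq!(*focused.borrow(), "editor");
    scheduler.run_frame(); // frame 2: focus finally moves
    assert_eq!(*focused.borrow(), "context menu");
    println!("focused: {}", *focused.borrow());
}
```

Because the deferred `ContextMenu` is drawn during the first frame, only a callback scheduled from within a frame-1 callback is guaranteed to run after its focus handle has been parented to the editor's — which is why the patch nests `cx.on_next_frame` twice.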
Closes #41771 Release Notes: - Fixed pane's tab buttons flickering when using right-click on the editor --- crates/editor/src/mouse_context_menu.rs | 26 +++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/mouse_context_menu.rs b/crates/editor/src/mouse_context_menu.rs index 2a63e39adda52734b301eda0d32a5bfa10a8e47e..94e5019d59b68a33d2d64245d2d1e17a764638da 100644 --- a/crates/editor/src/mouse_context_menu.rs +++ b/crates/editor/src/mouse_context_menu.rs @@ -81,7 +81,19 @@ impl MouseContextMenu { cx: &mut Context, ) -> Self { let context_menu_focus = context_menu.focus_handle(cx); - window.focus(&context_menu_focus); + + // Since `ContextMenu` is rendered in a deferred fashion its focus + // handle is not linked to the Editor's until after the deferred draw + // callback runs. + // We need to wait for that to happen before focusing it, so that + // calling `contains_focused` on the editor's focus handle returns + // `true` when the `ContextMenu` is focused. + let focus_handle = context_menu_focus.clone(); + cx.on_next_frame(window, move |_, window, cx| { + cx.on_next_frame(window, move |_, window, _cx| { + window.focus(&focus_handle); + }); + }); let _dismiss_subscription = cx.subscribe_in(&context_menu, window, { let context_menu_focus = context_menu_focus.clone(); @@ -329,8 +341,18 @@ mod tests { } "}); cx.editor(|editor, _window, _app| assert!(editor.mouse_context_menu.is_none())); + cx.update_editor(|editor, window, cx| { - deploy_context_menu(editor, Some(Default::default()), point, window, cx) + deploy_context_menu(editor, Some(Default::default()), point, window, cx); + + // Assert that, even after deploying the editor's mouse context + // menu, the editor's focus handle still contains the focused + // element. The pane's tab bar relies on this to determine whether + // to show the tab bar buttons and there was a small flicker when + // deploying the mouse context menu that would cause this to not be + // true, making it so that the buttons would disappear for a couple + // of frames. + assert!(editor.focus_handle.contains_focused(window, cx)); }); cx.assert_editor_state(indoc! {" From b900ac2ac7ca36636d0a2476c308d127bf0344b8 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Thu, 13 Nov 2025 07:58:59 -0800 Subject: [PATCH 0098/1030] ci: Fix `script/clear-target-dir-if-larger-than` post #41652 (#42640) Closes #ISSUE The namespace runners mount the `target` directory to the cache drive, so `rm -rf target` would fail with `Device Busy`. Instead we now do `rm -rf target/* target/.*` to remove all files (including hidden files) from the `target` directory, without removing the target directory itself Release Notes: - N/A *or* Added/Fixed/Improved ... --- script/clear-target-dir-if-larger-than | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script/clear-target-dir-if-larger-than b/script/clear-target-dir-if-larger-than index f5219dcc537178e50ea300aa3df3871ace9182a2..e2e3062a1862da673c8f684f4231f53d367e5546 100755 --- a/script/clear-target-dir-if-larger-than +++ b/script/clear-target-dir-if-larger-than @@ -21,5 +21,5 @@ echo "target directory size: ${current_size_gb}gb. 
max size: ${max_size_gb}gb" if [[ ${current_size_gb} -gt ${max_size_gb} ]]; then echo "clearing target directory" - rm -rf target + rm -rf target/* target/.* fi From cb7bd5fe19097003190542deeff7208f166ddfc9 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Thu, 13 Nov 2025 09:06:26 -0700 Subject: [PATCH 0099/1030] Include source PR number in cherry-picks (#42642) Release Notes: - N/A --- .github/workflows/cherry_pick.yml | 5 +++++ tooling/xtask/src/tasks/workflows/cherry_pick.rs | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml index 69a46558396bd04db9f43e5d401c74d14b07fc88..bc01aae17e7141a2359b162c3de94c1aec7b765c 100644 --- a/.github/workflows/cherry_pick.yml +++ b/.github/workflows/cherry_pick.yml @@ -1,6 +1,7 @@ # Generated from xtask::workflows::cherry_pick # Rebuild with `cargo xtask workflows`. name: cherry_pick +run-name: 'cherry_pick to ${{ inputs.channel }} #${{ inputs.pr_number }}' on: workflow_dispatch: inputs: @@ -16,6 +17,10 @@ on: description: channel required: true type: string + pr_number: + description: pr_number + required: true + type: string jobs: run_cherry_pick: runs-on: namespace-profile-2x4-ubuntu-2404 diff --git a/tooling/xtask/src/tasks/workflows/cherry_pick.rs b/tooling/xtask/src/tasks/workflows/cherry_pick.rs index 1a8407f2ec403648946dc091d3d1763b982d8452..6181d79e042365b87ce2d6ad00a75580c71344a2 100644 --- a/tooling/xtask/src/tasks/workflows/cherry_pick.rs +++ b/tooling/xtask/src/tasks/workflows/cherry_pick.rs @@ -10,13 +10,16 @@ pub fn cherry_pick() -> Workflow { let branch = Input::string("branch", None); let commit = Input::string("commit", None); let channel = Input::string("channel", None); + let pr_number = Input::string("pr_number", None); let cherry_pick = run_cherry_pick(&branch, &commit, &channel); named::workflow() + .run_name(format!("cherry_pick to {channel} #{pr_number}")) .on(Event::default().workflow_dispatch( WorkflowDispatch::default() .add_input(commit.name, commit.input()) .add_input(branch.name, branch.input()) - .add_input(channel.name, channel.input()), + .add_input(channel.name, channel.input()) + .add_input(pr_number.name, pr_number.input()), )) .add_job(cherry_pick.name, cherry_pick.job) } From 46eb9e5223036d103181c849187770917255eaac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafael=20L=C3=BCder?= Date: Thu, 13 Nov 2025 10:51:13 -0600 Subject: [PATCH 0100/1030] Update scale factor and drawable size when macOS window changes screen (#38269) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary Fixes UI scaling issue that occurs when starting Zed after disconnecting an external monitor on macOS. The window's scale factor and drawable size are now properly updated when the window changes screens. Problem Description When an external monitor is disconnected and Zed is started with only the built-in screen active, the UI scale becomes incorrect. This happens because: 1. macOS triggers the `window_did_change_screen` callback when a window moves between displays (including when displays are disconnected) 2. The existing implementation only restarted the display link but didn't update the window's scale factor or drawable size 3. 
This left the window with stale scaling information from the previous display configuration Root Cause The `window_did_change_screen` callback in `crates/gpui/src/platform/mac/window.rs` was missing the logic to update the window's scale factor and drawable size when moving between screens. This logic was only present in the `view_did_change_backing_properties` callback, which isn't triggered when external monitors are disconnected. Solution - Extracted common logic: Created a new `update_window_scale_factor()` function that encapsulates the scale factor and drawable size update logic - Added scale factor update to screen change: Modified `window_did_change_screen` to call this function after restarting the display link - Refactored existing code: Updated `view_did_change_backing_properties` to use the new shared function, reducing code duplication The fix ensures that whenever a window changes screens (due to monitor disconnect, reconnect, or manual movement), the scale factor, drawable size, and renderer state are properly synchronized. Testing - ✅ Verified that UI scaling remains correct after disconnecting external monitor - ✅ Confirmed that reconnecting external monitor works properly - ✅ Tested that manual window movement between displays updates scaling correctly - ✅ No regressions observed in normal window operations To verify my fix worked, I had to copy my preview workspace over my dev workspace; once I had done this, I could reproduce the issue on main consistently. After switching to the branch with this fix the issue was resolved. The fix is similar to what was done in https://github.com/zed-industries/zed/pull/35686 (Windows) Closes #37245 #38229 Release Notes: - Fixed: Update scale factor and drawable size when macOS window changes screen --------- Co-authored-by: Kate --- crates/gpui/src/platform/mac/window.rs | 48 +++++++++++++++----------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/crates/gpui/src/platform/mac/window.rs b/crates/gpui/src/platform/mac/window.rs index 11ea4fb7e272c0660c7981cabf8f8c74ffa71830..53a5688ad6b78cda8f610e4acc810a7df58cf47b 100644 --- a/crates/gpui/src/platform/mac/window.rs +++ b/crates/gpui/src/platform/mac/window.rs @@ -1967,10 +1967,36 @@ extern "C" fn window_did_move(this: &Object, _: Sel, _: id) { } } +// Update the window scale factor and drawable size, and call the resize callback if any.
+fn update_window_scale_factor(window_state: &Arc>) { + let mut lock = window_state.as_ref().lock(); + let scale_factor = lock.scale_factor(); + let size = lock.content_size(); + let drawable_size = size.to_device_pixels(scale_factor); + unsafe { + let _: () = msg_send![ + lock.renderer.layer(), + setContentsScale: scale_factor as f64 + ]; + } + + lock.renderer.update_drawable_size(drawable_size); + + if let Some(mut callback) = lock.resize_callback.take() { + let content_size = lock.content_size(); + let scale_factor = lock.scale_factor(); + drop(lock); + callback(content_size, scale_factor); + window_state.as_ref().lock().resize_callback = Some(callback); + }; +} + extern "C" fn window_did_change_screen(this: &Object, _: Sel, _: id) { let window_state = unsafe { get_window_state(this) }; let mut lock = window_state.as_ref().lock(); lock.start_display_link(); + drop(lock); + update_window_scale_factor(&window_state); } extern "C" fn window_did_change_key_status(this: &Object, selector: Sel, _: id) { @@ -2079,27 +2105,7 @@ extern "C" fn make_backing_layer(this: &Object, _: Sel) -> id { extern "C" fn view_did_change_backing_properties(this: &Object, _: Sel) { let window_state = unsafe { get_window_state(this) }; - let mut lock = window_state.as_ref().lock(); - - let scale_factor = lock.scale_factor(); - let size = lock.content_size(); - let drawable_size = size.to_device_pixels(scale_factor); - unsafe { - let _: () = msg_send![ - lock.renderer.layer(), - setContentsScale: scale_factor as f64 - ]; - } - - lock.renderer.update_drawable_size(drawable_size); - - if let Some(mut callback) = lock.resize_callback.take() { - let content_size = lock.content_size(); - let scale_factor = lock.scale_factor(); - drop(lock); - callback(content_size, scale_factor); - window_state.as_ref().lock().resize_callback = Some(callback); - }; + update_window_scale_factor(&window_state); } extern "C" fn set_frame_size(this: &Object, _: Sel, size: NSSize) { From e91be9e98e78e02983d1ef7d246735463edb9dd7 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Thu, 13 Nov 2025 13:13:09 -0500 Subject: [PATCH 0101/1030] Fix ACP CLI login via remote (#42647) Release Notes: - Fixed logging into Gemini CLI and Claude Code when remoting and authenticating via CLI Co-authored-by: Lukas Wirth --- crates/project/src/agent_server_store.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index d3c078ffa101c8c66d1c5ab75fb8b59d7748127a..67d3a0b8132be1db487fe347f3b79e42a8b5910d 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -21,7 +21,7 @@ use rpc::{AnyProtoClient, TypedEnvelope, proto}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use settings::{RegisterSetting, SettingsStore}; -use task::Shell; +use task::{Shell, SpawnInTerminal}; use util::{ResultExt as _, debug_panic}; use crate::ProjectEnvironment; @@ -1010,7 +1010,7 @@ impl ExternalAgentServer for RemoteExternalAgentServer { env: Some(command.env), }, root_dir, - None, + response.login.map(SpawnInTerminal::from_proto), )) }) } From fa0c7500c1316ef720eed202a807ffe7272282c3 Mon Sep 17 00:00:00 2001 From: Lionel Henry Date: Thu, 13 Nov 2025 20:35:45 +0100 Subject: [PATCH 0102/1030] Update runtimed to fix compatibility issue with the Ark kernel (#40889) Closes #40888 This updates runtimed to the latest version, which handles the "starting" variant of `execution_state`. 
It actually handles a bunch of other variants that are not documented in the protocol (see https://jupyter-client.readthedocs.io/en/stable/messaging.html#kernel-status), like "starting", "terminating", etc. I added implementations for these variants as well. Release Notes: - Fixed issue that prevented the Ark kernel from working in Zed (#40888). --------- Co-authored-by: Conrad Irwin --- Cargo.lock | 44 ++++++---- Cargo.toml | 10 +-- crates/repl/src/kernels/mod.rs | 7 ++ crates/repl/src/outputs.rs | 7 ++ crates/repl/src/session.rs | 7 ++ .../zed/src/zed/quick_action_bar/repl_menu.rs | 88 ++++++++++++------- 6 files changed, 109 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d11ca902d1edbdc838071bfdb7df10dea88f9c81..67fa79b009fe59b052c22f77cf3b3b1c364d0c66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1461,6 +1461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" dependencies = [ "aws-lc-sys", + "untrusted 0.7.1", "zeroize", ] @@ -8658,23 +8659,25 @@ dependencies = [ [[package]] name = "jupyter-protocol" -version = "0.6.0" -source = "git+https://github.com/ConradIrwin/runtimed?rev=7130c804216b6914355d15d0b91ea91f6babd734#7130c804216b6914355d15d0b91ea91f6babd734" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c047f6b5e551563af2ddb13dafed833f0ec5a5b0f9621d5ad740a9ff1e1095" dependencies = [ - "anyhow", "async-trait", "bytes 1.10.1", "chrono", "futures 0.3.31", "serde", "serde_json", + "thiserror 2.0.17", "uuid", ] [[package]] name = "jupyter-websocket-client" -version = "0.9.0" -source = "git+https://github.com/ConradIrwin/runtimed?rev=7130c804216b6914355d15d0b91ea91f6babd734#7130c804216b6914355d15d0b91ea91f6babd734" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4197fa926a6b0bddfed7377d9fed3d00a0dec44a1501e020097bd26604699cae" dependencies = [ "anyhow", "async-trait", @@ -8683,6 +8686,7 @@ dependencies = [ "jupyter-protocol", "serde", "serde_json", + "tokio", "url", "uuid", ] @@ -10233,8 +10237,9 @@ dependencies = [ [[package]] name = "nbformat" -version = "0.10.0" -source = "git+https://github.com/ConradIrwin/runtimed?rev=7130c804216b6914355d15d0b91ea91f6babd734#7130c804216b6914355d15d0b91ea91f6babd734" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89c7229d604d847227002715e1235cd84e81919285d904ccb290a42ecc409348" dependencies = [ "anyhow", "chrono", @@ -14274,7 +14279,7 @@ dependencies = [ "cfg-if", "getrandom 0.2.16", "libc", - "untrusted", + "untrusted 0.9.0", "windows-sys 0.52.0", ] @@ -14455,25 +14460,26 @@ dependencies = [ [[package]] name = "runtimelib" -version = "0.25.0" -source = "git+https://github.com/ConradIrwin/runtimed?rev=7130c804216b6914355d15d0b91ea91f6babd734#7130c804216b6914355d15d0b91ea91f6babd734" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "481b48894073a0096f28cbe9860af01fc1b861e55b3bc96afafc645ee3de62dc" dependencies = [ - "anyhow", "async-dispatcher", "async-std", + "aws-lc-rs", "base64 0.22.1", "bytes 1.10.1", "chrono", "data-encoding", - "dirs 5.0.1", + "dirs 6.0.0", "futures 0.3.31", "glob", "jupyter-protocol", - "ring", "serde", "serde_json", "shellexpand 3.1.1", "smol", + "thiserror 2.0.17", "uuid", "zeromq", ] @@ -14741,7 +14747,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ "ring", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -14753,7 +14759,7 @@ dependencies = [ "aws-lc-rs", "ring", "rustls-pki-types", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -14983,7 +14989,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ "ring", - "untrusted", + "untrusted 0.9.0", ] [[package]] @@ -18576,6 +18582,12 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "untrusted" version = "0.9.0" diff --git a/Cargo.toml b/Cargo.toml index bd9f57049af3a3add6a52a74aa518e0dc7ec0dbe..87d76a3636de6fcf33431ae63b977e3236dcacae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -534,8 +534,8 @@ itertools = "0.14.0" json_dotpath = "1.1" jsonschema = "0.30.0" jsonwebtoken = "9.3" -jupyter-protocol = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734" } -jupyter-websocket-client = { git = "https://github.com/ConradIrwin/runtimed" ,rev = "7130c804216b6914355d15d0b91ea91f6babd734" } +jupyter-protocol = "0.10.0" +jupyter-websocket-client = "0.15.0" libc = "0.2" libsqlite3-sys = { version = "0.30.1", features = ["bundled"] } linkify = "0.10.0" @@ -548,7 +548,7 @@ minidumper = "0.8" moka = { version = "0.12.10", features = ["sync"] } naga = { version = "25.0", features = ["wgsl-in"] } nanoid = "0.4" -nbformat = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734" } +nbformat = "0.15.0" nix = "0.29" num-format = "0.4.4" num-traits = "0.2" @@ -619,8 +619,8 @@ reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "c15662 "stream", ], package = "zed-reqwest", version = "0.12.15-zed" } rsa = "0.9.6" -runtimelib = { git = "https://github.com/ConradIrwin/runtimed", rev = "7130c804216b6914355d15d0b91ea91f6babd734", default-features = false, features = [ - "async-dispatcher-runtime", +runtimelib = { version = "0.30.0", default-features = false, features = [ + "async-dispatcher-runtime", "aws-lc-rs" ] } rust-embed = { version = "8.4", features = ["include-exclude"] } rustc-hash = "2.1.0" diff --git a/crates/repl/src/kernels/mod.rs b/crates/repl/src/kernels/mod.rs index a99c80230a4ec468ac6505346b4c0d017e7576d7..02952010f3c510fdcc12176610ed85d94c1e0e80 100644 --- a/crates/repl/src/kernels/mod.rs +++ b/crates/repl/src/kernels/mod.rs @@ -213,6 +213,13 @@ impl From<&Kernel> for KernelStatus { Kernel::RunningKernel(kernel) => match kernel.execution_state() { ExecutionState::Idle => KernelStatus::Idle, ExecutionState::Busy => KernelStatus::Busy, + ExecutionState::Unknown => KernelStatus::Error, + ExecutionState::Starting => KernelStatus::Starting, + ExecutionState::Restarting => KernelStatus::Restarting, + ExecutionState::Terminating => KernelStatus::ShuttingDown, + ExecutionState::AutoRestarting => KernelStatus::Restarting, + ExecutionState::Dead => KernelStatus::Error, + ExecutionState::Other(_) => KernelStatus::Error, }, Kernel::StartingKernel(_) => KernelStatus::Starting, Kernel::ErroredLaunch(_) => KernelStatus::Error, diff --git a/crates/repl/src/outputs.rs 
b/crates/repl/src/outputs.rs index a192123865ae1632bef66fdc97d3056219c10d30..b99562393a2bbaad051f47bf58cf6c77ea5fb27b 100644 --- a/crates/repl/src/outputs.rs +++ b/crates/repl/src/outputs.rs @@ -476,6 +476,13 @@ impl ExecutionView { self.status = ExecutionStatus::Executing; } ExecutionState::Idle => self.status = ExecutionStatus::Finished, + ExecutionState::Unknown => self.status = ExecutionStatus::Unknown, + ExecutionState::Starting => self.status = ExecutionStatus::ConnectingToKernel, + ExecutionState::Restarting => self.status = ExecutionStatus::Restarting, + ExecutionState::Terminating => self.status = ExecutionStatus::ShuttingDown, + ExecutionState::AutoRestarting => self.status = ExecutionStatus::Restarting, + ExecutionState::Dead => self.status = ExecutionStatus::Shutdown, + ExecutionState::Other(_) => self.status = ExecutionStatus::Unknown, } cx.notify(); return; diff --git a/crates/repl/src/session.rs b/crates/repl/src/session.rs index 674639c402f0bb81437ce4f2ee440f2edaed4a8e..1fa0bfec356c436abd0d4dfde1cf0108d71d304c 100644 --- a/crates/repl/src/session.rs +++ b/crates/repl/src/session.rs @@ -673,6 +673,13 @@ impl Render for Session { Kernel::RunningKernel(kernel) => match kernel.execution_state() { ExecutionState::Idle => Color::Success, ExecutionState::Busy => Color::Modified, + ExecutionState::Unknown => Color::Modified, + ExecutionState::Starting => Color::Modified, + ExecutionState::Restarting => Color::Modified, + ExecutionState::Terminating => Color::Disabled, + ExecutionState::AutoRestarting => Color::Modified, + ExecutionState::Dead => Color::Disabled, + ExecutionState::Other(_) => Color::Modified, }, Kernel::StartingKernel(_) => Color::Modified, Kernel::ErroredLaunch(_) => Color::Error, diff --git a/crates/zed/src/zed/quick_action_bar/repl_menu.rs b/crates/zed/src/zed/quick_action_bar/repl_menu.rs index 5210bb718c0663d2c256f865f0fcabf41bd5708f..1ebdf35bb93824b7881afabe289a07feb93f8135 100644 --- a/crates/zed/src/zed/quick_action_bar/repl_menu.rs +++ b/crates/zed/src/zed/quick_action_bar/repl_menu.rs @@ -388,16 +388,55 @@ fn session_state(session: Entity, cx: &mut App) -> ReplMenuState { } }; - match &session.kernel { - Kernel::Restarting => ReplMenuState { - tooltip: format!("Restarting {}", kernel_name).into(), - icon_is_animating: true, - popover_disabled: true, + let transitional = + |tooltip: SharedString, animating: bool, popover_disabled: bool| ReplMenuState { + tooltip, + icon_is_animating: animating, + popover_disabled, icon_color: Color::Muted, indicator: Some(Indicator::dot().color(Color::Muted)), status: session.kernel.status(), ..fill_fields() - }, + }; + + let starting = || transitional(format!("{} is starting", kernel_name).into(), true, true); + let restarting = || transitional(format!("Restarting {}", kernel_name).into(), true, true); + let shutting_down = || { + transitional( + format!("{} is shutting down", kernel_name).into(), + false, + true, + ) + }; + let auto_restarting = || { + transitional( + format!("Auto-restarting {}", kernel_name).into(), + true, + true, + ) + }; + let unknown = || transitional(format!("{} state unknown", kernel_name).into(), false, true); + let other = |state: &str| { + transitional( + format!("{} state: {}", kernel_name, state).into(), + false, + true, + ) + }; + + let shutdown = || ReplMenuState { + tooltip: "Nothing running".into(), + icon: IconName::ReplNeutral, + icon_color: Color::Default, + icon_is_animating: false, + popover_disabled: false, + indicator: None, + status: KernelStatus::Shutdown, + ..fill_fields() + }; 
+ + match &session.kernel { + Kernel::Restarting => restarting(), Kernel::RunningKernel(kernel) => match &kernel.execution_state() { ExecutionState::Idle => ReplMenuState { tooltip: format!("Run code on {} ({})", kernel_name, kernel_language).into(), @@ -413,16 +452,15 @@ fn session_state(session: Entity, cx: &mut App) -> ReplMenuState { status: session.kernel.status(), ..fill_fields() }, + ExecutionState::Unknown => unknown(), + ExecutionState::Starting => starting(), + ExecutionState::Restarting => restarting(), + ExecutionState::Terminating => shutting_down(), + ExecutionState::AutoRestarting => auto_restarting(), + ExecutionState::Dead => shutdown(), + ExecutionState::Other(state) => other(state), }, - Kernel::StartingKernel(_) => ReplMenuState { - tooltip: format!("{} is starting", kernel_name).into(), - icon_is_animating: true, - popover_disabled: true, - icon_color: Color::Muted, - indicator: Some(Indicator::dot().color(Color::Muted)), - status: session.kernel.status(), - ..fill_fields() - }, + Kernel::StartingKernel(_) => starting(), Kernel::ErroredLaunch(e) => ReplMenuState { tooltip: format!("Error with kernel {}: {}", kernel_name, e).into(), popover_disabled: false, @@ -430,23 +468,7 @@ fn session_state(session: Entity, cx: &mut App) -> ReplMenuState { status: session.kernel.status(), ..fill_fields() }, - Kernel::ShuttingDown => ReplMenuState { - tooltip: format!("{} is shutting down", kernel_name).into(), - popover_disabled: true, - icon_color: Color::Muted, - indicator: Some(Indicator::dot().color(Color::Muted)), - status: session.kernel.status(), - ..fill_fields() - }, - Kernel::Shutdown => ReplMenuState { - tooltip: "Nothing running".into(), - icon: IconName::ReplNeutral, - icon_color: Color::Default, - icon_is_animating: false, - popover_disabled: false, - indicator: None, - status: KernelStatus::Shutdown, - ..fill_fields() - }, + Kernel::ShuttingDown => shutting_down(), + Kernel::Shutdown => shutdown(), } } From c626e770a036320e1c827c90d2182dc1ee541f6e Mon Sep 17 00:00:00 2001 From: Kevin Rubio <105080383+kevinru2023@users.noreply.github.com> Date: Thu, 13 Nov 2025 11:37:22 -0800 Subject: [PATCH 0103/1030] outline_panel: Remove toggle expanded behavior from OpenSelectedEntry (#42214) Fixed outline panel space key behavior by removing duplicate toggle call The `open_selected_entry` function in `outline_panel.rs` was incorrectly calling `self.toggle_expanded(&selected_entry, window, cx)` in addition to its primary logic, causing the space key to both open/close entries AND toggle their expanded state. Removed the redundant `toggle_expanded` call to achieve the intended behavior. Closes #41711 Release Notes: - Fixed issue with the outline panel where pressing space would cause an open selected entry to collapse and cause a closed selected entry to open. 
--------- Co-authored-by: Smit Barmase --- crates/outline_panel/src/outline_panel.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index a7fe9ea679d565b2a8a2a26bf86306b93dd62e78..f44c6438ebd454d343a8ac49b0f6db11c11b469d 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -986,7 +986,6 @@ impl OutlinePanel { if self.filter_editor.focus_handle(cx).is_focused(window) { cx.propagate() } else if let Some(selected_entry) = self.selected_entry().cloned() { - self.toggle_expanded(&selected_entry, window, cx); self.scroll_editor_to_entry(&selected_entry, true, true, window, cx); } } @@ -5845,7 +5844,7 @@ mod tests { } #[gpui::test] - async fn test_multiple_workrees(cx: &mut TestAppContext) { + async fn test_multiple_worktrees(cx: &mut TestAppContext) { init_test(cx); let fs = FakeFs::new(cx.background_executor.clone()); @@ -5951,7 +5950,7 @@ two/ outline_panel.update_in(cx, |outline_panel, window, cx| { outline_panel.select_previous(&SelectPrevious, window, cx); - outline_panel.open_selected_entry(&OpenSelectedEntry, window, cx); + outline_panel.collapse_selected_entry(&CollapseSelectedEntry, window, cx); }); cx.executor() .advance_clock(UPDATE_DEBOUNCE + Duration::from_millis(100)); @@ -5977,7 +5976,7 @@ two/ outline_panel.update_in(cx, |outline_panel, window, cx| { outline_panel.select_next(&SelectNext, window, cx); - outline_panel.open_selected_entry(&OpenSelectedEntry, window, cx); + outline_panel.collapse_selected_entry(&CollapseSelectedEntry, window, cx); }); cx.executor() .advance_clock(UPDATE_DEBOUNCE + Duration::from_millis(100)); @@ -6000,7 +5999,7 @@ two/ <==== selected"#, }); outline_panel.update_in(cx, |outline_panel, window, cx| { - outline_panel.open_selected_entry(&OpenSelectedEntry, window, cx); + outline_panel.expand_selected_entry(&ExpandSelectedEntry, window, cx); }); cx.executor() .advance_clock(UPDATE_DEBOUNCE + Duration::from_millis(100)); @@ -7532,7 +7531,7 @@ outline: fn main()" cx.update(|window, cx| { outline_panel.update(cx, |outline_panel, cx| { - outline_panel.open_selected_entry(&OpenSelectedEntry, window, cx); + outline_panel.collapse_selected_entry(&CollapseSelectedEntry, window, cx); }); }); @@ -7564,7 +7563,7 @@ outline: fn main()" cx.update(|window, cx| { outline_panel.update(cx, |outline_panel, cx| { - outline_panel.open_selected_entry(&OpenSelectedEntry, window, cx); + outline_panel.expand_selected_entry(&ExpandSelectedEntry, window, cx); }); }); From 03fad4b951a28257949db7112a1b06305f8a89ab Mon Sep 17 00:00:00 2001 From: Abul Hossain Khan <140191921+abulgit@users.noreply.github.com> Date: Fri, 14 Nov 2025 01:52:57 +0530 Subject: [PATCH 0104/1030] workspace: Fix pinned tab causing resize loop on adjacent tab (#41884) Closes #41467 My first PR in Zed, any guidance or tips are appreciated. This fixes the flickering/resize loop that occurred on the tab immediately to the right of a pinned tab. Removed the conditional border on the pinned tabs container. The border was a visual indicator to show when unpinned tabs were scrolled, but it wasn't essential and was causing the layout thrashing. 
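The resize loop being avoided here is easier to see in a toy model. The numbers and functions below are made up for illustration and are not Zed code; they only mimic "a decoration toggled by `is_scrollable` feeds a couple of pixels back into the next frame's `max_offset` measurement":

```rust
/// Assumed toy geometry: showing the border changes the measured overflow by ~2px.
fn max_offset(base_overflow: f32, border_shown: bool) -> f32 {
    let border_effect = if border_shown { -2.0 } else { 0.0 };
    (base_overflow + border_effect).max(0.0)
}

fn simulate(threshold: f32, frames: usize) -> Vec<bool> {
    let base_overflow = 1.0; // content overflows by 1px when no border is shown
    let mut border_shown = false;
    let mut history = Vec::new();
    for _ in 0..frames {
        let scrollable = max_offset(base_overflow, border_shown) > threshold;
        history.push(scrollable);
        border_shown = scrollable; // the border is drawn only when scrollable
    }
    history
}

fn main() {
    // Strict `> 0px` check: true, false, true, false, ... -> visible flicker.
    println!("> 0px: {:?}", simulate(0.0, 6));
    // `> 2px` check: stays false -> the layout settles.
    println!("> 2px: {:?}", simulate(2.0, 6));
}
```

With a strict `> 0px` check the flag flips every frame (the flicker from the issue); requiring a couple of pixels of slack, as the patch below does with `max_scroll > px(2.0)`, gives the measurement room to settle.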
Release Notes: - Fixed --------- Co-authored-by: Smit Barmase --- crates/workspace/src/pane.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 1f950460299443187457275185d3a28763b11166..dcfd634dfd37c7e5a078f9cef862acf333c84a2a 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -19,7 +19,7 @@ use futures::{StreamExt, stream::FuturesUnordered}; use gpui::{ Action, AnyElement, App, AsyncWindowContext, ClickEvent, ClipboardItem, Context, Corner, Div, DragMoveEvent, Entity, EntityId, EventEmitter, ExternalPaths, FocusHandle, FocusOutEvent, - Focusable, IsZero, KeyContext, MouseButton, MouseDownEvent, NavigationDirection, Pixels, Point, + Focusable, KeyContext, MouseButton, MouseDownEvent, NavigationDirection, Pixels, Point, PromptLevel, Render, ScrollHandle, Subscription, Task, WeakEntity, WeakFocusHandle, Window, actions, anchored, deferred, prelude::*, }; @@ -3074,6 +3074,7 @@ impl Pane { } let unpinned_tabs = tab_items.split_off(self.pinned_tab_count); let pinned_tabs = tab_items; + TabBar::new("tab_bar") .when( self.display_nav_history_buttons.unwrap_or_default(), @@ -3097,8 +3098,10 @@ impl Pane { .children(pinned_tabs.len().ne(&0).then(|| { let max_scroll = self.tab_bar_scroll_handle.max_offset().width; // We need to check both because offset returns delta values even when the scroll handle is not scrollable - let is_scrollable = !max_scroll.is_zero(); let is_scrolled = self.tab_bar_scroll_handle.offset().x < px(0.); + // Avoid flickering when max_offset is very small (< 2px). + // The border adds 1-2px which can push max_offset back to 0, creating a loop. + let is_scrollable = max_scroll > px(2.0); let has_active_unpinned_tab = self.active_item_index >= self.pinned_tab_count; h_flex() .children(pinned_tabs) From 84f24e4b62c550b6a6b8193b316ace7a677d4afc Mon Sep 17 00:00:00 2001 From: AidanV <84053180+AidanV@users.noreply.github.com> Date: Thu, 13 Nov 2025 12:27:08 -0800 Subject: [PATCH 0105/1030] vim: Add `:w ` command (#41256) Release Notes: - Adds support for `:[range]w {file}` - This writes the lines in the range to the specified - Adds support for `:[range]w` - This replaces the current file with the selected lines --- crates/language/src/buffer.rs | 5 + crates/vim/src/command.rs | 203 ++++++++++++++++++++++++++++++++-- 2 files changed, 197 insertions(+), 11 deletions(-) diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index ea2405d04c32cba45963bc32747ee0b94292ffd9..3b4f24a400403f7e4dbd4f09ee7fb829f4cbbe00 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -2055,6 +2055,11 @@ impl Buffer { } } + /// Marks the buffer as having a conflict regardless of current buffer state. + pub fn set_conflict(&mut self) { + self.has_conflict = true; + } + /// Checks if the buffer and its file have both changed since the buffer /// was last saved or reloaded. 
pub fn has_conflict(&self) -> bool { diff --git a/crates/vim/src/command.rs b/crates/vim/src/command.rs index cba8351e8d36e784c77c20b15ac0dead41f84a13..70d0e93c5db5999878f2bb79c7fc42f16e6861a1 100644 --- a/crates/vim/src/command.rs +++ b/crates/vim/src/command.rs @@ -189,6 +189,7 @@ pub struct VimSet { #[derive(Clone, PartialEq, Action)] #[action(namespace = vim, no_json, no_register)] struct VimSave { + pub range: Option, pub save_intent: Option, pub filename: String, } @@ -324,6 +325,134 @@ pub fn register(editor: &mut Editor, cx: &mut Context) { }); Vim::action(editor, cx, |vim, action: &VimSave, window, cx| { + if let Some(range) = &action.range { + vim.update_editor(cx, |vim, editor, cx| { + let Some(range) = range.buffer_range(vim, editor, window, cx).ok() else { + return; + }; + let Some((line_ending, text, whole_buffer)) = editor.buffer().update(cx, |multi, cx| { + Some(multi.as_singleton()?.update(cx, |buffer, _| { + ( + buffer.line_ending(), + buffer.as_rope().slice_rows(range.start.0..range.end.0 + 1), + range.start.0 == 0 && range.end.0 + 1 >= buffer.row_count(), + ) + })) + }) else { + return; + }; + + let filename = action.filename.clone(); + let filename = if filename.is_empty() { + let Some(file) = editor + .buffer() + .read(cx) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()) + else { + let _ = window.prompt( + gpui::PromptLevel::Warning, + "No file name", + Some("Partial buffer write requires file name."), + &["Cancel"], + cx, + ); + return; + }; + file.path().display(file.path_style(cx)).to_string() + } else { + filename + }; + + if action.filename.is_empty() { + if whole_buffer { + if let Some(workspace) = vim.workspace(window) { + workspace.update(cx, |workspace, cx| { + workspace + .save_active_item( + action.save_intent.unwrap_or(SaveIntent::Save), + window, + cx, + ) + .detach_and_prompt_err("Failed to save", window, cx, |_, _, _| None); + }); + } + return; + } + if Some(SaveIntent::Overwrite) != action.save_intent { + let _ = window.prompt( + gpui::PromptLevel::Warning, + "Use ! to write partial buffer", + Some("Overwriting the current file with selected buffer content requires '!'."), + &["Cancel"], + cx, + ); + return; + } + editor.buffer().update(cx, |multi, cx| { + if let Some(buffer) = multi.as_singleton() { + buffer.update(cx, |buffer, _| buffer.set_conflict()); + } + }); + }; + + editor.project().unwrap().update(cx, |project, cx| { + let worktree = project.visible_worktrees(cx).next().unwrap(); + + worktree.update(cx, |worktree, cx| { + let path_style = worktree.path_style(); + let Some(path) = RelPath::new(Path::new(&filename), path_style).ok() else { + return; + }; + + let rx = (worktree.entry_for_path(&path).is_some() && Some(SaveIntent::Overwrite) != action.save_intent).then(|| { + window.prompt( + gpui::PromptLevel::Warning, + &format!("{path:?} already exists. Do you want to replace it?"), + Some( + "A file or folder with the same name already exists. 
Replacing it will overwrite its current contents.", + ), + &["Replace", "Cancel"], + cx + ) + }); + let filename = filename.clone(); + cx.spawn_in(window, async move |this, cx| { + if let Some(rx) = rx + && Ok(0) != rx.await + { + return; + } + + let _ = this.update_in(cx, |worktree, window, cx| { + let Some(path) = RelPath::new(Path::new(&filename), path_style).ok() else { + return; + }; + worktree + .write_file(path.into_arc(), text.clone(), line_ending, cx) + .detach_and_prompt_err("Failed to write lines", window, cx, |_, _, _| None); + }); + }) + .detach(); + }); + }); + }); + return; + } + if action.filename.is_empty() { + if let Some(workspace) = vim.workspace(window) { + workspace.update(cx, |workspace, cx| { + workspace + .save_active_item( + action.save_intent.unwrap_or(SaveIntent::Save), + window, + cx, + ) + .detach_and_prompt_err("Failed to save", window, cx, |_, _, _| None); + }); + } + return; + } vim.update_editor(cx, |_, editor, cx| { let Some(project) = editor.project().cloned() else { return; @@ -1175,24 +1304,34 @@ fn generate_commands(_: &App) -> Vec { vec![ VimCommand::new( ("w", "rite"), - workspace::Save { + VimSave { save_intent: Some(SaveIntent::Save), + filename: "".into(), + range: None, }, ) - .bang(workspace::Save { + .bang(VimSave { save_intent: Some(SaveIntent::Overwrite), + filename: "".into(), + range: None, }) .filename(|action, filename| { Some( VimSave { save_intent: action .as_any() - .downcast_ref::() + .downcast_ref::() .and_then(|action| action.save_intent), filename, + range: None, } .boxed_clone(), ) + }) + .range(|action, range| { + let mut action: VimSave = action.as_any().downcast_ref::().unwrap().clone(); + action.range.replace(range.clone()); + Some(Box::new(action)) }), VimCommand::new(("e", "dit"), editor::actions::ReloadFile) .bang(editor::actions::ReloadFile) @@ -1692,12 +1831,12 @@ pub fn command_interceptor( let mut positions: Vec<_> = positions.iter().map(|&pos| pos + offset).collect(); positions.splice(0..0, no_args_positions.clone()); let string = format!("{display_string} {string}"); - let action = match cx - .update(|cx| commands(cx).get(cmd_idx)?.parse(&string[1..], &range, cx)) - { - Ok(Some(action)) => action, - _ => continue, - }; + let (range, query) = VimCommand::parse_range(&string[1..]); + let action = + match cx.update(|cx| commands(cx).get(cmd_idx)?.parse(&query, &range, cx)) { + Ok(Some(action)) => action, + _ => continue, + }; results.push(CommandInterceptItem { action, string, @@ -2302,7 +2441,7 @@ impl ShellExec { #[cfg(test)] mod test { - use std::path::Path; + use std::path::{Path, PathBuf}; use crate::{ VimAddon, @@ -2314,7 +2453,7 @@ mod test { use indoc::indoc; use settings::Settings; use util::path; - use workspace::Workspace; + use workspace::{OpenOptions, Workspace}; #[gpui::test] async fn test_command_basics(cx: &mut TestAppContext) { @@ -2619,6 +2758,48 @@ mod test { }); } + #[gpui::test] + async fn test_command_write_range(cx: &mut TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + cx.workspace(|workspace, _, cx| { + assert_active_item(workspace, path!("/root/dir/file.rs"), "", cx); + }); + + cx.set_state( + indoc! 
{" + The quick + brown« fox + jumpsˇ» over + the lazy dog + "}, + Mode::Visual, + ); + + cx.simulate_keystrokes(": w space dir/other.rs"); + cx.simulate_keystrokes("enter"); + + let other = path!("/root/dir/other.rs"); + + let _ = cx + .workspace(|workspace, window, cx| { + workspace.open_abs_path(PathBuf::from(other), OpenOptions::default(), window, cx) + }) + .await; + + cx.workspace(|workspace, _, cx| { + assert_active_item( + workspace, + other, + indoc! {" + brown fox + jumps over + "}, + cx, + ); + }); + } + #[gpui::test] async fn test_command_matching_lines(cx: &mut TestAppContext) { let mut cx = NeovimBackedTestContext::new(cx).await; From e1d295a6b4ee0def3d155337818c6e1a5a52a3f3 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 13 Nov 2025 19:36:16 -0300 Subject: [PATCH 0106/1030] markdown: Improve table display (#42674) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/zed-industries/zed/issues/36330 Closes https://github.com/zed-industries/zed/issues/35460 This PR improves how we display markdown tables by relying on grids rather than flexbox. Given this makes text inside each cell wrap, I ended up removing the `table_overflow_x_scroll` method, as it was 1) used only in the agent panel, and 2) arguably not the best approach as a whole, because as soon as you need to scroll a table, you probably need more elements to make it be really great. One thing I'm slightly unsatisfied with, though, is the border situation. I added a half pixel border to the cell so they all sum up to 1px, but there are cases where there's a tiny space between rows and I don't quite know where that's coming from and how it happens. But I think it's a reasonable improvement overall. Screenshot 2025-11-13 at 7  05@2x Release Notes: - agent: Improved table rendering in the agent panel, ensuring cell text wraps, not going off-screen. 
--- crates/agent_ui/src/acp/thread_view.rs | 1 - crates/markdown/src/markdown.rs | 39 ++++++++++++-------------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 4f3bbe718d3c6265f54f3cc4a949256b81c25572..8a0b282d9b9d5c6bab492391bdabfb1c09131bed 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -5907,7 +5907,6 @@ fn default_markdown_style( syntax: cx.theme().syntax().clone(), selection_background_color: colors.element_selection_background, code_block_overflow_x_scroll: true, - table_overflow_x_scroll: true, heading_level_styles: Some(HeadingLevelStyles { h1: Some(TextStyleRefinement { font_size: Some(rems(1.15).into()), diff --git a/crates/markdown/src/markdown.rs b/crates/markdown/src/markdown.rs index b74416d8483c6b3fbdcc4f89e7bff348b81be272..9a1596092ae0497fe2d45a1d756a34e81d601b7c 100644 --- a/crates/markdown/src/markdown.rs +++ b/crates/markdown/src/markdown.rs @@ -66,7 +66,6 @@ pub struct MarkdownStyle { pub selection_background_color: Hsla, pub heading: StyleRefinement, pub heading_level_styles: Option, - pub table_overflow_x_scroll: bool, pub height_is_multiple_of_line_height: bool, pub prevent_mouse_interaction: bool, } @@ -87,7 +86,6 @@ impl Default for MarkdownStyle { selection_background_color: Default::default(), heading: Default::default(), heading_level_styles: None, - table_overflow_x_scroll: false, height_is_multiple_of_line_height: false, prevent_mouse_interaction: false, } @@ -992,54 +990,54 @@ impl Element for MarkdownElement { MarkdownTag::MetadataBlock(_) => {} MarkdownTag::Table(alignments) => { builder.table_alignments = alignments.clone(); + builder.push_div( div() .id(("table", range.start)) - .flex() + .min_w_0() + .size_full() + .mb_2() .border_1() .border_color(cx.theme().colors().border) .rounded_sm() - .when(self.style.table_overflow_x_scroll, |mut table| { - table.style().restrict_scroll_to_axis = Some(true); - table.overflow_x_scroll() - }), + .overflow_hidden(), range, markdown_end, ); - // This inner `v_flex` is so the table rows will stack vertically without disrupting the `overflow_x_scroll`. - builder.push_div(div().v_flex().flex_grow(), range, markdown_end); } MarkdownTag::TableHead => { + let column_count = builder.table_alignments.len(); + builder.push_div( div() - .flex() - .justify_between() - .border_b_1() - .border_color(cx.theme().colors().border), + .grid() + .grid_cols(column_count as u16) + .bg(cx.theme().colors().title_bar_background), range, markdown_end, ); builder.push_text_style(TextStyleRefinement { - font_weight: Some(FontWeight::BOLD), + font_weight: Some(FontWeight::SEMIBOLD), ..Default::default() }); } MarkdownTag::TableRow => { + let column_count = builder.table_alignments.len(); + builder.push_div( - div().h_flex().justify_between().px_1().py_0p5(), + div().grid().grid_cols(column_count as u16), range, markdown_end, ); } MarkdownTag::TableCell => { - let column_count = builder.table_alignments.len(); - builder.push_div( div() - .flex() + .min_w_0() + .border(px(0.5)) + .border_color(cx.theme().colors().border) .px_1() - .w(relative(1. 
/ column_count as f32)) - .truncate(), + .py_0p5(), range, markdown_end, ); @@ -1154,7 +1152,6 @@ impl Element for MarkdownElement { } } MarkdownTagEnd::Table => { - builder.pop_div(); builder.pop_div(); builder.table_alignments.clear(); } From 3c577ba0195c73f4c388e36493c906ae276ced92 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 13 Nov 2025 23:57:05 +0100 Subject: [PATCH 0107/1030] git_panel: Fix Stage All/Unstage All ignoring partially staged files (#42677) Release Notes: - Fix "Stage All"/"Unstage All" not affecting partially staged files --- crates/project/src/git_store.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index e75bafa2d2bdc3b8854e71d7e1e7c543c131d2ee..497c93833cba612ef25237b30603eb4318c902e0 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -4060,7 +4060,7 @@ impl Repository { } else { Some(entry.repo_path) } - } else if entry.status.staging().has_staged() { + } else if entry.status.staging().is_fully_staged() { None } else { Some(entry.repo_path) @@ -4080,7 +4080,7 @@ impl Repository { } else { Some(entry.repo_path) } - } else if entry.status.staging().has_unstaged() { + } else if entry.status.staging().is_fully_unstaged() { None } else { Some(entry.repo_path) From e80b490ac00bfb5c206d3eb3f5c2dec10e742678 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Thu, 13 Nov 2025 18:13:27 -0500 Subject: [PATCH 0108/1030] client: Clear plan and usage information when signing out (#42678) This PR makes it so we clear the user's plan and usage information when they sign out. Release Notes: - Signing out will now clear the local cache containing the plan and usage information. --- crates/client/src/user.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/crates/client/src/user.rs b/crates/client/src/user.rs index 525a3e960ce8bc2aede4b0665af23ab3c33cac15..37f0f3ec278d28279e8d75f5c0b64c75f69901bb 100644 --- a/crates/client/src/user.rs +++ b/crates/client/src/user.rs @@ -267,6 +267,7 @@ impl UserStore { Status::SignedOut => { current_user_tx.send(None).await.ok(); this.update(cx, |this, cx| { + this.clear_plan_and_usage(); cx.emit(Event::PrivateUserInfoUpdated); cx.notify(); this.clear_contacts() @@ -779,6 +780,12 @@ impl UserStore { cx.notify(); } + pub fn clear_plan_and_usage(&mut self) { + self.plan_info = None; + self.model_request_usage = None; + self.edit_prediction_usage = None; + } + fn update_authenticated_user( &mut self, response: GetAuthenticatedUserResponse, From 15ab96af6bc10e417b42668e48d41c86d24b6888 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Thu, 13 Nov 2025 15:33:33 -0800 Subject: [PATCH 0109/1030] Add windows nightly update banner (#42576) Hopefully this will nudge some of the beta users who were on nightly to get on the official stable builds now that they're out. 
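The once-a-week gating in this patch is a small, reusable pattern: persist an RFC 3339 timestamp and compare it against the current time before showing the banner again. Here is a self-contained sketch of just that logic, using a plain `HashMap` where the real code uses `db::kvp::KEY_VALUE_STORE`, and the `chrono` crate the workspace already depends on:

```rust
use std::collections::HashMap;

use chrono::{DateTime, Duration, Utc};

/// Stand-in for the persistent key-value store.
type KvStore = HashMap<String, String>;

/// Returns true (and records the current time) if the notification was last
/// shown more than a week ago, or never shown at all.
fn should_show_weekly_banner(store: &mut KvStore, key: &str, now: DateTime<Utc>) -> bool {
    if let Some(last_shown) = store
        .get(key)
        .and_then(|raw| DateTime::parse_from_rfc3339(raw).ok())
    {
        if now.fixed_offset() - last_shown < Duration::days(7) {
            return false;
        }
    }
    store.insert(key.to_string(), now.to_rfc3339());
    true
}

fn main() {
    let mut store = KvStore::new();
    let key = "zed_windows_nightly_notif_shown_at";

    let first = Utc::now();
    assert!(should_show_weekly_banner(&mut store, key, first));
    // Seen again a day later: suppressed.
    assert!(!should_show_weekly_banner(&mut store, key, first + Duration::days(1)));
    // Eight days later: shown again.
    assert!(should_show_weekly_banner(&mut store, key, first + Duration::days(8)));
}
```

The real implementation additionally writes the timestamp asynchronously via `cx.spawn` and only runs on the Nightly release channel; those details are omitted here.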
Release Notes: - N/A --- crates/zed/Cargo.toml | 1 + crates/zed/src/zed.rs | 50 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 1ccfe8d1f5f60f825072f5034f629296f1229269..547855ec8f54ec8970a7470088bcc824a0a98148 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -171,6 +171,7 @@ chrono.workspace = true [target.'cfg(target_os = "windows")'.dependencies] windows.workspace = true +chrono.workspace = true [target.'cfg(target_os = "windows")'.build-dependencies] winresource = "0.1" diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index cbd6fba694136c87c64c4d6ca2bfee6d1e1072dd..49f1206caeaaba2d54f5084de025ba7bb70f310d 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -393,6 +393,9 @@ pub fn initialize_workspace( } } + #[cfg(target_os = "windows")] + unstable_version_notification(cx); + let edit_prediction_menu_handle = PopoverMenuHandle::default(); let edit_prediction_button = cx.new(|cx| { edit_prediction_button::EditPredictionButton::new( @@ -471,6 +474,53 @@ pub fn initialize_workspace( .detach(); } +#[cfg(target_os = "windows")] +fn unstable_version_notification(cx: &mut App) { + if !matches!( + ReleaseChannel::try_global(cx), + Some(ReleaseChannel::Nightly) + ) { + return; + } + let db_key = "zed_windows_nightly_notif_shown_at".to_owned(); + let time = chrono::Utc::now(); + if let Some(last_shown) = db::kvp::KEY_VALUE_STORE + .read_kvp(&db_key) + .log_err() + .flatten() + .and_then(|timestamp| chrono::DateTime::parse_from_rfc3339(×tamp).ok()) + { + if time.fixed_offset() - last_shown < chrono::Duration::days(7) { + return; + } + } + cx.spawn(async move |_| { + db::kvp::KEY_VALUE_STORE + .write_kvp(db_key, time.to_rfc3339()) + .await + }) + .detach_and_log_err(cx); + struct WindowsNightly; + show_app_notification(NotificationId::unique::(), cx, |cx| { + cx.new(|cx| { + MessageNotification::new("You're using an unstable version of Zed (Nightly)", cx) + .primary_message("Download Stable") + .primary_icon_color(Color::Accent) + .primary_icon(IconName::Download) + .primary_on_click(|window, cx| { + window.dispatch_action( + zed_actions::OpenBrowser { + url: "https://zed.dev/download".to_string(), + } + .boxed_clone(), + cx, + ); + cx.emit(DismissEvent); + }) + }) + }); +} + #[cfg(any(target_os = "linux", target_os = "freebsd"))] fn initialize_file_watcher(window: &mut Window, cx: &mut Context) { if let Err(e) = fs::fs_watcher::global(|_| {}) { From 6f99eeffa857ba0eafb63efcb0da8e1fb478c719 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Thu, 13 Nov 2025 16:39:35 -0700 Subject: [PATCH 0110/1030] Don't try and delete ./target/. (#42680) Release Notes: - N/A --- script/clear-target-dir-if-larger-than | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/script/clear-target-dir-if-larger-than b/script/clear-target-dir-if-larger-than index e2e3062a1862da673c8f684f4231f53d367e5546..46256159a804aff14422dc7d205e65016bf5a83a 100755 --- a/script/clear-target-dir-if-larger-than +++ b/script/clear-target-dir-if-larger-than @@ -21,5 +21,6 @@ echo "target directory size: ${current_size_gb}gb. 
max size: ${max_size_gb}gb" if [[ ${current_size_gb} -gt ${max_size_gb} ]]; then echo "clearing target directory" - rm -rf target/* target/.* + shopt -s dotglob + rm -rf target/* fi From 3da4d3aac3d884435bdf9f8f9a946a07175400ec Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Thu, 13 Nov 2025 20:06:09 -0500 Subject: [PATCH 0111/1030] settings_ui: Make open project settings action open settings UI (#42669) This PR makes the `OpenProjectSettings` action open the settings UI in project settings mode for the first visible worktree, instead of opening the file. It also adds a `OpenProjectSettingsFile` action that maintains the old behavior. Finally, this PR partially fixes a bug where the settings UI won't load project settings when the settings window is loaded before opening a project/workspace. This happens because the global `app_state` isn't correct in the `Subscription` that refreshes the available setting files to open. The bug is still present in some cases, but it's out of scope for this PR. Release Notes: - settings ui: Project Settings action now opens settings UI instead of a file --- crates/settings_ui/src/settings_ui.rs | 119 ++++++++++++++++++++------ crates/zed/src/zed.rs | 8 +- crates/zed/src/zed/app_menus.rs | 6 +- crates/zed_actions/src/lib.rs | 3 + 4 files changed, 107 insertions(+), 29 deletions(-) diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 6a561086f2d614eb8fc06c5be146b5e02dc05b3d..3911a4e0cd3023524df9e023cfdc670fc7c24a8a 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -15,7 +15,7 @@ use project::{Project, WorktreeId}; use release_channel::ReleaseChannel; use schemars::JsonSchema; use serde::Deserialize; -use settings::{Settings, SettingsContent, SettingsStore}; +use settings::{Settings, SettingsContent, SettingsStore, initial_project_settings_content}; use std::{ any::{Any, TypeId, type_name}, cell::RefCell, @@ -35,7 +35,7 @@ use ui::{ use ui_input::{NumberField, NumberFieldType}; use util::{ResultExt as _, paths::PathStyle, rel_path::RelPath}; use workspace::{AppState, OpenOptions, OpenVisible, Workspace, client_side_decorations}; -use zed_actions::{OpenSettings, OpenSettingsAt}; +use zed_actions::{OpenProjectSettings, OpenSettings, OpenSettingsAt}; use crate::components::{ EnumVariantDropdown, SettingsInputField, font_picker, icon_theme_picker, theme_picker, @@ -379,26 +379,30 @@ pub fn init(cx: &mut App) { init_renderers(cx); cx.observe_new(|workspace: &mut workspace::Workspace, _, _| { - workspace.register_action( - |workspace, OpenSettingsAt { path }: &OpenSettingsAt, window, cx| { + workspace + .register_action( + |workspace, OpenSettingsAt { path }: &OpenSettingsAt, window, cx| { + let window_handle = window + .window_handle() + .downcast::() + .expect("Workspaces are root Windows"); + open_settings_editor(workspace, Some(&path), false, window_handle, cx); + }, + ) + .register_action(|workspace, _: &OpenSettings, window, cx| { let window_handle = window .window_handle() .downcast::() .expect("Workspaces are root Windows"); - open_settings_editor(workspace, Some(&path), window_handle, cx); - }, - ); - }) - .detach(); - - cx.observe_new(|workspace: &mut workspace::Workspace, _, _| { - workspace.register_action(|workspace, _: &OpenSettings, window, cx| { - let window_handle = window - .window_handle() - .downcast::() - .expect("Workspaces are root Windows"); - open_settings_editor(workspace, None, window_handle, cx); - 
}); + open_settings_editor(workspace, None, false, window_handle, cx); + }) + .register_action(|workspace, _: &OpenProjectSettings, window, cx| { + let window_handle = window + .window_handle() + .downcast::() + .expect("Workspaces are root Windows"); + open_settings_editor(workspace, None, true, window_handle, cx); + }); }) .detach(); } @@ -506,6 +510,7 @@ fn init_renderers(cx: &mut App) { pub fn open_settings_editor( _workspace: &mut Workspace, path: Option<&str>, + open_project_settings: bool, workspace_handle: WindowHandle, cx: &mut App, ) { @@ -514,6 +519,8 @@ pub fn open_settings_editor( /// Assumes a settings GUI window is already open fn open_path( path: &str, + // Note: This option is unsupported right now + _open_project_settings: bool, settings_window: &mut SettingsWindow, window: &mut Window, cx: &mut Context, @@ -540,7 +547,17 @@ pub fn open_settings_editor( settings_window.original_window = Some(workspace_handle); window.activate_window(); if let Some(path) = path { - open_path(path, settings_window, window, cx); + open_path(path, open_project_settings, settings_window, window, cx); + } else if open_project_settings { + if let Some(file_index) = settings_window + .files + .iter() + .position(|(file, _)| file.worktree_id().is_some()) + { + settings_window.change_file(file_index, window, cx); + } + + cx.notify(); } }) .ok(); @@ -588,7 +605,17 @@ pub fn open_settings_editor( cx.new(|cx| SettingsWindow::new(Some(workspace_handle), window, cx)); settings_window.update(cx, |settings_window, cx| { if let Some(path) = path { - open_path(&path, settings_window, window, cx); + open_path(&path, open_project_settings, settings_window, window, cx); + } else if open_project_settings { + if let Some(file_index) = settings_window + .files + .iter() + .position(|(file, _)| file.worktree_id().is_some()) + { + settings_window.change_file(file_index, window, cx); + } + + settings_window.fetch_files(window, cx); } }); @@ -1159,7 +1186,7 @@ fn all_language_names(cx: &App) -> Vec { } #[allow(unused)] -#[derive(Clone, PartialEq)] +#[derive(Clone, PartialEq, Debug)] enum SettingsUiFile { User, // Uses all settings. 
Project((WorktreeId, Arc)), // Has a special name, and special set of settings @@ -1283,15 +1310,34 @@ impl SettingsWindow { }) .collect::>() { + cx.observe_release_in(&project, window, |this, _, window, cx| { + this.fetch_files(window, cx) + }) + .detach(); cx.subscribe_in(&project, window, Self::handle_project_event) .detach(); } + + for workspace in app_state + .workspace_store + .read(cx) + .workspaces() + .iter() + .filter_map(|space| space.entity(cx).ok()) + { + cx.observe_release_in(&workspace, window, |this, _, window, cx| { + this.fetch_files(window, cx) + }) + .detach(); + } } else { log::error!("App state doesn't exist when creating a new settings window"); } let this_weak = cx.weak_entity(); cx.observe_new::({ + let this_weak = this_weak.clone(); + move |_, window, cx| { let project = cx.entity(); let Some(window) = window else { @@ -1299,7 +1345,13 @@ impl SettingsWindow { }; this_weak - .update(cx, |_, cx| { + .update(cx, |this, cx| { + this.fetch_files(window, cx); + cx.observe_release_in(&project, window, |_, _, window, cx| { + cx.defer_in(window, |this, window, cx| this.fetch_files(window, cx)); + }) + .detach(); + cx.subscribe_in(&project, window, Self::handle_project_event) .detach(); }) @@ -1308,6 +1360,24 @@ impl SettingsWindow { }) .detach(); + cx.observe_new::(move |_, window, cx| { + let workspace = cx.entity(); + let Some(window) = window else { + return; + }; + + this_weak + .update(cx, |this, cx| { + this.fetch_files(window, cx); + cx.observe_release_in(&workspace, window, |this, _, window, cx| { + this.fetch_files(window, cx) + }) + .detach(); + }) + .ok(); + }) + .detach(); + let title_bar = if !cfg!(target_os = "macos") { Some(cx.new(|cx| PlatformTitleBar::new("settings-title-bar", cx))) } else { @@ -1818,6 +1888,7 @@ impl SettingsWindow { cx.notify(); } + #[track_caller] fn fetch_files(&mut self, window: &mut Window, cx: &mut Context) { self.worktree_root_dirs.clear(); let prev_files = self.files.clone(); @@ -1870,7 +1941,7 @@ impl SettingsWindow { let mut missing_worktrees = Vec::new(); for worktree in all_projects(cx) - .flat_map(|project| project.read(cx).worktrees(cx)) + .flat_map(|project| project.read(cx).visible_worktrees(cx)) .filter(|tree| !self.worktree_root_dirs.contains_key(&tree.read(cx).id())) { let worktree = worktree.read(cx); @@ -3028,7 +3099,7 @@ impl SettingsWindow { tree.create_entry( settings_path.clone(), false, - Some("{\n\n}".as_bytes().to_vec()), + Some(initial_project_settings_content().as_bytes().to_vec()), cx, ) })) diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 49f1206caeaaba2d54f5084de025ba7bb70f310d..617534ee0f8a68f91e8e55d58d0d9550d265c6b2 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -107,8 +107,8 @@ actions!( Minimize, /// Opens the default settings file. OpenDefaultSettings, - /// Opens project-specific settings. - OpenProjectSettings, + /// Opens project-specific settings file. + OpenProjectSettingsFile, /// Opens the project tasks configuration. OpenProjectTasks, /// Opens the tasks panel. 
@@ -1783,7 +1783,7 @@ pub fn open_new_ssh_project_from_project( fn open_project_settings_file( workspace: &mut Workspace, - _: &OpenProjectSettings, + _: &OpenProjectSettingsFile, window: &mut Window, cx: &mut Context, ) { @@ -5067,7 +5067,7 @@ mod tests { .update(cx, |workspace, window, cx| { // Call the exact function that contains the bug eprintln!("About to call open_project_settings_file"); - open_project_settings_file(workspace, &OpenProjectSettings, window, cx); + open_project_settings_file(workspace, &OpenProjectSettingsFile, window, cx); }) .unwrap(); diff --git a/crates/zed/src/zed/app_menus.rs b/crates/zed/src/zed/app_menus.rs index b86889f60acb5f738c93012335ef27b091edc0e2..20a4f8be3b25991b4b22e4fcabd6008c7e502b65 100644 --- a/crates/zed/src/zed/app_menus.rs +++ b/crates/zed/src/zed/app_menus.rs @@ -69,7 +69,11 @@ pub fn app_menus(cx: &mut App) -> Vec { items: vec![ MenuItem::action("Open Settings", zed_actions::OpenSettings), MenuItem::action("Open Settings File", super::OpenSettingsFile), - MenuItem::action("Open Project Settings", super::OpenProjectSettings), + MenuItem::action("Open Project Settings", zed_actions::OpenProjectSettings), + MenuItem::action( + "Open Project Settings File", + super::OpenProjectSettingsFile, + ), MenuItem::action("Open Default Settings", super::OpenDefaultSettings), MenuItem::separator(), MenuItem::action("Open Keymap", zed_actions::OpenKeymap), diff --git a/crates/zed_actions/src/lib.rs b/crates/zed_actions/src/lib.rs index f00b2a7bfd3371359659f310a37ee36ef75b04f5..b9fde1f34402c56becd811a76a1d33da93413c50 100644 --- a/crates/zed_actions/src/lib.rs +++ b/crates/zed_actions/src/lib.rs @@ -43,6 +43,9 @@ actions!( /// Opens the settings JSON file. #[action(deprecated_aliases = ["zed_actions::OpenSettings"])] OpenSettingsFile, + /// Opens project-specific settings. + #[action(deprecated_aliases = ["zed_actions::OpenProjectSettings"])] + OpenProjectSettings, /// Opens the default keymap file. OpenDefaultKeymap, /// Opens the user keymap file. From e2c95a8d842cf1715e3cc08419d4724abf6dc852 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Fri, 14 Nov 2025 07:46:38 +0530 Subject: [PATCH 0112/1030] git: Continue parsing other branches when refs have missing fields (#42523) Closes #34684 Release Notes: - (Let's Git Together) Fixed Git panel not showing any branches when repository contains refs with missing fields --- crates/git/src/repository.rs | 81 +++++++++++++++++++++++++++++------- 1 file changed, 67 insertions(+), 14 deletions(-) diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 2a1cd9478d3079716eda8234c02c8122b9381b38..2c9189962492daa75dba86e9e2ebd247ad85254e 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -2387,22 +2387,37 @@ fn parse_branch_input(input: &str) -> Result> { continue; } let mut fields = line.split('\x00'); - let is_current_branch = fields.next().context("no HEAD")? 
== "*"; - let head_sha: SharedString = fields.next().context("no objectname")?.to_string().into(); - let parent_sha: SharedString = fields.next().context("no parent")?.to_string().into(); - let ref_name = fields.next().context("no refname")?.to_string().into(); - let upstream_name = fields.next().context("no upstream")?.to_string(); - let upstream_tracking = parse_upstream_track(fields.next().context("no upstream:track")?)?; - let commiterdate = fields.next().context("no committerdate")?.parse::()?; - let author_name = fields.next().context("no authorname")?.to_string().into(); - let subject: SharedString = fields - .next() - .context("no contents:subject")? - .to_string() - .into(); + let Some(head) = fields.next() else { + continue; + }; + let Some(head_sha) = fields.next().map(|f| f.to_string().into()) else { + continue; + }; + let Some(parent_sha) = fields.next().map(|f| f.to_string()) else { + continue; + }; + let Some(ref_name) = fields.next().map(|f| f.to_string().into()) else { + continue; + }; + let Some(upstream_name) = fields.next().map(|f| f.to_string()) else { + continue; + }; + let Some(upstream_tracking) = fields.next().and_then(|f| parse_upstream_track(f).ok()) + else { + continue; + }; + let Some(commiterdate) = fields.next().and_then(|f| f.parse::().ok()) else { + continue; + }; + let Some(author_name) = fields.next().map(|f| f.to_string().into()) else { + continue; + }; + let Some(subject) = fields.next().map(|f| f.to_string().into()) else { + continue; + }; branches.push(Branch { - is_head: is_current_branch, + is_head: head == "*", ref_name, most_recent_commit: Some(CommitSummary { sha: head_sha, @@ -2744,6 +2759,44 @@ mod tests { ) } + #[test] + fn test_branches_parsing_containing_refs_with_missing_fields() { + #[allow(clippy::octal_escapes)] + let input = " \090012116c03db04344ab10d50348553aa94f1ea0\0refs/heads/broken\n \0eb0cae33272689bd11030822939dd2701c52f81e\0895951d681e5561478c0acdd6905e8aacdfd2249\0refs/heads/dev\0\0\01762948725\0Zed\0Add feature\n*\0895951d681e5561478c0acdd6905e8aacdfd2249\0\0refs/heads/main\0\0\01762948695\0Zed\0Initial commit\n"; + + let branches = parse_branch_input(input).unwrap(); + assert_eq!(branches.len(), 2); + assert_eq!( + branches, + vec![ + Branch { + is_head: false, + ref_name: "refs/heads/dev".into(), + upstream: None, + most_recent_commit: Some(CommitSummary { + sha: "eb0cae33272689bd11030822939dd2701c52f81e".into(), + subject: "Add feature".into(), + commit_timestamp: 1762948725, + author_name: SharedString::new("Zed"), + has_parent: true, + }) + }, + Branch { + is_head: true, + ref_name: "refs/heads/main".into(), + upstream: None, + most_recent_commit: Some(CommitSummary { + sha: "895951d681e5561478c0acdd6905e8aacdfd2249".into(), + subject: "Initial commit".into(), + commit_timestamp: 1762948695, + author_name: SharedString::new("Zed"), + has_parent: false, + }) + } + ] + ) + } + impl RealGitRepository { /// Force a Git garbage collection on the repository. 
     fn gc(&self) -> BoxFuture<'_, Result<()>> {

From 28a0b826180eb7b772469b1c8e147971a0982e43 Mon Sep 17 00:00:00 2001
From: Xipeng Jin <56369076+xipeng-jin@users.noreply.github.com>
Date: Thu, 13 Nov 2025 21:59:39 -0500
Subject: [PATCH 0113/1030] git_panel: Fix FocusChanges does nothing with no entries (#42553)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Closes #31155

Release Notes:

- Ensure `git_panel::FocusChanges` bypasses the panel’s `Focusable` logic and directly focuses the `ChangesList` handle, so the command works even when the repository has no entries.
- Keep the `Focusable` behavior from commit 45b126a (which routes empty panels to the commit editor) by handling this special-case action rather than regressing the default focus experience.

---
 crates/git_ui/src/git_panel.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs
index 81732732aff257b513faaf5f87ce85596a842dd8..dbed85ba787975e3d55076440ed667e8c9d276fb 100644
--- a/crates/git_ui/src/git_panel.rs
+++ b/crates/git_ui/src/git_panel.rs
@@ -745,7 +745,7 @@ impl GitPanel {
     ) {
         self.select_first_entry_if_none(cx);
 
-        cx.focus_self(window);
+        self.focus_handle.focus(window);
         cx.notify();
     }

From 3de3a369f502ec5f802ea64a1c3f190269870c2d Mon Sep 17 00:00:00 2001
From: Josh Piasecki <138541977+FloppyDisco@users.noreply.github.com>
Date: Thu, 13 Nov 2025 21:33:53 -0600
Subject: [PATCH 0114/1030] editor: Add `diffs_expanded` to key context when diff hunks are expanded (#40617)

Including a new identifier in the Editor key context will allow for some more flexibility when creating keybindings. For example, I would like to be able to set the following:

```json
{
  "context": "Editor",
  "bindings": {
    "pageup": ["editor::MovePageUp", { "center_cursor": true }],
    "pagedown": ["editor::MovePageDown", { "center_cursor": true }],
  }
},
{
  "context": "Editor && diffs_expanded",
  "bindings": {
    "pageup": "editor::GoToPrevHunk",
    "pagedown": "editor::GoToHunk",
  }
},
```

Screenshot 2025-10-18 at 23 51 46

I'm very open to suggestions for the name; that's the best I could come up with. The action *is* called `editor::ExpandAllDiffHunks`, so this seems fitting.
the identifier is included if *any* diff hunk is visible, even if some of them have been closed using `editor::ToggleSelectedDiffHunk` Release Notes: - The Editor key context now includes 'diffs_expanded' when diff changes are visible --- crates/editor/src/editor.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 0d3325ff8212b6ae9dcc5a9c34dd13e4c5324178..8cb3d1abf7d026e7201c60834f355d0f5e56671d 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -2631,6 +2631,10 @@ impl Editor { key_context.add("end_of_input"); } + if self.has_any_expanded_diff_hunks(cx) { + key_context.add("diffs_expanded"); + } + key_context } @@ -19336,6 +19340,16 @@ impl Editor { }) } + fn has_any_expanded_diff_hunks(&self, cx: &App) -> bool { + if self.buffer.read(cx).all_diff_hunks_expanded() { + return true; + } + let ranges = vec![Anchor::min()..Anchor::max()]; + self.buffer + .read(cx) + .has_expanded_diff_hunks_in_ranges(&ranges, cx) + } + fn toggle_diff_hunks_in_ranges( &mut self, ranges: Vec>, From ead4f26b52290f06f006b896f693010d16775c3e Mon Sep 17 00:00:00 2001 From: morgankrey Date: Fri, 14 Nov 2025 00:22:20 -0600 Subject: [PATCH 0115/1030] Update docs for Gemini ZDR (#42697) Closes #ISSUE Release Notes: - N/A --- docs/src/ai/ai-improvement.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/src/ai/ai-improvement.md b/docs/src/ai/ai-improvement.md index 6d7fe8fdb172afa17f494723bb16b1cc69c9336c..857ca2c0efa14e9a7d465f7998310808e0e5237b 100644 --- a/docs/src/ai/ai-improvement.md +++ b/docs/src/ai/ai-improvement.md @@ -20,13 +20,9 @@ When using upstream services through Zed's hosted models, we require assurances | Provider | No Training Guarantee | Zero-Data Retention (ZDR) | | --------- | ------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | | Anthropic | [Yes](https://www.anthropic.com/legal/commercial-terms) | [Yes](https://privacy.anthropic.com/en/articles/8956058-i-have-a-zero-data-retention-agreement-with-anthropic-what-products-does-it-apply-to) | -| Google | [Yes](https://cloud.google.com/terms/service-terms) | **No**, in flight | +| Google | [Yes](https://cloud.google.com/terms/service-terms) | [Yes](https://cloud.google.com/terms/service-terms), see Service Terms sections 17 and 19h | | OpenAI | [Yes](https://openai.com/enterprise-privacy/) | [Yes](https://platform.openai.com/docs/guides/your-data) | -> Zed's use of Gemini models is currently supported via [Google AI Studio](https://ai.google.dev/aistudio), which **_does not_** support ZDR. We're migrating to [Vertex AI](https://cloud.google.com/vertex-ai?hl=en), which **_does_**, and upon completion of that migration will offer ZDR to all users of Zed's hosted Google/Gemini models. - -> If ZDR from upstream model providers is important to you, _please do not use Gemini models at this time_. Your data will never be used for training purposes by any model providers hosted by Zed, however. - When you use your own API keys or external agents, **Zed does not have control over how your data is used by that service provider.** You should reference your agreement with each service provider to understand what terms and conditions apply. 
From 020f518231707b572d0568eb07e0094679d3f918 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Fri, 14 Nov 2025 13:31:44 +0530 Subject: [PATCH 0116/1030] project_panel: Add tests for cross worktree drag-and-drop (#42704) Add missing tests for cross worktree drag-and-drop: - file -> directory - file -> file (drops into parent directory) - whole directory move - multi selection move Release Notes: - N/A --- .../project_panel/src/project_panel_tests.rs | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/crates/project_panel/src/project_panel_tests.rs b/crates/project_panel/src/project_panel_tests.rs index 675ed9c35208917aa80002d9daa7932f92a29495..baf4d2f8a6f529464733a171fd3d726d846d2faa 100644 --- a/crates/project_panel/src/project_panel_tests.rs +++ b/crates/project_panel/src/project_panel_tests.rs @@ -4173,6 +4173,106 @@ async fn test_dragged_selection_resolve_entry(cx: &mut gpui::TestAppContext) { ); } +#[gpui::test] +async fn test_drag_entries_between_different_worktrees(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root_a", + json!({ + "src": { + "lib.rs": "", + "main.rs": "" + }, + "docs": { + "guide.md": "" + }, + "multi": { + "alpha.txt": "", + "beta.txt": "" + } + }), + ) + .await; + fs.insert_tree( + "/root_b", + json!({ + "dst": { + "existing.md": "" + }, + "target.txt": "" + }), + ) + .await; + + let project = Project::test(fs.clone(), ["/root_a".as_ref(), "/root_b".as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + // Case 1: move a file onto a directory in another worktree. + select_path(&panel, "root_a/src/main.rs", cx); + drag_selection_to(&panel, "root_b/dst", false, cx); + assert!( + find_project_entry(&panel, "root_b/dst/main.rs", cx).is_some(), + "Dragged file should appear under destination worktree" + ); + assert_eq!( + find_project_entry(&panel, "root_a/src/main.rs", cx), + None, + "Dragged file should be removed from the source worktree" + ); + + // Case 2: drop a file onto another worktree file so it lands in the parent directory. + select_path(&panel, "root_a/docs/guide.md", cx); + drag_selection_to(&panel, "root_b/dst/existing.md", true, cx); + assert!( + find_project_entry(&panel, "root_b/dst/guide.md", cx).is_some(), + "Dropping onto a file should place the entry beside the target file" + ); + assert_eq!( + find_project_entry(&panel, "root_a/docs/guide.md", cx), + None, + "Source file should be removed after the move" + ); + + // Case 3: move an entire directory. + select_path(&panel, "root_a/src", cx); + drag_selection_to(&panel, "root_b/dst", false, cx); + assert!( + find_project_entry(&panel, "root_b/dst/src/lib.rs", cx).is_some(), + "Dragging a directory should move its nested contents" + ); + assert_eq!( + find_project_entry(&panel, "root_a/src", cx), + None, + "Directory should no longer exist in the source worktree" + ); + + // Case 4: multi-selection drag between worktrees. 
+ panel.update(cx, |panel, _| panel.marked_entries.clear()); + select_path_with_mark(&panel, "root_a/multi/alpha.txt", cx); + select_path_with_mark(&panel, "root_a/multi/beta.txt", cx); + drag_selection_to(&panel, "root_b/dst", false, cx); + assert!( + find_project_entry(&panel, "root_b/dst/alpha.txt", cx).is_some() + && find_project_entry(&panel, "root_b/dst/beta.txt", cx).is_some(), + "All marked entries should move to the destination worktree" + ); + assert_eq!( + find_project_entry(&panel, "root_a/multi/alpha.txt", cx), + None, + "Marked entries should be removed from the origin worktree" + ); + assert_eq!( + find_project_entry(&panel, "root_a/multi/beta.txt", cx), + None, + "Marked entries should be removed from the origin worktree" + ); +} + #[gpui::test] async fn test_autoreveal_and_gitignored_files(cx: &mut gpui::TestAppContext) { init_test_with_editor(cx); @@ -7496,6 +7596,32 @@ fn select_path_with_mark(panel: &Entity, path: &str, cx: &mut Visu }); } +fn drag_selection_to( + panel: &Entity, + target_path: &str, + is_file: bool, + cx: &mut VisualTestContext, +) { + let target_entry = find_project_entry(panel, target_path, cx) + .unwrap_or_else(|| panic!("no entry for target path {target_path:?}")); + + panel.update_in(cx, |panel, window, cx| { + let selection = panel + .state + .selection + .expect("a selection is required before dragging"); + let drag = DraggedSelection { + active_selection: SelectedEntry { + worktree_id: selection.worktree_id, + entry_id: panel.resolve_entry(selection.entry_id), + }, + marked_selections: Arc::from(panel.marked_entries.clone()), + }; + panel.drag_onto(&drag, target_entry, is_file, window, cx); + }); + cx.executor().run_until_parked(); +} + fn find_project_entry( panel: &Entity, path: &str, From b4167caaf1f26a83995126f16bb49f86d2499bfd Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 14 Nov 2025 10:42:32 +0100 Subject: [PATCH 0117/1030] git_panel: Fix StageAll/UnstageAll not working when panel not in focus (#42708) Release Notes: - Fixed "Stage All"/"Unstage All" buttons from not working when git panel is not in focus --- crates/git_ui/src/git_panel.rs | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index dbed85ba787975e3d55076440ed667e8c9d276fb..98220396eec7917a79297ec3fae3eaf192500aa1 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3224,18 +3224,12 @@ impl GitPanel { ) -> Option { self.active_repository.as_ref()?; - let text; - let action; - let tooltip; - if self.total_staged_count() == self.entry_count && self.entry_count > 0 { - text = "Unstage All"; - action = git::UnstageAll.boxed_clone(); - tooltip = "git reset"; - } else { - text = "Stage All"; - action = git::StageAll.boxed_clone(); - tooltip = "git add --all ." 
- } + let (text, action, stage, tooltip) = + if self.total_staged_count() == self.entry_count && self.entry_count > 0 { + ("Unstage All", UnstageAll.boxed_clone(), false, "git reset") + } else { + ("Stage All", StageAll.boxed_clone(), true, "git add --all") + }; let change_string = match self.entry_count { 0 => "No Changes".to_string(), @@ -3273,11 +3267,15 @@ impl GitPanel { &self.focus_handle, )) .disabled(self.entry_count == 0) - .on_click(move |_, _, cx| { - let action = action.boxed_clone(); - cx.defer(move |cx| { - cx.dispatch_action(action.as_ref()); - }) + .on_click({ + let git_panel = cx.weak_entity(); + move |_, _, cx| { + git_panel + .update(cx, |git_panel, cx| { + git_panel.change_all_files_stage(stage, cx); + }) + .ok(); + } }), ), ), From 37523b0007f3dbcd25aad6ddcec995e00020d036 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 14 Nov 2025 13:55:04 +0100 Subject: [PATCH 0118/1030] git_panel: Fix buffer header checkbox not showing partially staged files (#42718) Release Notes: - Fixed buffer header controls (staging checkbox) not showing partially staged files --- crates/git_ui/src/git_panel.rs | 41 +++++++++++---------- crates/project/src/git_store.rs | 11 ++---- crates/project/src/git_store/branch_diff.rs | 6 +-- crates/project/src/project_tests.rs | 6 +-- crates/project_panel/src/project_panel.rs | 6 +-- 5 files changed, 30 insertions(+), 40 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 98220396eec7917a79297ec3fae3eaf192500aa1..6a80f22773f154f32907d2bbadfa91c2eec53108 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -409,17 +409,10 @@ impl GitPanel { } GitStoreEvent::RepositoryUpdated( _, - RepositoryEvent::StatusesChanged { full_scan: true } + RepositoryEvent::StatusesChanged | RepositoryEvent::BranchChanged | RepositoryEvent::MergeHeadsChanged, true, - ) => { - this.schedule_update(window, cx); - } - GitStoreEvent::RepositoryUpdated( - _, - RepositoryEvent::StatusesChanged { full_scan: false }, - true, ) | GitStoreEvent::RepositoryAdded | GitStoreEvent::RepositoryRemoved(_) => { @@ -1224,14 +1217,18 @@ impl GitPanel { let Some(active_repository) = self.active_repository.as_ref() else { return; }; + let repo = active_repository.read(cx); let (stage, repo_paths) = match entry { GitListEntry::Status(status_entry) => { let repo_paths = vec![status_entry.clone()]; - let stage = if active_repository - .read(cx) + let stage = if repo .pending_ops_for_path(&status_entry.repo_path) .map(|ops| ops.staging() || ops.staged()) - .unwrap_or(status_entry.status.staging().has_staged()) + .or_else(|| { + repo.status_for_path(&status_entry.repo_path) + .map(|status| status.status.staging().has_staged()) + }) + .unwrap_or(status_entry.staging.has_staged()) { if let Some(op) = self.bulk_staging.clone() && op.anchor == status_entry.repo_path @@ -1247,13 +1244,12 @@ impl GitPanel { } GitListEntry::Header(section) => { let goal_staged_state = !self.header_state(section.header).selected(); - let repository = active_repository.read(cx); let entries = self .entries .iter() .filter_map(|entry| entry.status_entry()) .filter(|status_entry| { - section.contains(status_entry, repository) + section.contains(status_entry, repo) && status_entry.staging.as_bool() != Some(goal_staged_state) }) .cloned() @@ -3659,13 +3655,18 @@ impl GitPanel { let ix = self.entry_by_path(&repo_path, cx)?; let entry = self.entries.get(ix)?; - let is_staging_or_staged = if let Some(status_entry) = entry.status_entry() { - 
repo.pending_ops_for_path(&repo_path) - .map(|ops| ops.staging() || ops.staged()) - .unwrap_or(status_entry.staging.has_staged()) - } else { - false - }; + let is_staging_or_staged = repo + .pending_ops_for_path(&repo_path) + .map(|ops| ops.staging() || ops.staged()) + .or_else(|| { + repo.status_for_path(&repo_path) + .and_then(|status| status.status.staging().as_bool()) + }) + .or_else(|| { + entry + .status_entry() + .and_then(|entry| entry.staging.as_bool()) + }); let checkbox = Checkbox::new("stage-file", is_staging_or_staged.into()) .disabled(!self.has_write_access(cx)) diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 497c93833cba612ef25237b30603eb4318c902e0..4cac71c6ae3e2eb3f3615821443db7c82e01d810 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -312,10 +312,7 @@ pub enum RepositoryState { #[derive(Clone, Debug, PartialEq, Eq)] pub enum RepositoryEvent { - StatusesChanged { - // TODO could report which statuses changed here - full_scan: bool, - }, + StatusesChanged, MergeHeadsChanged, BranchChanged, StashEntriesChanged, @@ -4989,7 +4986,7 @@ impl Repository { ) .collect::>(); if !edits.is_empty() { - cx.emit(RepositoryEvent::StatusesChanged { full_scan: true }); + cx.emit(RepositoryEvent::StatusesChanged); } self.snapshot.statuses_by_path.edit(edits, ()); if update.is_last_update { @@ -5343,7 +5340,7 @@ impl Repository { } if !changed_path_statuses.is_empty() { - cx.emit(RepositoryEvent::StatusesChanged { full_scan: false }); + cx.emit(RepositoryEvent::StatusesChanged); this.snapshot .statuses_by_path .edit(changed_path_statuses, ()); @@ -5725,7 +5722,7 @@ async fn compute_snapshot( } if statuses_by_path != prev_snapshot.statuses_by_path { - events.push(RepositoryEvent::StatusesChanged { full_scan: true }) + events.push(RepositoryEvent::StatusesChanged) } // Useful when branch is None in detached head state diff --git a/crates/project/src/git_store/branch_diff.rs b/crates/project/src/git_store/branch_diff.rs index 08dbd77a541f01a52dbb9b0d10c5af3a377170f9..5065eafe4e185e65ce144f6d797ac8ccd616d5fa 100644 --- a/crates/project/src/git_store/branch_diff.rs +++ b/crates/project/src/git_store/branch_diff.rs @@ -63,11 +63,7 @@ impl BranchDiff { window, move |this, _git_store, event, _window, cx| match event { GitStoreEvent::ActiveRepositoryChanged(_) - | GitStoreEvent::RepositoryUpdated( - _, - RepositoryEvent::StatusesChanged { full_scan: _ }, - true, - ) + | GitStoreEvent::RepositoryUpdated(_, RepositoryEvent::StatusesChanged, true) | GitStoreEvent::ConflictsUpdated => { cx.emit(BranchDiffEvent::FileListChanged); *this.update_needed.borrow_mut() = (); diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index eb953cb32515ff2fa4a20785ca69d2ad857ae4ae..f3c935f3390305c8c78074439084f20b4d1562b2 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -9575,7 +9575,7 @@ async fn test_ignored_dirs_events(cx: &mut gpui::TestAppContext) { assert_eq!( repository_updates.lock().drain(..).collect::>(), vec![ - RepositoryEvent::StatusesChanged { full_scan: true }, + RepositoryEvent::StatusesChanged, RepositoryEvent::MergeHeadsChanged, ], "Initial worktree scan should produce a repo update event" @@ -9743,8 +9743,8 @@ async fn test_odd_events_for_ignored_dirs( vec![ RepositoryEvent::MergeHeadsChanged, RepositoryEvent::BranchChanged, - RepositoryEvent::StatusesChanged { full_scan: false }, - RepositoryEvent::StatusesChanged { full_scan: false }, + 
RepositoryEvent::StatusesChanged, + RepositoryEvent::StatusesChanged, ], "Initial worktree scan should produce a repo update event" ); diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 8830de5aeffcd26b0f5c342fc1c8d16cdb762b40..5cc2b5dea7ffff4e2f3368705b59d6484affe448 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -505,11 +505,7 @@ impl ProjectPanel { &git_store, window, |this, _, event, window, cx| match event { - GitStoreEvent::RepositoryUpdated( - _, - RepositoryEvent::StatusesChanged { full_scan: _ }, - _, - ) + GitStoreEvent::RepositoryUpdated(_, RepositoryEvent::StatusesChanged, _) | GitStoreEvent::RepositoryAdded | GitStoreEvent::RepositoryRemoved(_) => { this.update_visible_entries(None, false, false, window, cx); From 723f9b1371b0eaef3f349f038bbafe51e38a8eab Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Fri, 14 Nov 2025 15:08:54 +0200 Subject: [PATCH 0119/1030] zeta2: Add minimal prompt for fine-tuned models (#42691) 1. Add `--prompt-format=minimal` that matches single-sentence instructions used in fine-tuned models (specifically, in `1028-*` and `1029-*` models) 2. Use separate configs for agentic context search model and edit prediction model. This is useful when running a fine-tuned EP model, but we still want to run vanilla model for context retrieval. 3. `zeta2-exp` is a symlink to the same-named Baseten deployment. This model can be redeployed and updated without having to update the deployment id. 4. Print scores as a compact table Release Notes: - N/A --------- Co-authored-by: Piotr Osiewicz --- .../cloud_llm_client/src/predict_edits_v3.rs | 3 + .../src/cloud_zeta2_prompt.rs | 127 ++++++++++++++---- crates/zeta2/src/retrieval_search.rs | 13 +- crates/zeta2/src/udiff.rs | 4 +- crates/zeta2/src/zeta2.rs | 28 +++- crates/zeta_cli/src/evaluate.rs | 45 ++++++- crates/zeta_cli/src/main.rs | 2 + crates/zeta_cli/src/predict.rs | 2 +- 8 files changed, 186 insertions(+), 38 deletions(-) diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 98ca0748934d663d204c64544af8a3e83fcd704d..e17a92387e68b5cf6e0993ec91f382f6c14cc765 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -76,6 +76,8 @@ pub enum PromptFormat { OldTextNewText, /// Prompt format intended for use via zeta_cli OnlySnippets, + /// One-sentence instructions used in fine-tuned models + Minimal, } impl PromptFormat { @@ -102,6 +104,7 @@ impl std::fmt::Display for PromptFormat { PromptFormat::OnlySnippets => write!(f, "Only Snippets"), PromptFormat::NumLinesUniDiff => write!(f, "Numbered Lines / Unified Diff"), PromptFormat::OldTextNewText => write!(f, "Old Text / New Text"), + PromptFormat::Minimal => write!(f, "Minimal"), } } } diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index 3f0bd476c50b9e6f92a9f457af15899fcb33b8ed..89c7536f88e1c0bdcce7b67fb2f2704052b5a677 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -86,6 +86,13 @@ const NUMBERED_LINES_INSTRUCTIONS: &str = indoc! {r#" "#}; +const STUDENT_MODEL_INSTRUCTIONS: &str = indoc! {r#" + You are a code completion assistant that analyzes edit history to identify and systematically complete incomplete refactorings or patterns across the entire codebase. 
+ + # Edit History: + + "#}; + const UNIFIED_DIFF_REMINDER: &str = indoc! {" --- @@ -100,6 +107,14 @@ const UNIFIED_DIFF_REMINDER: &str = indoc! {" to uniquely identify it amongst all excerpts of code provided. "}; +const MINIMAL_PROMPT_REMINDER: &str = indoc! {" + --- + + Please analyze the edit history and the files, then provide the unified diff for your predicted edits. + Do not include the cursor marker in your output. + If you're editing multiple files, be sure to reflect filename in the hunk's header. + "}; + const XML_TAGS_INSTRUCTIONS: &str = indoc! {r#" # Instructions @@ -171,6 +186,7 @@ pub fn build_prompt( ], PromptFormat::LabeledSections | PromptFormat::NumLinesUniDiff + | PromptFormat::Minimal | PromptFormat::OldTextNewText => { vec![(request.cursor_point, CURSOR_MARKER)] } @@ -183,28 +199,47 @@ pub fn build_prompt( PromptFormat::NumLinesUniDiff => NUMBERED_LINES_INSTRUCTIONS.to_string(), PromptFormat::OldTextNewText => XML_TAGS_INSTRUCTIONS.to_string(), PromptFormat::OnlySnippets => String::new(), + PromptFormat::Minimal => STUDENT_MODEL_INSTRUCTIONS.to_string(), }; if request.events.is_empty() { prompt.push_str("(No edit history)\n\n"); } else { - prompt.push_str("Here are the latest edits made by the user, from earlier to later.\n\n"); + let edit_preamble = if request.prompt_format == PromptFormat::Minimal { + "The following are the latest edits made by the user, from earlier to later.\n\n" + } else { + "Here are the latest edits made by the user, from earlier to later.\n\n" + }; + prompt.push_str(edit_preamble); push_events(&mut prompt, &request.events); } - prompt.push_str(indoc! {" - # Code Excerpts - - The cursor marker <|user_cursor|> indicates the current user cursor position. - The file is in current state, edits from edit history have been applied. - "}); - - if request.prompt_format == PromptFormat::NumLinesUniDiff { - prompt.push_str(indoc! {" + let excerpts_preamble = match request.prompt_format { + PromptFormat::Minimal => indoc! {" + # Part of the file under the cursor: + + (The cursor marker <|user_cursor|> indicates the current user cursor position. + The file is in current state, edits from edit history has been applied. + We only show part of the file around the cursor. + You can only edit exactly this part of the file. + We prepend line numbers (e.g., `123|`); they are not part of the file.) + "}, + PromptFormat::NumLinesUniDiff => indoc! {" + # Code Excerpts + + The cursor marker <|user_cursor|> indicates the current user cursor position. + The file is in current state, edits from edit history have been applied. We prepend line numbers (e.g., `123|`); they are not part of the file. - "}); - } + "}, + _ => indoc! {" + # Code Excerpts + The cursor marker <|user_cursor|> indicates the current user cursor position. + The file is in current state, edits from edit history have been applied. 
+ "}, + }; + + prompt.push_str(excerpts_preamble); prompt.push('\n'); let mut section_labels = Default::default(); @@ -217,19 +252,38 @@ pub fn build_prompt( anyhow::bail!("PromptFormat::LabeledSections cannot be used with ContextMode::Llm"); } + let include_line_numbers = matches!( + request.prompt_format, + PromptFormat::NumLinesUniDiff | PromptFormat::Minimal + ); for related_file in &request.included_files { - write_codeblock( - &related_file.path, - &related_file.excerpts, - if related_file.path == request.excerpt_path { - &insertions - } else { - &[] - }, - related_file.max_row, - request.prompt_format == PromptFormat::NumLinesUniDiff, - &mut prompt, - ); + if request.prompt_format == PromptFormat::Minimal { + write_codeblock_with_filename( + &related_file.path, + &related_file.excerpts, + if related_file.path == request.excerpt_path { + &insertions + } else { + &[] + }, + related_file.max_row, + include_line_numbers, + &mut prompt, + ); + } else { + write_codeblock( + &related_file.path, + &related_file.excerpts, + if related_file.path == request.excerpt_path { + &insertions + } else { + &[] + }, + related_file.max_row, + include_line_numbers, + &mut prompt, + ); + } } } @@ -240,6 +294,9 @@ pub fn build_prompt( PromptFormat::OldTextNewText => { prompt.push_str(OLD_TEXT_NEW_TEXT_REMINDER); } + PromptFormat::Minimal => { + prompt.push_str(MINIMAL_PROMPT_REMINDER); + } _ => {} } @@ -255,6 +312,27 @@ pub fn write_codeblock<'a>( output: &'a mut String, ) { writeln!(output, "`````{}", DiffPathFmt(path)).unwrap(); + + write_excerpts( + excerpts, + sorted_insertions, + file_line_count, + include_line_numbers, + output, + ); + write!(output, "`````\n\n").unwrap(); +} + +fn write_codeblock_with_filename<'a>( + path: &Path, + excerpts: impl IntoIterator, + sorted_insertions: &[(Point, &str)], + file_line_count: Line, + include_line_numbers: bool, + output: &'a mut String, +) { + writeln!(output, "`````filename={}", DiffPathFmt(path)).unwrap(); + write_excerpts( excerpts, sorted_insertions, @@ -666,6 +744,7 @@ impl<'a> SyntaxBasedPrompt<'a> { PromptFormat::MarkedExcerpt | PromptFormat::OnlySnippets | PromptFormat::OldTextNewText + | PromptFormat::Minimal | PromptFormat::NumLinesUniDiff => { if range.start.0 > 0 && !skipped_last_snippet { output.push_str("…\n"); diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta2/src/retrieval_search.rs index 76501fb1e5c73a22ff8eebc5c29d117d45389beb..d642c2edaa1fbc897b3c74b0b5c8b1fb71227e84 100644 --- a/crates/zeta2/src/retrieval_search.rs +++ b/crates/zeta2/src/retrieval_search.rs @@ -571,10 +571,15 @@ mod tests { expected_output: &str, cx: &mut TestAppContext, ) { - let results = - run_retrieval_searches(vec![query], project.clone(), None, &mut cx.to_async()) - .await - .unwrap(); + let results = run_retrieval_searches( + vec![query], + project.clone(), + #[cfg(feature = "eval-support")] + None, + &mut cx.to_async(), + ) + .await + .unwrap(); let mut results = results.into_iter().collect::>(); results.sort_by_key(|results| { diff --git a/crates/zeta2/src/udiff.rs b/crates/zeta2/src/udiff.rs index d765a64345f839b9314632444d209fa79e9ca5ce..d565fab1b0c2bbf1e27fe183df1c95e27cac871d 100644 --- a/crates/zeta2/src/udiff.rs +++ b/crates/zeta2/src/udiff.rs @@ -49,7 +49,7 @@ pub async fn parse_diff<'a>( DiffEvent::FileEnd { renamed_to } => { let (buffer, _) = edited_buffer .take() - .expect("Got a FileEnd event before an Hunk event"); + .context("Got a FileEnd event before an Hunk event")?; if renamed_to.is_some() { anyhow::bail!("edit predictions cannot 
rename files"); @@ -133,7 +133,7 @@ pub async fn apply_diff<'a>( DiffEvent::FileEnd { renamed_to } => { let (buffer, _) = current_file .take() - .expect("Got a FileEnd event before an Hunk event"); + .context("Got a FileEnd event before an Hunk event")?; if let Some(renamed_to) = renamed_to { project diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index b32e902b71a1b4a20e5f935eea854ecf115ae0f1..7322cb4b6e6882ad2f3597abb505224cc24dbd5e 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -91,13 +91,22 @@ pub const DEFAULT_OPTIONS: ZetaOptions = ZetaOptions { static USE_OLLAMA: LazyLock = LazyLock::new(|| env::var("ZED_ZETA2_OLLAMA").is_ok_and(|var| !var.is_empty())); -static MODEL_ID: LazyLock = LazyLock::new(|| { - env::var("ZED_ZETA2_MODEL").unwrap_or(if *USE_OLLAMA { +static CONTEXT_RETRIEVAL_MODEL_ID: LazyLock = LazyLock::new(|| { + env::var("ZED_ZETA2_CONTEXT_MODEL").unwrap_or(if *USE_OLLAMA { "qwen3-coder:30b".to_string() } else { "yqvev8r3".to_string() }) }); +static EDIT_PREDICTIONS_MODEL_ID: LazyLock = LazyLock::new(|| { + match env::var("ZED_ZETA2_MODEL").as_deref() { + Ok("zeta2-exp") => "4w5n28vw", // Fine-tuned model @ Baseten + Ok(model) => model, + Err(_) if *USE_OLLAMA => "qwen3-coder:30b", + Err(_) => "yqvev8r3", // Vanilla qwen3-coder @ Baseten + } + .to_string() +}); static PREDICT_EDITS_URL: LazyLock> = LazyLock::new(|| { env::var("ZED_PREDICT_EDITS_URL").ok().or_else(|| { if *USE_OLLAMA { @@ -826,7 +835,7 @@ impl Zeta { } else { included_files.push(( active_buffer.clone(), - active_snapshot, + active_snapshot.clone(), excerpt_path.clone(), vec![excerpt_anchor_range], )); @@ -940,7 +949,7 @@ impl Zeta { let (prompt, _) = prompt_result?; let request = open_ai::Request { - model: MODEL_ID.clone(), + model: EDIT_PREDICTIONS_MODEL_ID.clone(), messages: vec![open_ai::RequestMessage::User { content: open_ai::MessageContent::Plain(prompt), }], @@ -1010,8 +1019,17 @@ impl Zeta { let (edited_buffer_snapshot, edits) = match options.prompt_format { PromptFormat::NumLinesUniDiff => { + // TODO: Implement parsing of multi-file diffs crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? } + PromptFormat::Minimal => { + if output_text.contains("--- a/\n+++ b/\nNo edits") { + let edits = vec![]; + (&active_snapshot, edits) + } else { + crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? + } + } PromptFormat::OldTextNewText => { crate::xml_edits::parse_xml_edits(&output_text, get_buffer_from_context) .await? 
@@ -1363,7 +1381,7 @@ impl Zeta { let (tool_schema, tool_description) = TOOL_SCHEMA.clone(); let request = open_ai::Request { - model: MODEL_ID.clone(), + model: CONTEXT_RETRIEVAL_MODEL_ID.clone(), messages: vec![open_ai::RequestMessage::User { content: open_ai::MessageContent::Plain(prompt), }], diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 4f8e984a7de36a96c4e8ad3ac7e5d9e9bfda244b..d255d1a56102d836cc18ce4df10586edad0ca957 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -54,7 +54,6 @@ pub async fn run_evaluate( let tasks = zetas.into_iter().enumerate().map(|(repetition_ix, zeta)| { let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); - let example = example.clone(); let project = project.clone(); @@ -208,7 +207,7 @@ fn write_eval_result( "## Actual edit prediction:\n\n```diff\n{}\n```\n", compare_diffs(&predictions.diff, &example.example.expected_patch) )?; - writeln!(out, "{}", evaluation_result)?; + writeln!(out, "{:#}", evaluation_result)?; anyhow::Ok(()) } @@ -304,6 +303,16 @@ False Negatives : {}", impl std::fmt::Display for EvaluationResult { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if f.alternate() { + self.fmt_table(f) + } else { + self.fmt_markdown(f) + } + } +} + +impl EvaluationResult { + fn fmt_markdown(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!( f, r#" @@ -317,6 +326,38 @@ impl std::fmt::Display for EvaluationResult { self.edit_prediction.to_markdown() ) } + + fn fmt_table(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "### Scores\n")?; + writeln!( + f, + " TP FP FN Precision Recall F1" + )?; + writeln!( + f, + "──────────────────────────────────────────────────────────────────" + )?; + writeln!( + f, + "Context Retrieval {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + self.context.true_positives, + self.context.false_positives, + self.context.false_negatives, + self.context.precision() * 100.0, + self.context.recall() * 100.0, + self.context.f1_score() * 100.0 + )?; + writeln!( + f, + "Edit Prediction {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + self.edit_prediction.true_positives, + self.edit_prediction.false_positives, + self.edit_prediction.false_negatives, + self.edit_prediction.precision() * 100.0, + self.edit_prediction.recall() * 100.0, + self.edit_prediction.f1_score() * 100.0 + ) + } } pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResult { diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index f75b4a7e25020395f24d2638af88d4ba8b390e77..7305d3bb2479452e0b8a54392a0a84cbea1be426 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -175,6 +175,7 @@ enum PromptFormat { #[default] NumberedLines, OldTextNewText, + Minimal, } impl Into for PromptFormat { @@ -185,6 +186,7 @@ impl Into for PromptFormat { Self::OnlySnippets => predict_edits_v3::PromptFormat::OnlySnippets, Self::NumberedLines => predict_edits_v3::PromptFormat::NumLinesUniDiff, Self::OldTextNewText => predict_edits_v3::PromptFormat::OldTextNewText, + Self::Minimal => predict_edits_v3::PromptFormat::Minimal, } } } diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 0cfc7421547b1b00bc552f157ae22b2a8afad541..1f419fd09a87d1270d73bc90fe4b312cbaf0b4a4 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -126,7 +126,7 @@ pub async fn zeta2_predict( example_run_dir = example_run_dir.join(format!("{:03}", 
 repetition_ix));
     }
     fs::create_dir_all(&example_run_dir)?;
-    if LATEST_EXAMPLE_RUN_DIR.exists() {
+    if LATEST_EXAMPLE_RUN_DIR.is_symlink() {
         fs::remove_file(&*LATEST_EXAMPLE_RUN_DIR)?;
     }

From 092071a2f07819b9efe66610074a63121e5fade0 Mon Sep 17 00:00:00 2001
From: Josh Piasecki <138541977+FloppyDisco@users.noreply.github.com>
Date: Fri, 14 Nov 2025 07:47:46 -0600
Subject: [PATCH 0120/1030] git_ui: Allow opening a file with the diff hunks expanded (#40616)

So I just discovered `editor::ExpandAllDiffHunks`. I have been really missing the ability to look at changes NOT in a multi-buffer, so I was very pleased to finally figure out that this is already possible in Zed. I have seen a lot of discussion and issues requesting this feature, so I think it is safe to say I'm not the only one who is not aware it exists. I think the wording in the docs could better communicate what this feature actually is; however, an even better way to show users that this feature exists would be to just put it in front of them.

In the `GitPanel`:

- `menu::Confirm` opens the project diff
- `menu::SecondaryConfirm` opens the selected file in a new editor.

I think it would be REALLY nice if opening a file with `SecondaryConfirm` opened the file with the diff hunks already expanded and scrolled the editor to the first hunk. Ideally I see this being toggle-able in settings, something like `GitPanel - Open File with Diffs Expanded`, so the user could turn this off if they preferred.

I tried creating a new keybinding using the new `actions::Sequence`; it was something like:

```json
{
  "context": "GitPanel && ChangesList",
  "bindings": {
    "cmd-enter": ["actions::Sequence", ["menu::SecondaryConfirm", "editor::ToggleFocus", "editor::ExpandAllDiffHunks", "editor::GoToHunk"]]
  }
}
```

but the action sequence does not work, I think because opening the file is an async task. I have a first attempt here that just tries to get the diff hunks to expand after opening the file. I tried to copy the logic and structure as best I could from the confirm method in file_finder.rs:1432; it compiles, but it does not work, and I do not have enough experience in Rust or in this project to figure out anything further. If anyone is interested in working on this with me, I would enjoy learning more, and I think this would be a nice way to showcase this tool!
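For illustration, here is a rough sketch of the idea described above, and of the approach the diff below ends up taking: kick off the open, and only expand the hunks once the asynchronous open and the diff load have finished. The method names are taken from the change itself; the exact signatures, error handling, and surrounding imports (`Editor`, `editor::actions::ExpandAllDiffHunks`, and so on) are simplified assumptions here, not a drop-in implementation:

```rust
// Sketch, not the exact patch: open the selected file, then expand its diff hunks.
let open_task = workspace.open_path_preview(path, None, false, false, true, window, cx);

cx.spawn_in(window, async move |_, cx| {
    // Opening the file is async, which is why a plain `actions::Sequence` fires too early.
    let item = open_task.await?;
    if let Some(editor) = item.downcast::<Editor>() {
        // Wait for the buffer's diff to load before trying to expand it.
        if let Some(diff_task) =
            editor.update(cx, |editor, _cx| editor.wait_for_diff_to_load())?
        {
            diff_task.await;
        }
        cx.update(|window, cx| {
            editor.update(cx, |editor, cx| {
                editor.expand_all_diff_hunks(&ExpandAllDiffHunks, window, cx);
                // The real change also jumps to the first hunk after expanding.
            })
        })?;
    }
    anyhow::Ok(())
})
.detach();
```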
--- crates/git_ui/src/git_panel.rs | 51 ++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 9 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 6a80f22773f154f32907d2bbadfa91c2eec53108..e2a4a26b320284fed727a7f7e60acf807c39abf0 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -12,7 +12,9 @@ use agent_settings::AgentSettings; use anyhow::Context as _; use askpass::AskPassDelegate; use db::kvp::KEY_VALUE_STORE; -use editor::{Editor, EditorElement, EditorMode, MultiBuffer}; +use editor::{ + Direction, Editor, EditorElement, EditorMode, MultiBuffer, actions::ExpandAllDiffHunks, +}; use futures::StreamExt as _; use git::blame::ParsedCommitMessage; use git::repository::{ @@ -69,7 +71,7 @@ use cloud_llm_client::CompletionIntent; use workspace::{ Workspace, dock::{DockPosition, Panel, PanelEvent}, - notifications::{DetachAndPromptErr, ErrorMessagePrompt, NotificationId}, + notifications::{DetachAndPromptErr, ErrorMessagePrompt, NotificationId, NotifyResultExt}, }; actions!( @@ -792,15 +794,46 @@ impl GitPanel { return None; } - self.workspace + let open_task = self + .workspace .update(cx, |workspace, cx| { - workspace - .open_path_preview(path, None, false, false, true, window, cx) - .detach_and_prompt_err("Failed to open file", window, cx, |e, _, _| { - Some(format!("{e}")) - }); + workspace.open_path_preview(path, None, false, false, true, window, cx) }) - .ok() + .ok()?; + + cx.spawn_in(window, async move |_, mut cx| { + let item = open_task + .await + .notify_async_err(&mut cx) + .ok_or_else(|| anyhow::anyhow!("Failed to open file"))?; + if let Some(active_editor) = item.downcast::() { + if let Some(diff_task) = + active_editor.update(cx, |editor, _cx| editor.wait_for_diff_to_load())? + { + diff_task.await; + } + + cx.update(|window, cx| { + active_editor.update(cx, |editor, cx| { + editor.expand_all_diff_hunks(&ExpandAllDiffHunks, window, cx); + + let snapshot = editor.snapshot(window, cx); + editor.go_to_hunk_before_or_after_position( + &snapshot, + language::Point::new(0, 0), + Direction::Next, + window, + cx, + ); + }) + })?; + } + + anyhow::Ok(()) + }) + .detach(); + + Some(()) }); } From c1096d8b631b414580c0184fc21a9431e931e743 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 14 Nov 2025 12:12:47 -0300 Subject: [PATCH 0121/1030] agent_ui: Render error descriptions as markdown in thread view callouts (#42732) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR makes the description in the callout that display general errors in the agent panel be rendered as markdown. This allow us to pass URLs to these error strings that will be clickable, improving the overall interaction with them. Here's an example: Screenshot 2025-11-14 at 11  43@2x Release Notes: - agent: Improved the interaction with errors by allowing links to be clickable. 
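In rough terms, the pattern introduced below is to turn the error string into a `Markdown` entity and hand the rendered markdown to the callout through a new `description_slot`, instead of passing a plain string. A simplified sketch follows (the names come from the diff below; the exact signatures and the caching of the markdown entity that the real change performs are glossed over):

```rust
// Sketch: render the error as markdown so any URLs in it become clickable links.
let markdown = cx.new(|cx| Markdown::new(error.clone(), None, None, cx));
let description = self
    .render_markdown(markdown, default_markdown_style(false, true, window, cx))
    .into_any_element();

Callout::new()
    .severity(Severity::Error)
    .icon(IconName::XCircle)
    .title("An Error Happened")
    // `description_slot` accepts an arbitrary element, unlike the plain-text `description`.
    .description_slot(description)
```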
--- crates/agent_ui/src/acp/thread_view.rs | 43 +++++++++++++++------ crates/language_model/src/language_model.rs | 2 +- crates/ui/src/components/callout.rs | 39 ++++++++++++++----- 3 files changed, 62 insertions(+), 22 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 8a0b282d9b9d5c6bab492391bdabfb1c09131bed..f4c76b10573dd2e36e797c20230739d6d6a77e46 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -278,6 +278,7 @@ pub struct AcpThreadView { notification_subscriptions: HashMap, Vec>, thread_retry_status: Option, thread_error: Option, + thread_error_markdown: Option>, thread_feedback: ThreadFeedbackState, list_state: ListState, auth_task: Option>, @@ -415,6 +416,7 @@ impl AcpThreadView { list_state: list_state, thread_retry_status: None, thread_error: None, + thread_error_markdown: None, thread_feedback: Default::default(), auth_task: None, expanded_tool_calls: HashSet::default(), @@ -798,6 +800,7 @@ impl AcpThreadView { if should_retry { self.thread_error = None; + self.thread_error_markdown = None; self.reset(window, cx); } } @@ -1327,6 +1330,7 @@ impl AcpThreadView { fn clear_thread_error(&mut self, cx: &mut Context) { self.thread_error = None; + self.thread_error_markdown = None; cx.notify(); } @@ -5344,9 +5348,9 @@ impl AcpThreadView { } } - fn render_thread_error(&self, cx: &mut Context) -> Option
{ + fn render_thread_error(&mut self, window: &mut Window, cx: &mut Context) -> Option
{ let content = match self.thread_error.as_ref()? { - ThreadError::Other(error) => self.render_any_thread_error(error.clone(), cx), + ThreadError::Other(error) => self.render_any_thread_error(error.clone(), window, cx), ThreadError::Refusal => self.render_refusal_error(cx), ThreadError::AuthenticationRequired(error) => { self.render_authentication_required_error(error.clone(), cx) @@ -5431,7 +5435,12 @@ impl AcpThreadView { .dismiss_action(self.dismiss_error_button(cx)) } - fn render_any_thread_error(&self, error: SharedString, cx: &mut Context<'_, Self>) -> Callout { + fn render_any_thread_error( + &mut self, + error: SharedString, + window: &mut Window, + cx: &mut Context<'_, Self>, + ) -> Callout { let can_resume = self .thread() .map_or(false, |thread| thread.read(cx).can_resume(cx)); @@ -5444,11 +5453,24 @@ impl AcpThreadView { supports_burn_mode && thread.completion_mode() == CompletionMode::Normal }); + let markdown = if let Some(markdown) = &self.thread_error_markdown { + markdown.clone() + } else { + let markdown = cx.new(|cx| Markdown::new(error.clone(), None, None, cx)); + self.thread_error_markdown = Some(markdown.clone()); + markdown + }; + + let markdown_style = default_markdown_style(false, true, window, cx); + let description = self + .render_markdown(markdown, markdown_style) + .into_any_element(); + Callout::new() .severity(Severity::Error) - .title("Error") .icon(IconName::XCircle) - .description(error.clone()) + .title("An Error Happened") + .description_slot(description) .actions_slot( h_flex() .gap_0p5() @@ -5467,11 +5489,9 @@ impl AcpThreadView { }) .when(can_resume, |this| { this.child( - Button::new("retry", "Retry") - .icon(IconName::RotateCw) - .icon_position(IconPosition::Start) + IconButton::new("retry", IconName::RotateCw) .icon_size(IconSize::Small) - .label_size(LabelSize::Small) + .tooltip(Tooltip::text("Retry Generation")) .on_click(cx.listener(|this, _, _window, cx| { this.resume_chat(cx); })), @@ -5613,7 +5633,6 @@ impl AcpThreadView { IconButton::new("copy", IconName::Copy) .icon_size(IconSize::Small) - .icon_color(Color::Muted) .tooltip(Tooltip::text("Copy Error Message")) .on_click(move |_, _, cx| { cx.write_to_clipboard(ClipboardItem::new_string(message.clone())) @@ -5623,7 +5642,6 @@ impl AcpThreadView { fn dismiss_error_button(&self, cx: &mut Context) -> impl IntoElement { IconButton::new("dismiss", IconName::Close) .icon_size(IconSize::Small) - .icon_color(Color::Muted) .tooltip(Tooltip::text("Dismiss Error")) .on_click(cx.listener({ move |this, _, _, cx| { @@ -5841,7 +5859,7 @@ impl Render for AcpThreadView { None } }) - .children(self.render_thread_error(cx)) + .children(self.render_thread_error(window, cx)) .when_some( self.new_server_version_available.as_ref().filter(|_| { !has_messages || !matches!(self.thread_state, ThreadState::Ready { .. 
}) @@ -5974,6 +5992,7 @@ fn default_markdown_style( }, link: TextStyleRefinement { background_color: Some(colors.editor_foreground.opacity(0.025)), + color: Some(colors.text_accent), underline: Some(UnderlineStyle { color: Some(colors.text_accent.opacity(0.5)), thickness: px(1.), diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index 4f0eed34331980ec0fd499c6a77e49e94b524fe0..606b0921b29f056ddea22947f08b2686af37d639 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -138,7 +138,7 @@ pub enum LanguageModelCompletionError { provider: LanguageModelProviderName, message: String, }, - #[error("permission error with {provider}'s API: {message}")] + #[error("Permission error with {provider}'s API: {message}")] PermissionError { provider: LanguageModelProviderName, message: String, diff --git a/crates/ui/src/components/callout.rs b/crates/ui/src/components/callout.rs index b5d1d7f25531cc956388da9d4a977bdfd14204b9..4eb849d7f640aca78b70645f5f93301281ca6627 100644 --- a/crates/ui/src/components/callout.rs +++ b/crates/ui/src/components/callout.rs @@ -30,6 +30,7 @@ pub struct Callout { icon: Option, title: Option, description: Option, + description_slot: Option, actions_slot: Option, dismiss_action: Option, line_height: Option, @@ -44,6 +45,7 @@ impl Callout { icon: None, title: None, description: None, + description_slot: None, actions_slot: None, dismiss_action: None, line_height: None, @@ -76,6 +78,13 @@ impl Callout { self } + /// Allows for any element—like markdown elements—to fill the description slot of the callout. + /// This method wins over `description` if both happen to be set. + pub fn description_slot(mut self, description: impl IntoElement) -> Self { + self.description_slot = Some(description.into_any_element()); + self + } + /// Sets the primary call-to-action button. pub fn actions_slot(mut self, action: impl IntoElement) -> Self { self.actions_slot = Some(action.into_any_element()); @@ -179,15 +188,27 @@ impl RenderOnce for Callout { ) }), ) - .when_some(self.description, |this, description| { - this.child( - div() - .w_full() - .flex_1() - .text_ui_sm(cx) - .text_color(cx.theme().colors().text_muted) - .child(description), - ) + .map(|this| { + if let Some(description_slot) = self.description_slot { + this.child( + div() + .w_full() + .flex_1() + .text_ui_sm(cx) + .child(description_slot), + ) + } else if let Some(description) = self.description { + this.child( + div() + .w_full() + .flex_1() + .text_ui_sm(cx) + .text_color(cx.theme().colors().text_muted) + .child(description), + ) + } else { + this + } }), ) } From aaa116d1293a34650cfed12a4733e70ce2c24790 Mon Sep 17 00:00:00 2001 From: Dino Date: Fri, 14 Nov 2025 16:04:54 +0000 Subject: [PATCH 0122/1030] languages: Fix command used for Go subtests (#42734) The command used to run go subtests was breaking if the test contained square brackets, for example: ``` go test . -v -run ^TestInventoryCheckout$/^\[test\]_test_checkout$ ``` After a bit of testing it appears that the best way to actually resolve this in a way supported by `go test` is to wrap this command in quotes. As such, this commit updates the command to, considering the example above: ``` go test . -v -run '^TestInventoryCheckout$/^\[test\]_test_checkout$' ``` We also tested escape the square brackets, using `\\\[` instead of `\[`, but that would lead to a more complex change, so we opted for the simpler solution of wrapping the command in quotes. 
Closes #42347 Release Notes: - Fixed command used to run Go subtests to ensure that escaped characters don't lead to a failure in finding tests to run --- crates/languages/src/go.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/languages/src/go.rs b/crates/languages/src/go.rs index 6c75abf123af62b3f4ab43a6e94d3b040e2f010a..a8699fe9c2dc8cf99ca46a16fe75b1de6eea7ffa 100644 --- a/crates/languages/src/go.rs +++ b/crates/languages/src/go.rs @@ -654,7 +654,7 @@ impl ContextProvider for GoContextProvider { "-v".into(), "-run".into(), format!( - "\\^{}\\$/\\^{}\\$", + "'^{}$/^{}$'", VariableName::Symbol.template_value(), GO_SUBTEST_NAME_TASK_VARIABLE.template_value(), ), From 877272703439b227ae85a668534b2854ccc68455 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Fri, 14 Nov 2025 08:18:16 -0800 Subject: [PATCH 0123/1030] zeta2: Improve zeta old text matching (#42580) This PR improves Zeta2's matching of `old_text`/`new_text` pairs, using similar code to what we use in the edit agent. For right now, we've duplicated the code, as opposed to trying to generalize it. Release Notes: - N/A --------- Co-authored-by: Max Co-authored-by: Michael Co-authored-by: Max Brunsfeld Co-authored-by: Agus --- .../src/retrieval_prompt.rs | 4 +- ...merge_excerpts.rs => assemble_excerpts.rs} | 19 +- crates/zeta2/src/retrieval_search.rs | 14 +- crates/zeta2/src/xml_edits.rs | 397 ++++++++++++------ crates/zeta2/src/zeta2.rs | 27 +- crates/zeta_cli/src/evaluate.rs | 23 +- crates/zeta_cli/src/paths.rs | 10 +- crates/zeta_cli/src/predict.rs | 11 +- 8 files changed, 331 insertions(+), 174 deletions(-) rename crates/zeta2/src/{merge_excerpts.rs => assemble_excerpts.rs} (91%) diff --git a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs index a11c56da41384257b8331a31161224c9e25d0894..e334674ef8004b485608e3864cf1e4e8d4c97cdb 100644 --- a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs @@ -11,11 +11,11 @@ pub fn build_prompt(request: predict_edits_v3::PlanContextRetrievalRequest) -> R let mut prompt = SEARCH_INSTRUCTIONS.to_string(); if !request.events.is_empty() { - writeln!(&mut prompt, "## User Edits\n")?; + writeln!(&mut prompt, "\n## User Edits\n\n")?; push_events(&mut prompt, &request.events); } - writeln!(&mut prompt, "## Cursor context")?; + writeln!(&mut prompt, "## Cursor context\n")?; write_codeblock( &request.excerpt_path, &[Excerpt { diff --git a/crates/zeta2/src/merge_excerpts.rs b/crates/zeta2/src/assemble_excerpts.rs similarity index 91% rename from crates/zeta2/src/merge_excerpts.rs rename to crates/zeta2/src/assemble_excerpts.rs index 846d8034a8c2e88b8552dc8c9d48af6ccdc5efcf..f2a5b5adb1fcffab945cd9bdb88153bc5e494138 100644 --- a/crates/zeta2/src/merge_excerpts.rs +++ b/crates/zeta2/src/assemble_excerpts.rs @@ -3,27 +3,16 @@ use edit_prediction_context::Line; use language::{BufferSnapshot, Point}; use std::ops::Range; -pub fn merge_excerpts( +pub fn assemble_excerpts( buffer: &BufferSnapshot, - sorted_line_ranges: impl IntoIterator>, + merged_line_ranges: impl IntoIterator>, ) -> Vec { let mut output = Vec::new(); - let mut merged_ranges = Vec::>::new(); - - for line_range in sorted_line_ranges { - if let Some(last_line_range) = merged_ranges.last_mut() - && line_range.start <= last_line_range.end - { - last_line_range.end = last_line_range.end.max(line_range.end); - continue; - } - merged_ranges.push(line_range); - } let outline_items = 
buffer.outline_items_as_points_containing(0..buffer.len(), false, None); let mut outline_items = outline_items.into_iter().peekable(); - for range in merged_ranges { + for range in merged_line_ranges { let point_range = Point::new(range.start.0, 0)..Point::new(range.end.0, 0); while let Some(outline_item) = outline_items.peek() { @@ -155,7 +144,7 @@ mod tests { let mut output = String::new(); cloud_zeta2_prompt::write_excerpts( - merge_excerpts(&buffer.snapshot(), ranges).iter(), + assemble_excerpts(&buffer.snapshot(), ranges).iter(), &insertions, Line(buffer.max_point().row), true, diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta2/src/retrieval_search.rs index d642c2edaa1fbc897b3c74b0b5c8b1fb71227e84..fd7364cf23ac66fe9baf2f911868ef251d2d25cf 100644 --- a/crates/zeta2/src/retrieval_search.rs +++ b/crates/zeta2/src/retrieval_search.rs @@ -64,7 +64,7 @@ pub async fn run_retrieval_searches( })? .await?; let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?; - let mut ranges = ranges + let mut ranges: Vec<_> = ranges .into_iter() .map(|range| { snapshot.anchor_before(range.start)..snapshot.anchor_after(range.end) @@ -172,11 +172,11 @@ pub async fn run_retrieval_searches( .await } -fn merge_anchor_ranges(ranges: &mut Vec>, snapshot: &BufferSnapshot) { +pub(crate) fn merge_anchor_ranges(ranges: &mut Vec>, snapshot: &BufferSnapshot) { ranges.sort_unstable_by(|a, b| { a.start .cmp(&b.start, snapshot) - .then(b.end.cmp(&b.end, snapshot)) + .then(b.end.cmp(&a.end, snapshot)) }); let mut index = 1; @@ -187,7 +187,9 @@ fn merge_anchor_ranges(ranges: &mut Vec>, snapshot: &BufferSnapsho .is_ge() { let removed = ranges.remove(index); - ranges[index - 1].end = removed.end; + if removed.end.cmp(&ranges[index - 1].end, snapshot).is_gt() { + ranges[index - 1].end = removed.end; + } } else { index += 1; } @@ -416,7 +418,7 @@ fn expand_to_parent_range( #[cfg(test)] mod tests { use super::*; - use crate::merge_excerpts::merge_excerpts; + use crate::assemble_excerpts::assemble_excerpts; use cloud_zeta2_prompt::write_codeblock; use edit_prediction_context::Line; use gpui::TestAppContext; @@ -602,7 +604,7 @@ mod tests { write_codeblock( &buffer.file().unwrap().full_path(cx), - merge_excerpts(&buffer.snapshot(), excerpts).iter(), + assemble_excerpts(&buffer.snapshot(), excerpts).iter(), &[], Line(buffer.max_point().row), false, diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta2/src/xml_edits.rs index d1eea285d6861dc4cbe6fe65a133453d5b06adaf..468efa8b202141c4cca04459233ea91c5bff9d44 100644 --- a/crates/zeta2/src/xml_edits.rs +++ b/crates/zeta2/src/xml_edits.rs @@ -1,8 +1,6 @@ -use anyhow::{Context as _, Result, anyhow}; -use language::{Anchor, BufferSnapshot, OffsetRangeExt as _, TextBufferSnapshot}; -use std::ops::Range; -use std::path::Path; -use std::sync::Arc; +use anyhow::{Context as _, Result}; +use language::{Anchor, BufferSnapshot, OffsetRangeExt as _, Point}; +use std::{cmp, ops::Range, path::Path, sync::Arc}; pub async fn parse_xml_edits<'a>( input: &'a str, @@ -40,128 +38,76 @@ async fn parse_xml_edits_inner<'a>( while let Some(old_text_tag) = parse_tag(&mut input, "old_text")? 
{ let new_text_tag = parse_tag(&mut input, "new_text")?.context("no new_text tag following old_text")?; - edits.extend(resolve_new_text_old_text_in_buffer( - new_text_tag.body, - old_text_tag.body, - buffer, - context_ranges, - )?); + let match_range = fuzzy_match_in_ranges(old_text_tag.body, buffer, context_ranges)?; + let old_text = buffer + .text_for_range(match_range.clone()) + .collect::(); + let edits_within_hunk = language::text_diff(&old_text, &new_text_tag.body); + edits.extend( + edits_within_hunk + .into_iter() + .map(move |(inner_range, inner_text)| { + ( + buffer.anchor_after(match_range.start + inner_range.start) + ..buffer.anchor_before(match_range.start + inner_range.end), + inner_text, + ) + }), + ); } Ok((buffer, edits)) } -fn resolve_new_text_old_text_in_buffer( - new_text: &str, +fn fuzzy_match_in_ranges( old_text: &str, - buffer: &TextBufferSnapshot, - ranges: &[Range], -) -> Result, Arc)>, anyhow::Error> { - let context_offset = if old_text.is_empty() { - Ok(0) - } else { - let mut offset = None; - for range in ranges { - let range = range.to_offset(buffer); - let text = buffer.text_for_range(range.clone()).collect::(); - for (match_offset, _) in text.match_indices(old_text) { - if let Some(offset) = offset { - let offset_match_point = buffer.offset_to_point(offset); - let second_match_point = buffer.offset_to_point(range.start + match_offset); - anyhow::bail!( - "old_text is not unique enough:\n{}\nFound at {:?} and {:?}", - old_text, - offset_match_point, - second_match_point - ); + buffer: &BufferSnapshot, + context_ranges: &[Range], +) -> Result> { + let mut state = FuzzyMatcher::new(buffer, old_text); + let mut best_match = None; + let mut tie_match_range = None; + + for range in context_ranges { + let best_match_cost = best_match.as_ref().map(|(score, _)| *score); + match (best_match_cost, state.match_range(range.to_offset(buffer))) { + (Some(lowest_cost), Some((new_cost, new_range))) => { + if new_cost == lowest_cost { + tie_match_range = Some(new_range); + } else if new_cost < lowest_cost { + tie_match_range.take(); + best_match = Some((new_cost, new_range)); } - offset = Some(range.start + match_offset); } - } - offset.ok_or_else(|| { - #[cfg(any(debug_assertions, feature = "eval-support"))] - if let Some(closest_match) = closest_old_text_match(buffer, old_text) { - log::info!( - "Closest `old_text` match: {}", - pretty_assertions::StrComparison::new(old_text, &closest_match) - ) + (None, Some(new_match)) => { + best_match = Some(new_match); } - anyhow!("Failed to match old_text:\n{}", old_text) - }) - }?; - - let edits_within_hunk = language::text_diff(&old_text, &new_text); - Ok(edits_within_hunk - .into_iter() - .map(move |(inner_range, inner_text)| { - ( - buffer.anchor_after(context_offset + inner_range.start) - ..buffer.anchor_before(context_offset + inner_range.end), - inner_text, - ) - })) -} - -#[cfg(any(debug_assertions, feature = "eval-support"))] -fn closest_old_text_match(buffer: &TextBufferSnapshot, old_text: &str) -> Option { - let buffer_text = buffer.text(); - let len = old_text.len(); - - if len == 0 || buffer_text.len() < len { - return None; + (None, None) | (Some(_), None) => {} + }; } - let mut min_score = usize::MAX; - let mut min_start = 0; - - let old_text_bytes = old_text.as_bytes(); - let old_alpha_count = old_text_bytes - .iter() - .filter(|&&b| b.is_ascii_alphanumeric()) - .count(); - - let old_line_count = old_text.lines().count(); - - let mut cursor = 0; - - while cursor + len <= buffer_text.len() { - let candidate = 
&buffer_text[cursor..cursor + len]; - let candidate_bytes = candidate.as_bytes(); - - if usize::abs_diff(candidate.lines().count(), old_line_count) > 4 { - cursor += 1; - continue; - } - - let candidate_alpha_count = candidate_bytes - .iter() - .filter(|&&b| b.is_ascii_alphanumeric()) - .count(); - - // If alphanumeric character count differs by more than 30%, skip - if usize::abs_diff(old_alpha_count, candidate_alpha_count) * 10 > old_alpha_count * 3 { - cursor += 1; - continue; - } - - let score = strsim::levenshtein(candidate, old_text); - if score < min_score { - min_score = score; - min_start = cursor; - - if min_score <= len / 10 { - break; - } + if let Some((_, best_match_range)) = best_match { + if let Some(tie_match_range) = tie_match_range { + anyhow::bail!( + "Multiple ambiguous matches:\n{:?}:\n{}\n\n{:?}:\n{}", + best_match_range.clone(), + buffer.text_for_range(best_match_range).collect::(), + tie_match_range.clone(), + buffer.text_for_range(tie_match_range).collect::() + ); } - - cursor += 1; + return Ok(best_match_range); } - if min_score != usize::MAX { - Some(buffer_text[min_start..min_start + len].to_string()) - } else { - None - } + anyhow::bail!( + "Failed to fuzzy match `old_text`:\n{}\nin:\n```\n{}\n```", + old_text, + context_ranges + .iter() + .map(|range| buffer.text_for_range(range.clone()).collect::()) + .collect::>() + .join("```\n```") + ); } struct ParsedTag<'a> { @@ -187,10 +133,218 @@ fn parse_tag<'a>(input: &mut &'a str, tag: &str) -> Result> .with_context(|| format!("no `{close_tag}` tag"))?; let body = &input[closing_bracket_ix + '>'.len_utf8()..end_ix]; let body = body.strip_prefix('\n').unwrap_or(body); + let body = body.strip_suffix('\n').unwrap_or(body); *input = &input[end_ix + close_tag.len()..]; Ok(Some(ParsedTag { attributes, body })) } +const REPLACEMENT_COST: u32 = 1; +const INSERTION_COST: u32 = 3; +const DELETION_COST: u32 = 10; + +/// A fuzzy matcher that can process text chunks incrementally +/// and return the best match found so far at each step. 
+struct FuzzyMatcher<'a> { + snapshot: &'a BufferSnapshot, + query_lines: Vec<&'a str>, + matrix: SearchMatrix, +} + +impl<'a> FuzzyMatcher<'a> { + fn new(snapshot: &'a BufferSnapshot, old_text: &'a str) -> Self { + let query_lines = old_text.lines().collect(); + Self { + snapshot, + query_lines, + matrix: SearchMatrix::new(0), + } + } + + fn match_range(&mut self, range: Range) -> Option<(u32, Range)> { + let point_range = range.to_point(&self.snapshot); + let buffer_line_count = (point_range.end.row - point_range.start.row + 1) as usize; + + self.matrix + .reset(self.query_lines.len() + 1, buffer_line_count + 1); + let query_line_count = self.query_lines.len(); + + for row in 0..query_line_count { + let query_line = self.query_lines[row].trim(); + let leading_deletion_cost = (row + 1) as u32 * DELETION_COST; + + self.matrix.set( + row + 1, + 0, + SearchState::new(leading_deletion_cost, SearchDirection::Up), + ); + + let mut buffer_lines = self.snapshot.text_for_range(range.clone()).lines(); + + let mut col = 0; + while let Some(buffer_line) = buffer_lines.next() { + let buffer_line = buffer_line.trim(); + let up = SearchState::new( + self.matrix + .get(row, col + 1) + .cost + .saturating_add(DELETION_COST), + SearchDirection::Up, + ); + let left = SearchState::new( + self.matrix + .get(row + 1, col) + .cost + .saturating_add(INSERTION_COST), + SearchDirection::Left, + ); + let diagonal = SearchState::new( + if query_line == buffer_line { + self.matrix.get(row, col).cost + } else if fuzzy_eq(query_line, buffer_line) { + self.matrix.get(row, col).cost + REPLACEMENT_COST + } else { + self.matrix + .get(row, col) + .cost + .saturating_add(DELETION_COST + INSERTION_COST) + }, + SearchDirection::Diagonal, + ); + self.matrix + .set(row + 1, col + 1, up.min(left).min(diagonal)); + col += 1; + } + } + + // Find all matches with the best cost + let mut best_cost = u32::MAX; + let mut matches_with_best_cost = Vec::new(); + + for col in 1..=buffer_line_count { + let cost = self.matrix.get(query_line_count, col).cost; + if cost < best_cost { + best_cost = cost; + matches_with_best_cost.clear(); + matches_with_best_cost.push(col as u32); + } else if cost == best_cost { + matches_with_best_cost.push(col as u32); + } + } + + // Find ranges for the matches + for &match_end_col in &matches_with_best_cost { + let mut matched_lines = 0; + let mut query_row = query_line_count; + let mut match_start_col = match_end_col; + while query_row > 0 && match_start_col > 0 { + let current = self.matrix.get(query_row, match_start_col as usize); + match current.direction { + SearchDirection::Diagonal => { + query_row -= 1; + match_start_col -= 1; + matched_lines += 1; + } + SearchDirection::Up => { + query_row -= 1; + } + SearchDirection::Left => { + match_start_col -= 1; + } + } + } + + let buffer_row_start = match_start_col + point_range.start.row; + let buffer_row_end = match_end_col + point_range.start.row; + + let matched_buffer_row_count = buffer_row_end - buffer_row_start; + let matched_ratio = matched_lines as f32 + / (matched_buffer_row_count as f32).max(query_line_count as f32); + if matched_ratio >= 0.8 { + let buffer_start_ix = self + .snapshot + .point_to_offset(Point::new(buffer_row_start, 0)); + let buffer_end_ix = self.snapshot.point_to_offset(Point::new( + buffer_row_end - 1, + self.snapshot.line_len(buffer_row_end - 1), + )); + return Some((best_cost, buffer_start_ix..buffer_end_ix)); + } + } + + None + } +} + +fn fuzzy_eq(left: &str, right: &str) -> bool { + const THRESHOLD: f64 = 0.8; + + let 
min_levenshtein = left.len().abs_diff(right.len()); + let min_normalized_levenshtein = + 1. - (min_levenshtein as f64 / cmp::max(left.len(), right.len()) as f64); + if min_normalized_levenshtein < THRESHOLD { + return false; + } + + strsim::normalized_levenshtein(left, right) >= THRESHOLD +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +enum SearchDirection { + Up, + Left, + Diagonal, +} + +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +struct SearchState { + cost: u32, + direction: SearchDirection, +} + +impl SearchState { + fn new(cost: u32, direction: SearchDirection) -> Self { + Self { cost, direction } + } +} + +struct SearchMatrix { + cols: usize, + rows: usize, + data: Vec, +} + +impl SearchMatrix { + fn new(cols: usize) -> Self { + SearchMatrix { + cols, + rows: 0, + data: Vec::new(), + } + } + + fn reset(&mut self, rows: usize, cols: usize) { + self.rows = rows; + self.cols = cols; + self.data + .fill(SearchState::new(0, SearchDirection::Diagonal)); + self.data.resize( + self.rows * self.cols, + SearchState::new(0, SearchDirection::Diagonal), + ); + } + + fn get(&self, row: usize, col: usize) -> SearchState { + debug_assert!(row < self.rows); + debug_assert!(col < self.cols); + self.data[row * self.cols + col] + } + + fn set(&mut self, row: usize, col: usize, state: SearchState) { + debug_assert!(row < self.rows && col < self.cols); + self.data[row * self.cols + col] = state; + } +} + #[cfg(test)] mod tests { use super::*; @@ -212,7 +366,7 @@ mod tests { "# }; let parsed = parse_tag(&mut input, "tag").unwrap().unwrap(); assert_eq!(parsed.attributes, "attr=\"foo\""); - assert_eq!(parsed.body, "tag value\n"); + assert_eq!(parsed.body, "tag value"); assert_eq!(input, "\n"); } @@ -224,7 +378,9 @@ mod tests { one two three four five six seven eight nine ten eleven twelve - "# }; + thirteen fourteen fifteen + sixteen seventeen eighteen + "#}; fs.insert_tree( path!("/root"), @@ -246,16 +402,17 @@ mod tests { let edits = indoc! {r#" - five six seven eight + nine ten eleven twelve - five SIX seven eight! + nine TEN eleven twelve! 
"#}; + let included_ranges = [(buffer_snapshot.anchor_before(Point::new(1, 0))..Anchor::MAX)]; let (buffer, edits) = parse_xml_edits(edits, |_path| { - Some((&buffer_snapshot, &[(Anchor::MIN..Anchor::MAX)] as &[_])) + Some((&buffer_snapshot, included_ranges.as_slice())) }) .await .unwrap(); @@ -267,8 +424,8 @@ mod tests { assert_eq!( edits, &[ - (Point::new(1, 5)..Point::new(1, 8), "SIX".into()), - (Point::new(1, 20)..Point::new(1, 20), "!".into()) + (Point::new(2, 5)..Point::new(2, 8), "TEN".into()), + (Point::new(2, 22)..Point::new(2, 22), "!".into()) ] ); } diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 7322cb4b6e6882ad2f3597abb505224cc24dbd5e..1521fbd9291c7a69cc56152d193734f41cf0451e 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -42,14 +42,14 @@ use util::rel_path::RelPathBuf; use util::{LogErrorFuture, TryFutureExt}; use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; -pub mod merge_excerpts; +pub mod assemble_excerpts; mod prediction; mod provider; pub mod retrieval_search; pub mod udiff; mod xml_edits; -use crate::merge_excerpts::merge_excerpts; +use crate::assemble_excerpts::assemble_excerpts; use crate::prediction::EditPrediction; pub use crate::prediction::EditPredictionId; pub use provider::ZetaEditPredictionProvider; @@ -820,16 +820,8 @@ impl Zeta { }) { let (_, buffer, _, ranges) = &mut included_files[buffer_ix]; - let range_ix = ranges - .binary_search_by(|probe| { - probe - .start - .cmp(&excerpt_anchor_range.start, buffer) - .then(excerpt_anchor_range.end.cmp(&probe.end, buffer)) - }) - .unwrap_or_else(|ix| ix); - - ranges.insert(range_ix, excerpt_anchor_range); + ranges.push(excerpt_anchor_range); + retrieval_search::merge_anchor_ranges(ranges, buffer); let last_ix = included_files.len() - 1; included_files.swap(buffer_ix, last_ix); } else { @@ -844,13 +836,14 @@ impl Zeta { let included_files = included_files .iter() .map(|(_, snapshot, path, ranges)| { - let excerpts = merge_excerpts( - &snapshot, - ranges.iter().map(|range| { + let ranges = ranges + .iter() + .map(|range| { let point_range = range.to_point(&snapshot); Line(point_range.start.row)..Line(point_range.end.row) - }), - ); + }) + .collect::>(); + let excerpts = assemble_excerpts(&snapshot, ranges); predict_edits_v3::IncludedFile { path: path.clone(), max_row: Line(snapshot.max_point().row), diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index d255d1a56102d836cc18ce4df10586edad0ca957..b0b3820362889051e3e5c0eef03ef10c7f0d6fa8 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -84,7 +84,7 @@ pub async fn run_evaluate( { write_aggregated_scores(&mut output_file, &all_results).log_err(); }; - print_run_data_dir(args.repetitions == 1); + print_run_data_dir(args.repetitions == 1, std::io::stdout().is_terminal()); } fn write_aggregated_scores( @@ -103,8 +103,7 @@ fn write_aggregated_scores( } failed_count += 1; - let err = err - .to_string() + let err = format!("{err:?}") .replace("", "\n```"); writeln!( @@ -173,6 +172,7 @@ pub async fn run_evaluate_one( &predict_result, &evaluation_result, &mut std::io::stdout(), + std::io::stdout().is_terminal(), )?; } @@ -184,6 +184,7 @@ pub async fn run_evaluate_one( &predict_result, &evaluation_result, &mut results_file, + false, ) .log_err(); } @@ -196,16 +197,25 @@ fn write_eval_result( predictions: &PredictionDetails, evaluation_result: &EvaluationResult, out: &mut impl Write, + use_color: bool, ) -> Result<()> { 
writeln!( out, "## Expected edit prediction:\n\n```diff\n{}\n```\n", - compare_diffs(&example.example.expected_patch, &predictions.diff) + compare_diffs( + &example.example.expected_patch, + &predictions.diff, + use_color + ) )?; writeln!( out, "## Actual edit prediction:\n\n```diff\n{}\n```\n", - compare_diffs(&predictions.diff, &example.example.expected_patch) + compare_diffs( + &predictions.diff, + &example.example.expected_patch, + use_color + ) )?; writeln!(out, "{:#}", evaluation_result)?; @@ -434,8 +444,7 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResul /// Return annotated `patch_a` so that: /// Additions and deletions that are not present in `patch_b` will be highlighted in red. /// Additions and deletions that are present in `patch_b` will be highlighted in green. -pub fn compare_diffs(patch_a: &str, patch_b: &str) -> String { - let use_color = std::io::stdout().is_terminal(); +pub fn compare_diffs(patch_a: &str, patch_b: &str, use_color: bool) -> String { let green = if use_color { "\x1b[32m✓ " } else { "" }; let red = if use_color { "\x1b[31m✗ " } else { "" }; let neutral = if use_color { " " } else { "" }; diff --git a/crates/zeta_cli/src/paths.rs b/crates/zeta_cli/src/paths.rs index 15c4941f3dacce0b9a06c15daee431014b12944d..3cc2beec5bd50380b9eef8b502dcba0ccba32772 100644 --- a/crates/zeta_cli/src/paths.rs +++ b/crates/zeta_cli/src/paths.rs @@ -13,7 +13,7 @@ pub static RUN_DIR: LazyLock = LazyLock::new(|| { pub static LATEST_EXAMPLE_RUN_DIR: LazyLock = LazyLock::new(|| TARGET_ZETA_DIR.join("latest")); -pub fn print_run_data_dir(deep: bool) { +pub fn print_run_data_dir(deep: bool, use_color: bool) { println!("\n## Run Data\n"); let mut files = Vec::new(); @@ -25,18 +25,22 @@ pub fn print_run_data_dir(deep: bool) { let path = file.unwrap().path(); let path = path.strip_prefix(¤t_dir).unwrap_or(&path); files.push(format!( - "- {}/\x1b[34m{}\x1b[0m", + "- {}/{}{}{}", path.parent().unwrap().display(), + if use_color { "\x1b[34m" } else { "" }, path.file_name().unwrap().display(), + if use_color { "\x1b[0m" } else { "" }, )); } } else { let path = file.path(); let path = path.strip_prefix(¤t_dir).unwrap_or(&path); files.push(format!( - "- {}/\x1b[34m{}\x1b[0m", + "- {}/{}{}{}", path.parent().unwrap().display(), + if use_color { "\x1b[34m" } else { "" }, path.file_name().unwrap().display(), + if use_color { "\x1b[0m" } else { "" } )); } } diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 1f419fd09a87d1270d73bc90fe4b312cbaf0b4a4..0618cf38bafd15a6b8a50b03cb745c9d3365cbf8 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -13,7 +13,7 @@ use language::{Anchor, Buffer, Point}; use project::Project; use serde::Deserialize; use std::fs; -use std::io::Write; +use std::io::{IsTerminal, Write}; use std::ops::Range; use std::path::PathBuf; use std::sync::Arc; @@ -98,7 +98,7 @@ pub async fn run_zeta2_predict( .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); - print_run_data_dir(true); + print_run_data_dir(true, std::io::stdout().is_terminal()); } pub async fn zeta2_predict( @@ -289,8 +289,11 @@ pub async fn zeta2_predict( let new_text = prediction .buffer .update(cx, |buffer, cx| { - buffer.edit(prediction.edits.iter().cloned(), None, cx); - buffer.text() + let branch = buffer.branch(cx); + branch.update(cx, |branch, cx| { + branch.edit(prediction.edits.iter().cloned(), None, cx); + branch.text() + }) }) .unwrap(); language::unified_diff(&old_text, &new_text) From 
524b97d729a1309ac7ab0b1d586c592571b574c8 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Fri, 14 Nov 2025 21:56:48 +0530 Subject: [PATCH 0124/1030] project_panel: Fix autoscroll and filename editor focus race condition (#42739) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/zed-industries/zed/issues/40867 Since the recent changes in [https://github.com/zed-industries/zed/pull/38881](https://github.com/zed-industries/zed/pull/38881), the filename editor is sometimes not focused after duplicating a file or creating a new one, and similarly, autoscroll sometimes didn’t work. It turns out that multiple calls to `update_visible_entries_task` cancel the existing task, which might contain information about whether we need to focus the filename editor and autoscroll after the task ends. To fix this, we now carry that information forward to the next task that overwrites it, so that when the latest task ends, we can use that information to do the right thing. Release Notes: - Fixed an issue in the Project Panel where duplicating or creating an entry sometimes didn’t focus the rename editing field. --- crates/project_panel/src/project_panel.rs | 43 +++++++++++++++---- .../project_panel/src/project_panel_tests.rs | 11 ++++- 2 files changed, 44 insertions(+), 10 deletions(-) diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 5cc2b5dea7ffff4e2f3368705b59d6484affe448..212b301a788c96754137c83f98ef7bfda3560a26 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -136,10 +136,26 @@ pub struct ProjectPanel { previous_drag_position: Option>, sticky_items_count: usize, last_reported_update: Instant, - update_visible_entries_task: Task<()>, + update_visible_entries_task: UpdateVisibleEntriesTask, state: State, } +struct UpdateVisibleEntriesTask { + _visible_entries_task: Task<()>, + focus_filename_editor: bool, + autoscroll: bool, +} + +impl Default for UpdateVisibleEntriesTask { + fn default() -> Self { + UpdateVisibleEntriesTask { + _visible_entries_task: Task::ready(()), + focus_filename_editor: Default::default(), + autoscroll: Default::default(), + } + } +} + enum DragTarget { /// Dragging on an entry Entry { @@ -733,7 +749,7 @@ impl ProjectPanel { expanded_dir_ids: Default::default(), unfolded_dir_ids: Default::default(), }, - update_visible_entries_task: Task::ready(()), + update_visible_entries_task: Default::default(), }; this.update_visible_entries(None, false, false, window, cx); @@ -1823,6 +1839,9 @@ impl ProjectPanel { depth: 0, validation_state: ValidationState::None, }); + self.filename_editor.update(cx, |editor, cx| { + editor.clear(window, cx); + }); self.update_visible_entries(Some((worktree_id, NEW_ENTRY_ID)), true, true, window, cx); cx.notify(); } @@ -1889,9 +1908,8 @@ impl ProjectPanel { editor.change_selections(Default::default(), window, cx, |s| { s.select_ranges([selection]) }); - window.focus(&editor.focus_handle(cx)); }); - self.update_visible_entries(None, false, true, window, cx); + self.update_visible_entries(None, true, true, window, cx); cx.notify(); } } @@ -3229,7 +3247,8 @@ impl ProjectPanel { .collect(); let hide_root = settings.hide_root && visible_worktrees.len() == 1; let hide_hidden = settings.hide_hidden; - self.update_visible_entries_task = cx.spawn_in(window, async move |this, cx| { + + let visible_entries_task = cx.spawn_in(window, async move |this, cx| { let new_state = cx 
.background_spawn(async move { for worktree_snapshot in visible_worktrees { @@ -3475,19 +3494,27 @@ impl ProjectPanel { .sum::(), ) } - if focus_filename_editor { + if this.update_visible_entries_task.focus_filename_editor { + this.update_visible_entries_task.focus_filename_editor = false; this.filename_editor.update(cx, |editor, cx| { - editor.clear(window, cx); window.focus(&editor.focus_handle(cx)); }); } - if autoscroll { + if this.update_visible_entries_task.autoscroll { + this.update_visible_entries_task.autoscroll = false; this.autoscroll(cx); } cx.notify(); }) .ok(); }); + + self.update_visible_entries_task = UpdateVisibleEntriesTask { + _visible_entries_task: visible_entries_task, + focus_filename_editor: focus_filename_editor + || self.update_visible_entries_task.focus_filename_editor, + autoscroll: autoscroll || self.update_visible_entries_task.autoscroll, + }; } fn expand_entry( diff --git a/crates/project_panel/src/project_panel_tests.rs b/crates/project_panel/src/project_panel_tests.rs index baf4d2f8a6f529464733a171fd3d726d846d2faa..eb4c6280ccfb76134767f1de70112106a0594dc6 100644 --- a/crates/project_panel/src/project_panel_tests.rs +++ b/crates/project_panel/src/project_panel_tests.rs @@ -807,6 +807,7 @@ async fn test_editing_files(cx: &mut gpui::TestAppContext) { panel.update_in(cx, |panel, window, cx| { panel.rename(&Default::default(), window, cx) }); + cx.run_until_parked(); assert_eq!( visible_entries_as_strings(&panel, 0..10, cx), &[ @@ -1200,7 +1201,9 @@ async fn test_copy_paste(cx: &mut gpui::TestAppContext) { panel.paste(&Default::default(), window, cx); }); cx.executor().run_until_parked(); - + panel.update_in(cx, |panel, window, cx| { + assert!(panel.filename_editor.read(cx).is_focused(window)); + }); assert_eq!( visible_entries_as_strings(&panel, 0..50, cx), &[ @@ -1239,7 +1242,9 @@ async fn test_copy_paste(cx: &mut gpui::TestAppContext) { panel.paste(&Default::default(), window, cx); }); cx.executor().run_until_parked(); - + panel.update_in(cx, |panel, window, cx| { + assert!(panel.filename_editor.read(cx).is_focused(window)); + }); assert_eq!( visible_entries_as_strings(&panel, 0..50, cx), &[ @@ -2398,6 +2403,7 @@ async fn test_create_duplicate_items(cx: &mut gpui::TestAppContext) { ], ); panel.update_in(cx, |panel, window, cx| panel.rename(&Rename, window, cx)); + cx.executor().run_until_parked(); panel.update_in(cx, |panel, window, cx| { assert!(panel.filename_editor.read(cx).is_focused(window)); }); @@ -2603,6 +2609,7 @@ async fn test_create_duplicate_items_and_check_history(cx: &mut gpui::TestAppCon ], ); panel.update_in(cx, |panel, window, cx| panel.rename(&Rename, window, cx)); + cx.executor().run_until_parked(); panel.update_in(cx, |panel, window, cx| { assert!(panel.filename_editor.read(cx).is_focused(window)); }); From a1a599dac5fc0d10c03d30067ea11c5539abb262 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 14 Nov 2025 14:45:58 -0300 Subject: [PATCH 0125/1030] collab_ui: Fix search matching in the panel (#42743) Release Notes: - collab: Fixed a regression where search matches wouldn't expand the parent channel if that happened to be collapsed. 
--- crates/collab_ui/src/collab_panel.rs | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/crates/collab_ui/src/collab_panel.rs b/crates/collab_ui/src/collab_panel.rs index 97559f41a76bed36f22d3ef22bc95d913a462116..b57d0279545aed8f896179968c877efb72e7c772 100644 --- a/crates/collab_ui/src/collab_panel.rs +++ b/crates/collab_ui/src/collab_panel.rs @@ -672,20 +672,25 @@ impl CollabPanel { { self.entries.push(ListEntry::ChannelEditor { depth: 0 }); } + + let should_respect_collapse = query.is_empty(); let mut collapse_depth = None; + for (idx, channel) in channels.into_iter().enumerate() { let depth = channel.parent_path.len(); - if collapse_depth.is_none() && self.is_channel_collapsed(channel.id) { - collapse_depth = Some(depth); - } else if let Some(collapsed_depth) = collapse_depth { - if depth > collapsed_depth { - continue; - } - if self.is_channel_collapsed(channel.id) { + if should_respect_collapse { + if collapse_depth.is_none() && self.is_channel_collapsed(channel.id) { collapse_depth = Some(depth); - } else { - collapse_depth = None; + } else if let Some(collapsed_depth) = collapse_depth { + if depth > collapsed_depth { + continue; + } + if self.is_channel_collapsed(channel.id) { + collapse_depth = Some(depth); + } else { + collapse_depth = None; + } } } From a8e0de37ac99d402b598cbfd132f65ea033bf728 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Fri, 14 Nov 2025 18:51:26 +0100 Subject: [PATCH 0126/1030] gpui: Fix crashes when losing devices while resizing on windows (#42740) Fixes ZED-1HC Release Notes: - Fixed Zed panicking when moving Zed windows over different screens associated with different gpu devices on windows --- .../src/platform/windows/directx_atlas.rs | 13 +++-- .../src/platform/windows/directx_renderer.rs | 47 +++++++++++++++---- crates/gpui/src/platform/windows/events.rs | 11 ++++- crates/gpui/src/platform/windows/platform.rs | 19 +++++++- crates/gpui/src/platform/windows/window.rs | 11 ++++- 5 files changed, 81 insertions(+), 20 deletions(-) diff --git a/crates/gpui/src/platform/windows/directx_atlas.rs b/crates/gpui/src/platform/windows/directx_atlas.rs index 38c22a41bf9d32cf43f585050390b75602a6bf42..9deae392d1a5ef18a6af644031f047780fa23f70 100644 --- a/crates/gpui/src/platform/windows/directx_atlas.rs +++ b/crates/gpui/src/platform/windows/directx_atlas.rs @@ -234,11 +234,14 @@ impl DirectXAtlasState { } fn texture(&self, id: AtlasTextureId) -> &DirectXAtlasTexture { - let textures = match id.kind { - crate::AtlasTextureKind::Monochrome => &self.monochrome_textures, - crate::AtlasTextureKind::Polychrome => &self.polychrome_textures, - }; - textures[id.index as usize].as_ref().unwrap() + match id.kind { + crate::AtlasTextureKind::Monochrome => &self.monochrome_textures[id.index as usize] + .as_ref() + .unwrap(), + crate::AtlasTextureKind::Polychrome => &self.polychrome_textures[id.index as usize] + .as_ref() + .unwrap(), + } } } diff --git a/crates/gpui/src/platform/windows/directx_renderer.rs b/crates/gpui/src/platform/windows/directx_renderer.rs index b4180708aa510158456e6b9b0fe1ba1e0dfea85b..608ac2c3b065c598547be8b79f8d7fae8070ff48 100644 --- a/crates/gpui/src/platform/windows/directx_renderer.rs +++ b/crates/gpui/src/platform/windows/directx_renderer.rs @@ -48,6 +48,12 @@ pub(crate) struct DirectXRenderer { width: u32, height: u32, + + /// Whether we want to skip drwaing due to device lost events. 
+ /// + /// In that case we want to discard the first frame that we draw as we got reset in the middle of a frame + /// meaning we lost all the allocated gpu textures and scene resources. + skip_draws: bool, } /// Direct3D objects @@ -167,6 +173,7 @@ impl DirectXRenderer { font_info: Self::get_font_info(), width: 1, height: 1, + skip_draws: false, }) } @@ -192,8 +199,13 @@ impl DirectXRenderer { }], )?; unsafe { - device_context - .ClearRenderTargetView(resources.render_target_view.as_ref().unwrap(), &[0.0; 4]); + device_context.ClearRenderTargetView( + resources + .render_target_view + .as_ref() + .context("missing render target view")?, + &[0.0; 4], + ); device_context .OMSetRenderTargets(Some(slice::from_ref(&resources.render_target_view)), None); device_context.RSSetViewports(Some(slice::from_ref(&resources.viewport))); @@ -283,10 +295,16 @@ impl DirectXRenderer { self.globals = globals; self.pipelines = pipelines; self.direct_composition = direct_composition; + self.skip_draws = true; Ok(()) } pub(crate) fn draw(&mut self, scene: &Scene) -> Result<()> { + if self.skip_draws { + // skip drawing this frame, we just recovered from a device lost event + // and so likely do not have the textures anymore that are required for drawing + return Ok(()); + } self.pre_draw()?; for batch in scene.batches() { match batch { @@ -306,14 +324,18 @@ impl DirectXRenderer { sprites, } => self.draw_polychrome_sprites(texture_id, sprites), PrimitiveBatch::Surfaces(surfaces) => self.draw_surfaces(surfaces), - }.context(format!("scene too large: {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces", - scene.paths.len(), - scene.shadows.len(), - scene.quads.len(), - scene.underlines.len(), - scene.monochrome_sprites.len(), - scene.polychrome_sprites.len(), - scene.surfaces.len(),))?; + } + .context(format!( + "scene too large:\ + {} paths, {} shadows, {} quads, {} underlines, {} mono, {} poly, {} surfaces", + scene.paths.len(), + scene.shadows.len(), + scene.quads.len(), + scene.underlines.len(), + scene.monochrome_sprites.len(), + scene.polychrome_sprites.len(), + scene.surfaces.len(), + ))?; } self.present() } @@ -352,6 +374,7 @@ impl DirectXRenderer { } resources.recreate_resources(devices, width, height)?; + unsafe { devices .device_context @@ -647,6 +670,10 @@ impl DirectXRenderer { } }) } + + pub(crate) fn mark_drawable(&mut self) { + self.skip_draws = false; + } } impl DirectXResources { diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index f80348fdc1f88aedc1231d6579c82af4c76f3c34..cc39d3bcedd370fb4dc2fdb4c1d8304ad6b99b79 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -201,8 +201,10 @@ impl WindowsWindowInner { let new_logical_size = device_size.to_pixels(scale_factor); let mut lock = self.state.borrow_mut(); lock.logical_size = new_logical_size; - if should_resize_renderer { - lock.renderer.resize(device_size).log_err(); + if should_resize_renderer && let Err(e) = lock.renderer.resize(device_size) { + log::error!("Failed to resize renderer, invalidating devices: {}", e); + lock.invalidate_devices + .store(true, std::sync::atomic::Ordering::Release); } if let Some(mut callback) = lock.callbacks.resize.take() { drop(lock); @@ -1138,6 +1140,11 @@ impl WindowsWindowInner { #[inline] fn draw_window(&self, handle: HWND, force_render: bool) -> Option { let mut request_frame = self.state.borrow_mut().callbacks.request_frame.take()?; + + // we are instructing gpui to force 
render a frame, this will + // re-populate all the gpu textures for us so we can resume drawing in + // case we disabled drawing earlier due to a device loss + self.state.borrow_mut().renderer.mark_drawable(); request_frame(RequestFrameOptions { require_presentation: false, force_render, diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index d845c9520f736d06a6cee637328871af7e329241..b7f13f1fab495b1040d1be8e7b86376c450b5f7e 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -3,7 +3,10 @@ use std::{ ffi::OsStr, path::{Path, PathBuf}, rc::{Rc, Weak}, - sync::{Arc, atomic::Ordering}, + sync::{ + Arc, + atomic::{AtomicBool, Ordering}, + }, }; use ::util::{ResultExt, paths::SanitizedPath}; @@ -36,6 +39,9 @@ pub(crate) struct WindowsPlatform { text_system: Arc, windows_version: WindowsVersion, drop_target_helper: IDropTargetHelper, + /// Flag to instruct the `VSyncProvider` thread to invalidate the directx devices + /// as resizing them has failed, causing us to have lost at least the render target. + invalidate_devices: Arc, handle: HWND, disable_direct_composition: bool, } @@ -162,6 +168,7 @@ impl WindowsPlatform { disable_direct_composition, windows_version, drop_target_helper, + invalidate_devices: Arc::new(AtomicBool::new(false)), }) } @@ -195,6 +202,7 @@ impl WindowsPlatform { platform_window_handle: self.handle, disable_direct_composition: self.disable_direct_composition, directx_devices: self.inner.state.borrow().directx_devices.clone().unwrap(), + invalidate_devices: self.invalidate_devices.clone(), } } @@ -247,13 +255,17 @@ impl WindowsPlatform { let validation_number = self.inner.validation_number; let all_windows = Arc::downgrade(&self.raw_window_handles); let text_system = Arc::downgrade(&self.text_system); + let invalidate_devices = self.invalidate_devices.clone(); + std::thread::Builder::new() .name("VSyncProvider".to_owned()) .spawn(move || { let vsync_provider = VSyncProvider::new(); loop { vsync_provider.wait_for_vsync(); - if check_device_lost(&directx_device.device) { + if check_device_lost(&directx_device.device) + || invalidate_devices.fetch_and(false, Ordering::Acquire) + { if let Err(err) = handle_gpu_device_lost( &mut directx_device, platform_window.as_raw(), @@ -877,6 +889,9 @@ pub(crate) struct WindowCreationInfo { pub(crate) platform_window_handle: HWND, pub(crate) disable_direct_composition: bool, pub(crate) directx_devices: DirectXDevices, + /// Flag to instruct the `VSyncProvider` thread to invalidate the directx devices + /// as resizing them has failed, causing us to have lost at least the render target. + pub(crate) invalidate_devices: Arc, } struct PlatformWindowCreateContext { diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index 241293f0caa6c13de350c8b2fc44cb9d5abd82ec..fe6e6ff664a6c8f9b9524501ca1e875b5023169e 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -6,7 +6,7 @@ use std::{ path::PathBuf, rc::{Rc, Weak}, str::FromStr, - sync::{Arc, Once}, + sync::{Arc, Once, atomic::AtomicBool}, time::{Duration, Instant}, }; @@ -53,6 +53,9 @@ pub struct WindowsWindowState { pub nc_button_pressed: Option, pub display: WindowsDisplay, + /// Flag to instruct the `VSyncProvider` thread to invalidate the directx devices + /// as resizing them has failed, causing us to have lost at least the render target. 
+ pub invalidate_devices: Arc, fullscreen: Option, initial_placement: Option, hwnd: HWND, @@ -83,6 +86,7 @@ impl WindowsWindowState { min_size: Option>, appearance: WindowAppearance, disable_direct_composition: bool, + invalidate_devices: Arc, ) -> Result { let scale_factor = { let monitor_dpi = unsafe { GetDpiForWindow(hwnd) } as f32; @@ -138,6 +142,7 @@ impl WindowsWindowState { fullscreen, initial_placement, hwnd, + invalidate_devices, }) } @@ -211,6 +216,7 @@ impl WindowsWindowInner { context.min_size, context.appearance, context.disable_direct_composition, + context.invalidate_devices.clone(), )?); Ok(Rc::new(Self { @@ -361,6 +367,7 @@ struct WindowCreateContext { appearance: WindowAppearance, disable_direct_composition: bool, directx_devices: DirectXDevices, + invalidate_devices: Arc, } impl WindowsWindow { @@ -380,6 +387,7 @@ impl WindowsWindow { platform_window_handle, disable_direct_composition, directx_devices, + invalidate_devices, } = creation_info; register_window_class(icon); let hide_title_bar = params @@ -440,6 +448,7 @@ impl WindowsWindow { appearance, disable_direct_composition, directx_devices, + invalidate_devices, }; let creation_result = unsafe { CreateWindowExW( From a260ba6428daf6ab476a34fee0802be5b47623e9 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:02:09 -0300 Subject: [PATCH 0127/1030] agent_ui: Simplify labels in new thread menu (#42746) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop the "new", it's simpler! 😆 | Before | After | |--------|--------| | Screenshot 2025-11-14 at 2  48@2x | Screenshot 2025-11-14 at 2 
47@2x | Release Notes: - N/A --- crates/agent_ui/src/agent_panel.rs | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 58839d5d8df2a6e2e149800ecf47b30c3383bc0b..d4138aa5bdf8f3731df7508ab8d6476455aca11b 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -1905,7 +1905,6 @@ impl AgentPanel { let active_thread = active_thread.clone(); Some(ContextMenu::build(window, cx, |menu, _window, cx| { menu.context(focus_handle.clone()) - .header("Zed Agent") .when_some(active_thread, |this, active_thread| { let thread = active_thread.read(cx); @@ -1929,9 +1928,9 @@ impl AgentPanel { } }) .item( - ContextMenuEntry::new("New Thread") + ContextMenuEntry::new("Zed Agent") .action(NewThread.boxed_clone()) - .icon(IconName::Thread) + .icon(IconName::ZedAgent) .icon_color(Color::Muted) .handler({ let workspace = workspace.clone(); @@ -1955,7 +1954,7 @@ impl AgentPanel { }), ) .item( - ContextMenuEntry::new("New Text Thread") + ContextMenuEntry::new("Text Thread") .icon(IconName::TextThread) .icon_color(Color::Muted) .action(NewTextThread.boxed_clone()) @@ -1983,7 +1982,7 @@ impl AgentPanel { .separator() .header("External Agents") .item( - ContextMenuEntry::new("New Claude Code") + ContextMenuEntry::new("Claude Code") .icon(IconName::AiClaude) .disabled(is_via_collab) .icon_color(Color::Muted) @@ -2009,7 +2008,7 @@ impl AgentPanel { }), ) .item( - ContextMenuEntry::new("New Codex CLI") + ContextMenuEntry::new("Codex CLI") .icon(IconName::AiOpenAi) .disabled(is_via_collab) .icon_color(Color::Muted) @@ -2035,7 +2034,7 @@ impl AgentPanel { }), ) .item( - ContextMenuEntry::new("New Gemini CLI") + ContextMenuEntry::new("Gemini CLI") .icon(IconName::AiGemini) .icon_color(Color::Muted) .disabled(is_via_collab) @@ -2079,7 +2078,7 @@ impl AgentPanel { for agent_name in agent_names { let icon_path = agent_server_store_read.agent_icon(&agent_name); let mut entry = - ContextMenuEntry::new(format!("New {}", agent_name)); + ContextMenuEntry::new(format!("{}", agent_name)); if let Some(icon_path) = icon_path { entry = entry.custom_icon_svg(icon_path); } else { From c387203ac8830c5da484605dd155311833793436 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Fri, 14 Nov 2025 13:50:55 -0800 Subject: [PATCH 0128/1030] zeta2: Prediction prompt engineering (#42758) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... --------- Co-authored-by: Agus Zubiaga Co-authored-by: Michael Sloan --- .../src/cloud_zeta2_prompt.rs | 61 +++++++++++-------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index 89c7536f88e1c0bdcce7b67fb2f2704052b5a677..c84ba24ae3485f837278f61e1eeb8b40eb276840 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -31,7 +31,7 @@ const MARKED_EXCERPT_INSTRUCTIONS: &str = indoc! {" Other code is provided for context, and `…` indicates when code has been skipped. - # Edit History: + ## Edit History "}; @@ -49,7 +49,7 @@ const LABELED_SECTIONS_INSTRUCTIONS: &str = indoc! {r#" println!("{i}"); } - # Edit History: + ## Edit History "#}; @@ -89,7 +89,7 @@ const NUMBERED_LINES_INSTRUCTIONS: &str = indoc! {r#" const STUDENT_MODEL_INSTRUCTIONS: &str = indoc! 
{r#" You are a code completion assistant that analyzes edit history to identify and systematically complete incomplete refactorings or patterns across the entire codebase. - # Edit History: + ## Edit History "#}; @@ -119,14 +119,15 @@ const XML_TAGS_INSTRUCTIONS: &str = indoc! {r#" # Instructions You are an edit prediction agent in a code editor. - Your job is to predict the next edit that the user will make, - based on their last few edits and their current cursor location. - # Output Format + Analyze the history of edits made by the user in order to infer what they are currently trying to accomplish. + Then complete the remainder of the current change if it is incomplete, or predict the next edit the user intends to make. + Always continue along the user's current trajectory, rather than changing course. - You must briefly explain your understanding of the user's goal, in one - or two sentences, and then specify their next edit, using the following - XML format: + ## Output Format + + You should briefly explain your understanding of the user's overall goal in one sentence, then explain what the next change + along the users current trajectory will be in another, and finally specify the next edit using the following XML-like format: @@ -152,15 +153,14 @@ const XML_TAGS_INSTRUCTIONS: &str = indoc! {r#" - Always close all tags properly - Don't include the <|user_cursor|> marker in your output. - # Edit History: + ## Edit History "#}; const OLD_TEXT_NEW_TEXT_REMINDER: &str = indoc! {r#" --- - Remember that the edits in the edit history have already been deployed. - The files are currently as shown in the Code Excerpts section. + Remember that the edits in the edit history have already been applied. "#}; pub fn build_prompt( @@ -216,23 +216,32 @@ pub fn build_prompt( let excerpts_preamble = match request.prompt_format { PromptFormat::Minimal => indoc! {" - # Part of the file under the cursor: + ## Part of the file under the cursor - (The cursor marker <|user_cursor|> indicates the current user cursor position. - The file is in current state, edits from edit history has been applied. - We only show part of the file around the cursor. - You can only edit exactly this part of the file. - We prepend line numbers (e.g., `123|`); they are not part of the file.) - "}, - PromptFormat::NumLinesUniDiff => indoc! {" - # Code Excerpts + (The cursor marker <|user_cursor|> indicates the current user cursor position. + The file is in current state, edits from edit history has been applied. + We only show part of the file around the cursor. + You can only edit exactly this part of the file. + We prepend line numbers (e.g., `123|`); they are not part of the file.) + "}, + PromptFormat::NumLinesUniDiff | PromptFormat::OldTextNewText => indoc! {" + ## Code Excerpts - The cursor marker <|user_cursor|> indicates the current user cursor position. - The file is in current state, edits from edit history have been applied. - We prepend line numbers (e.g., `123|`); they are not part of the file. - "}, + Here is some excerpts of code that you should take into account to predict the next edit. + + The cursor position is marked by `<|user_cursor|>` as it stands after the last edit in the history. + + In addition other excerpts are included to better understand what the edit will be, including the declaration + or references of symbols around the cursor, or other similar code snippets that may need to be updated + following patterns that appear in the edit history. 
+ + Consider each of them carefully in relation to the edit history, and that the user may not have navigated + to the next place they want to edit yet. + + Lines starting with `…` indicate omitted line ranges. These may appear inside multi-line code constructs. + "}, _ => indoc! {" - # Code Excerpts + ## Code Excerpts The cursor marker <|user_cursor|> indicates the current user cursor position. The file is in current state, edits from edit history have been applied. From 305206fd483076db9c021315e1840f2000c83d74 Mon Sep 17 00:00:00 2001 From: Ivan Pasquariello Date: Fri, 14 Nov 2025 23:46:35 +0100 Subject: [PATCH 0129/1030] Make drag and double click enabled on the whole title bar on macOS (#41839) Closes #4947 Taken inspiration from @tasuren implementation, plus the addition for the double click enabled on the whole title bar too to maximizes/restores the window. I was not able to test the application on Linux, no need to test on Windows since the feature is enabled by the OS. Release Notes: - Fixed title bar not fully draggable on macOS - Fixed not being able to maximizes/restores the window with double click on the whole title bar on macOS --- crates/gpui/src/platform/mac/window.rs | 11 ++++ crates/gpui/src/window.rs | 1 + crates/title_bar/src/platform_title_bar.rs | 77 ++++++++++++---------- 3 files changed, 53 insertions(+), 36 deletions(-) diff --git a/crates/gpui/src/platform/mac/window.rs b/crates/gpui/src/platform/mac/window.rs index 53a5688ad6b78cda8f610e4acc810a7df58cf47b..23752fc53edbc1062db19caf13c5c65fc282ca87 100644 --- a/crates/gpui/src/platform/mac/window.rs +++ b/crates/gpui/src/platform/mac/window.rs @@ -1543,6 +1543,17 @@ impl PlatformWindow for MacWindow { }) .detach(); } + + fn start_window_move(&self) { + let this = self.0.lock(); + let window = this.native_window; + + unsafe { + let app = NSApplication::sharedApplication(nil); + let mut event: id = msg_send![app, currentEvent]; + let _: () = msg_send![window, performWindowDragWithEvent: event]; + } + } } impl rwh::HasWindowHandle for MacWindow { diff --git a/crates/gpui/src/window.rs b/crates/gpui/src/window.rs index 51fd692f86805886529f17f03feea8bf7ff9db03..3505da3e7d85ed3dca5e9050787d11902941f364 100644 --- a/crates/gpui/src/window.rs +++ b/crates/gpui/src/window.rs @@ -1819,6 +1819,7 @@ impl Window { self.platform_window.show_window_menu(position) } + /// Handle window movement for Linux and macOS. /// Tells the compositor to take control of window movement (Wayland and X11) /// /// Events may not be received during a move operation. 
diff --git a/crates/title_bar/src/platform_title_bar.rs b/crates/title_bar/src/platform_title_bar.rs index fd03e764629454411c9726ef7dcf055d54582d7e..6ce7d089bb4641e2c1b7da710ebb0841fc51da4c 100644 --- a/crates/title_bar/src/platform_title_bar.rs +++ b/crates/title_bar/src/platform_title_bar.rs @@ -77,6 +77,47 @@ impl Render for PlatformTitleBar { .window_control_area(WindowControlArea::Drag) .w_full() .h(height) + .map(|this| { + this.on_mouse_down_out(cx.listener(move |this, _ev, _window, _cx| { + this.should_move = false; + })) + .on_mouse_up( + gpui::MouseButton::Left, + cx.listener(move |this, _ev, _window, _cx| { + this.should_move = false; + }), + ) + .on_mouse_down( + gpui::MouseButton::Left, + cx.listener(move |this, _ev, _window, _cx| { + this.should_move = true; + }), + ) + .on_mouse_move(cx.listener(move |this, _ev, window, _| { + if this.should_move { + this.should_move = false; + window.start_window_move(); + } + })) + }) + .map(|this| { + // Note: On Windows the title bar behavior is handled by the platform implementation. + this.id(self.id.clone()) + .when(self.platform_style == PlatformStyle::Mac, |this| { + this.on_click(|event, window, _| { + if event.click_count() == 2 { + window.titlebar_double_click(); + } + }) + }) + .when(self.platform_style == PlatformStyle::Linux, |this| { + this.on_click(|event, window, _| { + if event.click_count() == 2 { + window.zoom_window(); + } + }) + }) + }) .map(|this| { if window.is_fullscreen() { this.pl_2() @@ -112,21 +153,6 @@ impl Render for PlatformTitleBar { .justify_between() .overflow_x_hidden() .w_full() - // Note: On Windows the title bar behavior is handled by the platform implementation. - .when(self.platform_style == PlatformStyle::Mac, |this| { - this.on_click(|event, window, _| { - if event.click_count() == 2 { - window.titlebar_double_click(); - } - }) - }) - .when(self.platform_style == PlatformStyle::Linux, |this| { - this.on_click(|event, window, _| { - if event.click_count() == 2 { - window.zoom_window(); - } - }) - }) .children(children), ) .when(!window.is_fullscreen(), |title_bar| { @@ -142,27 +168,6 @@ impl Render for PlatformTitleBar { window.show_window_menu(ev.position) }) }) - .on_mouse_move(cx.listener(move |this, _ev, window, _| { - if this.should_move { - this.should_move = false; - window.start_window_move(); - } - })) - .on_mouse_down_out(cx.listener(move |this, _ev, _window, _cx| { - this.should_move = false; - })) - .on_mouse_up( - MouseButton::Left, - cx.listener(move |this, _ev, _window, _cx| { - this.should_move = false; - }), - ) - .on_mouse_down( - MouseButton::Left, - cx.listener(move |this, _ev, _window, _cx| { - this.should_move = true; - }), - ) } else { title_bar } From b3097cfc8a219b5b556137e0cda4b6c3c57eac33 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 14 Nov 2025 19:54:52 -0300 Subject: [PATCH 0130/1030] docs: Add section about keybinding for external agent threads (#42772) Release Notes: - N/A --- docs/src/ai/external-agents.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docs/src/ai/external-agents.md b/docs/src/ai/external-agents.md index 59ad764483b74b9b7c1557082d953568b90b802f..7830400fde45ba76029c2e50d02126b2a6278677 100644 --- a/docs/src/ai/external-agents.md +++ b/docs/src/ai/external-agents.md @@ -206,6 +206,31 @@ This can be useful if you're in the middle of developing a new agent that speaks It's also possible to specify a custom path, arguments, or environment for the builtin integrations by 
using the `claude` and `gemini` names. +### Custom Keybinding For Extension-Based Agents + +To assign a custom keybinding to start a new thread for agents that were added by installing agent server extensions, add the following snippet to your `keymap.json` file: + +```json [keymap] +{ + "bindings": { + "cmd-alt-n": [ + "agent::NewExternalAgentThread", + { + "agent": { + "custom": { + "name": "My Agent", + "command": { + "command": "my-agent", + "args": ["acp"] + } + } + } + } + ] + } +}, +``` + ## Debugging Agents When using external agents in Zed, you can access the debug view via with `dev: open acp logs` from the Command Palette. This lets you see the messages being sent and received between Zed and the agent. From 1277f328c42c1e61f26717675b14f3e99627b2fe Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 14 Nov 2025 20:08:46 -0300 Subject: [PATCH 0131/1030] docs: Improve custom keybinding for external agent example (#42776) Follow up to https://github.com/zed-industries/zed/pull/42772 adding some comments to improve clarity. Release Notes: - N/A --- docs/src/ai/external-agents.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/src/ai/external-agents.md b/docs/src/ai/external-agents.md index 7830400fde45ba76029c2e50d02126b2a6278677..ade14c393dc5155d5b3cc68fa3597c643fbfe982 100644 --- a/docs/src/ai/external-agents.md +++ b/docs/src/ai/external-agents.md @@ -213,14 +213,14 @@ To assign a custom keybinding to start a new thread for agents that were added b ```json [keymap] { "bindings": { - "cmd-alt-n": [ + "cmd-alt-n": [ // Your custom keybinding "agent::NewExternalAgentThread", { "agent": { "custom": { - "name": "My Agent", + "name": "My Agent", // The agent name as it appears in the UI (e.g., "OpenCode", "Auggie CLI", etc.) "command": { - "command": "my-agent", + "command": "my-agent", // The agent name in lowercase with no spaces "args": ["acp"] } } From 07cc87b288b9b9c575ccb696f383a54b7474bd8b Mon Sep 17 00:00:00 2001 From: Alvaro Parker <64918109+AlvaroParker@users.noreply.github.com> Date: Sat, 15 Nov 2025 11:15:37 -0300 Subject: [PATCH 0132/1030] Fix wild install script (#42747) Use [`command`](https://www.gnu.org/software/bash/manual/bash.html#index-command) instead of `which` to check if `wild` is installed. Using `which` will result in an error being printed to stdout: ```bash ./script/install-wild which: invalid option -- 's' /usr/local/bin/wild Warning: existing wild 0.6.0 found at /usr/local/bin/wild. Skipping installation. ``` Release Notes: - N/A --- script/install-wild | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/script/install-wild b/script/install-wild index 3f8a5a2b3ff613285b3f02c0069e5c5829ba3744..4c2d0348965d54e45273800ffde3ce53bf3f7c83 100755 --- a/script/install-wild +++ b/script/install-wild @@ -11,8 +11,8 @@ if [ "$(uname -s)" != "Linux" ]; then elif [ -z "$WILD_VERSION" ]; then echo "Usage: $0 [version]" exit 1 -elif which -s wild && wild --version | grep -Fq "$WILD_VERSION" ; then - echo "Warning: existing wild $WILD_VERSION found at $(which wild). Skipping installation." +elif command -v wild >/dev/null 2>&1 && wild --version | grep -Fq "$WILD_VERSION" ; then + echo "Warning: existing wild $WILD_VERSION found at $(command -v wild). Skipping installation." 
exit 0 fi From 1683052e6ce44d206e947c397917b2ab7617c0fe Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Sat, 15 Nov 2025 23:57:49 +0530 Subject: [PATCH 0133/1030] editor: Fix MoveToEnclosingBracket and unmatched forward/backward Vim motions in Markdown code blocks (#42813) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We now correctly use bracket ranges from the deepest syntax layer when finding enclosing brackets. Release Notes: - Fixed an issue where `MoveToEnclosingBracket` didn’t work correctly inside Markdown code blocks. - Fixed an issue where unmatched forward/backward Vim motions didn’t work correctly inside Markdown code blocks. --------- Co-authored-by: MuskanPaliwal --- crates/editor/src/editor_tests.rs | 84 +++++++++++++++++ .../src/test/editor_lsp_test_context.rs | 18 +++- crates/language/Cargo.toml | 2 + crates/language/src/buffer.rs | 11 ++- crates/language/src/language.rs | 29 ++++++ crates/vim/src/motion.rs | 90 +++++++++++++++++++ .../src/test/neovim_backed_test_context.rs | 20 +++++ crates/vim/src/test/vim_test_context.rs | 5 ++ .../test_unmatched_backward_markdown.json | 9 ++ .../test_unmatched_forward_markdown.json | 9 ++ 10 files changed, 274 insertions(+), 3 deletions(-) create mode 100644 crates/vim/test_data/test_unmatched_backward_markdown.json create mode 100644 crates/vim/test_data/test_unmatched_forward_markdown.json diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 0b485d4e1adac071a82d1ad8bde53f07d14f1434..20ad9ca076ed4ee68679bd351386ddc49f18491a 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -32,6 +32,7 @@ use language::{ tree_sitter_python, }; use language_settings::Formatter; +use languages::markdown_lang; use languages::rust_lang; use lsp::CompletionParams; use multi_buffer::{IndentGuide, PathKey}; @@ -17503,6 +17504,89 @@ async fn test_move_to_enclosing_bracket(cx: &mut TestAppContext) { ); } +#[gpui::test] +async fn test_move_to_enclosing_bracket_in_markdown_code_block(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + let language_registry = Arc::new(language::LanguageRegistry::test(cx.executor())); + language_registry.add(markdown_lang()); + language_registry.add(rust_lang()); + let buffer = cx.new(|cx| { + let mut buffer = language::Buffer::local( + indoc! {" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } + } + ``` + "}, + cx, + ); + buffer.set_language_registry(language_registry.clone()); + buffer.set_language(Some(markdown_lang()), cx); + buffer + }); + let buffer = cx.new(|cx| MultiBuffer::singleton(buffer, cx)); + let editor = cx.add_window(|window, cx| build_editor(buffer.clone(), window, cx)); + cx.executor().run_until_parked(); + _ = editor.update(cx, |editor, window, cx| { + // Case 1: Test outer enclosing brackets + select_ranges( + editor, + &indoc! {" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } + }ˇ + ``` + "}, + window, + cx, + ); + editor.move_to_enclosing_bracket(&MoveToEnclosingBracket, window, cx); + assert_text_with_selections( + editor, + &indoc! {" + ```rs + impl Worktree ˇ{ + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } + } + ``` + "}, + cx, + ); + // Case 2: Test inner enclosing brackets + select_ranges( + editor, + &indoc! 
{" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + }ˇ + } + ``` + "}, + window, + cx, + ); + editor.move_to_enclosing_bracket(&MoveToEnclosingBracket, window, cx); + assert_text_with_selections( + editor, + &indoc! {" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> ˇ{ + } + } + ``` + "}, + cx, + ); + }); +} + #[gpui::test] async fn test_on_type_formatting_not_triggered(cx: &mut TestAppContext) { init_test(cx, |_| {}); diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 427f0bd0de4d56bd01f6a1525ec8aaaf83fe3870..87cc3357783ef4503b584f9624d14a35a8487dd7 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -6,7 +6,7 @@ use std::{ }; use anyhow::Result; -use language::rust_lang; +use language::{markdown_lang, rust_lang}; use serde_json::json; use crate::{Editor, ToPoint}; @@ -313,6 +313,22 @@ impl EditorLspTestContext { Self::new(language, Default::default(), cx).await } + pub async fn new_markdown_with_rust(cx: &mut gpui::TestAppContext) -> Self { + let context = Self::new( + Arc::into_inner(markdown_lang()).unwrap(), + Default::default(), + cx, + ) + .await; + + let language_registry = context.workspace.read_with(cx, |workspace, cx| { + workspace.project().read(cx).languages().clone() + }); + language_registry.add(rust_lang()); + + context + } + /// Constructs lsp range using a marked string with '[', ']' range delimiters #[track_caller] pub fn lsp_range(&mut self, marked_text: &str) -> lsp::Range { diff --git a/crates/language/Cargo.toml b/crates/language/Cargo.toml index ffc5ad85d14c293eeeaff9172b21ef58cf9a1cf0..49ea681290c3edc878391a337c5423fa795dba4f 100644 --- a/crates/language/Cargo.toml +++ b/crates/language/Cargo.toml @@ -21,6 +21,7 @@ test-support = [ "tree-sitter-rust", "tree-sitter-python", "tree-sitter-typescript", + "tree-sitter-md", "settings/test-support", "util/test-support", ] @@ -59,6 +60,7 @@ sum_tree.workspace = true task.workspace = true text.workspace = true theme.workspace = true +tree-sitter-md = { workspace = true, optional = true } tree-sitter-python = { workspace = true, optional = true } tree-sitter-rust = { workspace = true, optional = true } tree-sitter-typescript = { workspace = true, optional = true } diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 3b4f24a400403f7e4dbd4f09ee7fb829f4cbbe00..f46fc0db0a171349456b2c29a1c9fff556daee2d 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -823,6 +823,7 @@ pub struct BracketMatch { pub open_range: Range, pub close_range: Range, pub newline_only: bool, + pub depth: usize, } impl Buffer { @@ -4136,6 +4137,7 @@ impl BufferSnapshot { while let Some(mat) = matches.peek() { let mut open = None; let mut close = None; + let depth = mat.depth; let config = &configs[mat.grammar_index]; let pattern = &config.patterns[mat.pattern_index]; for capture in mat.captures { @@ -4161,6 +4163,7 @@ impl BufferSnapshot { open_range, close_range, newline_only: pattern.newline_only, + depth, }); } None @@ -4320,8 +4323,12 @@ impl BufferSnapshot { ) -> impl Iterator + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); - self.bracket_ranges(range.clone()).filter(move |pair| { - pair.open_range.start <= range.start && pair.close_range.end >= range.end + let result: Vec<_> = self.bracket_ranges(range.clone()).collect(); + 
let max_depth = result.iter().map(|mat| mat.depth).max().unwrap_or(0); + result.into_iter().filter(move |pair| { + pair.open_range.start <= range.start + && pair.close_range.end >= range.end + && pair.depth == max_depth }) } diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index ac94378c9cc1ae300f9dcbd5a088f25761f309b4..82e0d69cefa94cc0e03a694eea0f29031d8fe156 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -2658,6 +2658,35 @@ pub fn rust_lang() -> Arc { Arc::new(language) } +#[doc(hidden)] +#[cfg(any(test, feature = "test-support"))] +pub fn markdown_lang() -> Arc { + use std::borrow::Cow; + + let language = Language::new( + LanguageConfig { + name: "Markdown".into(), + matcher: LanguageMatcher { + path_suffixes: vec!["md".into()], + ..Default::default() + }, + ..Default::default() + }, + Some(tree_sitter_md::LANGUAGE.into()), + ) + .with_queries(LanguageQueries { + brackets: Some(Cow::from(include_str!( + "../../languages/src/markdown/brackets.scm" + ))), + injections: Some(Cow::from(include_str!( + "../../languages/src/markdown/injections.scm" + ))), + ..Default::default() + }) + .expect("Could not parse markdown queries"); + Arc::new(language) +} + #[cfg(test)] mod tests { use super::*; diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index 2da1083ee6623cc8a463ef31be7e90dca0063b34..0264ea9176fb2264bc693888fb861ff33d5be706 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -3312,6 +3312,96 @@ mod test { cx.shared_state().await.assert_eq("ˇ(\n {()} \n)"); } + #[gpui::test] + async fn test_unmatched_forward_markdown(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new_markdown_with_rust(cx).await; + + cx.neovim.exec("set filetype=markdown").await; + + cx.set_shared_state(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + ˇ } + } + ``` + "}) + .await; + cx.simulate_shared_keystrokes("] }").await; + cx.shared_state().await.assert_eq(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + ˇ} + } + ``` + "}); + + cx.set_shared_state(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } ˇ + } + ``` + "}) + .await; + cx.simulate_shared_keystrokes("] }").await; + cx.shared_state().await.assert_eq(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } • + ˇ} + ``` + "}); + } + + #[gpui::test] + async fn test_unmatched_backward_markdown(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new_markdown_with_rust(cx).await; + + cx.neovim.exec("set filetype=markdown").await; + + cx.set_shared_state(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + ˇ } + } + ``` + "}) + .await; + cx.simulate_shared_keystrokes("[ {").await; + cx.shared_state().await.assert_eq(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> ˇ{ + } + } + ``` + "}); + + cx.set_shared_state(indoc! {r" + ```rs + impl Worktree { + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } ˇ + } + ``` + "}) + .await; + cx.simulate_shared_keystrokes("[ {").await; + cx.shared_state().await.assert_eq(indoc! 
{r" + ```rs + impl Worktree ˇ{ + pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> { + } • + } + ``` + "}); + } + #[gpui::test] async fn test_matching_tags(cx: &mut gpui::TestAppContext) { let mut cx = NeovimBackedTestContext::new_html(cx).await; diff --git a/crates/vim/src/test/neovim_backed_test_context.rs b/crates/vim/src/test/neovim_backed_test_context.rs index 9d2452ab20a6a99138c4b0d86f597f084a0876d6..ce2bb6eb7b6f77788f3bc002ff979fdbb251cb94 100644 --- a/crates/vim/src/test/neovim_backed_test_context.rs +++ b/crates/vim/src/test/neovim_backed_test_context.rs @@ -183,6 +183,26 @@ impl NeovimBackedTestContext { } } + pub async fn new_markdown_with_rust(cx: &mut gpui::TestAppContext) -> NeovimBackedTestContext { + #[cfg(feature = "neovim")] + cx.executor().allow_parking(); + let thread = thread::current(); + let test_name = thread + .name() + .expect("thread is not named") + .split(':') + .next_back() + .unwrap() + .to_string(); + Self { + cx: VimTestContext::new_markdown_with_rust(cx).await, + neovim: NeovimConnection::new(test_name).await, + + last_set_state: None, + recent_keystrokes: Default::default(), + } + } + pub async fn new_typescript(cx: &mut gpui::TestAppContext) -> NeovimBackedTestContext { #[cfg(feature = "neovim")] cx.executor().allow_parking(); diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 6300e3a3fcc079e064ef0e26c3e218b4032aa890..4d6859f1e56976fbb0d84d475e614325e0e52795 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -41,6 +41,11 @@ impl VimTestContext { Self::new_with_lsp(EditorLspTestContext::new_html(cx).await, true) } + pub async fn new_markdown_with_rust(cx: &mut gpui::TestAppContext) -> VimTestContext { + Self::init(cx); + Self::new_with_lsp(EditorLspTestContext::new_markdown_with_rust(cx).await, true) + } + pub async fn new_typescript(cx: &mut gpui::TestAppContext) -> VimTestContext { Self::init(cx); Self::new_with_lsp( diff --git a/crates/vim/test_data/test_unmatched_backward_markdown.json b/crates/vim/test_data/test_unmatched_backward_markdown.json new file mode 100644 index 0000000000000000000000000000000000000000..c2df848b812e1685c39b7b8c353401493cc5a4be --- /dev/null +++ b/crates/vim/test_data/test_unmatched_backward_markdown.json @@ -0,0 +1,9 @@ +{"Exec":{"command":"set filetype=markdown"}} +{"Put":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\nˇ }\n}\n```\n"}} +{"Key":"["} +{"Key":"{"} +{"Get":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> ˇ{\n }\n}\n```\n","mode":"Normal"}} +{"Put":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\n } ˇ\n}\n```\n"}} +{"Key":"["} +{"Key":"{"} +{"Get":{"state":"```rs\nimpl Worktree ˇ{\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\n } \n}\n```\n","mode":"Normal"}} diff --git a/crates/vim/test_data/test_unmatched_forward_markdown.json b/crates/vim/test_data/test_unmatched_forward_markdown.json new file mode 100644 index 0000000000000000000000000000000000000000..753f68d04fb458891de915134b5da8219742c06f --- /dev/null +++ b/crates/vim/test_data/test_unmatched_forward_markdown.json @@ -0,0 +1,9 @@ +{"Exec":{"command":"set filetype=markdown"}} +{"Put":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\nˇ }\n}\n```\n"}} +{"Key":"]"} 
+{"Key":"}"} +{"Get":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\n ˇ}\n}\n```\n","mode":"Normal"}} +{"Put":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\n } ˇ\n}\n```\n"}} +{"Key":"]"} +{"Key":"}"} +{"Get":{"state":"```rs\nimpl Worktree {\n pub async fn open_buffers(&self, path: &Path) -> impl Iterator<&Buffer> {\n } \nˇ}\n```\n","mode":"Normal"}} From b0525a26a684ab2822baaa6c94ee5acd7f50a1d4 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Sun, 16 Nov 2025 16:51:13 -0300 Subject: [PATCH 0134/1030] Report automatically discarded zeta predictions (#42761) We weren't reporting predictions that were generated but never made it out of the provider, such as predictions that failed to interpolate, and those that are cancelled because another request completes before it. Release Notes: - N/A --------- Co-authored-by: Max Brunsfeld Co-authored-by: Ben Kunkle --- .../zed/src/zed/edit_prediction_registry.rs | 3 +- crates/zeta/src/zeta.rs | 103 ++++++++++++------ 2 files changed, 73 insertions(+), 33 deletions(-) diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index fd16478b5a7ade4b8ef86924d2ce737cb2f62c56..74b6687f62c641ce4076778efa4369a45529f4f9 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -251,11 +251,12 @@ fn assign_edit_prediction_provider( }); } - let provider = cx.new(|_| { + let provider = cx.new(|cx| { zeta::ZetaEditPredictionProvider::new( zeta, project.clone(), singleton_buffer, + cx, ) }); editor.set_edit_prediction_provider(Some(provider), window, cx); diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 577ca77c13c0b9f8e0eff578c20d0a933c858bce..c2ef5cb826db0947c18e1e91a6163cccc12deb11 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -1283,6 +1283,7 @@ struct CurrentEditPrediction { buffer_id: EntityId, completion: EditPrediction, was_shown: bool, + was_accepted: bool, } impl CurrentEditPrediction { @@ -1310,7 +1311,7 @@ impl CurrentEditPrediction { struct PendingCompletion { id: usize, - _task: Task<()>, + task: Task<()>, } #[derive(Debug, Clone, Copy)] @@ -1386,6 +1387,7 @@ pub struct ZetaEditPredictionProvider { zeta: Entity, singleton_buffer: Option>, pending_completions: ArrayVec, + canceled_completions: HashMap>, next_pending_completion_id: usize, current_completion: Option, last_request_timestamp: Instant, @@ -1399,17 +1401,34 @@ impl ZetaEditPredictionProvider { zeta: Entity, project: Entity, singleton_buffer: Option>, + cx: &mut Context, ) -> Self { + cx.on_release(|this, cx| { + this.take_current_edit_prediction(cx); + }) + .detach(); + Self { zeta, singleton_buffer, pending_completions: ArrayVec::new(), + canceled_completions: HashMap::default(), next_pending_completion_id: 0, current_completion: None, last_request_timestamp: Instant::now(), project, } } + + fn take_current_edit_prediction(&mut self, cx: &mut App) { + if let Some(completion) = self.current_completion.take() { + if !completion.was_accepted { + self.zeta.update(cx, |zeta, cx| { + zeta.discard_completion(completion.completion.id, completion.was_shown, cx); + }); + } + } + } } impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { @@ -1531,42 +1550,65 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { buffer_id: buffer.entity_id(), completion, was_shown: false, + 
was_accepted: false, }) }) } Err(error) => Err(error), }; - let Some(new_completion) = completion - .context("edit prediction failed") - .log_err() - .flatten() - else { - this.update(cx, |this, cx| { - if this.pending_completions[0].id == pending_completion_id { + + let discarded = this + .update(cx, |this, cx| { + if this + .pending_completions + .first() + .is_some_and(|completion| completion.id == pending_completion_id) + { this.pending_completions.remove(0); } else { - this.pending_completions.clear(); + if let Some(discarded) = this.pending_completions.drain(..).next() { + this.canceled_completions + .insert(discarded.id, discarded.task); + } + } + + let canceled = this.canceled_completions.remove(&pending_completion_id); + + if canceled.is_some() + && let Ok(Some(new_completion)) = &completion + { + this.zeta.update(cx, |zeta, cx| { + zeta.discard_completion(new_completion.completion.id, false, cx); + }); + return true; } cx.notify(); + false }) - .ok(); + .ok() + .unwrap_or(true); + + if discarded { + return; + } + + let Some(new_completion) = completion + .context("edit prediction failed") + .log_err() + .flatten() + else { return; }; this.update(cx, |this, cx| { - if this.pending_completions[0].id == pending_completion_id { - this.pending_completions.remove(0); - } else { - this.pending_completions.clear(); - } - if let Some(old_completion) = this.current_completion.as_ref() { let snapshot = buffer.read(cx).snapshot(); if new_completion.should_replace_completion(old_completion, &snapshot) { this.zeta.update(cx, |zeta, cx| { zeta.completion_shown(&new_completion.completion, cx); }); + this.take_current_edit_prediction(cx); this.current_completion = Some(new_completion); } } else { @@ -1586,13 +1628,16 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { if self.pending_completions.len() <= 1 { self.pending_completions.push(PendingCompletion { id: pending_completion_id, - _task: task, + task, }); } else if self.pending_completions.len() == 2 { - self.pending_completions.pop(); + if let Some(discarded) = self.pending_completions.pop() { + self.canceled_completions + .insert(discarded.id, discarded.task); + } self.pending_completions.push(PendingCompletion { id: pending_completion_id, - _task: task, + task, }); } } @@ -1608,14 +1653,12 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { } fn accept(&mut self, cx: &mut Context) { - let completion_id = self - .current_completion - .as_ref() - .map(|completion| completion.completion.id); - if let Some(completion_id) = completion_id { + let completion = self.current_completion.as_mut(); + if let Some(completion) = completion { + completion.was_accepted = true; self.zeta .update(cx, |zeta, cx| { - zeta.accept_edit_prediction(completion_id, cx) + zeta.accept_edit_prediction(completion.completion.id, cx) }) .detach(); } @@ -1624,11 +1667,7 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { fn discard(&mut self, cx: &mut Context) { self.pending_completions.clear(); - if let Some(completion) = self.current_completion.take() { - self.zeta.update(cx, |zeta, cx| { - zeta.discard_completion(completion.completion.id, completion.was_shown, cx); - }); - } + self.take_current_edit_prediction(cx); } fn did_show(&mut self, _cx: &mut Context) { @@ -1651,13 +1690,13 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { // Invalidate previous completion if it was generated for a different buffer. 
if *buffer_id != buffer.entity_id() { - self.current_completion.take(); + self.take_current_edit_prediction(cx); return None; } let buffer = buffer.read(cx); let Some(edits) = completion.interpolate(&buffer.snapshot()) else { - self.current_completion.take(); + self.take_current_edit_prediction(cx); return None; }; From b463266fa17fe3d97e0964727afbe7c092ffad95 Mon Sep 17 00:00:00 2001 From: warrenjokinen <110791849+warrenjokinen@users.noreply.github.com> Date: Sun, 16 Nov 2025 21:26:38 -0700 Subject: [PATCH 0135/1030] Remove mention of Fireside Hacks (#42853) Fireside Hack events are no longer being held. Closes #ISSUE Release Notes: - N/A --- crates/gpui/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/gpui/README.md b/crates/gpui/README.md index 2c411f76cd4782904f5e704c446a6f0e76f7d9ab..30847c8b8cfb0ac5c662601acbe2008b41e42ee1 100644 --- a/crates/gpui/README.md +++ b/crates/gpui/README.md @@ -63,4 +63,4 @@ In addition to the systems above, GPUI provides a range of smaller services that - The `[gpui::test]` macro provides a convenient way to write tests for your GPUI applications. Tests also have their own kind of context, a `TestAppContext` which provides ways of simulating common platform input. See `app::test_context` and `test` modules for more details. -Currently, the best way to learn about these APIs is to read the Zed source code, ask us about it at a fireside hack, or drop a question in the [Zed Discord](https://zed.dev/community-links). We're working on improving the documentation, creating more examples, and will be publishing more guides to GPUI on our [blog](https://zed.dev/blog). +Currently, the best way to learn about these APIs is to read the Zed source code or drop a question in the [Zed Discord](https://zed.dev/community-links). We're working on improving the documentation, creating more examples, and will be publishing more guides to GPUI on our [blog](https://zed.dev/blog). 
From d32934a893584402ac49f0775ba77bd1af90ecbd Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Mon, 17 Nov 2025 14:35:54 +0530 Subject: [PATCH 0136/1030] languages: Fix indentation for if/else statements in C/C++ without braces (#41670) Closes #41179 Release Notes: - Fixed indentation for if/else statements in C/C++ without braces --- crates/languages/Cargo.toml | 1 + crates/languages/src/c.rs | 231 ++++++++++++++++++++++-- crates/languages/src/c/config.toml | 2 +- crates/languages/src/cpp.rs | 254 +++++++++++++++++++++++++++ crates/languages/src/cpp/config.toml | 2 +- crates/languages/src/lib.rs | 1 + 6 files changed, 477 insertions(+), 14 deletions(-) create mode 100644 crates/languages/src/cpp.rs diff --git a/crates/languages/Cargo.toml b/crates/languages/Cargo.toml index e78f29a8d6ef49726471fa186a2e9dee095fa0d5..635cb11ba5ef5bfd904eaac865d86cad6d6309fc 100644 --- a/crates/languages/Cargo.toml +++ b/crates/languages/Cargo.toml @@ -98,6 +98,7 @@ text.workspace = true theme = { workspace = true, features = ["test-support"] } tree-sitter-bash.workspace = true tree-sitter-c.workspace = true +tree-sitter-cpp.workspace = true tree-sitter-css.workspace = true tree-sitter-go.workspace = true tree-sitter-python.workspace = true diff --git a/crates/languages/src/c.rs b/crates/languages/src/c.rs index 3351c9df033a5e4550e34b5c9dbf1d119b189f6d..cffd01136be2b3762735087eaf5844866d47e174 100644 --- a/crates/languages/src/c.rs +++ b/crates/languages/src/c.rs @@ -395,10 +395,10 @@ mod tests { use language::{AutoindentMode, Buffer}; use settings::SettingsStore; use std::num::NonZeroU32; + use unindent::Unindent; #[gpui::test] - async fn test_c_autoindent(cx: &mut TestAppContext) { - // cx.executor().set_block_on_ticks(usize::MAX..=usize::MAX); + async fn test_c_autoindent_basic(cx: &mut TestAppContext) { cx.update(|cx| { let test_settings = SettingsStore::test(cx); cx.set_global(test_settings); @@ -413,23 +413,230 @@ mod tests { cx.new(|cx| { let mut buffer = Buffer::local("", cx).with_language(language, cx); - // empty function buffer.edit([(0..0, "int main() {}")], None, cx); - // indent inside braces let ix = buffer.len() - 1; buffer.edit([(ix..ix, "\n\n")], Some(AutoindentMode::EachLine), cx); - assert_eq!(buffer.text(), "int main() {\n \n}"); + assert_eq!( + buffer.text(), + "int main() {\n \n}", + "content inside braces should be indented" + ); - // indent body of single-statement if statement - let ix = buffer.len() - 2; - buffer.edit([(ix..ix, "if (a)\nb;")], Some(AutoindentMode::EachLine), cx); - assert_eq!(buffer.text(), "int main() {\n if (a)\n b;\n}"); + buffer + }); + } - // indent inside field expression - let ix = buffer.len() - 3; + #[gpui::test] + async fn test_c_autoindent_if_else(cx: &mut TestAppContext) { + cx.update(|cx| { + let test_settings = SettingsStore::test(cx); + cx.set_global(test_settings); + language::init(cx); + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |s| { + s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); + }); + }); + }); + let language = crate::language("c", tree_sitter_c::LANGUAGE.into()); + + cx.new(|cx| { + let mut buffer = Buffer::local("", cx).with_language(language, cx); + + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b; + } + "# + .unindent(), + "body of if-statement without braces should be indented" + ); + + let ix = buffer.len() - 4; buffer.edit([(ix..ix, "\n.c")], 
Some(AutoindentMode::EachLine), cx); - assert_eq!(buffer.text(), "int main() {\n if (a)\n b\n .c;\n}"); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b + .c; + } + "# + .unindent(), + "field expression (.c) should be indented further than the statement body" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) a++; + else b++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) a++; + else b++; + } + "# + .unindent(), + "single-line if/else without braces should align at the same level" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b++; + else + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b++; + else + c++; + } + "# + .unindent(), + "multi-line if/else without braces should indent statement bodies" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + if (b) + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + if (b) + c++; + } + "# + .unindent(), + "nested if statements without braces should indent properly" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b++; + else if (c) + d++; + else + f++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b++; + else if (c) + d++; + else + f++; + } + "# + .unindent(), + "else-if chains should align all conditions at same level with indented bodies" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) { + b++; + } else + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) { + b++; + } else + c++; + } + "# + .unindent(), + "mixed braces should indent properly" + ); buffer }); diff --git a/crates/languages/src/c/config.toml b/crates/languages/src/c/config.toml index 76a27ccc81911bcf25c7da3efef191214eab7b00..c490269b12309632d2fd8fb944ed48ee74c46075 100644 --- a/crates/languages/src/c/config.toml +++ b/crates/languages/src/c/config.toml @@ -4,7 +4,7 @@ path_suffixes = ["c"] line_comments = ["// "] decrease_indent_patterns = [ { pattern = "^\\s*\\{.*\\}?\\s*$", valid_after = ["if", "for", "while", "do", "switch", "else"] }, - { pattern = "^\\s*else\\s*$", valid_after = ["if"] } + { pattern = "^\\s*else\\b", valid_after = ["if"] } ] autoclose_before = ";:.,=}])>" brackets = [ diff --git a/crates/languages/src/cpp.rs b/crates/languages/src/cpp.rs new file mode 100644 index 0000000000000000000000000000000000000000..2094d946eadbf32bdf3a4660d226c660eda9d4dc --- /dev/null +++ b/crates/languages/src/cpp.rs @@ -0,0 +1,254 @@ +#[cfg(test)] +mod tests { + use gpui::{AppContext as _, BorrowAppContext, TestAppContext}; + use language::{AutoindentMode, Buffer}; + use settings::SettingsStore; + use std::num::NonZeroU32; + use unindent::Unindent; + + #[gpui::test] + async fn test_cpp_autoindent_basic(cx: &mut TestAppContext) { + cx.update(|cx| { + let 
test_settings = SettingsStore::test(cx); + cx.set_global(test_settings); + language::init(cx); + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |s| { + s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); + }); + }); + }); + let language = crate::language("cpp", tree_sitter_cpp::LANGUAGE.into()); + + cx.new(|cx| { + let mut buffer = Buffer::local("", cx).with_language(language, cx); + + buffer.edit([(0..0, "int main() {}")], None, cx); + + let ix = buffer.len() - 1; + buffer.edit([(ix..ix, "\n\n")], Some(AutoindentMode::EachLine), cx); + assert_eq!( + buffer.text(), + "int main() {\n \n}", + "content inside braces should be indented" + ); + + buffer + }); + } + + #[gpui::test] + async fn test_cpp_autoindent_if_else(cx: &mut TestAppContext) { + cx.update(|cx| { + let test_settings = SettingsStore::test(cx); + cx.set_global(test_settings); + language::init(cx); + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |s| { + s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); + }); + }); + }); + let language = crate::language("cpp", tree_sitter_cpp::LANGUAGE.into()); + + cx.new(|cx| { + let mut buffer = Buffer::local("", cx).with_language(language, cx); + + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b; + } + "# + .unindent(), + "body of if-statement without braces should be indented" + ); + + let ix = buffer.len() - 4; + buffer.edit([(ix..ix, "\n.c")], Some(AutoindentMode::EachLine), cx); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b + .c; + } + "# + .unindent(), + "field expression (.c) should be indented further than the statement body" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) a++; + else b++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) a++; + else b++; + } + "# + .unindent(), + "single-line if/else without braces should align at the same level" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b++; + else + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b++; + else + c++; + } + "# + .unindent(), + "multi-line if/else without braces should indent statement bodies" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + if (b) + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + if (b) + c++; + } + "# + .unindent(), + "nested if statements without braces should indent properly" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); + buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) + b++; + else if (c) + d++; + else + f++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) + b++; + else if (c) + d++; + else + f++; + } + "# + .unindent(), + "else-if chains should align all conditions at same level with indented bodies" + ); + + buffer.edit([(0..buffer.len(), "")], Some(AutoindentMode::EachLine), cx); 
+ buffer.edit( + [( + 0..0, + r#" + int main() { + if (a) { + b++; + } else + c++; + } + "# + .unindent(), + )], + Some(AutoindentMode::EachLine), + cx, + ); + assert_eq!( + buffer.text(), + r#" + int main() { + if (a) { + b++; + } else + c++; + } + "# + .unindent(), + "mixed braces should indent properly" + ); + + buffer + }); + } +} diff --git a/crates/languages/src/cpp/config.toml b/crates/languages/src/cpp/config.toml index 4d3c0a0a38664f4dd584a0ce3f3544662b19bbae..1a994789232e4a58f4bdb2436865c0c28b9164f0 100644 --- a/crates/languages/src/cpp/config.toml +++ b/crates/languages/src/cpp/config.toml @@ -4,7 +4,7 @@ path_suffixes = ["cc", "hh", "cpp", "h", "hpp", "cxx", "hxx", "c++", "ipp", "inl line_comments = ["// ", "/// ", "//! "] decrease_indent_patterns = [ { pattern = "^\\s*\\{.*\\}?\\s*$", valid_after = ["if", "for", "while", "do", "switch", "else"] }, - { pattern = "^\\s*else\\s*$", valid_after = ["if"] } + { pattern = "^\\s*else\\b", valid_after = ["if"] } ] autoclose_before = ";:.,=}])>" brackets = [ diff --git a/crates/languages/src/lib.rs b/crates/languages/src/lib.rs index 76e1ae5edd2593907bd374d398946a1f6083a82e..0fe5a5b77477e4358dbf03a9d4131f45b3b6efec 100644 --- a/crates/languages/src/lib.rs +++ b/crates/languages/src/lib.rs @@ -19,6 +19,7 @@ use crate::{ mod bash; mod c; +mod cpp; mod css; mod go; mod json; From 86484aaded76191002252d7204fddf657db9795d Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Mon, 17 Nov 2025 15:59:29 +0530 Subject: [PATCH 0137/1030] languages: Clean up invalid init calls after recent API changes (#42866) Related to https://github.com/zed-industries/zed/pull/41670 Release Notes: - Cleaned up invalid init calls after recent API changes in https://github.com/zed-industries/zed/pull/42238 --- crates/languages/src/c.rs | 1 - crates/languages/src/cpp.rs | 2 -- 2 files changed, 3 deletions(-) diff --git a/crates/languages/src/c.rs b/crates/languages/src/c.rs index cffd01136be2b3762735087eaf5844866d47e174..8fe2bae693d702346a1ecc96334d35b89d179b3b 100644 --- a/crates/languages/src/c.rs +++ b/crates/languages/src/c.rs @@ -432,7 +432,6 @@ mod tests { cx.update(|cx| { let test_settings = SettingsStore::test(cx); cx.set_global(test_settings); - language::init(cx); cx.update_global::(|store, cx| { store.update_user_settings(cx, |s| { s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); diff --git a/crates/languages/src/cpp.rs b/crates/languages/src/cpp.rs index 2094d946eadbf32bdf3a4660d226c660eda9d4dc..85a3fb5045275648282c7a8cbad58779491ad7dc 100644 --- a/crates/languages/src/cpp.rs +++ b/crates/languages/src/cpp.rs @@ -11,7 +11,6 @@ mod tests { cx.update(|cx| { let test_settings = SettingsStore::test(cx); cx.set_global(test_settings); - language::init(cx); cx.update_global::(|store, cx| { store.update_user_settings(cx, |s| { s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); @@ -42,7 +41,6 @@ mod tests { cx.update(|cx| { let test_settings = SettingsStore::test(cx); cx.set_global(test_settings); - language::init(cx); cx.update_global::(|store, cx| { store.update_user_settings(cx, |s| { s.project.all_languages.defaults.tab_size = NonZeroU32::new(2); From cdcc068906636f74a26e4ded50bb7140633188c0 Mon Sep 17 00:00:00 2001 From: Dino Date: Mon, 17 Nov 2025 11:14:49 +0000 Subject: [PATCH 0138/1030] vim: Fix temporary mode exit on end of line (#42742) When using the end of line motion ($) while in temporary mode, the cursor would be placed in insert mode just before the last character instead of after, just like in NeoVim. 
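For illustration, a rough before/after of the behaviour, using the `ˇ` cursor notation from the tests below:

```
insert mode:         lorem ˇipsum
keystrokes:          ctrl-o $
before this change:  lorem ipsuˇm   (cursor stops just before the last character)
with this change:    lorem ipsumˇ   (cursor ends up past it, matching NeoVim)
```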
This happens because `EndOfLine` kind of assumes that we're in `Normal` mode and simply places the cursor in the last character instead of the newline character. This commit moves the cursor one position to the right when exiting temporary mode and the motion used was `Motion::EndOfLine` - Update `vim::normal::Vim.exit_temporary_normal` to now accept a `Option<&Motion>` argument, in case callers want this new logic to potentially be applied Closes #42278 Release Notes: - Fixed temporary mode exit when using `$` to move to the end of the line --- crates/vim/src/normal.rs | 53 ++++++++++++++++++- crates/vim/src/normal/scroll.rs | 2 +- crates/vim/src/normal/yank.rs | 4 +- .../src/test/neovim_backed_test_context.rs | 1 + crates/vim/test_data/test_temporary_mode.json | 27 ++++++++++ 5 files changed, 82 insertions(+), 5 deletions(-) create mode 100644 crates/vim/test_data/test_temporary_mode.json diff --git a/crates/vim/src/normal.rs b/crates/vim/src/normal.rs index e200c24b94468b141020e12c0230fb1908ffbe8e..fae810d64c587f96c587057615b138b4baabd227 100644 --- a/crates/vim/src/normal.rs +++ b/crates/vim/src/normal.rs @@ -386,6 +386,8 @@ impl Vim { window: &mut Window, cx: &mut Context, ) { + let temp_mode_motion = motion.clone(); + match operator { None => self.move_cursor(motion, times, window, cx), Some(Operator::Change) => self.change_motion(motion, times, forced_motion, window, cx), @@ -475,7 +477,7 @@ impl Vim { } } // Exit temporary normal mode (if active). - self.exit_temporary_normal(window, cx); + self.exit_temporary_normal(Some(&temp_mode_motion), window, cx); } pub fn normal_object( @@ -1052,9 +1054,25 @@ impl Vim { }); } - fn exit_temporary_normal(&mut self, window: &mut Window, cx: &mut Context) { + /// If temporary mode is enabled, switches back to insert mode, using the + /// provided `motion` to determine whether to move the cursor before + /// re-enabling insert mode, for example, when `EndOfLine` ($) is used. + fn exit_temporary_normal( + &mut self, + motion: Option<&Motion>, + window: &mut Window, + cx: &mut Context, + ) { if self.temp_mode { self.switch_mode(Mode::Insert, true, window, cx); + + // Since we're switching from `Normal` mode to `Insert` mode, we'll + // move the cursor one position to the right, to ensure that, for + // motions like `EndOfLine` ($), the cursor is actually at the end + // of line and not on the last character. + if matches!(motion, Some(Motion::EndOfLine { .. })) { + self.move_cursor(Motion::Right, Some(1), window, cx); + } } } } @@ -2269,4 +2287,35 @@ mod test { assert_eq!(workspace.active_pane().read(cx).active_item_index(), 1); }); } + + #[gpui::test] + async fn test_temporary_mode(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new(cx).await; + + // Test jumping to the end of the line ($). + cx.set_shared_state(indoc! {"lorem ˇipsum"}).await; + cx.simulate_shared_keystrokes("i").await; + cx.shared_state().await.assert_matches(); + cx.simulate_shared_keystrokes("ctrl-o $").await; + cx.shared_state().await.assert_eq(indoc! {"lorem ipsumˇ"}); + + // Test jumping to the next word. + cx.set_shared_state(indoc! {"loremˇ ipsum dolor"}).await; + cx.simulate_shared_keystrokes("a").await; + cx.shared_state().await.assert_matches(); + cx.simulate_shared_keystrokes("a n d space ctrl-o w").await; + cx.shared_state() + .await + .assert_eq(indoc! {"lorem and ipsum ˇdolor"}); + + // Test yanking to end of line ($). + cx.set_shared_state(indoc! 
{"lorem ˇipsum dolor"}).await; + cx.simulate_shared_keystrokes("i").await; + cx.shared_state().await.assert_matches(); + cx.simulate_shared_keystrokes("a n d space ctrl-o y $") + .await; + cx.shared_state() + .await + .assert_eq(indoc! {"lorem and ˇipsum dolor"}); + } } diff --git a/crates/vim/src/normal/scroll.rs b/crates/vim/src/normal/scroll.rs index ff884e3b7393b39b86114338fe2af11e384e1fa0..9346d76323c4fb6c181fb914587a710c94be4537 100644 --- a/crates/vim/src/normal/scroll.rs +++ b/crates/vim/src/normal/scroll.rs @@ -96,7 +96,7 @@ impl Vim { ) { let amount = by(Vim::take_count(cx).map(|c| c as f32)); Vim::take_forced_motion(cx); - self.exit_temporary_normal(window, cx); + self.exit_temporary_normal(None, window, cx); self.update_editor(cx, |_, editor, cx| { scroll_editor(editor, move_cursor, amount, window, cx) }); diff --git a/crates/vim/src/normal/yank.rs b/crates/vim/src/normal/yank.rs index d5a45fca544d61735f62a8f46e849db2c009847f..4f1274dd88359fe8c3eb1b08ab3910c513b2d98d 100644 --- a/crates/vim/src/normal/yank.rs +++ b/crates/vim/src/normal/yank.rs @@ -59,7 +59,7 @@ impl Vim { }); }); }); - self.exit_temporary_normal(window, cx); + self.exit_temporary_normal(None, window, cx); } pub fn yank_object( @@ -90,7 +90,7 @@ impl Vim { }); }); }); - self.exit_temporary_normal(window, cx); + self.exit_temporary_normal(None, window, cx); } pub fn yank_selections_content( diff --git a/crates/vim/src/test/neovim_backed_test_context.rs b/crates/vim/src/test/neovim_backed_test_context.rs index ce2bb6eb7b6f77788f3bc002ff979fdbb251cb94..21cdda111c4fdacaf0871dd087bca01de6f83957 100644 --- a/crates/vim/src/test/neovim_backed_test_context.rs +++ b/crates/vim/src/test/neovim_backed_test_context.rs @@ -31,6 +31,7 @@ pub struct SharedState { } impl SharedState { + /// Assert that both Zed and NeoVim have the same content and mode. #[track_caller] pub fn assert_matches(&self) { if self.neovim != self.editor || self.neovim_mode != self.editor_mode { diff --git a/crates/vim/test_data/test_temporary_mode.json b/crates/vim/test_data/test_temporary_mode.json new file mode 100644 index 0000000000000000000000000000000000000000..be370cf744f9fbd9bfed0a89a6db5ef7b6d568ad --- /dev/null +++ b/crates/vim/test_data/test_temporary_mode.json @@ -0,0 +1,27 @@ +{"Put":{"state":"lorem ˇipsum"}} +{"Key":"i"} +{"Get":{"state":"lorem ˇipsum","mode":"Insert"}} +{"Key":"ctrl-o"} +{"Key":"$"} +{"Get":{"state":"lorem ipsumˇ","mode":"Insert"}} +{"Put":{"state":"loremˇ ipsum dolor"}} +{"Key":"a"} +{"Get":{"state":"lorem ˇipsum dolor","mode":"Insert"}} +{"Key":"a"} +{"Key":"n"} +{"Key":"d"} +{"Key":"space"} +{"Key":"ctrl-o"} +{"Key":"w"} +{"Get":{"state":"lorem and ipsum ˇdolor","mode":"Insert"}} +{"Put":{"state":"lorem ˇipsum dolor"}} +{"Key":"i"} +{"Get":{"state":"lorem ˇipsum dolor","mode":"Insert"}} +{"Key":"a"} +{"Key":"n"} +{"Key":"d"} +{"Key":"space"} +{"Key":"ctrl-o"} +{"Key":"y"} +{"Key":"$"} +{"Get":{"state":"lorem and ˇipsum dolor","mode":"Insert"}} From 175162af4f53e177511e83d24067c3bbc438c04d Mon Sep 17 00:00:00 2001 From: Serophots <47299955+Serophots@users.noreply.github.com> Date: Mon, 17 Nov 2025 11:23:12 +0000 Subject: [PATCH 0139/1030] project_panel: Fix preview tabs disabling focusing files after just one click in project panel (#42836) Closes #41484 With preview tabs disabled, when you click once on a file in the project panel, rather than focusing on that file, zed will incorrectly focus on the text editor panel. 
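For context, "preview tabs disabled" means a `settings.json` entry roughly like the one below; the `preview_tabs` key name is assumed from the `PreviewTabsSettings` type touched by the fix:

```json
{
  "preview_tabs": {
    "enabled": false
  }
}
```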
This means if you click on a file to focus it, then follow up with a keybind like backspace to delete that file, it doesn't delete that file because the backspace goes through to the text editor instead. Incorrect behaviour seen here: https://github.com/user-attachments/assets/8c2dea90-bd90-4507-8ba6-344be348f151 Release Notes: - Fixed improper UI focus behaviour in the project panel when preview tabs are disabled --- crates/project_panel/src/project_panel.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 212b301a788c96754137c83f98ef7bfda3560a26..eef1fab802180aaa3681a9d6ca3c2e3156c930b1 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -4698,7 +4698,7 @@ impl ProjectPanel { } else { let preview_tabs_enabled = PreviewTabsSettings::get_global(cx).enabled; let click_count = event.click_count(); - let focus_opened_item = !preview_tabs_enabled || click_count > 1; + let focus_opened_item = click_count > 1; let allow_preview = preview_tabs_enabled && click_count == 1; project_panel.open_entry(entry_id, focus_opened_item, allow_preview, cx); } From a2d3e3baf94d79e236a79020673f5b7929ec7de5 Mon Sep 17 00:00:00 2001 From: Lucas Parry Date: Mon, 17 Nov 2025 23:22:46 +1100 Subject: [PATCH 0140/1030] project_panel: Add sort mode (#40160) Closes #4533 (partly at least) Release Notes: - Added `project_panel.sort_mode` option to control explorer file sort (directories first, mixed, files first) ## Summary Adds three sorting modes for the project panel to give users more control over how files and directories are displayed: - **`directories_first`** (default): Current behaviour - directories grouped before files - **`mixed`**: Files and directories sorted together alphabetically - **`files_first`**: filed grouped before directories ## Motivation Users coming from different editors and file managers have different expectations for file sorting. Some prefer directories grouped at the top (traditional), while others prefer the macOS Finder-style mixed sorting where "Apple1/", "apple2.tsx" and "Apple3/" appear alphabetically mixed together. ### Screenshots New sort options in settings: image Directories first | Mixed | Files first -------------|-----|----- image | image | image ### Agent usage Copilot-cli/claude-code/codex-cli helped out a lot. 
I'm not from a rust background, but really wanted this solved, and it gave me a chance to play with some of the coding agents I'm not permitted to use for work stuff --------- Co-authored-by: Smit Barmase --- assets/settings/default.json | 10 + crates/project_panel/benches/sorting.rs | 38 +- crates/project_panel/src/project_panel.rs | 55 +- .../src/project_panel_settings.rs | 8 +- .../project_panel/src/project_panel_tests.rs | 210 ++++++++ .../src/settings_content/workspace.rs | 29 ++ crates/settings/src/vscode_import.rs | 1 + crates/settings_ui/src/page_data.rs | 18 + crates/settings_ui/src/settings_ui.rs | 1 + crates/util/src/paths.rs | 488 +++++++++++++++++- docs/src/configuring-zed.md | 33 ++ docs/src/visual-customization.md | 2 + 12 files changed, 849 insertions(+), 44 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index f1b8d9e76bc600de6fd41834c08f40a9b2d51b42..6a04adf88e4593b4e04eda9a0bf64525293b2b0f 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -742,6 +742,16 @@ // "never" "show": "always" }, + // Sort order for entries in the project panel. + // This setting can take three values: + // + // 1. Show directories first, then files: + // "directories_first" + // 2. Mix directories and files together: + // "mixed" + // 3. Show files first, then directories: + // "files_first" + "sort_mode": "directories_first", // Whether to enable drag-and-drop operations in the project panel. "drag_and_drop": true, // Whether to hide the root entry when only one folder is open in the window. diff --git a/crates/project_panel/benches/sorting.rs b/crates/project_panel/benches/sorting.rs index 73d92ccd4913a008020a1480422c020117a723ca..df6740d346631fcd7745df44f32a5ed39fbdb521 100644 --- a/crates/project_panel/benches/sorting.rs +++ b/crates/project_panel/benches/sorting.rs @@ -1,13 +1,15 @@ use criterion::{Criterion, criterion_group, criterion_main}; use project::{Entry, EntryKind, GitEntry, ProjectEntryId}; -use project_panel::par_sort_worktree_entries; +use project_panel::par_sort_worktree_entries_with_mode; +use settings::ProjectPanelSortMode; use std::sync::Arc; use util::rel_path::RelPath; fn load_linux_repo_snapshot() -> Vec { - let file = std::fs::read_to_string( - "/Users/hiro/Projects/zed/crates/project_panel/benches/linux_repo_snapshot.txt", - ) + let file = std::fs::read_to_string(concat!( + env!("CARGO_MANIFEST_DIR"), + "/benches/linux_repo_snapshot.txt" + )) .expect("Failed to read file"); file.lines() .filter_map(|line| { @@ -42,10 +44,36 @@ fn load_linux_repo_snapshot() -> Vec { } fn criterion_benchmark(c: &mut Criterion) { let snapshot = load_linux_repo_snapshot(); + c.bench_function("Sort linux worktree snapshot", |b| { b.iter_batched( || snapshot.clone(), - |mut snapshot| par_sort_worktree_entries(&mut snapshot), + |mut snapshot| { + par_sort_worktree_entries_with_mode( + &mut snapshot, + ProjectPanelSortMode::DirectoriesFirst, + ) + }, + criterion::BatchSize::LargeInput, + ); + }); + + c.bench_function("Sort linux worktree snapshot (Mixed)", |b| { + b.iter_batched( + || snapshot.clone(), + |mut snapshot| { + par_sort_worktree_entries_with_mode(&mut snapshot, ProjectPanelSortMode::Mixed) + }, + criterion::BatchSize::LargeInput, + ); + }); + + c.bench_function("Sort linux worktree snapshot (FilesFirst)", |b| { + b.iter_batched( + || snapshot.clone(), + |mut snapshot| { + par_sort_worktree_entries_with_mode(&mut snapshot, ProjectPanelSortMode::FilesFirst) + }, criterion::BatchSize::LargeInput, ); }); diff --git 
a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index eef1fab802180aaa3681a9d6ca3c2e3156c930b1..d1c3c96c0ccd02d9696f8bfcedfd5af6e6e1da45 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -703,6 +703,9 @@ impl ProjectPanel { if project_panel_settings.hide_hidden != new_settings.hide_hidden { this.update_visible_entries(None, false, false, window, cx); } + if project_panel_settings.sort_mode != new_settings.sort_mode { + this.update_visible_entries(None, false, false, window, cx); + } if project_panel_settings.sticky_scroll && !new_settings.sticky_scroll { this.sticky_items_count = 0; } @@ -2102,7 +2105,8 @@ impl ProjectPanel { .map(|entry| entry.to_owned()) .collect(); - sort_worktree_entries(&mut siblings); + let mode = ProjectPanelSettings::get_global(cx).sort_mode; + sort_worktree_entries_with_mode(&mut siblings, mode); let sibling_entry_index = siblings .iter() .position(|sibling| sibling.id == latest_entry.id)?; @@ -3229,6 +3233,7 @@ impl ProjectPanel { let settings = ProjectPanelSettings::get_global(cx); let auto_collapse_dirs = settings.auto_fold_dirs; let hide_gitignore = settings.hide_gitignore; + let sort_mode = settings.sort_mode; let project = self.project.read(cx); let repo_snapshots = project.git_store().read(cx).repo_snapshots(cx); @@ -3440,7 +3445,10 @@ impl ProjectPanel { entry_iter.advance(); } - par_sort_worktree_entries(&mut visible_worktree_entries); + par_sort_worktree_entries_with_mode( + &mut visible_worktree_entries, + sort_mode, + ); new_state.visible_entries.push(VisibleEntriesForWorktree { worktree_id, entries: visible_worktree_entries, @@ -6101,21 +6109,42 @@ impl ClipboardEntry { } } -fn cmp>(lhs: T, rhs: T) -> cmp::Ordering { - let entry_a = lhs.as_ref(); - let entry_b = rhs.as_ref(); - util::paths::compare_rel_paths( - (&entry_a.path, entry_a.is_file()), - (&entry_b.path, entry_b.is_file()), - ) +#[inline] +fn cmp_directories_first(a: &Entry, b: &Entry) -> cmp::Ordering { + util::paths::compare_rel_paths((&a.path, a.is_file()), (&b.path, b.is_file())) +} + +#[inline] +fn cmp_mixed(a: &Entry, b: &Entry) -> cmp::Ordering { + util::paths::compare_rel_paths_mixed((&a.path, a.is_file()), (&b.path, b.is_file())) +} + +#[inline] +fn cmp_files_first(a: &Entry, b: &Entry) -> cmp::Ordering { + util::paths::compare_rel_paths_files_first((&a.path, a.is_file()), (&b.path, b.is_file())) +} + +#[inline] +fn cmp_with_mode(a: &Entry, b: &Entry, mode: &settings::ProjectPanelSortMode) -> cmp::Ordering { + match mode { + settings::ProjectPanelSortMode::DirectoriesFirst => cmp_directories_first(a, b), + settings::ProjectPanelSortMode::Mixed => cmp_mixed(a, b), + settings::ProjectPanelSortMode::FilesFirst => cmp_files_first(a, b), + } } -pub fn sort_worktree_entries(entries: &mut [impl AsRef]) { - entries.sort_by(|lhs, rhs| cmp(lhs, rhs)); +pub fn sort_worktree_entries_with_mode( + entries: &mut [impl AsRef], + mode: settings::ProjectPanelSortMode, +) { + entries.sort_by(|lhs, rhs| cmp_with_mode(lhs.as_ref(), rhs.as_ref(), &mode)); } -pub fn par_sort_worktree_entries(entries: &mut Vec) { - entries.par_sort_by(|lhs, rhs| cmp(lhs, rhs)); +pub fn par_sort_worktree_entries_with_mode( + entries: &mut Vec, + mode: settings::ProjectPanelSortMode, +) { + entries.par_sort_by(|lhs, rhs| cmp_with_mode(lhs, rhs, &mode)); } #[cfg(test)] diff --git a/crates/project_panel/src/project_panel_settings.rs b/crates/project_panel/src/project_panel_settings.rs index 
266ab761a103fa4ca2a2e9a4e09b96514bfd25c1..b0316270340203177278edebaececd0d86e39869 100644 --- a/crates/project_panel/src/project_panel_settings.rs +++ b/crates/project_panel/src/project_panel_settings.rs @@ -3,8 +3,8 @@ use gpui::Pixels; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use settings::{ - DockSide, ProjectPanelEntrySpacing, RegisterSetting, Settings, ShowDiagnostics, - ShowIndentGuides, + DockSide, ProjectPanelEntrySpacing, ProjectPanelSortMode, RegisterSetting, Settings, + ShowDiagnostics, ShowIndentGuides, }; use ui::{ px, @@ -33,6 +33,7 @@ pub struct ProjectPanelSettings { pub hide_hidden: bool, pub drag_and_drop: bool, pub auto_open: AutoOpenSettings, + pub sort_mode: ProjectPanelSortMode, } #[derive(Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] @@ -115,6 +116,9 @@ impl Settings for ProjectPanelSettings { on_drop: auto_open.on_drop.unwrap(), } }, + sort_mode: project_panel + .sort_mode + .unwrap_or(ProjectPanelSortMode::DirectoriesFirst), } } } diff --git a/crates/project_panel/src/project_panel_tests.rs b/crates/project_panel/src/project_panel_tests.rs index eb4c6280ccfb76134767f1de70112106a0594dc6..a85ba36c5297d7f40eb08ff42ddf086408a01316 100644 --- a/crates/project_panel/src/project_panel_tests.rs +++ b/crates/project_panel/src/project_panel_tests.rs @@ -326,6 +326,7 @@ async fn test_auto_collapse_dir_paths(cx: &mut gpui::TestAppContext) { ProjectPanelSettings::override_global( ProjectPanelSettings { auto_fold_dirs: true, + sort_mode: settings::ProjectPanelSortMode::DirectoriesFirst, ..settings }, cx, @@ -7704,6 +7705,215 @@ fn visible_entries_as_strings( result } +/// Test that missing sort_mode field defaults to DirectoriesFirst +#[gpui::test] +async fn test_sort_mode_default_fallback(cx: &mut gpui::TestAppContext) { + init_test(cx); + + // Verify that when sort_mode is not specified, it defaults to DirectoriesFirst + let default_settings = cx.read(|cx| *ProjectPanelSettings::get_global(cx)); + assert_eq!( + default_settings.sort_mode, + settings::ProjectPanelSortMode::DirectoriesFirst, + "sort_mode should default to DirectoriesFirst" + ); +} + +/// Test sort modes: DirectoriesFirst (default) vs Mixed +#[gpui::test] +async fn test_sort_mode_directories_first(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "zebra.txt": "", + "Apple": {}, + "banana.rs": "", + "Carrot": {}, + "aardvark.txt": "", + }), + ) + .await; + + let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + // Default sort mode should be DirectoriesFirst + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &[ + "v root", + " > Apple", + " > Carrot", + " aardvark.txt", + " banana.rs", + " zebra.txt", + ] + ); +} + +#[gpui::test] +async fn test_sort_mode_mixed(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "Zebra.txt": "", + "apple": {}, + "Banana.rs": "", + "carrot": {}, + "Aardvark.txt": "", + }), + ) + .await; + + let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut 
VisualTestContext::from_window(*workspace, cx); + + // Switch to Mixed mode + cx.update(|_, cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |settings| { + settings.project_panel.get_or_insert_default().sort_mode = + Some(settings::ProjectPanelSortMode::Mixed); + }); + }); + }); + + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + // Mixed mode: case-insensitive sorting + // Aardvark < apple < Banana < carrot < Zebra (all case-insensitive) + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &[ + "v root", + " Aardvark.txt", + " > apple", + " Banana.rs", + " > carrot", + " Zebra.txt", + ] + ); +} + +#[gpui::test] +async fn test_sort_mode_files_first(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "Zebra.txt": "", + "apple": {}, + "Banana.rs": "", + "carrot": {}, + "Aardvark.txt": "", + }), + ) + .await; + + let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + + // Switch to FilesFirst mode + cx.update(|_, cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |settings| { + settings.project_panel.get_or_insert_default().sort_mode = + Some(settings::ProjectPanelSortMode::FilesFirst); + }); + }); + }); + + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + // FilesFirst mode: files first, then directories (both case-insensitive) + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &[ + "v root", + " Aardvark.txt", + " Banana.rs", + " Zebra.txt", + " > apple", + " > carrot", + ] + ); +} + +#[gpui::test] +async fn test_sort_mode_toggle(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "file2.txt": "", + "dir1": {}, + "file1.txt": "", + }), + ) + .await; + + let project = Project::test(fs.clone(), ["/root".as_ref()], cx).await; + let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, ProjectPanel::new).unwrap(); + cx.run_until_parked(); + + // Initially DirectoriesFirst + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &["v root", " > dir1", " file1.txt", " file2.txt",] + ); + + // Toggle to Mixed + cx.update(|_, cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |settings| { + settings.project_panel.get_or_insert_default().sort_mode = + Some(settings::ProjectPanelSortMode::Mixed); + }); + }); + }); + cx.run_until_parked(); + + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &["v root", " > dir1", " file1.txt", " file2.txt",] + ); + + // Toggle back to DirectoriesFirst + cx.update(|_, cx| { + cx.update_global::(|store, cx| { + store.update_user_settings(cx, |settings| { + settings.project_panel.get_or_insert_default().sort_mode = + Some(settings::ProjectPanelSortMode::DirectoriesFirst); + }); + }); + }); + cx.run_until_parked(); + + assert_eq!( + visible_entries_as_strings(&panel, 0..50, cx), + &["v root", " > dir1", " file1.txt", " file2.txt",] + ); +} + fn init_test(cx: &mut TestAppContext) { cx.update(|cx| { let settings_store = SettingsStore::test(cx); diff --git a/crates/settings/src/settings_content/workspace.rs 
b/crates/settings/src/settings_content/workspace.rs index 01c40528cb4a9b614270efbbf0d39b1b424bb7dc..fc4c7fdbda553c2a959ba1062ee0f43d675b2f54 100644 --- a/crates/settings/src/settings_content/workspace.rs +++ b/crates/settings/src/settings_content/workspace.rs @@ -609,6 +609,10 @@ pub struct ProjectPanelSettingsContent { pub drag_and_drop: Option, /// Settings for automatically opening files. pub auto_open: Option, + /// How to order sibling entries in the project panel. + /// + /// Default: directories_first + pub sort_mode: Option, } #[derive( @@ -634,6 +638,31 @@ pub enum ProjectPanelEntrySpacing { Standard, } +#[derive( + Copy, + Clone, + Debug, + Default, + Serialize, + Deserialize, + JsonSchema, + MergeFrom, + PartialEq, + Eq, + strum::VariantArray, + strum::VariantNames, +)] +#[serde(rename_all = "snake_case")] +pub enum ProjectPanelSortMode { + /// Show directories first, then files + #[default] + DirectoriesFirst, + /// Mix directories and files together + Mixed, + /// Show files first, then directories + FilesFirst, +} + #[skip_serializing_none] #[derive( Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, Default, diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 0de37b5daecadb6d8da42d553bffa30d1ffeb1a7..4b87d6f5f30c075f90a6da698396bc4576a56b92 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -668,6 +668,7 @@ impl VsCodeSettings { show_diagnostics: self .read_bool("problems.decorations.enabled") .and_then(|b| if b { Some(ShowDiagnostics::Off) } else { None }), + sort_mode: None, starts_open: None, sticky_scroll: None, auto_open: None, diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index 611ce10a75e5e2e52c28b88d6583108a006e63b3..d776b9787eb804a77f2d4a6b6c605846eb6604ea 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -3822,6 +3822,24 @@ pub(crate) fn settings_data(cx: &App) -> Vec { metadata: None, files: USER, }), + SettingsPageItem::SettingItem(SettingItem { + title: "Sort Mode", + description: "Sort order for entries in the project panel.", + field: Box::new(SettingField { + pick: |settings_content| { + settings_content.project_panel.as_ref()?.sort_mode.as_ref() + }, + write: |settings_content, value| { + settings_content + .project_panel + .get_or_insert_default() + .sort_mode = value; + }, + json_path: Some("project_panel.sort_mode"), + }), + metadata: None, + files: USER, + }), SettingsPageItem::SectionHeader("Terminal Panel"), SettingsPageItem::SettingItem(SettingItem { title: "Terminal Dock", diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 3911a4e0cd3023524df9e023cfdc670fc7c24a8a..329679bccd4c10aed3398ac60a6c05f7922d9a9f 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -451,6 +451,7 @@ fn init_renderers(cx: &mut App) { .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) + .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) diff --git a/crates/util/src/paths.rs b/crates/util/src/paths.rs index d4081d2edc113b58795845680feb00f703622364..5813c444af555dc90c65ce6f1584067b446cc79b 100644 --- a/crates/util/src/paths.rs +++ b/crates/util/src/paths.rs @@ -944,36 +944,47 @@ pub fn natural_sort(a: &str, b: &str) -> 
Ordering { } } +/// Case-insensitive natural sort without applying the final lowercase/uppercase tie-breaker. +/// This is useful when comparing individual path components where we want to keep walking +/// deeper components before deciding on casing. +fn natural_sort_no_tiebreak(a: &str, b: &str) -> Ordering { + if a.eq_ignore_ascii_case(b) { + Ordering::Equal + } else { + natural_sort(a, b) + } +} + +fn stem_and_extension(filename: &str) -> (Option<&str>, Option<&str>) { + if filename.is_empty() { + return (None, None); + } + + match filename.rsplit_once('.') { + // Case 1: No dot was found. The entire name is the stem. + None => (Some(filename), None), + + // Case 2: A dot was found. + Some((before, after)) => { + // This is the crucial check for dotfiles like ".bashrc". + // If `before` is empty, the dot was the first character. + // In that case, we revert to the "whole name is the stem" logic. + if before.is_empty() { + (Some(filename), None) + } else { + // Otherwise, we have a standard stem and extension. + (Some(before), Some(after)) + } + } + } +} + pub fn compare_rel_paths( (path_a, a_is_file): (&RelPath, bool), (path_b, b_is_file): (&RelPath, bool), ) -> Ordering { let mut components_a = path_a.components(); let mut components_b = path_b.components(); - - fn stem_and_extension(filename: &str) -> (Option<&str>, Option<&str>) { - if filename.is_empty() { - return (None, None); - } - - match filename.rsplit_once('.') { - // Case 1: No dot was found. The entire name is the stem. - None => (Some(filename), None), - - // Case 2: A dot was found. - Some((before, after)) => { - // This is the crucial check for dotfiles like ".bashrc". - // If `before` is empty, the dot was the first character. - // In that case, we revert to the "whole name is the stem" logic. - if before.is_empty() { - (Some(filename), None) - } else { - // Otherwise, we have a standard stem and extension. - (Some(before), Some(after)) - } - } - } - } loop { match (components_a.next(), components_b.next()) { (Some(component_a), Some(component_b)) => { @@ -1020,6 +1031,156 @@ pub fn compare_rel_paths( } } +/// Compare two relative paths with mixed files and directories using +/// case-insensitive natural sorting. For example, "Apple", "aardvark.txt", +/// and "Zebra" would be sorted as: aardvark.txt, Apple, Zebra +/// (case-insensitive alphabetical). 
+pub fn compare_rel_paths_mixed( + (path_a, a_is_file): (&RelPath, bool), + (path_b, b_is_file): (&RelPath, bool), +) -> Ordering { + let original_paths_equal = std::ptr::eq(path_a, path_b) || path_a == path_b; + let mut components_a = path_a.components(); + let mut components_b = path_b.components(); + + loop { + match (components_a.next(), components_b.next()) { + (Some(component_a), Some(component_b)) => { + let a_leaf_file = a_is_file && components_a.rest().is_empty(); + let b_leaf_file = b_is_file && components_b.rest().is_empty(); + + let (a_stem, a_ext) = a_leaf_file + .then(|| stem_and_extension(component_a)) + .unwrap_or_default(); + let (b_stem, b_ext) = b_leaf_file + .then(|| stem_and_extension(component_b)) + .unwrap_or_default(); + let a_key = if a_leaf_file { + a_stem + } else { + Some(component_a) + }; + let b_key = if b_leaf_file { + b_stem + } else { + Some(component_b) + }; + + let ordering = match (a_key, b_key) { + (Some(a), Some(b)) => natural_sort_no_tiebreak(a, b) + .then_with(|| match (a_leaf_file, b_leaf_file) { + (true, false) if a == b => Ordering::Greater, + (false, true) if a == b => Ordering::Less, + _ => Ordering::Equal, + }) + .then_with(|| { + if a_leaf_file && b_leaf_file { + let a_ext_str = a_ext.unwrap_or_default().to_lowercase(); + let b_ext_str = b_ext.unwrap_or_default().to_lowercase(); + b_ext_str.cmp(&a_ext_str) + } else { + Ordering::Equal + } + }), + (Some(_), None) => Ordering::Greater, + (None, Some(_)) => Ordering::Less, + (None, None) => Ordering::Equal, + }; + + if !ordering.is_eq() { + return ordering; + } + } + (Some(_), None) => return Ordering::Greater, + (None, Some(_)) => return Ordering::Less, + (None, None) => { + // Deterministic tie-break: use natural sort to prefer lowercase when paths + // are otherwise equal but still differ in casing. + if !original_paths_equal { + return natural_sort(path_a.as_unix_str(), path_b.as_unix_str()); + } + return Ordering::Equal; + } + } + } +} + +/// Compare two relative paths with files before directories using +/// case-insensitive natural sorting. At each directory level, all files +/// are sorted before all directories, with case-insensitive alphabetical +/// ordering within each group. 
+pub fn compare_rel_paths_files_first( + (path_a, a_is_file): (&RelPath, bool), + (path_b, b_is_file): (&RelPath, bool), +) -> Ordering { + let original_paths_equal = std::ptr::eq(path_a, path_b) || path_a == path_b; + let mut components_a = path_a.components(); + let mut components_b = path_b.components(); + + loop { + match (components_a.next(), components_b.next()) { + (Some(component_a), Some(component_b)) => { + let a_leaf_file = a_is_file && components_a.rest().is_empty(); + let b_leaf_file = b_is_file && components_b.rest().is_empty(); + + let (a_stem, a_ext) = a_leaf_file + .then(|| stem_and_extension(component_a)) + .unwrap_or_default(); + let (b_stem, b_ext) = b_leaf_file + .then(|| stem_and_extension(component_b)) + .unwrap_or_default(); + let a_key = if a_leaf_file { + a_stem + } else { + Some(component_a) + }; + let b_key = if b_leaf_file { + b_stem + } else { + Some(component_b) + }; + + let ordering = match (a_key, b_key) { + (Some(a), Some(b)) => { + if a_leaf_file && !b_leaf_file { + Ordering::Less + } else if !a_leaf_file && b_leaf_file { + Ordering::Greater + } else { + natural_sort_no_tiebreak(a, b).then_with(|| { + if a_leaf_file && b_leaf_file { + let a_ext_str = a_ext.unwrap_or_default().to_lowercase(); + let b_ext_str = b_ext.unwrap_or_default().to_lowercase(); + a_ext_str.cmp(&b_ext_str) + } else { + Ordering::Equal + } + }) + } + } + (Some(_), None) => Ordering::Greater, + (None, Some(_)) => Ordering::Less, + (None, None) => Ordering::Equal, + }; + + if !ordering.is_eq() { + return ordering; + } + } + (Some(_), None) => return Ordering::Greater, + (None, Some(_)) => return Ordering::Less, + (None, None) => { + // Deterministic tie-break: use natural sort to prefer lowercase when paths + // are otherwise equal but still differ in casing. 
+ if !original_paths_equal { + return natural_sort(path_a.as_unix_str(), path_b.as_unix_str()); + } + return Ordering::Equal; + } + } + } +} + pub fn compare_paths( (path_a, a_is_file): (&Path, bool), (path_b, b_is_file): (&Path, bool), @@ -1265,6 +1426,285 @@ mod tests { ); } + #[perf] + fn compare_rel_paths_mixed_case_insensitive() { + // Test that mixed mode is case-insensitive + let mut paths = vec![ + (RelPath::unix("zebra.txt").unwrap(), true), + (RelPath::unix("Apple").unwrap(), false), + (RelPath::unix("banana.rs").unwrap(), true), + (RelPath::unix("Carrot").unwrap(), false), + (RelPath::unix("aardvark.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + // Case-insensitive: aardvark < Apple < banana < Carrot < zebra + assert_eq!( + paths, + vec![ + (RelPath::unix("aardvark.txt").unwrap(), true), + (RelPath::unix("Apple").unwrap(), false), + (RelPath::unix("banana.rs").unwrap(), true), + (RelPath::unix("Carrot").unwrap(), false), + (RelPath::unix("zebra.txt").unwrap(), true), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_basic() { + // Test that files come before directories + let mut paths = vec![ + (RelPath::unix("zebra.txt").unwrap(), true), + (RelPath::unix("Apple").unwrap(), false), + (RelPath::unix("banana.rs").unwrap(), true), + (RelPath::unix("Carrot").unwrap(), false), + (RelPath::unix("aardvark.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + // Files first (case-insensitive), then directories (case-insensitive) + assert_eq!( + paths, + vec![ + (RelPath::unix("aardvark.txt").unwrap(), true), + (RelPath::unix("banana.rs").unwrap(), true), + (RelPath::unix("zebra.txt").unwrap(), true), + (RelPath::unix("Apple").unwrap(), false), + (RelPath::unix("Carrot").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_case_insensitive() { + // Test case-insensitive sorting within files and directories + let mut paths = vec![ + (RelPath::unix("Zebra.txt").unwrap(), true), + (RelPath::unix("apple").unwrap(), false), + (RelPath::unix("Banana.rs").unwrap(), true), + (RelPath::unix("carrot").unwrap(), false), + (RelPath::unix("Aardvark.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("Aardvark.txt").unwrap(), true), + (RelPath::unix("Banana.rs").unwrap(), true), + (RelPath::unix("Zebra.txt").unwrap(), true), + (RelPath::unix("apple").unwrap(), false), + (RelPath::unix("carrot").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_numeric() { + // Test natural number sorting with files first + let mut paths = vec![ + (RelPath::unix("file10.txt").unwrap(), true), + (RelPath::unix("dir2").unwrap(), false), + (RelPath::unix("file2.txt").unwrap(), true), + (RelPath::unix("dir10").unwrap(), false), + (RelPath::unix("file1.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("file1.txt").unwrap(), true), + (RelPath::unix("file2.txt").unwrap(), true), + (RelPath::unix("file10.txt").unwrap(), true), + (RelPath::unix("dir2").unwrap(), false), + (RelPath::unix("dir10").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_case() { + // Test case-insensitive sorting with varied capitalization + let mut paths = vec![ + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix("readme.txt").unwrap(), true), + (RelPath::unix("ReadMe.rs").unwrap(), true), + ]; + 
paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + // All "readme" variants should group together, sorted by extension + assert_eq!( + paths, + vec![ + (RelPath::unix("readme.txt").unwrap(), true), + (RelPath::unix("ReadMe.rs").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_files_and_dirs() { + // Verify directories and files are still mixed + let mut paths = vec![ + (RelPath::unix("file2.txt").unwrap(), true), + (RelPath::unix("Dir1").unwrap(), false), + (RelPath::unix("file1.txt").unwrap(), true), + (RelPath::unix("dir2").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + // Case-insensitive: dir1, dir2, file1, file2 (all mixed) + assert_eq!( + paths, + vec![ + (RelPath::unix("Dir1").unwrap(), false), + (RelPath::unix("dir2").unwrap(), false), + (RelPath::unix("file1.txt").unwrap(), true), + (RelPath::unix("file2.txt").unwrap(), true), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_with_nested_paths() { + // Test that nested paths still work correctly + let mut paths = vec![ + (RelPath::unix("src/main.rs").unwrap(), true), + (RelPath::unix("Cargo.toml").unwrap(), true), + (RelPath::unix("src").unwrap(), false), + (RelPath::unix("target").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("Cargo.toml").unwrap(), true), + (RelPath::unix("src").unwrap(), false), + (RelPath::unix("src/main.rs").unwrap(), true), + (RelPath::unix("target").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_with_nested() { + // Files come before directories, even with nested paths + let mut paths = vec![ + (RelPath::unix("src/lib.rs").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix("src").unwrap(), false), + (RelPath::unix("tests").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix("src").unwrap(), false), + (RelPath::unix("src/lib.rs").unwrap(), true), + (RelPath::unix("tests").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_dotfiles() { + // Test that dotfiles are handled correctly in mixed mode + let mut paths = vec![ + (RelPath::unix(".gitignore").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix(".github").unwrap(), false), + (RelPath::unix("src").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix(".github").unwrap(), false), + (RelPath::unix(".gitignore").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix("src").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_dotfiles() { + // Test that dotfiles come first when they're files + let mut paths = vec![ + (RelPath::unix(".gitignore").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix(".github").unwrap(), false), + (RelPath::unix("src").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix(".gitignore").unwrap(), true), + (RelPath::unix("README.md").unwrap(), true), + (RelPath::unix(".github").unwrap(), false), + (RelPath::unix("src").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_same_stem_different_extension() { + // Files with same stem but different extensions 
should sort by extension + let mut paths = vec![ + (RelPath::unix("file.rs").unwrap(), true), + (RelPath::unix("file.md").unwrap(), true), + (RelPath::unix("file.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("file.txt").unwrap(), true), + (RelPath::unix("file.rs").unwrap(), true), + (RelPath::unix("file.md").unwrap(), true), + ] + ); + } + + #[perf] + fn compare_rel_paths_files_first_same_stem() { + // Same stem files should still sort by extension with files_first + let mut paths = vec![ + (RelPath::unix("main.rs").unwrap(), true), + (RelPath::unix("main.c").unwrap(), true), + (RelPath::unix("main").unwrap(), false), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_files_first(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("main.c").unwrap(), true), + (RelPath::unix("main.rs").unwrap(), true), + (RelPath::unix("main").unwrap(), false), + ] + ); + } + + #[perf] + fn compare_rel_paths_mixed_deep_nesting() { + // Test sorting with deeply nested paths + let mut paths = vec![ + (RelPath::unix("a/b/c.txt").unwrap(), true), + (RelPath::unix("A/B.txt").unwrap(), true), + (RelPath::unix("a.txt").unwrap(), true), + (RelPath::unix("A.txt").unwrap(), true), + ]; + paths.sort_by(|&a, &b| compare_rel_paths_mixed(a, b)); + assert_eq!( + paths, + vec![ + (RelPath::unix("A/B.txt").unwrap(), true), + (RelPath::unix("a/b/c.txt").unwrap(), true), + (RelPath::unix("a.txt").unwrap(), true), + (RelPath::unix("A.txt").unwrap(), true), + ] + ); + } + #[perf] fn path_with_position_parse_posix_path() { // Test POSIX filename edge cases diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 43c36767f0ebb526ec6f12649d0d03b027eab636..13d42a5c4c99f3a4aba3709d829f289e9e9826f8 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4298,6 +4298,7 @@ Run the {#action theme_selector::Toggle} action in the command palette to see a "indent_guides": { "show": "always" }, + "sort_mode": "directories_first", "hide_root": false, "hide_hidden": false, "starts_open": true, @@ -4514,6 +4515,38 @@ Run the {#action theme_selector::Toggle} action in the command palette to see a } ``` +### Sort Mode + +- Description: Sort order for entries in the project panel +- Setting: `sort_mode` +- Default: `directories_first` + +**Options** + +1. Show directories first, then files + +```json [settings] +{ + "sort_mode": "directories_first" +} +``` + +2. Mix directories and files together + +```json [settings] +{ + "sort_mode": "mixed" +} +``` + +3. Show files first, then directories + +```json [settings] +{ + "sort_mode": "files_first" +} +``` + ### Auto Open - Description: Control whether files are opened automatically after different creation flows in the project panel. diff --git a/docs/src/visual-customization.md b/docs/src/visual-customization.md index 98b07797a2f7904acd10fe54b04ab39fe0854667..3e4ff377f3cd54676f0b32f3f4853c9be6de706d 100644 --- a/docs/src/visual-customization.md +++ b/docs/src/visual-customization.md @@ -457,6 +457,8 @@ Project panel can be shown/hidden with {#action project_panel::ToggleFocus} ({#k // When to show indent guides in the project panel. (always, never) "show": "always" }, + // Sort order for entries (directories_first, mixed, files_first) + "sort_mode": "directories_first", // Whether to hide the root entry when only one folder is open in the window. "hide_root": false, // Whether to hide the hidden entries in the project panel. 
From a66a539a0984ad02908b79952e90d9978a603053 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 13:31:00 +0100 Subject: [PATCH 0141/1030] Reduce macro burden for rust-analyzer (#42871) This enables optimizations for our own proc-macros as well as some heavy hitters. Additionally this gates the `derive_inspector_reflection` to be skipped for rust-analyzer as it currently slows down rust-analyzer way too much Release Notes: - N/A *or* Added/Fixed/Improved ... --- Cargo.toml | 16 +++++++++++++++- crates/gpui/src/styled.rs | 3 ++- .../tests/derive_inspector_reflection.rs | 5 ++--- crates/ui/src/traits/styled_ext.rs | 6 +++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 87d76a3636de6fcf33431ae63b977e3236dcacae..be56964f753cded4b1e054583b989f798c3ca1e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -631,6 +631,7 @@ scap = { git = "https://github.com/zed-industries/scap", rev = "4afea48c3b002197 schemars = { version = "1.0", features = ["indexmap2"] } semver = "1.0" serde = { version = "1.0.221", features = ["derive", "rc"] } +serde_derive = "1.0.221" serde_json = { version = "1.0.144", features = ["preserve_order", "raw_value"] } serde_json_lenient = { version = "0.2", features = [ "preserve_order", @@ -724,6 +725,7 @@ yawc = "0.2.5" zeroize = "1.8" zstd = "0.11" + [workspace.dependencies.windows] version = "0.61" features = [ @@ -792,6 +794,19 @@ codegen-units = 16 codegen-units = 16 [profile.dev.package] +# proc-macros start +gpui_macros = { opt-level = 3 } +derive_refineable = { opt-level = 3 } +settings_macros = { opt-level = 3 } +sqlez_macros = { opt-level = 3, codegen-units = 1 } +ui_macros = { opt-level = 3 } +util_macros = { opt-level = 3 } +serde_derive = { opt-level = 3 } +quote = { opt-level = 3 } +syn = { opt-level = 3 } +proc-macro2 = { opt-level = 3 } +# proc-macros end + taffy = { opt-level = 3 } cranelift-codegen = { opt-level = 3 } cranelift-codegen-meta = { opt-level = 3 } @@ -833,7 +848,6 @@ semantic_version = { codegen-units = 1 } session = { codegen-units = 1 } snippet = { codegen-units = 1 } snippets_ui = { codegen-units = 1 } -sqlez_macros = { codegen-units = 1 } story = { codegen-units = 1 } supermaven_api = { codegen-units = 1 } telemetry_events = { codegen-units = 1 } diff --git a/crates/gpui/src/styled.rs b/crates/gpui/src/styled.rs index f653f050c07848340c8da1fa0e01a2a4da985bdb..b50432d332f7e26fdd4528c1644be3c9761b6ad0 100644 --- a/crates/gpui/src/styled.rs +++ b/crates/gpui/src/styled.rs @@ -13,8 +13,9 @@ const ELLIPSIS: SharedString = SharedString::new_static("…"); /// A trait for elements that can be styled. /// Use this to opt-in to a utility CSS-like styling API. +// gate on rust-analyzer so rust-analyzer never needs to expand this macro, it takes up to 10 seconds to expand due to inefficiencies in rust-analyzers proc-macro srv #[cfg_attr( - any(feature = "inspector", debug_assertions), + all(any(feature = "inspector", debug_assertions), not(rust_analyzer)), gpui_macros::derive_inspector_reflection )] pub trait Styled: Sized { diff --git a/crates/gpui_macros/tests/derive_inspector_reflection.rs b/crates/gpui_macros/tests/derive_inspector_reflection.rs index a0adcb7801e55d7272191a1e4e831b2c9c6b115c..92f4e56e9c0c48d9c9279e7bf1bb283eeb1139a7 100644 --- a/crates/gpui_macros/tests/derive_inspector_reflection.rs +++ b/crates/gpui_macros/tests/derive_inspector_reflection.rs @@ -1,8 +1,7 @@ //! This code was generated using Zed Agent with Claude Opus 4. 
-use gpui_macros::derive_inspector_reflection; - -#[derive_inspector_reflection] +// gate on rust-analyzer so rust-analyzer never needs to expand this macro, it takes up to 10 seconds to expand due to inefficiencies in rust-analyzers proc-macro srv +#[cfg_attr(not(rust_analyzer), gpui_macros::derive_inspector_reflection)] trait Transform: Clone { /// Doubles the value fn double(self) -> Self; diff --git a/crates/ui/src/traits/styled_ext.rs b/crates/ui/src/traits/styled_ext.rs index cf452a2826e75bd88910b605a90fe34aa0ea62bd..849e56a024ac1da03bb7a9de8dd574ea53f39627 100644 --- a/crates/ui/src/traits/styled_ext.rs +++ b/crates/ui/src/traits/styled_ext.rs @@ -18,7 +18,11 @@ fn elevated_borderless(this: E, cx: &mut App, index: ElevationIndex) } /// Extends [`gpui::Styled`] with Zed-specific styling methods. -#[cfg_attr(debug_assertions, gpui_macros::derive_inspector_reflection)] +// gate on rust-analyzer so rust-analyzer never needs to expand this macro, it takes up to 10 seconds to expand due to inefficiencies in rust-analyzers proc-macro srv +#[cfg_attr( + all(debug_assertions, not(rust_analyzer)), + gpui_macros::derive_inspector_reflection +)] pub trait StyledExt: Styled + Sized { /// Horizontally stacks elements. /// From f1bebd79d1e1debf21efc206ddf9c0606ef6cca4 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 17 Nov 2025 14:37:51 +0100 Subject: [PATCH 0142/1030] zeta2: Add skip-prediction flag to eval CLI (#42872) Release Notes: - N/A --- crates/zeta_cli/src/evaluate.rs | 135 +++++++++++++++++++------------- 1 file changed, 81 insertions(+), 54 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index b0b3820362889051e3e5c0eef03ef10c7f0d6fa8..a06662c8bf17535900923eb875261f911ded12f7 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -31,6 +31,8 @@ pub struct EvaluateArguments { cache: CacheMode, #[clap(short, long, default_value_t = 1, alias = "repeat")] repetitions: u16, + #[arg(long)] + skip_prediction: bool, } pub async fn run_evaluate( @@ -66,6 +68,7 @@ pub async fn run_evaluate( zeta, args.prompt_format, args.use_expected_context, + !args.skip_prediction, args.cache, cx, ) @@ -118,9 +121,14 @@ fn write_aggregated_scores( } if successful.len() > 1 { + let mut edit_predictions = successful + .iter() + .filter_map(|r| r.edit_prediction.as_ref()) + .peekable(); + let has_edit_predictions = edit_predictions.peek().is_some(); let aggregated_result = EvaluationResult { context: Scores::aggregate(successful.iter().map(|r| &r.context)), - edit_prediction: Scores::aggregate(successful.iter().map(|r| &r.edit_prediction)), + edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), }; writeln!(w, "\n{}", "-".repeat(80))?; @@ -149,6 +157,7 @@ pub async fn run_evaluate_one( zeta: Entity, prompt_format: PromptFormat, use_expected_context: bool, + predict: bool, cache_mode: CacheMode, cx: &mut AsyncApp, ) -> Result { @@ -164,7 +173,7 @@ pub async fn run_evaluate_one( ) .await?; - let evaluation_result = evaluate(&example.example, &predict_result); + let evaluation_result = evaluate(&example.example, &predict_result, predict); if repetition_ix.is_none() { write_eval_result( @@ -173,6 +182,7 @@ pub async fn run_evaluate_one( &evaluation_result, &mut std::io::stdout(), std::io::stdout().is_terminal(), + predict, )?; } @@ -185,6 +195,7 @@ pub async fn run_evaluate_one( &evaluation_result, &mut results_file, false, + predict, ) .log_err(); } @@ -198,25 
+209,29 @@ fn write_eval_result( evaluation_result: &EvaluationResult, out: &mut impl Write, use_color: bool, + predict: bool, ) -> Result<()> { - writeln!( - out, - "## Expected edit prediction:\n\n```diff\n{}\n```\n", - compare_diffs( - &example.example.expected_patch, - &predictions.diff, - use_color - ) - )?; - writeln!( - out, - "## Actual edit prediction:\n\n```diff\n{}\n```\n", - compare_diffs( - &predictions.diff, - &example.example.expected_patch, - use_color - ) - )?; + if predict { + writeln!( + out, + "## Expected edit prediction:\n\n```diff\n{}\n```\n", + compare_diffs( + &example.example.expected_patch, + &predictions.diff, + use_color + ) + )?; + writeln!( + out, + "## Actual edit prediction:\n\n```diff\n{}\n```\n", + compare_diffs( + &predictions.diff, + &example.example.expected_patch, + use_color + ) + )?; + } + writeln!(out, "{:#}", evaluation_result)?; anyhow::Ok(()) @@ -224,7 +239,7 @@ fn write_eval_result( #[derive(Debug, Default)] pub struct EvaluationResult { - pub edit_prediction: Scores, + pub edit_prediction: Option, pub context: Scores, } @@ -328,13 +343,19 @@ impl EvaluationResult { r#" ### Context Scores {} - -### Edit Prediction Scores -{} "#, self.context.to_markdown(), - self.edit_prediction.to_markdown() - ) + )?; + if let Some(prediction) = &self.edit_prediction { + write!( + f, + r#" + ### Edit Prediction Scores + {}"#, + prediction.to_markdown() + )?; + } + Ok(()) } fn fmt_table(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { @@ -357,20 +378,23 @@ impl EvaluationResult { self.context.recall() * 100.0, self.context.f1_score() * 100.0 )?; - writeln!( - f, - "Edit Prediction {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", - self.edit_prediction.true_positives, - self.edit_prediction.false_positives, - self.edit_prediction.false_negatives, - self.edit_prediction.precision() * 100.0, - self.edit_prediction.recall() * 100.0, - self.edit_prediction.f1_score() * 100.0 - ) + if let Some(edit_prediction) = &self.edit_prediction { + writeln!( + f, + "Edit Prediction {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + edit_prediction.true_positives, + edit_prediction.false_positives, + edit_prediction.false_negatives, + edit_prediction.precision() * 100.0, + edit_prediction.recall() * 100.0, + edit_prediction.f1_score() * 100.0 + )?; + } + Ok(()) } } -pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResult { +pub fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> EvaluationResult { let mut eval_result = EvaluationResult::default(); let actual_context_lines: HashSet<_> = preds @@ -420,24 +444,27 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails) -> EvaluationResul eval_result.context.false_positives = false_positive_lines.len(); - // todo: alternatives for patches - let expected_patch_lines = example - .expected_patch - .lines() - .map(DiffLine::parse) - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); - - let actual_patch_lines = preds - .diff - .lines() - .map(DiffLine::parse) - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); + if predict { + // todo: alternatives for patches + let expected_patch_lines = example + .expected_patch + .lines() + .map(DiffLine::parse) + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + let actual_patch_lines = preds + .diff + .lines() + 
.map(DiffLine::parse) + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); + } - eval_result.edit_prediction = Scores::new(&expected_patch_lines, &actual_patch_lines); eval_result } From e0b64773d9f216368d67ba929301a8617f35e0d3 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Mon, 17 Nov 2025 15:53:18 +0200 Subject: [PATCH 0143/1030] Properly sanitize out inlay hints from remote hosts (#42878) Part of https://github.com/zed-industries/zed/issues/42671 Release Notes: - Fixed remote hosts causing duplicate hints to be displayed --- crates/collab/src/tests/editor_tests.rs | 32 +++++++++++++++++-------- crates/project/src/lsp_store.rs | 11 +++++++++ 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index bdc024aaca7242ab0fe261e3b673bf4d0efe23b1..5880d998925743d4cdd822574b647b53194e2116 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -2169,16 +2169,28 @@ async fn test_inlay_hint_refresh_is_forwarded( } else { "initial hint" }; - Ok(Some(vec![lsp::InlayHint { - position: lsp::Position::new(0, character), - label: lsp::InlayHintLabel::String(label.to_string()), - kind: None, - text_edits: None, - tooltip: None, - padding_left: None, - padding_right: None, - data: None, - }])) + Ok(Some(vec![ + lsp::InlayHint { + position: lsp::Position::new(0, character), + label: lsp::InlayHintLabel::String(label.to_string()), + kind: None, + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + lsp::InlayHint { + position: lsp::Position::new(1090, 1090), + label: lsp::InlayHintLabel::String("out-of-bounds hint".to_string()), + kind: None, + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + ])) } }) .next() diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index cae4d64c67d3261f59d87273a38865992da18284..069c12c75c44e790028f27abdc12ffd6d2b613ab 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -6881,6 +6881,7 @@ impl LspStore { })) .await; + let buffer_snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot())?; let mut has_errors = false; let inlay_hints = inlay_hints .into_iter() @@ -6892,6 +6893,16 @@ impl LspStore { None } }) + .map(|(server_id, mut new_hints)| { + new_hints.retain(|hint| { + hint.position.is_valid(&buffer_snapshot) + && range.start.is_valid(&buffer_snapshot) + && range.end.is_valid(&buffer_snapshot) + && hint.position.cmp(&range.start, &buffer_snapshot).is_ge() + && hint.position.cmp(&range.end, &buffer_snapshot).is_lt() + }); + (server_id, new_hints) + }) .collect::>(); anyhow::ensure!( !has_errors || !inlay_hints.is_empty(), From 4092e81ada231a6dbe0d0f5324a83816c30069d9 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 10:59:46 -0300 Subject: [PATCH 0144/1030] keymap_editor: Adjust some items of the UI (#42876) - Only showing the "Create" menu item in the right-click context menu for actions that _do not_ contain a binding already assigned to them - Only show the "Clear Input" icon button in the keystroke modal when the input is focused/in recording mode - Add a subtle hover style to the table rows just to make it easier to navigate Release 
Notes: - N/A --- crates/keymap_editor/src/keymap_editor.rs | 4 ++- .../src/ui_components/keystroke_input.rs | 30 +++++++++++-------- crates/ui/src/components/data_table.rs | 5 ++-- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/crates/keymap_editor/src/keymap_editor.rs b/crates/keymap_editor/src/keymap_editor.rs index b5b9f92a491b1f8f5b3f68828095b5a7b6cecb39..58a2a5d044f208d967b58e275a5e908ff0c63418 100644 --- a/crates/keymap_editor/src/keymap_editor.rs +++ b/crates/keymap_editor/src/keymap_editor.rs @@ -958,12 +958,14 @@ impl KeymapEditor { let context_menu = ContextMenu::build(window, cx, |menu, _window, _cx| { menu.context(self.focus_handle.clone()) + .when(selected_binding_is_unbound, |this| { + this.action("Create", Box::new(CreateBinding)) + }) .action_disabled_when( selected_binding_is_unbound, "Edit", Box::new(EditBinding), ) - .action("Create", Box::new(CreateBinding)) .action_disabled_when( selected_binding_is_unbound, "Delete", diff --git a/crates/keymap_editor/src/ui_components/keystroke_input.rs b/crates/keymap_editor/src/ui_components/keystroke_input.rs index 5f85e5124f84dc6fc9a9f3ab95e72f15dc5fefeb..f1f583be7a6bb0aa1796c7c03d9aea084b3bdd3b 100644 --- a/crates/keymap_editor/src/ui_components/keystroke_input.rs +++ b/crates/keymap_editor/src/ui_components/keystroke_input.rs @@ -636,18 +636,24 @@ impl Render for KeystrokeInput { ) } }) - .child( - IconButton::new("clear-btn", IconName::Backspace) - .shape(IconButtonShape::Square) - .tooltip(Tooltip::for_action_title( - "Clear Keystrokes", - &ClearKeystrokes, - )) - .when(!is_focused, |this| this.icon_color(Color::Muted)) - .on_click(cx.listener(|this, _event, window, cx| { - this.clear_keystrokes(&ClearKeystrokes, window, cx); - })), - ), + .when(is_recording, |this| { + this.child( + IconButton::new("clear-btn", IconName::Backspace) + .shape(IconButtonShape::Square) + .tooltip(move |_, cx| { + Tooltip::with_meta( + "Clear Keystrokes", + Some(&ClearKeystrokes), + "Hit it three times to execute", + cx, + ) + }) + .when(!is_focused, |this| this.icon_color(Color::Muted)) + .on_click(cx.listener(|this, _event, window, cx| { + this.clear_keystrokes(&ClearKeystrokes, window, cx); + })), + ) + }), ) } } diff --git a/crates/ui/src/components/data_table.rs b/crates/ui/src/components/data_table.rs index 4a1f4939cca2eb85bb7a549d06af1e9ea8cf04d0..a505281cf3fa9868a19a04c168d0b1b5c71a4f85 100644 --- a/crates/ui/src/components/data_table.rs +++ b/crates/ui/src/components/data_table.rs @@ -641,11 +641,10 @@ pub fn render_table_row( .map_or([None; COLS], |widths| widths.map(Some)); let mut row = h_flex() - .h_full() .id(("table_row", row_index)) - .w_full() - .justify_between() + .size_full() .when_some(bg, |row, bg| row.bg(bg)) + .hover(|s| s.bg(cx.theme().colors().element_hover.opacity(0.6))) .when(!is_striped, |row| { row.border_b_1() .border_color(transparent_black()) From 9bebf314e001a6d462d4787245705cf7ecec29f0 Mon Sep 17 00:00:00 2001 From: tidely <43219534+tidely@users.noreply.github.com> Date: Mon, 17 Nov 2025 16:28:22 +0200 Subject: [PATCH 0145/1030] http_client: Remove unused `HttpClient::type_name` method (#42803) Closes #ISSUE Remove unused method `HttpClient::type_name`. Looking at the PR from a year ago when it was added, it was never actually used for anything and seems like a prototyping artifact. Other misc changes for the `http_client` crate include: - Use `derive_more::Deref` for `HttpClientWithUrl` (already used for `HttpClientWithProxy`) - Move `http_client::proxy()` higher up in the trait definition. 
(It was in between methods that have default implementations) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/gpui/src/app.rs | 4 --- crates/http_client/src/http_client.rs | 36 ++++----------------- crates/reqwest_client/src/reqwest_client.rs | 6 +--- 3 files changed, 7 insertions(+), 39 deletions(-) diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs index 864968b9e7a9ad862d9b67a19cc8897524dffb9e..9d93de53d06f3130f28b39c7b21611f7996abf89 100644 --- a/crates/gpui/src/app.rs +++ b/crates/gpui/src/app.rs @@ -2400,10 +2400,6 @@ impl HttpClient for NullHttpClient { fn proxy(&self) -> Option<&Url> { None } - - fn type_name(&self) -> &'static str { - type_name::() - } } /// A mutable reference to an entity owned by GPUI diff --git a/crates/http_client/src/http_client.rs b/crates/http_client/src/http_client.rs index a75df61646f31c9dc997bea83acc9d669bf1e29e..98c67f4e27a8e8b20489cc3c4ad4a1207e8b848f 100644 --- a/crates/http_client/src/http_client.rs +++ b/crates/http_client/src/http_client.rs @@ -14,9 +14,9 @@ use futures::{ }; use parking_lot::Mutex; use serde::Serialize; +use std::sync::Arc; #[cfg(feature = "test-support")] -use std::fmt; -use std::{any::type_name, sync::Arc}; +use std::{any::type_name, fmt}; pub use url::{Host, Url}; #[derive(Default, Debug, Clone, PartialEq, Eq, Hash)] @@ -59,10 +59,10 @@ impl HttpRequestExt for http::request::Builder { } pub trait HttpClient: 'static + Send + Sync { - fn type_name(&self) -> &'static str; - fn user_agent(&self) -> Option<&HeaderValue>; + fn proxy(&self) -> Option<&Url>; + fn send( &self, req: http::Request, @@ -106,8 +106,6 @@ pub trait HttpClient: 'static + Send + Sync { } } - fn proxy(&self) -> Option<&Url>; - #[cfg(feature = "test-support")] fn as_fake(&self) -> &FakeHttpClient { panic!("called as_fake on {}", type_name::()) @@ -163,10 +161,6 @@ impl HttpClient for HttpClientWithProxy { self.proxy.as_ref() } - fn type_name(&self) -> &'static str { - self.client.type_name() - } - #[cfg(feature = "test-support")] fn as_fake(&self) -> &FakeHttpClient { self.client.as_fake() @@ -182,19 +176,13 @@ impl HttpClient for HttpClientWithProxy { } /// An [`HttpClient`] that has a base URL. +#[derive(Deref)] pub struct HttpClientWithUrl { base_url: Mutex, + #[deref] client: HttpClientWithProxy, } -impl std::ops::Deref for HttpClientWithUrl { - type Target = HttpClientWithProxy; - - fn deref(&self) -> &Self::Target { - &self.client - } -} - impl HttpClientWithUrl { /// Returns a new [`HttpClientWithUrl`] with the given base URL. 
pub fn new( @@ -314,10 +302,6 @@ impl HttpClient for HttpClientWithUrl { self.client.proxy.as_ref() } - fn type_name(&self) -> &'static str { - self.client.type_name() - } - #[cfg(feature = "test-support")] fn as_fake(&self) -> &FakeHttpClient { self.client.as_fake() @@ -384,10 +368,6 @@ impl HttpClient for BlockedHttpClient { None } - fn type_name(&self) -> &'static str { - type_name::() - } - #[cfg(feature = "test-support")] fn as_fake(&self) -> &FakeHttpClient { panic!("called as_fake on {}", type_name::()) @@ -482,10 +462,6 @@ impl HttpClient for FakeHttpClient { None } - fn type_name(&self) -> &'static str { - type_name::() - } - fn as_fake(&self) -> &FakeHttpClient { self } diff --git a/crates/reqwest_client/src/reqwest_client.rs b/crates/reqwest_client/src/reqwest_client.rs index d0d25bdf258e12a28bef1f29e608532075bbed7b..c2a58877b32ab6049edc5b50f7ad025f0c83f46e 100644 --- a/crates/reqwest_client/src/reqwest_client.rs +++ b/crates/reqwest_client/src/reqwest_client.rs @@ -1,6 +1,6 @@ use std::error::Error; use std::sync::{LazyLock, OnceLock}; -use std::{any::type_name, borrow::Cow, mem, pin::Pin, task::Poll, time::Duration}; +use std::{borrow::Cow, mem, pin::Pin, task::Poll, time::Duration}; use anyhow::anyhow; use bytes::{BufMut, Bytes, BytesMut}; @@ -215,10 +215,6 @@ impl http_client::HttpClient for ReqwestClient { self.proxy.as_ref() } - fn type_name(&self) -> &'static str { - type_name::() - } - fn user_agent(&self) -> Option<&HeaderValue> { self.user_agent.as_ref() } From 97792f7fb9ca8da39d150ebe40ef393ce126bb00 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Mon, 17 Nov 2025 15:29:50 +0100 Subject: [PATCH 0146/1030] Prefer loading `extension.toml` before `extension.json` (#42884) Closes #42406 The issue for the fish-extension is that a `extension.json` is still present next to a `extension.toml`, although the former is deprecated. We should prefer the `extension.toml` if it is present and only fall back to the `extension.json` if needed. This PR tackles this. 
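In other words, the lookup order becomes: check for `extension.toml` first, and only fall back to the legacy `extension.json` when no TOML manifest exists. A rough standalone sketch of that preference order (illustrative only; the actual implementation in the diff below goes through Zed's async `Fs` trait and parses the manifest into an `ExtensionManifest` rather than returning raw strings):

```rust
use std::path::Path;

// Sketch of the new lookup order: prefer `extension.toml`, fall back to the
// deprecated `extension.json` only if no TOML manifest is present.
fn load_manifest_source(extension_dir: &Path) -> std::io::Result<(String, &'static str)> {
    let toml_path = extension_dir.join("extension.toml");
    if toml_path.is_file() {
        return Ok((std::fs::read_to_string(toml_path)?, "toml"));
    }

    let json_path = extension_dir.join("extension.json");
    if json_path.is_file() {
        return Ok((std::fs::read_to_string(json_path)?, "json"));
    }

    Err(std::io::Error::new(
        std::io::ErrorKind::NotFound,
        "no extension manifest found",
    ))
}
```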
Release Notes: - N/A --- crates/extension/src/extension_manifest.rs | 25 +++++++++++----------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/crates/extension/src/extension_manifest.rs b/crates/extension/src/extension_manifest.rs index 7a15a3c58b7a907fa56a12633343a48d150b6bcf..a3374069f7da6a30f455601b3bc0d4b027f207ae 100644 --- a/crates/extension/src/extension_manifest.rs +++ b/crates/extension/src/extension_manifest.rs @@ -265,25 +265,26 @@ impl ExtensionManifest { .and_then(OsStr::to_str) .context("invalid extension name")?; - let mut extension_manifest_path = extension_dir.join("extension.json"); + let extension_manifest_path = extension_dir.join("extension.toml"); if fs.is_file(&extension_manifest_path).await { - let manifest_content = fs.load(&extension_manifest_path).await.with_context(|| { - format!("loading {extension_name} extension.json, {extension_manifest_path:?}") - })?; - let manifest_json = serde_json::from_str::(&manifest_content) - .with_context(|| { - format!("invalid extension.json for extension {extension_name}") - })?; - - Ok(manifest_from_old_manifest(manifest_json, extension_name)) - } else { - extension_manifest_path.set_extension("toml"); let manifest_content = fs.load(&extension_manifest_path).await.with_context(|| { format!("loading {extension_name} extension.toml, {extension_manifest_path:?}") })?; toml::from_str(&manifest_content).map_err(|err| { anyhow!("Invalid extension.toml for extension {extension_name}:\n{err}") }) + } else if let extension_manifest_path = extension_manifest_path.with_extension("json") + && fs.is_file(&extension_manifest_path).await + { + let manifest_content = fs.load(&extension_manifest_path).await.with_context(|| { + format!("loading {extension_name} extension.json, {extension_manifest_path:?}") + })?; + + serde_json::from_str::(&manifest_content) + .with_context(|| format!("invalid extension.json for extension {extension_name}")) + .map(|manifest_json| manifest_from_old_manifest(manifest_json, extension_name)) + } else { + anyhow::bail!("No extension manifest found for extension {extension_name}") } } } From d77ab99ab1a427964b1313b7fbeb55c7dfbaff7a Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 11:43:15 -0300 Subject: [PATCH 0147/1030] keymap_editor: Make "toggle exact match mode" the default for binding search (#42883) I think having the "exact mode" turned on by default is usually what users will expect when searching for a specific keybinding. When it's turned off, it's very odd to search for a super common binding like "command-enter" and get no results. That happens because without that mode, we're trying to match for subsequent matches, which I'm betting it's an edge case. Hopefully, this change will make the keymap editor feel more like it works well. I'm also adding the toggle icon button inside the keystroke input for consistency with the project search input. Making this change very inspired by [Sam Rose's feedback](https://bsky.app/profile/samwho.dev/post/3m5juszqyd22w). Release Notes: - keymap editor: Made the "toggle exact match mode" the default keystroke search mode so that whatever you search for matches exactly to results. 
--- crates/keymap_editor/src/keymap_editor.rs | 71 ++++++++----------- .../src/ui_components/keystroke_input.rs | 10 ++- 2 files changed, 39 insertions(+), 42 deletions(-) diff --git a/crates/keymap_editor/src/keymap_editor.rs b/crates/keymap_editor/src/keymap_editor.rs index 58a2a5d044f208d967b58e275a5e908ff0c63418..5e39e80b4bd7e9b91c9419290a48b39d060aa267 100644 --- a/crates/keymap_editor/src/keymap_editor.rs +++ b/crates/keymap_editor/src/keymap_editor.rs @@ -184,7 +184,7 @@ enum SearchMode { impl SearchMode { fn invert(&self) -> Self { match self { - SearchMode::Normal => SearchMode::KeyStroke { exact_match: false }, + SearchMode::Normal => SearchMode::KeyStroke { exact_match: true }, SearchMode::KeyStroke { .. } => SearchMode::Normal, } } @@ -1600,9 +1600,33 @@ impl Item for KeymapEditor { impl Render for KeymapEditor { fn render(&mut self, _window: &mut Window, cx: &mut ui::Context) -> impl ui::IntoElement { + if let SearchMode::KeyStroke { exact_match } = self.search_mode { + let button = IconButton::new("keystrokes-exact-match", IconName::CaseSensitive) + .tooltip(move |_window, cx| { + Tooltip::for_action( + "Toggle Exact Match Mode", + &ToggleExactKeystrokeMatching, + cx, + ) + }) + .shape(IconButtonShape::Square) + .toggle_state(exact_match) + .on_click(cx.listener(|_, _, window, cx| { + window.dispatch_action(ToggleExactKeystrokeMatching.boxed_clone(), cx); + })); + + self.keystroke_editor.update(cx, |editor, _| { + editor.actions_slot = Some(button.into_any_element()); + }); + } else { + self.keystroke_editor.update(cx, |editor, _| { + editor.actions_slot = None; + }); + } + let row_count = self.matches.len(); - let theme = cx.theme(); let focus_handle = &self.focus_handle; + let theme = cx.theme(); v_flex() .id("keymap-editor") @@ -1786,49 +1810,14 @@ impl Render for KeymapEditor { ) ), ) - .when_some( - match self.search_mode { - SearchMode::Normal => None, - SearchMode::KeyStroke { exact_match } => Some(exact_match), - }, - |this, exact_match| { + .when( + matches!(self.search_mode, SearchMode::KeyStroke { .. 
}), + |this| { this.child( h_flex() .gap_2() .child(self.keystroke_editor.clone()) - .child( - h_flex() - .min_w_64() - .child( - IconButton::new( - "keystrokes-exact-match", - IconName::CaseSensitive, - ) - .tooltip({ - let keystroke_focus_handle = - self.keystroke_editor.read(cx).focus_handle(cx); - - move |_window, cx| { - Tooltip::for_action_in( - "Toggle Exact Match Mode", - &ToggleExactKeystrokeMatching, - &keystroke_focus_handle, - cx, - ) - } - }) - .shape(IconButtonShape::Square) - .toggle_state(exact_match) - .on_click( - cx.listener(|_, _, window, cx| { - window.dispatch_action( - ToggleExactKeystrokeMatching.boxed_clone(), - cx, - ); - }), - ), - ), - ) + .child(div().min_w_64()), // Spacer div to align with the search input ) }, ), diff --git a/crates/keymap_editor/src/ui_components/keystroke_input.rs b/crates/keymap_editor/src/ui_components/keystroke_input.rs index f1f583be7a6bb0aa1796c7c03d9aea084b3bdd3b..6936de784f9d5c16b218d0952c41d6336299a0f9 100644 --- a/crates/keymap_editor/src/ui_components/keystroke_input.rs +++ b/crates/keymap_editor/src/ui_components/keystroke_input.rs @@ -64,6 +64,7 @@ pub struct KeystrokeInput { clear_close_keystrokes_timer: Option>, #[cfg(test)] recording: bool, + pub actions_slot: Option, } impl KeystrokeInput { @@ -94,6 +95,7 @@ impl KeystrokeInput { clear_close_keystrokes_timer: None, #[cfg(test)] recording: false, + actions_slot: None, } } @@ -445,6 +447,11 @@ impl KeystrokeInput { // not get de-synced self.inner_focus_handle.is_focused(window) } + + pub fn actions_slot(mut self, action: impl IntoElement) -> Self { + self.actions_slot = Some(action.into_any_element()); + self + } } impl EventEmitter<()> for KeystrokeInput {} @@ -586,7 +593,7 @@ impl Render for KeystrokeInput { .min_w_0() .justify_center() .flex_wrap() - .gap(ui::DynamicSpacing::Base04.rems(cx)) + .gap_1() .children(self.render_keystrokes(is_recording)), ) .child( @@ -636,6 +643,7 @@ impl Render for KeystrokeInput { ) } }) + .when_some(self.actions_slot.take(), |this, action| this.child(action)) .when(is_recording, |this| { this.child( IconButton::new("clear-btn", IconName::Backspace) From b274f80dd98444df964a6e720d2135c0c1d28d35 Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Mon, 17 Nov 2025 16:56:58 +0200 Subject: [PATCH 0148/1030] zeta2: Print average length of prompts and outputs (#42885) Release Notes: - N/A --- crates/zeta_cli/src/evaluate.rs | 27 ++++++++++++++++++++------- crates/zeta_cli/src/predict.rs | 12 ++++++++---- 2 files changed, 28 insertions(+), 11 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index a06662c8bf17535900923eb875261f911ded12f7..8904a7fc5707a59ef5ce3ce7b3d246adf3a8e16b 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -46,7 +46,7 @@ pub async fn run_evaluate( } let all_tasks = args.example_paths.into_iter().map(|path| { let app_state = app_state.clone(); - let example = NamedExample::load(&path).unwrap(); + let example = NamedExample::load(&path).expect("Failed to load example"); cx.spawn(async move |cx| { let (project, zetas, _edited_buffers) = example @@ -129,12 +129,15 @@ fn write_aggregated_scores( let aggregated_result = EvaluationResult { context: Scores::aggregate(successful.iter().map(|r| &r.context)), edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), + prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), + generated_len: successful.iter().map(|r| r.generated_len).sum::() + / successful.len(), 
}; writeln!(w, "\n{}", "-".repeat(80))?; writeln!(w, "\n## TOTAL SCORES")?; writeln!(w, "\n### Success Rate")?; - writeln!(w, "{}", aggregated_result)?; + writeln!(w, "{:#}", aggregated_result)?; } if successful.len() + failed_count > 1 { @@ -241,6 +244,8 @@ fn write_eval_result( pub struct EvaluationResult { pub edit_prediction: Option, pub context: Scores, + pub prompt_len: usize, + pub generated_len: usize, } #[derive(Default, Debug)] @@ -362,15 +367,17 @@ impl EvaluationResult { writeln!(f, "### Scores\n")?; writeln!( f, - " TP FP FN Precision Recall F1" + " Prompt Generated TP FP FN Precision Recall F1" )?; writeln!( f, - "──────────────────────────────────────────────────────────────────" + "────────────────────────────────────────────────────────────────────────────────────" )?; writeln!( f, - "Context Retrieval {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Context Retrieval {:<7} {:<10} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "", + "", self.context.true_positives, self.context.false_positives, self.context.false_negatives, @@ -381,7 +388,9 @@ impl EvaluationResult { if let Some(edit_prediction) = &self.edit_prediction { writeln!( f, - "Edit Prediction {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Edit Prediction {:<7} {:<10} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + self.prompt_len, + self.generated_len, edit_prediction.true_positives, edit_prediction.false_positives, edit_prediction.false_negatives, @@ -395,7 +404,11 @@ impl EvaluationResult { } pub fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> EvaluationResult { - let mut eval_result = EvaluationResult::default(); + let mut eval_result = EvaluationResult { + prompt_len: preds.prompt_len, + generated_len: preds.generated_len, + ..Default::default() + }; let actual_context_lines: HashSet<_> = preds .excerpts diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 0618cf38bafd15a6b8a50b03cb745c9d3365cbf8..28eb7e426c21126b1c91dc62132c1bf460a93661 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -179,13 +179,12 @@ pub async fn zeta2_predict( zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { let prediction_started_at = Instant::now(); start_time.get_or_insert(prediction_started_at); - fs::write( - example_run_dir.join("prediction_prompt.md"), - &request.local_prompt.unwrap_or_default(), - )?; + let prompt = request.local_prompt.unwrap_or_default(); + fs::write(example_run_dir.join("prediction_prompt.md"), &prompt)?; { let mut result = result.lock().unwrap(); + result.prompt_len = prompt.chars().count(); for included_file in request.request.included_files { let insertions = @@ -217,6 +216,7 @@ pub async fn zeta2_predict( fs::write(example_run_dir.join("prediction_response.md"), &response)?; let mut result = result.lock().unwrap(); + result.generated_len = response.chars().count(); if !use_expected_context { result.planning_search_time = @@ -411,6 +411,8 @@ pub struct PredictionDetails { pub prediction_time: Duration, pub total_time: Duration, pub run_example_dir: PathBuf, + pub prompt_len: usize, + pub generated_len: usize, } impl PredictionDetails { @@ -424,6 +426,8 @@ impl PredictionDetails { prediction_time: Default::default(), total_time: Default::default(), run_example_dir, + prompt_len: 0, + generated_len: 0, } } From bb46bc167ad3a8d6f51bd0255dcb46df78a7c6f1 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 11:58:43 -0300 Subject: [PATCH 0149/1030] 
settings_ui: Add "Edit in settings.json" button to subpage header (#42886) Closes https://github.com/zed-industries/zed/issues/42094 This will make it consistent with the regular/main page. Also ended up fixing a bug along the way where this button wouldn't work for subpage items. Release Notes: - settings ui: Fixed a bug where the "Edit in settings.json" wouldn't work for subpages like all the Language pages. --- crates/settings_ui/src/settings_ui.rs | 58 ++++++++++++++++++--------- 1 file changed, 40 insertions(+), 18 deletions(-) diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 329679bccd4c10aed3398ac60a6c05f7922d9a9f..1f32716f0639197cf6391e377b2619cc3843605f 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -409,16 +409,25 @@ pub fn init(cx: &mut App) { fn init_renderers(cx: &mut App) { cx.default_global::() - .add_basic_renderer::(|_, _, _, _, _| { - Button::new("open-in-settings-file", "Edit in settings.json") - .style(ButtonStyle::Outlined) - .size(ButtonSize::Medium) - .tab_index(0_isize) - .on_click(|_, window, cx| { - window.dispatch_action(Box::new(OpenCurrentFile), cx); - }) - .into_any_element() - }) + .add_renderer::( + |settings_window, item, _, settings_file, _, sub_field, _, cx| { + render_settings_item( + settings_window, + item, + settings_file, + Button::new("open-in-settings-file", "Edit in settings.json") + .style(ButtonStyle::Outlined) + .size(ButtonSize::Medium) + .tab_index(0_isize) + .on_click(cx.listener(|this, _, window, cx| { + this.open_current_settings_file(window, cx); + })) + .into_any_element(), + sub_field, + cx, + ) + }, + ) .add_basic_renderer::(render_toggle_button) .add_basic_renderer::(render_text_field) .add_basic_renderer::(render_text_field) @@ -2832,17 +2841,30 @@ impl SettingsWindow { .into_any_element(); } else { page_header = h_flex() - .ml_neg_1p5() - .gap_1() + .w_full() + .justify_between() .child( - IconButton::new("back-btn", IconName::ArrowLeft) - .icon_size(IconSize::Small) - .shape(IconButtonShape::Square) - .on_click(cx.listener(|this, _, _, cx| { - this.pop_sub_page(cx); + h_flex() + .ml_neg_1p5() + .gap_1() + .child( + IconButton::new("back-btn", IconName::ArrowLeft) + .icon_size(IconSize::Small) + .shape(IconButtonShape::Square) + .on_click(cx.listener(|this, _, _, cx| { + this.pop_sub_page(cx); + })), + ) + .child(self.render_sub_page_breadcrumbs()), + ) + .child( + Button::new("open-in-settings-file", "Edit in settings.json") + .tab_index(0_isize) + .style(ButtonStyle::OutlinedGhost) + .on_click(cx.listener(|this, _, window, cx| { + this.open_current_settings_file(window, cx); })), ) - .child(self.render_sub_page_breadcrumbs()) .into_any_element(); let active_page_render_fn = sub_page_stack().last().unwrap().link.render.clone(); From 4b050b651a70b594573ef5746bb2cfda60e67d65 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Mon, 17 Nov 2025 10:48:14 -0500 Subject: [PATCH 0150/1030] Support Agent Servers on remoting (#42683) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Screenshot 2025-11-13 at 6 53 39 PM Also added support for per-target env vars to Agent Server Extensions Closes https://github.com/zed-industries/zed/issues/42291 Release Notes: - Per-target env vars are now supported on Agent Server Extensions - Agent Server Extensions are now available when doing SSH remoting --------- Co-authored-by: Lukas Wirth Co-authored-by: Mikayla Maki --- Cargo.lock | 1 + crates/extension/Cargo.toml 
| 1 + crates/extension/src/extension_manifest.rs | 30 ++++ crates/project/src/agent_server_store.rs | 196 ++++++++++++++++++--- crates/proto/proto/ai.proto | 21 +++ crates/proto/proto/zed.proto | 5 +- crates/proto/src/proto.rs | 2 + docs/src/extensions/agent-servers.md | 19 +- 8 files changed, 241 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 67fa79b009fe59b052c22f77cf3b3b1c364d0c66..1b70d2680e5e1e15d916511440ea4b73174373aa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5861,6 +5861,7 @@ dependencies = [ "lsp", "parking_lot", "pretty_assertions", + "proto", "semantic_version", "serde", "serde_json", diff --git a/crates/extension/Cargo.toml b/crates/extension/Cargo.toml index e9f1c71908b633362b349df451f8e9743269412a..09492027a1bb59770e3ac70166f042cae8e22d29 100644 --- a/crates/extension/Cargo.toml +++ b/crates/extension/Cargo.toml @@ -25,6 +25,7 @@ language.workspace = true log.workspace = true lsp.workspace = true parking_lot.workspace = true +proto.workspace = true semantic_version.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/extension/src/extension_manifest.rs b/crates/extension/src/extension_manifest.rs index a3374069f7da6a30f455601b3bc0d4b027f207ae..11cefa339b24f8d6707c0f683ec38b50394c6a9e 100644 --- a/crates/extension/src/extension_manifest.rs +++ b/crates/extension/src/extension_manifest.rs @@ -193,6 +193,36 @@ pub struct TargetConfig { /// If not provided and the URL is a GitHub release, we'll attempt to fetch it from GitHub. #[serde(default)] pub sha256: Option, + /// Environment variables to set when launching the agent server. + /// These target-specific env vars will override any env vars set at the agent level. + #[serde(default)] + pub env: HashMap, +} + +impl TargetConfig { + pub fn from_proto(proto: proto::ExternalExtensionAgentTarget) -> Self { + Self { + archive: proto.archive, + cmd: proto.cmd, + args: proto.args, + sha256: proto.sha256, + env: proto.env.into_iter().collect(), + } + } + + pub fn to_proto(&self) -> proto::ExternalExtensionAgentTarget { + proto::ExternalExtensionAgentTarget { + archive: self.archive.clone(), + cmd: self.cmd.clone(), + args: self.args.clone(), + sha256: self.sha256.clone(), + env: self + .env + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + } + } } #[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)] diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index 67d3a0b8132be1db487fe347f3b79e42a8b5910d..f1fb210084fb118832f5ca8f5ffa78990c892aa1 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -17,7 +17,10 @@ use gpui::{ use http_client::{HttpClient, github::AssetKind}; use node_runtime::NodeRuntime; use remote::RemoteClient; -use rpc::{AnyProtoClient, TypedEnvelope, proto}; +use rpc::{ + AnyProtoClient, TypedEnvelope, + proto::{self, ExternalExtensionAgent}, +}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use settings::{RegisterSetting, SettingsStore}; @@ -114,6 +117,13 @@ enum AgentServerStoreState { downstream_client: Option<(u64, AnyProtoClient)>, settings: Option, http_client: Arc, + extension_agents: Vec<( + Arc, + String, + HashMap, + HashMap, + Option, + )>, _subscriptions: [Subscription; 1], }, Remote { @@ -257,20 +267,15 @@ impl AgentServerStore { }); // Insert agent servers from extension manifests - match &self.state { + match &mut self.state { AgentServerStoreState::Local { - node_runtime, - project_environment, - fs, - 
http_client, - .. + extension_agents, .. } => { + extension_agents.clear(); for (ext_id, manifest) in manifests { for (agent_name, agent_entry) in &manifest.agent_servers { - let display = SharedString::from(agent_entry.name.clone()); - // Store absolute icon path if provided, resolving symlinks for dev extensions - if let Some(icon) = &agent_entry.icon { + let icon_path = if let Some(icon) = &agent_entry.icon { let icon_path = extensions_dir.join(ext_id).join(icon); // Canonicalize to resolve symlinks (dev extensions are symlinked) let absolute_icon_path = icon_path @@ -279,30 +284,81 @@ impl AgentServerStore { .to_string_lossy() .to_string(); self.agent_icons.insert( - ExternalAgentServerName(display.clone()), - SharedString::from(absolute_icon_path), + ExternalAgentServerName(agent_name.clone().into()), + SharedString::from(absolute_icon_path.clone()), + ); + Some(absolute_icon_path) + } else { + None + }; + + extension_agents.push(( + agent_name.clone(), + ext_id.to_owned(), + agent_entry.targets.clone(), + agent_entry.env.clone(), + icon_path, + )); + } + } + self.reregister_agents(cx); + } + AgentServerStoreState::Remote { + project_id, + upstream_client, + } => { + let mut agents = vec![]; + for (ext_id, manifest) in manifests { + for (agent_name, agent_entry) in &manifest.agent_servers { + // Store absolute icon path if provided, resolving symlinks for dev extensions + let icon = if let Some(icon) = &agent_entry.icon { + let icon_path = extensions_dir.join(ext_id).join(icon); + // Canonicalize to resolve symlinks (dev extensions are symlinked) + let absolute_icon_path = icon_path + .canonicalize() + .unwrap_or(icon_path) + .to_string_lossy() + .to_string(); + + // Store icon locally for remote client + self.agent_icons.insert( + ExternalAgentServerName(agent_name.clone().into()), + SharedString::from(absolute_icon_path.clone()), ); - } - // Archive-based launcher (download from URL) - self.external_agents.insert( - ExternalAgentServerName(display), - Box::new(LocalExtensionArchiveAgent { - fs: fs.clone(), - http_client: http_client.clone(), - node_runtime: node_runtime.clone(), - project_environment: project_environment.clone(), - extension_id: Arc::from(ext_id), - agent_id: agent_name.clone(), - targets: agent_entry.targets.clone(), - env: agent_entry.env.clone(), - }) as Box, - ); + Some(absolute_icon_path) + } else { + None + }; + + agents.push(ExternalExtensionAgent { + name: agent_name.to_string(), + icon_path: icon, + extension_id: ext_id.to_string(), + targets: agent_entry + .targets + .iter() + .map(|(k, v)| (k.clone(), v.to_proto())) + .collect(), + env: agent_entry + .env + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + }); } } + upstream_client + .read(cx) + .proto_client() + .send(proto::ExternalExtensionAgentsUpdated { + project_id: *project_id, + agents, + }) + .log_err(); } - _ => { - // Only local projects support local extension agents + AgentServerStoreState::Collab => { + // Do nothing } } @@ -320,6 +376,7 @@ impl AgentServerStore { } pub fn init_headless(session: &AnyProtoClient) { + session.add_entity_message_handler(Self::handle_external_extension_agents_updated); session.add_entity_request_handler(Self::handle_get_agent_server_command); } @@ -354,6 +411,7 @@ impl AgentServerStore { downstream_client, settings: old_settings, http_client, + extension_agents, .. 
} = &mut self.state else { @@ -420,6 +478,31 @@ impl AgentServerStore { }) as Box, ) })); + self.external_agents.extend(extension_agents.iter().map( + |(agent_name, ext_id, targets, env, icon_path)| { + let name = ExternalAgentServerName(agent_name.clone().into()); + + // Restore icon if present + if let Some(icon) = icon_path { + self.agent_icons + .insert(name.clone(), SharedString::from(icon.clone())); + } + + ( + name, + Box::new(LocalExtensionArchiveAgent { + fs: fs.clone(), + http_client: http_client.clone(), + node_runtime: node_runtime.clone(), + project_environment: project_environment.clone(), + extension_id: Arc::from(&**ext_id), + targets: targets.clone(), + env: env.clone(), + agent_id: agent_name.clone(), + }) as Box, + ) + }, + )); *old_settings = Some(new_settings.clone()); @@ -463,6 +546,7 @@ impl AgentServerStore { http_client, downstream_client: None, settings: None, + extension_agents: vec![], _subscriptions: [subscription], }, external_agents: Default::default(), @@ -728,6 +812,55 @@ impl AgentServerStore { })? } + async fn handle_external_extension_agents_updated( + this: Entity, + envelope: TypedEnvelope, + mut cx: AsyncApp, + ) -> Result<()> { + this.update(&mut cx, |this, cx| { + let AgentServerStoreState::Local { + extension_agents, .. + } = &mut this.state + else { + panic!( + "handle_external_extension_agents_updated \ + should not be called for a non-remote project" + ); + }; + + for ExternalExtensionAgent { + name, + icon_path, + extension_id, + targets, + env, + } in envelope.payload.agents + { + let icon_path_string = icon_path.clone(); + if let Some(icon_path) = icon_path { + this.agent_icons.insert( + ExternalAgentServerName(name.clone().into()), + icon_path.into(), + ); + } + extension_agents.push(( + Arc::from(&*name), + extension_id, + targets + .into_iter() + .map(|(k, v)| (k, extension::TargetConfig::from_proto(v))) + .collect(), + env.into_iter().collect(), + icon_path_string, + )); + } + + this.reregister_agents(cx); + cx.emit(AgentServersUpdated); + Ok(()) + })? 
+ } + async fn handle_loading_status_updated( this: Entity, envelope: TypedEnvelope, @@ -1830,6 +1963,7 @@ mod extension_agent_tests { cmd: "./agent".into(), args: vec![], sha256: None, + env: Default::default(), }, ); @@ -1870,6 +2004,7 @@ mod extension_agent_tests { cmd: "./my-agent".into(), args: vec!["--serve".into()], sha256: None, + env: Default::default(), }, ); map @@ -1907,6 +2042,7 @@ mod extension_agent_tests { cmd: "./release-agent".into(), args: vec!["serve".into()], sha256: None, + env: Default::default(), }, ); @@ -1949,6 +2085,7 @@ mod extension_agent_tests { cmd: "node".into(), args: vec!["index.js".into()], sha256: None, + env: Default::default(), }, ); map @@ -1995,6 +2132,7 @@ mod extension_agent_tests { "./config.json".into(), ], sha256: None, + env: Default::default(), }, ); map diff --git a/crates/proto/proto/ai.proto b/crates/proto/proto/ai.proto index 9b4cc27dcb9755f5205907cc5fd93687aa76bc4f..2216446a825c9ca3954306e80b9ccaaf06215306 100644 --- a/crates/proto/proto/ai.proto +++ b/crates/proto/proto/ai.proto @@ -186,6 +186,27 @@ message ExternalAgentsUpdated { repeated string names = 2; } +message ExternalExtensionAgentTarget { + string archive = 1; + string cmd = 2; + repeated string args = 3; + optional string sha256 = 4; + map env = 5; +} + +message ExternalExtensionAgent { + string name = 1; + optional string icon_path = 2; + string extension_id = 3; + map targets = 4; + map env = 5; +} + +message ExternalExtensionAgentsUpdated { + uint64 project_id = 1; + repeated ExternalExtensionAgent agents = 2; +} + message ExternalAgentLoadingStatusUpdated { uint64 project_id = 1; string name = 2; diff --git a/crates/proto/proto/zed.proto b/crates/proto/proto/zed.proto index 34987ba06754be2db31aea51b384e7e099dca728..6ecea916ca5143ecd75678cd2e21587087f67b51 100644 --- a/crates/proto/proto/zed.proto +++ b/crates/proto/proto/zed.proto @@ -410,7 +410,6 @@ message Envelope { AgentServerCommand agent_server_command = 374; ExternalAgentsUpdated external_agents_updated = 375; - ExternalAgentLoadingStatusUpdated external_agent_loading_status_updated = 376; NewExternalAgentVersionAvailable new_external_agent_version_available = 377; @@ -436,7 +435,9 @@ message Envelope { OpenImageByPath open_image_by_path = 391; OpenImageResponse open_image_response = 392; - CreateImageForPeer create_image_for_peer = 393; // current max + CreateImageForPeer create_image_for_peer = 393; + + ExternalExtensionAgentsUpdated external_extension_agents_updated = 394; // current max } reserved 87 to 88; diff --git a/crates/proto/src/proto.rs b/crates/proto/src/proto.rs index 0d9ffd5e0491a65e5ff39a67af5a2efd015476fc..fa6af5c3899da3519ce13d772bdc61fb78194d19 100644 --- a/crates/proto/src/proto.rs +++ b/crates/proto/src/proto.rs @@ -331,6 +331,7 @@ messages!( (GetAgentServerCommand, Background), (AgentServerCommand, Background), (ExternalAgentsUpdated, Background), + (ExternalExtensionAgentsUpdated, Background), (ExternalAgentLoadingStatusUpdated, Background), (NewExternalAgentVersionAvailable, Background), (RemoteStarted, Background), @@ -681,6 +682,7 @@ entity_messages!( GitClone, GetAgentServerCommand, ExternalAgentsUpdated, + ExternalExtensionAgentsUpdated, ExternalAgentLoadingStatusUpdated, NewExternalAgentVersionAvailable, GitGetWorktrees, diff --git a/docs/src/extensions/agent-servers.md b/docs/src/extensions/agent-servers.md index ce6204e33ee0afd91d705cd90fe4134b9652f8be..c8367a8418d07f827258403587a9787779f55cb9 100644 --- a/docs/src/extensions/agent-servers.md +++ b/docs/src/extensions/agent-servers.md 
@@ -46,15 +46,25 @@ Each target must specify: - `archive`: URL to download the archive from (supports `.tar.gz`, `.zip`, etc.) - `cmd`: Command to run the agent server (relative to the extracted archive) - `args`: Command-line arguments to pass to the agent server (optional) +- `sha256`: SHA-256 hash string of the archive's bytes (optional, but recommended for security) +- `env`: Environment variables specific to this target (optional, overrides agent-level env vars with the same name) ### Optional Fields -You can also optionally specify: +You can also optionally specify at the agent server level: -- `sha256`: SHA-256 hash string of the archive's bytes. Zed will check this after the archive is downloaded and give an error if it doesn't match, so doing this improves security. -- `env`: Environment variables to set in the agent's spawned process. +- `env`: Environment variables to set in the agent's spawned process. These apply to all targets by default. - `icon`: Path to an SVG icon (relative to extension root) for display in menus. +### Environment Variables + +Environment variables can be configured at two levels: + +1. **Agent-level** (`[agent_servers.my-agent.env]`): Variables that apply to all platforms +2. **Target-level** (`[agent_servers.my-agent.targets.{platform}.env]`): Variables specific to a platform + +When both are specified, target-level environment variables override agent-level variables with the same name. Variables defined only at the agent level are inherited by all targets. + ### Complete Example Here's a more complete example with all optional fields: @@ -79,6 +89,9 @@ archive = "https://github.com/example/agent/releases/download/v2.0.0/agent-linux cmd = "./bin/agent" args = ["serve", "--port", "8080"] sha256 = "def456abc123..." 
+ +[agent_servers.example-agent.targets.linux-x86_64.env] +AGENT_MEMORY_LIMIT = "2GB" # Linux-specific override ``` ## Installation Process From 23872b0523652508ea0e149ac622fec0954473d4 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Mon, 17 Nov 2025 12:23:18 -0500 Subject: [PATCH 0151/1030] Fix stale edits (#42895) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #34069 Screenshot 2025-11-17 at 11 14 19 AM Screenshot 2025-11-17 at 12 22 50 PM Release Notes: - Agent file edits now error if the file has changed since last read (allowing the agent to read changes and avoid overwriting changes made outside Zed) --- Cargo.lock | 1 + crates/agent/src/thread.rs | 5 + crates/agent/src/tools/edit_file_tool.rs | 461 ++++++++++++++++++ crates/agent/src/tools/read_file_tool.rs | 136 +++++- crates/remote_server/Cargo.toml | 1 + .../remote_server/src/remote_editing_tests.rs | 22 +- 6 files changed, 613 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1b70d2680e5e1e15d916511440ea4b73174373aa..69c75d7ce0f9184342fbf202a149c6feb1d6a982 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14035,6 +14035,7 @@ dependencies = [ "paths", "pretty_assertions", "project", + "prompt_store", "proto", "rayon", "release_channel", diff --git a/crates/agent/src/thread.rs b/crates/agent/src/thread.rs index 5cf230629c8e542a23ea7ffc5bdb0fa5a1c73a53..45c09675b2470bc399e7ad38fbf976fb2b06eea6 100644 --- a/crates/agent/src/thread.rs +++ b/crates/agent/src/thread.rs @@ -607,6 +607,8 @@ pub struct Thread { pub(crate) prompt_capabilities_rx: watch::Receiver, pub(crate) project: Entity, pub(crate) action_log: Entity, + /// Tracks the last time files were read by the agent, to detect external modifications + pub(crate) file_read_times: HashMap, } impl Thread { @@ -665,6 +667,7 @@ impl Thread { prompt_capabilities_rx, project, action_log, + file_read_times: HashMap::default(), } } @@ -860,6 +863,7 @@ impl Thread { updated_at: db_thread.updated_at, prompt_capabilities_tx, prompt_capabilities_rx, + file_read_times: HashMap::default(), } } @@ -999,6 +1003,7 @@ impl Thread { self.add_tool(NowTool); self.add_tool(OpenTool::new(self.project.clone())); self.add_tool(ReadFileTool::new( + cx.weak_entity(), self.project.clone(), self.action_log.clone(), )); diff --git a/crates/agent/src/tools/edit_file_tool.rs b/crates/agent/src/tools/edit_file_tool.rs index a507044ce51dce5e55c53106c11d8a9b2c2a3d28..de2dd384693c8af3e04007895c843743c5ead722 100644 --- a/crates/agent/src/tools/edit_file_tool.rs +++ b/crates/agent/src/tools/edit_file_tool.rs @@ -309,6 +309,40 @@ impl AgentTool for EditFileTool { })? .await?; + // Check if the file has been modified since the agent last read it + if let Some(abs_path) = abs_path.as_ref() { + let (last_read_mtime, current_mtime, is_dirty) = self.thread.update(cx, |thread, cx| { + let last_read = thread.file_read_times.get(abs_path).copied(); + let current = buffer.read(cx).file().and_then(|file| file.disk_state().mtime()); + let dirty = buffer.read(cx).is_dirty(); + (last_read, current, dirty) + })?; + + // Check for unsaved changes first - these indicate modifications we don't know about + if is_dirty { + anyhow::bail!( + "This file cannot be written to because it has unsaved changes. \ + Please end the current conversation immediately by telling the user you want to write to this file (mention its path explicitly) but you can't write to it because it has unsaved changes. 
\ + Ask the user to save that buffer's changes and to inform you when it's ok to proceed." + ); + } + + // Check if the file was modified on disk since we last read it + if let (Some(last_read), Some(current)) = (last_read_mtime, current_mtime) { + // MTime can be unreliable for comparisons, so our newtype intentionally + // doesn't support comparing them. If the mtime at all different + // (which could be because of a modification or because e.g. system clock changed), + // we pessimistically assume it was modified. + if current != last_read { + anyhow::bail!( + "The file {} has been modified since you last read it. \ + Please read the file again to get the current state before editing it.", + input.path.display() + ); + } + } + } + let diff = cx.new(|cx| Diff::new(buffer.clone(), cx))?; event_stream.update_diff(diff.clone()); let _finalize_diff = util::defer({ @@ -421,6 +455,17 @@ impl AgentTool for EditFileTool { log.buffer_edited(buffer.clone(), cx); })?; + // Update the recorded read time after a successful edit so consecutive edits work + if let Some(abs_path) = abs_path.as_ref() { + if let Some(new_mtime) = buffer.read_with(cx, |buffer, _| { + buffer.file().and_then(|file| file.disk_state().mtime()) + })? { + self.thread.update(cx, |thread, _| { + thread.file_read_times.insert(abs_path.to_path_buf(), new_mtime); + })?; + } + } + let new_snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; let (new_text, unified_diff) = cx .background_spawn({ @@ -1748,10 +1793,426 @@ mod tests { } } + #[gpui::test] + async fn test_file_read_times_tracking(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "test.txt": "original content" + }), + ) + .await; + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model.clone()), + cx, + ) + }); + let action_log = thread.read_with(cx, |thread, _| thread.action_log().clone()); + + // Initially, file_read_times should be empty + let is_empty = thread.read_with(cx, |thread, _| thread.file_read_times.is_empty()); + assert!(is_empty, "file_read_times should start empty"); + + // Create read tool + let read_tool = Arc::new(crate::ReadFileTool::new( + thread.downgrade(), + project.clone(), + action_log, + )); + + // Read the file to record the read time + cx.update(|cx| { + read_tool.clone().run( + crate::ReadFileToolInput { + path: "root/test.txt".to_string(), + start_line: None, + end_line: None, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await + .unwrap(); + + // Verify that file_read_times now contains an entry for the file + let has_entry = thread.read_with(cx, |thread, _| { + thread.file_read_times.len() == 1 + && thread + .file_read_times + .keys() + .any(|path| path.ends_with("test.txt")) + }); + assert!( + has_entry, + "file_read_times should contain an entry after reading the file" + ); + + // Read the file again - should update the entry + cx.update(|cx| { + read_tool.clone().run( + crate::ReadFileToolInput { + path: "root/test.txt".to_string(), + start_line: None, + end_line: None, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await + .unwrap(); + + // Should still have exactly 
one entry + let has_one_entry = thread.read_with(cx, |thread, _| thread.file_read_times.len() == 1); + assert!( + has_one_entry, + "file_read_times should still have one entry after re-reading" + ); + } + fn init_test(cx: &mut TestAppContext) { cx.update(|cx| { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); }); } + + #[gpui::test] + async fn test_consecutive_edits_work(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "test.txt": "original content" + }), + ) + .await; + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model.clone()), + cx, + ) + }); + let languages = project.read_with(cx, |project, _| project.languages().clone()); + let action_log = thread.read_with(cx, |thread, _| thread.action_log().clone()); + + let read_tool = Arc::new(crate::ReadFileTool::new( + thread.downgrade(), + project.clone(), + action_log, + )); + let edit_tool = Arc::new(EditFileTool::new( + project.clone(), + thread.downgrade(), + languages, + Templates::new(), + )); + + // Read the file first + cx.update(|cx| { + read_tool.clone().run( + crate::ReadFileToolInput { + path: "root/test.txt".to_string(), + start_line: None, + end_line: None, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await + .unwrap(); + + // First edit should work + let edit_result = { + let edit_task = cx.update(|cx| { + edit_tool.clone().run( + EditFileToolInput { + display_description: "First edit".into(), + path: "root/test.txt".into(), + mode: EditFileMode::Edit, + }, + ToolCallEventStream::test().0, + cx, + ) + }); + + cx.executor().run_until_parked(); + model.send_last_completion_stream_text_chunk( + "original contentmodified content" + .to_string(), + ); + model.end_last_completion_stream(); + + edit_task.await + }; + assert!( + edit_result.is_ok(), + "First edit should succeed, got error: {:?}", + edit_result.as_ref().err() + ); + + // Second edit should also work because the edit updated the recorded read time + let edit_result = { + let edit_task = cx.update(|cx| { + edit_tool.clone().run( + EditFileToolInput { + display_description: "Second edit".into(), + path: "root/test.txt".into(), + mode: EditFileMode::Edit, + }, + ToolCallEventStream::test().0, + cx, + ) + }); + + cx.executor().run_until_parked(); + model.send_last_completion_stream_text_chunk( + "modified contentfurther modified content".to_string(), + ); + model.end_last_completion_stream(); + + edit_task.await + }; + assert!( + edit_result.is_ok(), + "Second consecutive edit should succeed, got error: {:?}", + edit_result.as_ref().err() + ); + } + + #[gpui::test] + async fn test_external_modification_detected(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "test.txt": "original content" + }), + ) + .await; + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + 
project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model.clone()), + cx, + ) + }); + let languages = project.read_with(cx, |project, _| project.languages().clone()); + let action_log = thread.read_with(cx, |thread, _| thread.action_log().clone()); + + let read_tool = Arc::new(crate::ReadFileTool::new( + thread.downgrade(), + project.clone(), + action_log, + )); + let edit_tool = Arc::new(EditFileTool::new( + project.clone(), + thread.downgrade(), + languages, + Templates::new(), + )); + + // Read the file first + cx.update(|cx| { + read_tool.clone().run( + crate::ReadFileToolInput { + path: "root/test.txt".to_string(), + start_line: None, + end_line: None, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await + .unwrap(); + + // Simulate external modification - advance time and save file + cx.background_executor + .advance_clock(std::time::Duration::from_secs(2)); + fs.save( + path!("/root/test.txt").as_ref(), + &"externally modified content".into(), + language::LineEnding::Unix, + ) + .await + .unwrap(); + + // Reload the buffer to pick up the new mtime + let project_path = project + .read_with(cx, |project, cx| { + project.find_project_path("root/test.txt", cx) + }) + .expect("Should find project path"); + let buffer = project + .update(cx, |project, cx| project.open_buffer(project_path, cx)) + .await + .unwrap(); + buffer + .update(cx, |buffer, cx| buffer.reload(cx)) + .await + .unwrap(); + + cx.executor().run_until_parked(); + + // Try to edit - should fail because file was modified externally + let result = cx + .update(|cx| { + edit_tool.clone().run( + EditFileToolInput { + display_description: "Edit after external change".into(), + path: "root/test.txt".into(), + mode: EditFileMode::Edit, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await; + + assert!( + result.is_err(), + "Edit should fail after external modification" + ); + let error_msg = result.unwrap_err().to_string(); + assert!( + error_msg.contains("has been modified since you last read it"), + "Error should mention file modification, got: {}", + error_msg + ); + } + + #[gpui::test] + async fn test_dirty_buffer_detected(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "test.txt": "original content" + }), + ) + .await; + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model.clone()), + cx, + ) + }); + let languages = project.read_with(cx, |project, _| project.languages().clone()); + let action_log = thread.read_with(cx, |thread, _| thread.action_log().clone()); + + let read_tool = Arc::new(crate::ReadFileTool::new( + thread.downgrade(), + project.clone(), + action_log, + )); + let edit_tool = Arc::new(EditFileTool::new( + project.clone(), + thread.downgrade(), + languages, + Templates::new(), + )); + + // Read the file first + cx.update(|cx| { + read_tool.clone().run( + crate::ReadFileToolInput { + path: "root/test.txt".to_string(), + start_line: None, + end_line: None, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await + .unwrap(); + + // Open the buffer and make it dirty by editing without 
saving + let project_path = project + .read_with(cx, |project, cx| { + project.find_project_path("root/test.txt", cx) + }) + .expect("Should find project path"); + let buffer = project + .update(cx, |project, cx| project.open_buffer(project_path, cx)) + .await + .unwrap(); + + // Make an in-memory edit to the buffer (making it dirty) + buffer.update(cx, |buffer, cx| { + let end_point = buffer.max_point(); + buffer.edit([(end_point..end_point, " added text")], None, cx); + }); + + // Verify buffer is dirty + let is_dirty = buffer.read_with(cx, |buffer, _| buffer.is_dirty()); + assert!(is_dirty, "Buffer should be dirty after in-memory edit"); + + // Try to edit - should fail because buffer has unsaved changes + let result = cx + .update(|cx| { + edit_tool.clone().run( + EditFileToolInput { + display_description: "Edit with dirty buffer".into(), + path: "root/test.txt".into(), + mode: EditFileMode::Edit, + }, + ToolCallEventStream::test().0, + cx, + ) + }) + .await; + + assert!(result.is_err(), "Edit should fail when buffer is dirty"); + let error_msg = result.unwrap_err().to_string(); + assert!( + error_msg.contains("cannot be written to because it has unsaved changes"), + "Error should mention unsaved changes, got: {}", + error_msg + ); + } } diff --git a/crates/agent/src/tools/read_file_tool.rs b/crates/agent/src/tools/read_file_tool.rs index 52f88aa4db03a2bc01b0fd10fe99f8bad04c24f1..eccb40737c744d57792655cadb925e18a68d2835 100644 --- a/crates/agent/src/tools/read_file_tool.rs +++ b/crates/agent/src/tools/read_file_tool.rs @@ -1,7 +1,7 @@ use action_log::ActionLog; use agent_client_protocol::{self as acp, ToolCallUpdateFields}; use anyhow::{Context as _, Result, anyhow}; -use gpui::{App, Entity, SharedString, Task}; +use gpui::{App, Entity, SharedString, Task, WeakEntity}; use indoc::formatdoc; use language::Point; use language_model::{LanguageModelImage, LanguageModelToolResultContent}; @@ -12,7 +12,7 @@ use settings::Settings; use std::sync::Arc; use util::markdown::MarkdownCodeBlock; -use crate::{AgentTool, ToolCallEventStream, outline}; +use crate::{AgentTool, Thread, ToolCallEventStream, outline}; /// Reads the content of the given file in the project. /// @@ -42,13 +42,19 @@ pub struct ReadFileToolInput { } pub struct ReadFileTool { + thread: WeakEntity, project: Entity, action_log: Entity, } impl ReadFileTool { - pub fn new(project: Entity, action_log: Entity) -> Self { + pub fn new( + thread: WeakEntity, + project: Entity, + action_log: Entity, + ) -> Self { Self { + thread, project, action_log, } @@ -195,6 +201,17 @@ impl AgentTool for ReadFileTool { anyhow::bail!("{file_path} not found"); } + // Record the file read time and mtime + if let Some(mtime) = buffer.read_with(cx, |buffer, _| { + buffer.file().and_then(|file| file.disk_state().mtime()) + })? 
{ + self.thread + .update(cx, |thread, _| { + thread.file_read_times.insert(abs_path.to_path_buf(), mtime); + }) + .ok(); + } + let mut anchor = None; // Check if specific line ranges are provided @@ -285,11 +302,15 @@ impl AgentTool for ReadFileTool { #[cfg(test)] mod test { use super::*; + use crate::{ContextServerRegistry, Templates, Thread}; use gpui::{AppContext, TestAppContext, UpdateGlobal as _}; use language::{Language, LanguageConfig, LanguageMatcher, tree_sitter_rust}; + use language_model::fake_provider::FakeLanguageModel; use project::{FakeFs, Project}; + use prompt_store::ProjectContext; use serde_json::json; use settings::SettingsStore; + use std::sync::Arc; use util::path; #[gpui::test] @@ -300,7 +321,20 @@ mod test { fs.insert_tree(path!("/root"), json!({})).await; let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); let (event_stream, _) = ToolCallEventStream::test(); let result = cx @@ -333,7 +367,20 @@ mod test { .await; let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); let result = cx .update(|cx| { let input = ReadFileToolInput { @@ -363,7 +410,20 @@ mod test { let language_registry = project.read_with(cx, |project, _| project.languages().clone()); language_registry.add(Arc::new(rust_lang())); let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); let result = cx .update(|cx| { let input = ReadFileToolInput { @@ -435,7 +495,20 @@ mod test { let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| 
ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); let result = cx .update(|cx| { let input = ReadFileToolInput { @@ -463,7 +536,20 @@ mod test { .await; let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); // start_line of 0 should be treated as 1 let result = cx @@ -607,7 +693,20 @@ mod test { let project = Project::test(fs.clone(), [path!("/project_root").as_ref()], cx).await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project, action_log)); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); // Reading a file outside the project worktree should fail let result = cx @@ -821,7 +920,24 @@ mod test { .await; let action_log = cx.new(|_| ActionLog::new(project.clone())); - let tool = Arc::new(ReadFileTool::new(project.clone(), action_log.clone())); + let context_server_registry = + cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let tool = Arc::new(ReadFileTool::new( + thread.downgrade(), + project.clone(), + action_log.clone(), + )); // Test reading allowed files in worktree1 let result = cx diff --git a/crates/remote_server/Cargo.toml b/crates/remote_server/Cargo.toml index 50e9fd73cb7d1a9b7eeb6b2bf5bf77320fa7a169..e4c7932973741015066efbcd07d0d0c71212acb0 100644 --- a/crates/remote_server/Cargo.toml +++ b/crates/remote_server/Cargo.toml @@ -94,6 +94,7 @@ project = { workspace = true, features = ["test-support"] } remote = { workspace = true, features = ["test-support"] } language_model = { workspace = true, features = ["test-support"] } lsp = { workspace = true, features = ["test-support"] } +prompt_store.workspace = true unindent.workspace = true serde_json.workspace = true zlog.workspace = true diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index 98a0aab70bcb4e5590f477f6e6de9aebd512b3c2..4ceaf2048c5967b7fe1fceeb47c68efc6cc15678 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -2,11 +2,12 @@ /// The tests in this file assume that server_cx is running on Windows too. 
/// We neead to find a way to test Windows-Non-Windows interactions. use crate::headless_project::HeadlessProject; -use agent::{AgentTool, ReadFileTool, ReadFileToolInput, ToolCallEventStream}; +use agent::{AgentTool, ReadFileTool, ReadFileToolInput, Templates, Thread, ToolCallEventStream}; use client::{Client, UserStore}; use clock::FakeSystemClock; use collections::{HashMap, HashSet}; -use language_model::LanguageModelToolResultContent; +use language_model::{LanguageModelToolResultContent, fake_provider::FakeLanguageModel}; +use prompt_store::ProjectContext; use extension::ExtensionHostProxy; use fs::{FakeFs, Fs}; @@ -1722,12 +1723,27 @@ async fn test_remote_agent_fs_tool_calls(cx: &mut TestAppContext, server_cx: &mu let action_log = cx.new(|_| action_log::ActionLog::new(project.clone())); + // Create a minimal thread for the ReadFileTool + let context_server_registry = + cx.new(|cx| agent::ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); + let model = Arc::new(FakeLanguageModel::default()); + let thread = cx.new(|cx| { + Thread::new( + project.clone(), + cx.new(|_cx| ProjectContext::default()), + context_server_registry, + Templates::new(), + Some(model), + cx, + ) + }); + let input = ReadFileToolInput { path: "project/b.txt".into(), start_line: None, end_line: None, }; - let read_tool = Arc::new(ReadFileTool::new(project, action_log)); + let read_tool = Arc::new(ReadFileTool::new(thread.downgrade(), project, action_log)); let (event_stream, _) = ToolCallEventStream::test(); let exists_result = cx.update(|cx| read_tool.clone().run(input, event_stream.clone(), cx)); From 0519c645fbd7cdf8f009756eaa35a23fe8d50bd8 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Mon, 17 Nov 2025 19:53:05 +0200 Subject: [PATCH 0152/1030] Deduplicate inlays when getting those from multiple language servers (#42899) Part of https://github.com/zed-industries/zed/issues/42671 Release Notes: - Deduplicate inlay hints from different language servers --- crates/editor/src/inlays/inlay_hints.rs | 118 +++++++++++++++++++++++- 1 file changed, 115 insertions(+), 3 deletions(-) diff --git a/crates/editor/src/inlays/inlay_hints.rs b/crates/editor/src/inlays/inlay_hints.rs index 50d4ed8df9871902033bfb4b55d85eed4ace51f1..7aacd1c86e6ec9a2034493d26df6d2271d33724e 100644 --- a/crates/editor/src/inlays/inlay_hints.rs +++ b/crates/editor/src/inlays/inlay_hints.rs @@ -1,4 +1,5 @@ use std::{ + collections::hash_map, ops::{ControlFlow, Range}, time::Duration, }; @@ -778,6 +779,7 @@ impl Editor { } let excerpts = self.buffer.read(cx).excerpt_ids(); + let mut inserted_hint_text = HashMap::default(); let hints_to_insert = new_hints .into_iter() .filter_map(|(chunk_range, hints_result)| { @@ -804,8 +806,35 @@ impl Editor { } } }) - .flat_map(|hints| hints.into_values()) - .flatten() + .flat_map(|new_hints| { + let mut hints_deduplicated = Vec::new(); + + if new_hints.len() > 1 { + for (server_id, new_hints) in new_hints { + for (new_id, new_hint) in new_hints { + let hints_text_for_position = inserted_hint_text + .entry(new_hint.position) + .or_insert_with(HashMap::default); + let insert = + match hints_text_for_position.entry(new_hint.text().to_string()) { + hash_map::Entry::Occupied(o) => o.get() == &server_id, + hash_map::Entry::Vacant(v) => { + v.insert(server_id); + true + } + }; + + if insert { + hints_deduplicated.push((new_id, new_hint)); + } + } + } + } else { + hints_deduplicated.extend(new_hints.into_values().flatten()); + } + + hints_deduplicated + }) .filter_map(|(hint_id, lsp_hint)| { if 
inlay_hints.allowed_hint_kinds.contains(&lsp_hint.kind) && inlay_hints @@ -3732,6 +3761,7 @@ let c = 3;"# let mut fake_servers = language_registry.register_fake_lsp( "Rust", FakeLspAdapter { + name: "rust-analyzer", capabilities: lsp::ServerCapabilities { inlay_hint_provider: Some(lsp::OneOf::Left(true)), ..lsp::ServerCapabilities::default() @@ -3804,6 +3834,78 @@ let c = 3;"# }, ); + // Add another server that does send the same, duplicate hints back + let mut fake_servers_2 = language_registry.register_fake_lsp( + "Rust", + FakeLspAdapter { + name: "CrabLang-ls", + capabilities: lsp::ServerCapabilities { + inlay_hint_provider: Some(lsp::OneOf::Left(true)), + ..lsp::ServerCapabilities::default() + }, + initializer: Some(Box::new(move |fake_server| { + fake_server.set_request_handler::( + move |params, _| async move { + if params.text_document.uri + == lsp::Uri::from_file_path(path!("/a/main.rs")).unwrap() + { + Ok(Some(vec![ + lsp::InlayHint { + position: lsp::Position::new(1, 9), + label: lsp::InlayHintLabel::String(": i32".to_owned()), + kind: Some(lsp::InlayHintKind::TYPE), + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + lsp::InlayHint { + position: lsp::Position::new(19, 9), + label: lsp::InlayHintLabel::String(": i33".to_owned()), + kind: Some(lsp::InlayHintKind::TYPE), + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + ])) + } else if params.text_document.uri + == lsp::Uri::from_file_path(path!("/a/lib.rs")).unwrap() + { + Ok(Some(vec![ + lsp::InlayHint { + position: lsp::Position::new(1, 10), + label: lsp::InlayHintLabel::String(": i34".to_owned()), + kind: Some(lsp::InlayHintKind::TYPE), + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + lsp::InlayHint { + position: lsp::Position::new(29, 10), + label: lsp::InlayHintLabel::String(": i35".to_owned()), + kind: Some(lsp::InlayHintKind::TYPE), + text_edits: None, + tooltip: None, + padding_left: None, + padding_right: None, + data: None, + }, + ])) + } else { + panic!("Unexpected file path {:?}", params.text_document.uri); + } + }, + ); + })), + ..FakeLspAdapter::default() + }, + ); + let (buffer_1, _handle_1) = project .update(cx, |project, cx| { project.open_local_buffer_with_lsp(path!("/a/main.rs"), cx) @@ -3847,6 +3949,7 @@ let c = 3;"# }); let fake_server = fake_servers.next().await.unwrap(); + let _fake_server_2 = fake_servers_2.next().await.unwrap(); cx.executor().advance_clock(Duration::from_millis(100)); cx.executor().run_until_parked(); @@ -3854,12 +3957,17 @@ let c = 3;"# .update(cx, |editor, _window, cx| { assert_eq!( vec![ + ": i32".to_string(), ": i32".to_string(), ": i33".to_string(), + ": i33".to_string(), + ": i34".to_string(), ": i34".to_string(), ": i35".to_string(), + ": i35".to_string(), ], sorted_cached_hint_labels(editor, cx), + "We receive duplicate hints from 2 servers and cache them all" ); assert_eq!( vec![ @@ -3869,7 +3977,7 @@ let c = 3;"# ": i33".to_string(), ], visible_hint_labels(editor, cx), - "lib.rs is added before main.rs , so its excerpts should be visible first" + "lib.rs is added before main.rs , so its excerpts should be visible first; hints should be deduplicated per label" ); }) .unwrap(); @@ -3918,10 +4026,14 @@ let c = 3;"# .update(cx, |editor, _window, cx| { assert_eq!( vec![ + ": i32".to_string(), ": i32".to_string(), ": i33".to_string(), + ": i33".to_string(), + ": i34".to_string(), ": i34".to_string(), ": i35".to_string(), + ": 
i35".to_string(), ], sorted_cached_hint_labels(editor, cx), "No hint changes/duplicates should occur in the cache", From 671500de1b821072967a05613b33462f20a116cc Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 18:58:22 +0100 Subject: [PATCH 0153/1030] agent_ui: Fix images copied from win explorer not being pastable (#42858) Closes https://github.com/zed-industries/zed/issues/41505 A bit adhoc but it gets the job done for now Release Notes: - Fixed images copied from windows explorer not being pastable in the agent panel --- Cargo.lock | 2 + crates/agent_ui/Cargo.toml | 2 + crates/agent_ui/src/acp/message_editor.rs | 186 +++++++++++------- crates/agent_ui/src/text_thread_editor.rs | 36 +++- crates/gpui/src/interactive.rs | 2 +- crates/gpui/src/platform.rs | 21 +- crates/gpui/src/platform/mac/platform.rs | 1 + crates/gpui/src/platform/windows/clipboard.rs | 59 +++--- 8 files changed, 207 insertions(+), 102 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69c75d7ce0f9184342fbf202a149c6feb1d6a982..8a68187705d129beee2384246a81a488a09cb6e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -322,6 +322,7 @@ dependencies = [ "assistant_slash_command", "assistant_slash_commands", "assistant_text_thread", + "async-fs", "audio", "buffer_diff", "chrono", @@ -343,6 +344,7 @@ dependencies = [ "gpui", "html_to_markdown", "http_client", + "image", "indoc", "itertools 0.14.0", "jsonschema", diff --git a/crates/agent_ui/Cargo.toml b/crates/agent_ui/Cargo.toml index 724b53a017911edbd6e9dd88c410daf794889d4e..6447b41335ece093718036091baf30c505ad76fd 100644 --- a/crates/agent_ui/Cargo.toml +++ b/crates/agent_ui/Cargo.toml @@ -98,6 +98,8 @@ util.workspace = true watch.workspace = true workspace.workspace = true zed_actions.workspace = true +image.workspace = true +async-fs.workspace = true [dev-dependencies] acp_thread = { workspace = true, features = ["test-support"] } diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 9835dc929bd86085b481cbdb5e2ee667591c6e73..84896e7a74ab8f9514f0550bae1d28433650ce8d 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -28,6 +28,7 @@ use gpui::{ EventEmitter, FocusHandle, Focusable, Image, ImageFormat, Img, KeyContext, SharedString, Subscription, Task, TextStyle, WeakEntity, pulsating_between, }; +use itertools::Either; use language::{Buffer, Language, language_settings::InlayHintKind}; use language_model::LanguageModelImage; use postage::stream::Stream as _; @@ -912,74 +913,112 @@ impl MessageEditor { if !self.prompt_capabilities.borrow().image { return; } - - let images = cx - .read_from_clipboard() - .map(|item| { - item.into_entries() - .filter_map(|entry| { - if let ClipboardEntry::Image(image) = entry { - Some(image) - } else { - None - } - }) - .collect::>() - }) - .unwrap_or_default(); - - if images.is_empty() { + let Some(clipboard) = cx.read_from_clipboard() else { return; - } + }; cx.stop_propagation(); + cx.spawn_in(window, async move |this, cx| { + use itertools::Itertools; + let (mut images, paths) = clipboard + .into_entries() + .filter_map(|entry| match entry { + ClipboardEntry::Image(image) => Some(Either::Left(image)), + ClipboardEntry::ExternalPaths(paths) => Some(Either::Right(paths)), + _ => None, + }) + .partition_map::, Vec<_>, _, _, _>(std::convert::identity); + + if !paths.is_empty() { + images.extend( + cx.background_spawn(async move { + let mut images = vec![]; + for path in paths.into_iter().flat_map(|paths| 
paths.paths().to_owned()) { + let Ok(content) = async_fs::read(path).await else { + continue; + }; + let Ok(format) = image::guess_format(&content) else { + continue; + }; + images.push(gpui::Image::from_bytes( + match format { + image::ImageFormat::Png => gpui::ImageFormat::Png, + image::ImageFormat::Jpeg => gpui::ImageFormat::Jpeg, + image::ImageFormat::WebP => gpui::ImageFormat::Webp, + image::ImageFormat::Gif => gpui::ImageFormat::Gif, + image::ImageFormat::Bmp => gpui::ImageFormat::Bmp, + image::ImageFormat::Tiff => gpui::ImageFormat::Tiff, + image::ImageFormat::Ico => gpui::ImageFormat::Ico, + _ => continue, + }, + content, + )); + } + images + }) + .await, + ); + } - let replacement_text = MentionUri::PastedImage.as_link().to_string(); - for image in images { - let (excerpt_id, text_anchor, multibuffer_anchor) = - self.editor.update(cx, |message_editor, cx| { - let snapshot = message_editor.snapshot(window, cx); - let (excerpt_id, _, buffer_snapshot) = - snapshot.buffer_snapshot().as_singleton().unwrap(); - - let text_anchor = buffer_snapshot.anchor_before(buffer_snapshot.len()); - let multibuffer_anchor = snapshot - .buffer_snapshot() - .anchor_in_excerpt(*excerpt_id, text_anchor); - message_editor.edit( - [( - multi_buffer::Anchor::max()..multi_buffer::Anchor::max(), - format!("{replacement_text} "), - )], - cx, - ); - (*excerpt_id, text_anchor, multibuffer_anchor) - }); + if images.is_empty() { + return; + } - let content_len = replacement_text.len(); - let Some(start_anchor) = multibuffer_anchor else { - continue; + let replacement_text = MentionUri::PastedImage.as_link().to_string(); + let Ok(editor) = this.update(cx, |this, _| this.editor.clone()) else { + return; }; - let end_anchor = self.editor.update(cx, |editor, cx| { - let snapshot = editor.buffer().read(cx).snapshot(cx); - snapshot.anchor_before(start_anchor.to_offset(&snapshot) + content_len) - }); - let image = Arc::new(image); - let Some((crease_id, tx)) = insert_crease_for_mention( - excerpt_id, - text_anchor, - content_len, - MentionUri::PastedImage.name().into(), - IconName::Image.path().into(), - Some(Task::ready(Ok(image.clone())).shared()), - self.editor.clone(), - window, - cx, - ) else { - continue; - }; - let task = cx - .spawn_in(window, { - async move |_, cx| { + for image in images { + let Ok((excerpt_id, text_anchor, multibuffer_anchor)) = + editor.update_in(cx, |message_editor, window, cx| { + let snapshot = message_editor.snapshot(window, cx); + let (excerpt_id, _, buffer_snapshot) = + snapshot.buffer_snapshot().as_singleton().unwrap(); + + let text_anchor = buffer_snapshot.anchor_before(buffer_snapshot.len()); + let multibuffer_anchor = snapshot + .buffer_snapshot() + .anchor_in_excerpt(*excerpt_id, text_anchor); + message_editor.edit( + [( + multi_buffer::Anchor::max()..multi_buffer::Anchor::max(), + format!("{replacement_text} "), + )], + cx, + ); + (*excerpt_id, text_anchor, multibuffer_anchor) + }) + else { + break; + }; + + let content_len = replacement_text.len(); + let Some(start_anchor) = multibuffer_anchor else { + continue; + }; + let Ok(end_anchor) = editor.update(cx, |editor, cx| { + let snapshot = editor.buffer().read(cx).snapshot(cx); + snapshot.anchor_before(start_anchor.to_offset(&snapshot) + content_len) + }) else { + continue; + }; + let image = Arc::new(image); + let Ok(Some((crease_id, tx))) = cx.update(|window, cx| { + insert_crease_for_mention( + excerpt_id, + text_anchor, + content_len, + MentionUri::PastedImage.name().into(), + IconName::Image.path().into(), + 
Some(Task::ready(Ok(image.clone())).shared()), + editor.clone(), + window, + cx, + ) + }) else { + continue; + }; + let task = cx + .spawn(async move |cx| { let format = image.format; let image = cx .update(|_, cx| LanguageModelImage::from_image(image, cx)) @@ -994,15 +1033,16 @@ impl MessageEditor { } else { Err("Failed to convert image".into()) } - } - }) - .shared(); + }) + .shared(); - self.mention_set - .mentions - .insert(crease_id, (MentionUri::PastedImage, task.clone())); + this.update(cx, |this, _| { + this.mention_set + .mentions + .insert(crease_id, (MentionUri::PastedImage, task.clone())) + }) + .ok(); - cx.spawn_in(window, async move |this, cx| { if task.await.notify_async_err(cx).is_none() { this.update(cx, |this, cx| { this.editor.update(cx, |editor, cx| { @@ -1012,9 +1052,9 @@ impl MessageEditor { }) .ok(); } - }) - .detach(); - } + } + }) + .detach(); } pub fn insert_dragged_files( diff --git a/crates/agent_ui/src/text_thread_editor.rs b/crates/agent_ui/src/text_thread_editor.rs index e7f16b8886c719cf60763f651fe9abb9fe33d828..a46bf530217050a1f01ca777ee5e2af108989fbf 100644 --- a/crates/agent_ui/src/text_thread_editor.rs +++ b/crates/agent_ui/src/text_thread_editor.rs @@ -1679,7 +1679,7 @@ impl TextThreadEditor { ) { cx.stop_propagation(); - let images = if let Some(item) = cx.read_from_clipboard() { + let mut images = if let Some(item) = cx.read_from_clipboard() { item.into_entries() .filter_map(|entry| { if let ClipboardEntry::Image(image) = entry { @@ -1693,6 +1693,40 @@ impl TextThreadEditor { Vec::new() }; + if let Some(paths) = cx.read_from_clipboard() { + for path in paths + .into_entries() + .filter_map(|entry| { + if let ClipboardEntry::ExternalPaths(paths) = entry { + Some(paths.paths().to_owned()) + } else { + None + } + }) + .flatten() + { + let Ok(content) = std::fs::read(path) else { + continue; + }; + let Ok(format) = image::guess_format(&content) else { + continue; + }; + images.push(gpui::Image::from_bytes( + match format { + image::ImageFormat::Png => gpui::ImageFormat::Png, + image::ImageFormat::Jpeg => gpui::ImageFormat::Jpeg, + image::ImageFormat::WebP => gpui::ImageFormat::Webp, + image::ImageFormat::Gif => gpui::ImageFormat::Gif, + image::ImageFormat::Bmp => gpui::ImageFormat::Bmp, + image::ImageFormat::Tiff => gpui::ImageFormat::Tiff, + image::ImageFormat::Ico => gpui::ImageFormat::Ico, + _ => continue, + }, + content, + )); + } + } + let metadata = if let Some(item) = cx.read_from_clipboard() { item.entries().first().and_then(|entry| { if let ClipboardEntry::String(text) = entry { diff --git a/crates/gpui/src/interactive.rs b/crates/gpui/src/interactive.rs index 9e18b7990d9a30f2fd5e50010766d5f14a33e4e2..03acf81addaad1ae9800ef476a2dc7d13e690cf7 100644 --- a/crates/gpui/src/interactive.rs +++ b/crates/gpui/src/interactive.rs @@ -509,7 +509,7 @@ impl Deref for MouseExitEvent { } /// A collection of paths from the platform, such as from a file drop. 
-#[derive(Debug, Clone, Default)] +#[derive(Debug, Clone, Default, Eq, PartialEq)] pub struct ExternalPaths(pub(crate) SmallVec<[PathBuf; 2]>); impl ExternalPaths { diff --git a/crates/gpui/src/platform.rs b/crates/gpui/src/platform.rs index e50f407dc313038032c433aa0243d3c7791c5c1f..7168d0179424028e7f823d39df0f6f51f45095ac 100644 --- a/crates/gpui/src/platform.rs +++ b/crates/gpui/src/platform.rs @@ -1573,6 +1573,8 @@ pub enum ClipboardEntry { String(ClipboardString), /// An image entry Image(Image), + /// A file entry + ExternalPaths(crate::ExternalPaths), } impl ClipboardItem { @@ -1613,16 +1615,29 @@ impl ClipboardItem { /// Returns None if there were no ClipboardString entries. pub fn text(&self) -> Option { let mut answer = String::new(); - let mut any_entries = false; for entry in self.entries.iter() { if let ClipboardEntry::String(ClipboardString { text, metadata: _ }) = entry { answer.push_str(text); - any_entries = true; } } - if any_entries { Some(answer) } else { None } + if answer.is_empty() { + for entry in self.entries.iter() { + if let ClipboardEntry::ExternalPaths(paths) = entry { + for path in &paths.0 { + use std::fmt::Write as _; + _ = write!(answer, "{}", path.display()); + } + } + } + } + + if !answer.is_empty() { + Some(answer) + } else { + None + } } /// If this item is one ClipboardEntry::String, returns its metadata. diff --git a/crates/gpui/src/platform/mac/platform.rs b/crates/gpui/src/platform/mac/platform.rs index 21dab9d3b617d505b80cb2d48b579916a9eb1238..cc6390cdb887a6c08fbb4520c9ab9fac4b50f9cf 100644 --- a/crates/gpui/src/platform/mac/platform.rs +++ b/crates/gpui/src/platform/mac/platform.rs @@ -1046,6 +1046,7 @@ impl Platform for MacPlatform { ClipboardEntry::Image(image) => { self.write_image_to_clipboard(image); } + ClipboardEntry::ExternalPaths(_) => {} }, None => { // Writing an empty list of entries just clears the clipboard. 
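For readers skimming the diff above, the core of this fix is a fallback that turns clipboard file paths into images before they reach the message editor. Condensed into a standalone sketch (the helper name `images_from_paths` is hypothetical; the `image`/`gpui` calls are the ones used in the patch, and the synchronous `std::fs::read` mirrors the `text_thread_editor.rs` variant, while `message_editor.rs` does the same work with `async_fs::read` on a background task):

```
// Sketch only: Windows Explorer places file paths (CF_HDROP) on the clipboard
// rather than image bitmaps, so read each file and sniff its format ourselves.
fn images_from_paths(paths: &[std::path::PathBuf]) -> Vec<gpui::Image> {
    let mut images = Vec::new();
    for path in paths {
        let Ok(content) = std::fs::read(path) else {
            continue;
        };
        let Ok(format) = image::guess_format(&content) else {
            continue;
        };
        // Map the sniffed `image` crate format onto the formats gpui understands,
        // skipping anything unsupported.
        let format = match format {
            image::ImageFormat::Png => gpui::ImageFormat::Png,
            image::ImageFormat::Jpeg => gpui::ImageFormat::Jpeg,
            image::ImageFormat::WebP => gpui::ImageFormat::Webp,
            image::ImageFormat::Gif => gpui::ImageFormat::Gif,
            image::ImageFormat::Bmp => gpui::ImageFormat::Bmp,
            image::ImageFormat::Tiff => gpui::ImageFormat::Tiff,
            image::ImageFormat::Ico => gpui::ImageFormat::Ico,
            _ => continue,
        };
        images.push(gpui::Image::from_bytes(format, content));
    }
    images
}
```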
diff --git a/crates/gpui/src/platform/windows/clipboard.rs b/crates/gpui/src/platform/windows/clipboard.rs index 90d97a84c0bedcc241f7432a7f14f09d46018b49..2a5e8dcbbe2426674f7eae173231e2919590ad49 100644 --- a/crates/gpui/src/platform/windows/clipboard.rs +++ b/crates/gpui/src/platform/windows/clipboard.rs @@ -1,7 +1,7 @@ use std::sync::LazyLock; use anyhow::Result; -use collections::{FxHashMap, FxHashSet}; +use collections::FxHashMap; use itertools::Itertools; use windows::Win32::{ Foundation::{HANDLE, HGLOBAL}, @@ -18,7 +18,9 @@ use windows::Win32::{ }; use windows_core::PCWSTR; -use crate::{ClipboardEntry, ClipboardItem, ClipboardString, Image, ImageFormat, hash}; +use crate::{ + ClipboardEntry, ClipboardItem, ClipboardString, ExternalPaths, Image, ImageFormat, hash, +}; // https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-dragqueryfilew const DRAGDROP_GET_FILES_COUNT: u32 = 0xFFFFFFFF; @@ -48,16 +50,6 @@ static FORMATS_MAP: LazyLock> = LazyLock::ne formats_map.insert(CF_HDROP.0 as u32, ClipboardFormatType::Files); formats_map }); -static FORMATS_SET: LazyLock> = LazyLock::new(|| { - let mut formats_map = FxHashSet::default(); - formats_map.insert(CF_UNICODETEXT.0 as u32); - formats_map.insert(*CLIPBOARD_PNG_FORMAT); - formats_map.insert(*CLIPBOARD_GIF_FORMAT); - formats_map.insert(*CLIPBOARD_JPG_FORMAT); - formats_map.insert(*CLIPBOARD_SVG_FORMAT); - formats_map.insert(CF_HDROP.0 as u32); - formats_map -}); static IMAGE_FORMATS_MAP: LazyLock> = LazyLock::new(|| { let mut formats_map = FxHashMap::default(); formats_map.insert(*CLIPBOARD_PNG_FORMAT, ImageFormat::Png); @@ -138,6 +130,11 @@ fn register_clipboard_format(format: PCWSTR) -> u32 { std::io::Error::last_os_error() ); } + log::debug!( + "Registered clipboard format {} as {}", + unsafe { format.display() }, + ret + ); ret } @@ -159,6 +156,7 @@ fn write_to_clipboard_inner(item: ClipboardItem) -> Result<()> { ClipboardEntry::Image(image) => { write_image_to_clipboard(image)?; } + ClipboardEntry::ExternalPaths(_) => {} }, None => { // Writing an empty list of entries just clears the clipboard. @@ -249,19 +247,33 @@ fn with_best_match_format(f: F) -> Option where F: Fn(u32) -> Option, { + let mut text = None; + let mut image = None; + let mut files = None; let count = unsafe { CountClipboardFormats() }; let mut clipboard_format = 0; for _ in 0..count { clipboard_format = unsafe { EnumClipboardFormats(clipboard_format) }; - let Some(item_format) = FORMATS_SET.get(&clipboard_format) else { + let Some(item_format) = FORMATS_MAP.get(&clipboard_format) else { continue; }; - if let Some(entry) = f(*item_format) { - return Some(ClipboardItem { - entries: vec![entry], - }); + let bucket = match item_format { + ClipboardFormatType::Text if text.is_none() => &mut text, + ClipboardFormatType::Image if image.is_none() => &mut image, + ClipboardFormatType::Files if files.is_none() => &mut files, + _ => continue, + }; + if let Some(entry) = f(clipboard_format) { + *bucket = Some(entry); } } + + if let Some(entry) = [image, files, text].into_iter().flatten().next() { + return Some(ClipboardItem { + entries: vec![entry], + }); + } + // log the formats that we don't support yet. 
{ clipboard_format = 0; @@ -346,18 +358,17 @@ fn read_image_for_type(format_number: u32, format: ImageFormat) -> Option Option { - let text = with_clipboard_data(CF_HDROP.0 as u32, |data_ptr, _size| { + let filenames = with_clipboard_data(CF_HDROP.0 as u32, |data_ptr, _size| { let hdrop = HDROP(data_ptr); - let mut filenames = String::new(); + let mut filenames = Vec::new(); with_file_names(hdrop, |file_name| { - filenames.push_str(&file_name); + filenames.push(std::path::PathBuf::from(file_name)); }); filenames })?; - Some(ClipboardEntry::String(ClipboardString { - text, - metadata: None, - })) + Some(ClipboardEntry::ExternalPaths(ExternalPaths( + filenames.into(), + ))) } fn with_clipboard_data(format: u32, f: F) -> Option From 46ad6c0bbb91b3c7d6d181891a3f043068a2642a Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 18:59:48 +0100 Subject: [PATCH 0154/1030] ci: Remove remaining nextest compiles (#42630) Follow up to https://github.com/zed-industries/zed/pull/42556 Release Notes: - N/A *or* Added/Fixed/Improved ... --- .github/actions/run_tests/action.yml | 6 ++---- .github/actions/run_tests_windows/action.yml | 3 +-- .github/workflows/bump_patch_version.yml | 2 +- .github/workflows/compare_perf.yml | 3 +-- .github/workflows/deploy_collab.yml | 4 +--- crates/languages/src/rust.rs | 2 +- crates/zed/src/main.rs | 4 +++- tooling/xtask/src/tasks/workflows/compare_perf.rs | 4 ++-- 8 files changed, 12 insertions(+), 16 deletions(-) diff --git a/.github/actions/run_tests/action.yml b/.github/actions/run_tests/action.yml index 3bc28249f3b8b2a08a48be040177530c5ecfd407..a071aba3a87dcf8e8f48f740115cfddf48b9f805 100644 --- a/.github/actions/run_tests/action.yml +++ b/.github/actions/run_tests/action.yml @@ -4,10 +4,8 @@ description: "Runs the tests" runs: using: "composite" steps: - - name: Install Rust - shell: bash -euxo pipefail {0} - run: | - cargo install cargo-nextest --locked + - name: Install nextest + uses: taiki-e/install-action@nextest - name: Install Node uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 diff --git a/.github/actions/run_tests_windows/action.yml b/.github/actions/run_tests_windows/action.yml index d85d47cb969e22ca3c73c9ab8caca279a9b5ba88..307b73f363b7d5fd7a3c9e5082c4f17d622ec165 100644 --- a/.github/actions/run_tests_windows/action.yml +++ b/.github/actions/run_tests_windows/action.yml @@ -11,9 +11,8 @@ runs: using: "composite" steps: - name: Install test runner - shell: powershell working-directory: ${{ inputs.working-directory }} - run: cargo install cargo-nextest --locked + uses: taiki-e/install-action@nextest - name: Install Node uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 diff --git a/.github/workflows/bump_patch_version.yml b/.github/workflows/bump_patch_version.yml index bfaf7a271b5e31b60c999c7dcf8d17538d135355..e1ae890043f31269a9c894f9f8ba408b3db81ffb 100644 --- a/.github/workflows/bump_patch_version.yml +++ b/.github/workflows/bump_patch_version.yml @@ -42,7 +42,7 @@ jobs: exit 1 ;; esac - which cargo-set-version > /dev/null || cargo install cargo-edit + which cargo-set-version > /dev/null || cargo install cargo-edit -f --no-default-features --features "set-version" output="$(cargo set-version -p zed --bump patch 2>&1 | sed 's/.* //')" export GIT_COMMITTER_NAME="Zed Bot" export GIT_COMMITTER_EMAIL="hi@zed.dev" diff --git a/.github/workflows/compare_perf.yml b/.github/workflows/compare_perf.yml index 5bcb733f3f21c95e530d7c221df080997dfc24eb..48fc850f8f039d5c25071ba91381ea9f905ab811 100644 --- 
a/.github/workflows/compare_perf.yml +++ b/.github/workflows/compare_perf.yml @@ -39,8 +39,7 @@ jobs: run: ./script/download-wasi-sdk shell: bash -euxo pipefail {0} - name: compare_perf::run_perf::install_hyperfine - run: cargo install hyperfine - shell: bash -euxo pipefail {0} + uses: taiki-e/install-action@hyperfine - name: steps::git_checkout run: git fetch origin ${{ inputs.base }} && git checkout ${{ inputs.base }} shell: bash -euxo pipefail {0} diff --git a/.github/workflows/deploy_collab.yml b/.github/workflows/deploy_collab.yml index c61879faa8cd0a5dbdbed03a140f8e558f13322b..ce0c0eac40c8c34992f8838af396e75e6cecc0c8 100644 --- a/.github/workflows/deploy_collab.yml +++ b/.github/workflows/deploy_collab.yml @@ -43,9 +43,7 @@ jobs: fetch-depth: 0 - name: Install cargo nextest - shell: bash -euxo pipefail {0} - run: | - cargo install cargo-nextest --locked + uses: taiki-e/install-action@nextest - name: Limit target directory size shell: bash -euxo pipefail {0} diff --git a/crates/languages/src/rust.rs b/crates/languages/src/rust.rs index f1b205f83a6bd3a9b26cb39da854817ebba11361..c2b0845940d1639629d59b634e9ece73c9e4cf3a 100644 --- a/crates/languages/src/rust.rs +++ b/crates/languages/src/rust.rs @@ -442,7 +442,7 @@ impl LspInstaller for RustLspAdapter { // It is surprisingly common for ~/.cargo/bin/rust-analyzer to be a symlink to // /usr/bin/rust-analyzer that fails when you run it; so we need to test it. - log::info!("found rust-analyzer in PATH. trying to run `rust-analyzer --help`"); + log::debug!("found rust-analyzer in PATH. trying to run `rust-analyzer --help`"); let result = delegate .try_exec(LanguageServerBinary { path: path.clone(), diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 01318a7636bb42916f115ad55339ff4df0937e83..422d8bbbf72a68668221ad3ee7ff2a9dc9947d45 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -341,7 +341,9 @@ pub fn main() { } else { None }; - log::info!("Using git binary path: {:?}", git_binary_path); + if let Some(git_binary_path) = &git_binary_path { + log::info!("Using git binary path: {:?}", git_binary_path); + } let fs = Arc::new(RealFs::new(git_binary_path, app.background_executor())); let user_settings_file_rx = watch_config_file( diff --git a/tooling/xtask/src/tasks/workflows/compare_perf.rs b/tooling/xtask/src/tasks/workflows/compare_perf.rs index b1d6e0c60bd6b49893de8877e7d1fd51e5967679..db9f21de15fe159c369ad603e3ab0ff93e1cc7f9 100644 --- a/tooling/xtask/src/tasks/workflows/compare_perf.rs +++ b/tooling/xtask/src/tasks/workflows/compare_perf.rs @@ -35,8 +35,8 @@ pub fn run_perf(base: &Input, head: &Input, crate_name: &Input) -> NamedJob { )) } - fn install_hyperfine() -> Step { - named::bash("cargo install hyperfine") + fn install_hyperfine() -> Step { + named::uses("taiki-e", "install-action", "hyperfine") } fn compare_runs(head: &Input, base: &Input) -> Step { From 6bf5e92a25e6bb87dfdecc861b5a94c4a6a632c9 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Mon, 17 Nov 2025 11:01:34 -0700 Subject: [PATCH 0155/1030] Revert "Keep selection in `SwitchToHelixNormalMode` (#41583)" (#42892) Closes #ISSUE Release Notes: - Fixes vim "go to definition" making a selection --- assets/keymaps/vim.json | 6 --- crates/agent_ui/src/text_thread_editor.rs | 3 +- crates/debugger_tools/src/dap_log.rs | 6 +-- crates/editor/src/editor.rs | 48 ++++++----------- crates/editor/src/items.rs | 3 +- crates/language_tools/src/lsp_log_view.rs | 6 +-- crates/search/src/buffer_search.rs | 21 +++----- crates/search/src/project_search.rs | 9 
++-- crates/terminal_view/src/terminal_view.rs | 1 - crates/vim/src/helix.rs | 64 ++--------------------- crates/vim/src/motion.rs | 45 +++++++--------- crates/vim/src/normal/search.rs | 19 +++---- crates/vim/src/state.rs | 8 +-- crates/vim/src/test.rs | 20 +++++++ crates/vim/src/test/vim_test_context.rs | 1 + crates/vim/src/vim.rs | 4 +- crates/vim/src/visual.rs | 8 ++- crates/workspace/src/searchable.rs | 5 +- 18 files changed, 94 insertions(+), 183 deletions(-) diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index c7b83daab67689d10a6b7c1e28312ceff4551e08..a3530140b39df88a0929df0a21cfb9379a9fc8bd 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -421,12 +421,6 @@ "ctrl-[": "editor::Cancel" } }, - { - "context": "vim_mode == helix_select && !menu", - "bindings": { - "escape": "vim::SwitchToHelixNormalMode" - } - }, { "context": "(vim_mode == helix_normal || vim_mode == helix_select) && !menu", "bindings": { diff --git a/crates/agent_ui/src/text_thread_editor.rs b/crates/agent_ui/src/text_thread_editor.rs index a46bf530217050a1f01ca777ee5e2af108989fbf..84f04f8821b2dd540e54f41f567a0b7735116875 100644 --- a/crates/agent_ui/src/text_thread_editor.rs +++ b/crates/agent_ui/src/text_thread_editor.rs @@ -2626,12 +2626,11 @@ impl SearchableItem for TextThreadEditor { &mut self, index: usize, matches: &[Self::Match], - collapse: bool, window: &mut Window, cx: &mut Context, ) { self.editor.update(cx, |editor, cx| { - editor.activate_match(index, matches, collapse, window, cx); + editor.activate_match(index, matches, window, cx); }); } diff --git a/crates/debugger_tools/src/dap_log.rs b/crates/debugger_tools/src/dap_log.rs index 738c60870f2200e11e710f9c94d02682b94677f7..4c994ad7eb749dcb5828daa83bad34a579f9f14c 100644 --- a/crates/debugger_tools/src/dap_log.rs +++ b/crates/debugger_tools/src/dap_log.rs @@ -1029,13 +1029,11 @@ impl SearchableItem for DapLogView { &mut self, index: usize, matches: &[Self::Match], - collapse: bool, window: &mut Window, cx: &mut Context, ) { - self.editor.update(cx, |e, cx| { - e.activate_match(index, matches, collapse, window, cx) - }) + self.editor + .update(cx, |e, cx| e.activate_match(index, matches, window, cx)) } fn select_matches( diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 8cb3d1abf7d026e7201c60834f355d0f5e56671d..339b0354c3fc0859cfe791fd69336535645c14c8 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1099,6 +1099,7 @@ pub struct Editor { searchable: bool, cursor_shape: CursorShape, current_line_highlight: Option, + collapse_matches: bool, autoindent_mode: Option, workspace: Option<(WeakEntity, Option)>, input_enabled: bool, @@ -2211,7 +2212,7 @@ impl Editor { .unwrap_or_default(), current_line_highlight: None, autoindent_mode: Some(AutoindentMode::EachLine), - + collapse_matches: false, workspace: None, input_enabled: !is_minimap, use_modal_editing: full_mode, @@ -2384,7 +2385,10 @@ impl Editor { } } EditorEvent::Edited { .. 
} => { - if vim_flavor(cx).is_none() { + let vim_mode = vim_mode_setting::VimModeSetting::try_get(cx) + .map(|vim_mode| vim_mode.0) + .unwrap_or(false); + if !vim_mode { let display_map = editor.display_snapshot(cx); let selections = editor.selections.all_adjusted_display(&display_map); let pop_state = editor @@ -3013,12 +3017,12 @@ impl Editor { self.current_line_highlight = current_line_highlight; } - pub fn range_for_match( - &self, - range: &Range, - collapse: bool, - ) -> Range { - if collapse { + pub fn set_collapse_matches(&mut self, collapse_matches: bool) { + self.collapse_matches = collapse_matches; + } + + pub fn range_for_match(&self, range: &Range) -> Range { + if self.collapse_matches { return range.start..range.start; } range.clone() @@ -16921,7 +16925,7 @@ impl Editor { editor.update_in(cx, |editor, window, cx| { let range = target_range.to_point(target_buffer.read(cx)); - let range = editor.range_for_match(&range, false); + let range = editor.range_for_match(&range); let range = collapse_multiline_range(range); if !split @@ -21761,7 +21765,9 @@ impl Editor { .and_then(|e| e.to_str()) .map(|a| a.to_string())); - let vim_mode = vim_flavor(cx).is_some(); + let vim_mode = vim_mode_setting::VimModeSetting::try_get(cx) + .map(|vim_mode| vim_mode.0) + .unwrap_or(false); let edit_predictions_provider = all_language_settings(file, cx).edit_predictions.provider; let copilot_enabled = edit_predictions_provider @@ -22396,28 +22402,6 @@ fn edit_for_markdown_paste<'a>( (range, new_text) } -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] -pub enum VimFlavor { - Vim, - Helix, -} - -pub fn vim_flavor(cx: &App) -> Option { - if vim_mode_setting::HelixModeSetting::try_get(cx) - .map(|helix_mode| helix_mode.0) - .unwrap_or(false) - { - Some(VimFlavor::Helix) - } else if vim_mode_setting::VimModeSetting::try_get(cx) - .map(|vim_mode| vim_mode.0) - .unwrap_or(false) - { - Some(VimFlavor::Vim) - } else { - None // neither vim nor helix mode - } -} - fn process_completion_for_edit( completion: &Completion, intent: CompletionIntent, diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 12590e4b3f95648dd653d408252ced460e2e834e..a860e137a856a2e7982f1177c205391b80625944 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1586,12 +1586,11 @@ impl SearchableItem for Editor { &mut self, index: usize, matches: &[Range], - collapse: bool, window: &mut Window, cx: &mut Context, ) { self.unfold_ranges(&[matches[index].clone()], false, true, cx); - let range = self.range_for_match(&matches[index], collapse); + let range = self.range_for_match(&matches[index]); let autoscroll = if EditorSettings::get_global(cx).search.center_on_match { Autoscroll::center() } else { diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index ef9cc1ef3af88310d5870aa4d2da3d1a077139f1..d480eadc73b9546e5a59b204b036a3ff88a018c7 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -812,13 +812,11 @@ impl SearchableItem for LspLogView { &mut self, index: usize, matches: &[Self::Match], - collapse: bool, window: &mut Window, cx: &mut Context, ) { - self.editor.update(cx, |e, cx| { - e.activate_match(index, matches, collapse, window, cx) - }) + self.editor + .update(cx, |e, cx| e.activate_match(index, matches, window, cx)) } fn select_matches( diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index 
764d0a81f7ac8c7fd03fe63c478aea14b3e2e31b..a601f5a683f2c464e792c351c566358212bdf312 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -10,9 +10,8 @@ use any_vec::AnyVec; use anyhow::Context as _; use collections::HashMap; use editor::{ - DisplayPoint, Editor, EditorSettings, VimFlavor, + DisplayPoint, Editor, EditorSettings, actions::{Backtab, Tab}, - vim_flavor, }; use futures::channel::oneshot; use gpui::{ @@ -828,8 +827,7 @@ impl BufferSearchBar { .searchable_items_with_matches .get(&active_searchable_item.downgrade()) { - let collapse = editor::vim_flavor(cx) == Some(VimFlavor::Vim); - active_searchable_item.activate_match(match_ix, matches, collapse, window, cx) + active_searchable_item.activate_match(match_ix, matches, window, cx) } } @@ -976,8 +974,7 @@ impl BufferSearchBar { window: &mut Window, cx: &mut Context, ) { - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); - self.select_match(Direction::Next, 1, collapse, window, cx); + self.select_match(Direction::Next, 1, window, cx); } fn select_prev_match( @@ -986,8 +983,7 @@ impl BufferSearchBar { window: &mut Window, cx: &mut Context, ) { - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); - self.select_match(Direction::Prev, 1, collapse, window, cx); + self.select_match(Direction::Prev, 1, window, cx); } pub fn select_all_matches( @@ -1012,7 +1008,6 @@ impl BufferSearchBar { &mut self, direction: Direction, count: usize, - collapse: bool, window: &mut Window, cx: &mut Context, ) { @@ -1035,7 +1030,7 @@ impl BufferSearchBar { .match_index_for_direction(matches, index, direction, count, window, cx); searchable_item.update_matches(matches, window, cx); - searchable_item.activate_match(new_match_index, matches, collapse, window, cx); + searchable_item.activate_match(new_match_index, matches, window, cx); } } @@ -1049,8 +1044,7 @@ impl BufferSearchBar { return; } searchable_item.update_matches(matches, window, cx); - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); - searchable_item.activate_match(0, matches, collapse, window, cx); + searchable_item.activate_match(0, matches, window, cx); } } @@ -1065,8 +1059,7 @@ impl BufferSearchBar { } let new_match_index = matches.len() - 1; searchable_item.update_matches(matches, window, cx); - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); - searchable_item.activate_match(new_match_index, matches, collapse, window, cx); + searchable_item.activate_match(new_match_index, matches, window, cx); } } diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 001b7e3fbb09b8d4888f2e4744ca8ecb1096d7a6..1768b0f18541fd289126bad77ae46eded1aad326 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -9,12 +9,11 @@ use anyhow::Context as _; use collections::HashMap; use editor::{ Anchor, Editor, EditorEvent, EditorSettings, MAX_TAB_TITLE_LEN, MultiBuffer, PathKey, - SelectionEffects, VimFlavor, + SelectionEffects, actions::{Backtab, SelectAll, Tab}, items::active_match_index, multibuffer_context_lines, scroll::Autoscroll, - vim_flavor, }; use futures::{StreamExt, stream::FuturesOrdered}; use gpui::{ @@ -1431,8 +1430,7 @@ impl ProjectSearchView { let range_to_select = match_ranges[new_index].clone(); self.results_editor.update(cx, |editor, cx| { - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); - let range_to_select = editor.range_for_match(&range_to_select, collapse); + let range_to_select = editor.range_for_match(&range_to_select); let autoscroll = if 
EditorSettings::get_global(cx).search.center_on_match { Autoscroll::center() } else { @@ -1509,10 +1507,9 @@ impl ProjectSearchView { let is_new_search = self.search_id != prev_search_id; self.results_editor.update(cx, |editor, cx| { if is_new_search { - let collapse = vim_flavor(cx) == Some(VimFlavor::Vim); let range_to_select = match_ranges .first() - .map(|range| editor.range_for_match(range, collapse)); + .map(|range| editor.range_for_match(range)); editor.change_selections(Default::default(), window, cx, |s| { s.select_ranges(range_to_select) }); diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 43714a5cfeee690644e9b772d89c12bcbd909964..b9dabee7e82064ebe055893306241f995654b82b 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1452,7 +1452,6 @@ impl SearchableItem for TerminalView { &mut self, index: usize, _: &[Self::Match], - _collapse: bool, _window: &mut Window, cx: &mut Context, ) { diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index ee7c0a14fb721116c3fc1f2c3d1bf7b716b43f18..6788a186fb45222f7b09fe756862e6cb337c6d90 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -450,7 +450,7 @@ impl Vim { prior_selections, prior_operator: self.operator_stack.last().cloned(), prior_mode: self.mode, - is_helix_regex_search: true, + helix_select: true, } }); } @@ -1278,24 +1278,6 @@ mod test { cx.assert_state("«one ˇ»two", Mode::HelixSelect); } - #[gpui::test] - async fn test_exit_visual_mode(cx: &mut gpui::TestAppContext) { - let mut cx = VimTestContext::new(cx, true).await; - - cx.set_state("ˇone two", Mode::Normal); - cx.simulate_keystrokes("v w"); - cx.assert_state("«one tˇ»wo", Mode::Visual); - cx.simulate_keystrokes("escape"); - cx.assert_state("one ˇtwo", Mode::Normal); - - cx.enable_helix(); - cx.set_state("ˇone two", Mode::HelixNormal); - cx.simulate_keystrokes("v w"); - cx.assert_state("«one ˇ»two", Mode::HelixSelect); - cx.simulate_keystrokes("escape"); - cx.assert_state("«one ˇ»two", Mode::HelixNormal); - } - #[gpui::test] async fn test_helix_select_regex(cx: &mut gpui::TestAppContext) { let mut cx = VimTestContext::new(cx, true).await; @@ -1315,47 +1297,9 @@ mod test { cx.simulate_keystrokes("enter"); cx.assert_state("«oneˇ» two «oneˇ»", Mode::HelixNormal); - // TODO: change "search_in_selection" to not perform any search when in helix select mode with no selection - // cx.set_state("ˇstuff one two one", Mode::HelixNormal); - // cx.simulate_keystrokes("s o n e enter"); - // cx.assert_state("ˇstuff one two one", Mode::HelixNormal); - } - - #[gpui::test] - async fn test_helix_select_next_match(cx: &mut gpui::TestAppContext) { - let mut cx = VimTestContext::new(cx, true).await; - - cx.set_state("ˇhello two one two one two one", Mode::Visual); - cx.simulate_keystrokes("/ o n e"); - cx.simulate_keystrokes("enter"); - cx.simulate_keystrokes("n n"); - cx.assert_state("«hello two one two one two oˇ»ne", Mode::Visual); - - cx.set_state("ˇhello two one two one two one", Mode::Normal); - cx.simulate_keystrokes("/ o n e"); - cx.simulate_keystrokes("enter"); - cx.simulate_keystrokes("n n"); - cx.assert_state("hello two one two one two ˇone", Mode::Normal); - - cx.set_state("ˇhello two one two one two one", Mode::Normal); - cx.simulate_keystrokes("/ o n e"); - cx.simulate_keystrokes("enter"); - cx.simulate_keystrokes("n g n g n"); - cx.assert_state("hello two one two «one two oneˇ»", Mode::Visual); - - cx.enable_helix(); - - cx.set_state("ˇhello two 
one two one two one", Mode::HelixNormal); - cx.simulate_keystrokes("/ o n e"); - cx.simulate_keystrokes("enter"); - cx.simulate_keystrokes("n n"); - cx.assert_state("hello two one two one two «oneˇ»", Mode::HelixNormal); - - cx.set_state("ˇhello two one two one two one", Mode::HelixSelect); - cx.simulate_keystrokes("/ o n e"); - cx.simulate_keystrokes("enter"); - cx.simulate_keystrokes("n n"); - cx.assert_state("ˇhello two «oneˇ» two «oneˇ» two «oneˇ»", Mode::HelixSelect); + cx.set_state("ˇone two one", Mode::HelixNormal); + cx.simulate_keystrokes("s o n e enter"); + cx.assert_state("ˇone two one", Mode::HelixNormal); } #[gpui::test] diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index 0264ea9176fb2264bc693888fb861ff33d5be706..c0be92b38e46e7d8c32c9da4a6980195ef71a91e 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -672,40 +672,31 @@ pub fn register(editor: &mut Editor, cx: &mut Context) { impl Vim { pub(crate) fn search_motion(&mut self, m: Motion, window: &mut Window, cx: &mut Context) { - let Motion::ZedSearchResult { - prior_selections, - new_selections, + if let Motion::ZedSearchResult { + prior_selections, .. } = &m - else { - return; - }; - - match self.mode { - Mode::Visual | Mode::VisualLine | Mode::VisualBlock => { - if !prior_selections.is_empty() { - self.update_editor(cx, |_, editor, cx| { - editor.change_selections(Default::default(), window, cx, |s| { - s.select_ranges(prior_selections.iter().cloned()); + { + match self.mode { + Mode::Visual | Mode::VisualLine | Mode::VisualBlock => { + if !prior_selections.is_empty() { + self.update_editor(cx, |_, editor, cx| { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_ranges(prior_selections.iter().cloned()) + }) }); - }); + } } - self.motion(m, window, cx); - } - Mode::Normal | Mode::Replace | Mode::Insert => { - if self.active_operator().is_some() { - self.motion(m, window, cx); + Mode::Normal | Mode::Replace | Mode::Insert => { + if self.active_operator().is_none() { + return; + } } - } - Mode::HelixNormal => {} - Mode::HelixSelect => { - self.update_editor(cx, |_, editor, cx| { - editor.change_selections(Default::default(), window, cx, |s| { - s.select_ranges(prior_selections.iter().chain(new_selections).cloned()); - }); - }); + Mode::HelixNormal | Mode::HelixSelect => {} } } + + self.motion(m, window, cx) } pub(crate) fn motion(&mut self, motion: Motion, window: &mut Window, cx: &mut Context) { diff --git a/crates/vim/src/normal/search.rs b/crates/vim/src/normal/search.rs index 2e80a08eb824b93783bf1249970e5e7ad7378ff2..6c4294a474dad13c9d00e58ab117a4a6a74c28d3 100644 --- a/crates/vim/src/normal/search.rs +++ b/crates/vim/src/normal/search.rs @@ -1,6 +1,5 @@ -use editor::{Editor, EditorSettings, VimFlavor}; +use editor::{Editor, EditorSettings}; use gpui::{Action, Context, Window, actions}; - use language::Point; use schemars::JsonSchema; use search::{BufferSearchBar, SearchOptions, buffer_search}; @@ -196,7 +195,7 @@ impl Vim { prior_selections, prior_operator: self.operator_stack.last().cloned(), prior_mode, - is_helix_regex_search: false, + helix_select: false, } }); } @@ -220,7 +219,7 @@ impl Vim { let new_selections = self.editor_selections(window, cx); let result = pane.update(cx, |pane, cx| { let search_bar = pane.toolbar().read(cx).item_of_type::()?; - if self.search.is_helix_regex_search { + if self.search.helix_select { search_bar.update(cx, |search_bar, cx| { search_bar.select_all_matches(&Default::default(), window, cx) }); @@ -241,8 +240,7 @@ impl 
Vim { count = count.saturating_sub(1) } self.search.count = 1; - let collapse = !self.mode.is_helix(); - search_bar.select_match(direction, count, collapse, window, cx); + search_bar.select_match(direction, count, window, cx); search_bar.focus_editor(&Default::default(), window, cx); let prior_selections: Vec<_> = self.search.prior_selections.drain(..).collect(); @@ -309,8 +307,7 @@ impl Vim { if !search_bar.has_active_match() || !search_bar.show(window, cx) { return false; } - let collapse = !self.mode.is_helix(); - search_bar.select_match(direction, count, collapse, window, cx); + search_bar.select_match(direction, count, window, cx); true }) }); @@ -319,7 +316,6 @@ impl Vim { } let new_selections = self.editor_selections(window, cx); - self.search_motion( Motion::ZedSearchResult { prior_selections, @@ -385,8 +381,7 @@ impl Vim { cx.spawn_in(window, async move |_, cx| { search.await?; search_bar.update_in(cx, |search_bar, window, cx| { - let collapse = editor::vim_flavor(cx) == Some(VimFlavor::Vim); - search_bar.select_match(direction, count, collapse, window, cx); + search_bar.select_match(direction, count, window, cx); vim.update(cx, |vim, cx| { let new_selections = vim.editor_selections(window, cx); @@ -449,7 +444,7 @@ impl Vim { cx.spawn_in(window, async move |_, cx| { search.await?; search_bar.update_in(cx, |search_bar, window, cx| { - search_bar.select_match(direction, 1, true, window, cx) + search_bar.select_match(direction, 1, window, cx) })?; anyhow::Ok(()) }) diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index 8a7b85349273176f67e8eed8b6939ef047f83b4c..d1c52e8f53a2214c3e46473c59b15ea1f6f4f407 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -67,16 +67,12 @@ impl Display for Mode { } impl Mode { - pub fn is_visual(self) -> bool { + pub fn is_visual(&self) -> bool { match self { Self::Visual | Self::VisualLine | Self::VisualBlock | Self::HelixSelect => true, Self::Normal | Self::Insert | Self::Replace | Self::HelixNormal => false, } } - - pub fn is_helix(self) -> bool { - matches!(self, Mode::HelixNormal | Mode::HelixSelect) - } } #[derive(Clone, Debug, PartialEq)] @@ -991,7 +987,7 @@ pub struct SearchState { pub prior_selections: Vec>, pub prior_operator: Option, pub prior_mode: Mode, - pub is_helix_regex_search: bool, + pub helix_select: bool, } impl Operator { diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs index d6aa116e8ddb12c0f3aff15fbe971b701fe90ab7..3cd0646ff4fc0a6966f12db75b64999e3655ab98 100644 --- a/crates/vim/src/test.rs +++ b/crates/vim/src/test.rs @@ -1139,6 +1139,26 @@ async fn test_rename(cx: &mut gpui::TestAppContext) { cx.assert_state("const afterˇ = 2; console.log(after)", Mode::Normal) } +#[gpui::test] +async fn test_go_to_definition(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new_typescript(cx).await; + + cx.set_state("const before = 2; console.log(beforˇe)", Mode::Normal); + let def_range = cx.lsp_range("const «beforeˇ» = 2; console.log(before)"); + let mut go_to_request = + cx.set_request_handler::(move |url, _, _| async move { + Ok(Some(lsp::GotoDefinitionResponse::Scalar( + lsp::Location::new(url.clone(), def_range), + ))) + }); + + cx.simulate_keystrokes("g d"); + go_to_request.next().await.unwrap(); + cx.run_until_parked(); + + cx.assert_state("const ˇbefore = 2; console.log(before)", Mode::Normal); +} + #[perf] #[gpui::test] async fn test_remap(cx: &mut gpui::TestAppContext) { diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 
4d6859f1e56976fbb0d84d475e614325e0e52795..1e92715d2b3c874f110c0fa76b2a7d747fbf3b51 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -59,6 +59,7 @@ impl VimTestContext { prepare_provider: Some(true), work_done_progress_options: Default::default(), })), + definition_provider: Some(lsp::OneOf::Left(true)), ..Default::default() }, cx, diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index 6ffdbcce910c10229dc7c2e6df95055c5c812f28..a0efd1ee29a3c72793c331cf4ccbeb38444bd55b 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -668,7 +668,7 @@ impl Vim { editor, cx, |vim, _: &SwitchToHelixNormalMode, window, cx| { - vim.switch_mode(Mode::HelixNormal, true, window, cx) + vim.switch_mode(Mode::HelixNormal, false, window, cx) }, ); Vim::action(editor, cx, |_, _: &PushForcedMotion, _, cx| { @@ -954,6 +954,7 @@ impl Vim { fn deactivate(editor: &mut Editor, cx: &mut Context) { editor.set_cursor_shape(CursorShape::Bar, cx); editor.set_clip_at_line_ends(false, cx); + editor.set_collapse_matches(false); editor.set_input_enabled(true); editor.set_autoindent(true); editor.selections.set_line_mode(false); @@ -1929,6 +1930,7 @@ impl Vim { self.update_editor(cx, |vim, editor, cx| { editor.set_cursor_shape(vim.cursor_shape(cx), cx); editor.set_clip_at_line_ends(vim.clip_at_line_ends(), cx); + editor.set_collapse_matches(true); editor.set_input_enabled(vim.editor_input_enabled()); editor.set_autoindent(vim.should_autoindent()); editor diff --git a/crates/vim/src/visual.rs b/crates/vim/src/visual.rs index 4172de80afdc1beacbf3ea342846de03953e1fc6..0abba86e993a76b6c2a1c18f02d68d72d092e78c 100644 --- a/crates/vim/src/visual.rs +++ b/crates/vim/src/visual.rs @@ -847,6 +847,9 @@ impl Vim { let mut start_selection = 0usize; let mut end_selection = 0usize; + self.update_editor(cx, |_, editor, _| { + editor.set_collapse_matches(false); + }); if vim_is_normal { pane.update(cx, |pane, cx| { if let Some(search_bar) = pane.toolbar().read(cx).item_of_type::() @@ -857,7 +860,7 @@ impl Vim { } // without update_match_index there is a bug when the cursor is before the first match search_bar.update_match_index(window, cx); - search_bar.select_match(direction.opposite(), 1, false, window, cx); + search_bar.select_match(direction.opposite(), 1, window, cx); }); } }); @@ -875,7 +878,7 @@ impl Vim { if let Some(search_bar) = pane.toolbar().read(cx).item_of_type::() { search_bar.update(cx, |search_bar, cx| { search_bar.update_match_index(window, cx); - search_bar.select_match(direction, count, false, window, cx); + search_bar.select_match(direction, count, window, cx); match_exists = search_bar.match_exists(window, cx); }); } @@ -902,6 +905,7 @@ impl Vim { editor.change_selections(Default::default(), window, cx, |s| { s.select_ranges([start_selection..end_selection]); }); + editor.set_collapse_matches(true); }); match self.maybe_pop_operator() { diff --git a/crates/workspace/src/searchable.rs b/crates/workspace/src/searchable.rs index 9907df3be3eb8594f6cc8f63f05e2e93befd416c..0becddc1641e8abb388837187f47f0a80327a6b5 100644 --- a/crates/workspace/src/searchable.rs +++ b/crates/workspace/src/searchable.rs @@ -104,7 +104,6 @@ pub trait SearchableItem: Item + EventEmitter { &mut self, index: usize, matches: &[Self::Match], - collapse: bool, window: &mut Window, cx: &mut Context, ); @@ -186,7 +185,6 @@ pub trait SearchableItemHandle: ItemHandle { &self, index: usize, matches: &AnyVec, - collapse: bool, window: &mut Window, cx: &mut App, ); @@ -279,13 +277,12 @@ impl 
SearchableItemHandle for Entity { &self, index: usize, matches: &AnyVec, - collapse: bool, window: &mut Window, cx: &mut App, ) { let matches = matches.downcast_ref().unwrap(); self.update(cx, |this, cx| { - this.activate_match(index, matches.as_slice(), collapse, window, cx) + this.activate_match(index, matches.as_slice(), window, cx) }); } From faa1136651c10d0eeed59ec6aefb29ad5d798f6a Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 15:05:38 -0300 Subject: [PATCH 0156/1030] agent_ui: Don't create a new terminal when hitting the new thread binding from the terminal (#42898) Closes https://github.com/zed-industries/zed/issues/32701 Release Notes: - agent: Fixed a bug where hitting the `NewThread` keybinding when focused inside a terminal within the agent panel would create a new terminal tab instead of a new thread. --- crates/agent_ui/src/acp/thread_view.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index f4c76b10573dd2e36e797c20230739d6d6a77e46..82237d86ba9f66b5d68321e03092660dea29d65d 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -51,7 +51,7 @@ use ui::{ PopoverMenuHandle, SpinnerLabel, TintColor, Tooltip, WithScrollbar, prelude::*, }; use util::{ResultExt, size::format_file_size, time::duration_alt_display}; -use workspace::{CollaboratorId, Workspace}; +use workspace::{CollaboratorId, NewTerminal, Workspace}; use zed_actions::agent::{Chat, ToggleModelSelector}; use zed_actions::assistant::OpenRulesLibrary; @@ -69,8 +69,8 @@ use crate::ui::{ }; use crate::{ AgentDiffPane, AgentPanel, AllowAlways, AllowOnce, ContinueThread, ContinueWithBurnMode, - CycleModeSelector, ExpandMessageEditor, Follow, KeepAll, OpenAgentDiff, OpenHistory, RejectAll, - RejectOnce, ToggleBurnMode, ToggleProfileSelector, + CycleModeSelector, ExpandMessageEditor, Follow, KeepAll, NewThread, OpenAgentDiff, OpenHistory, + RejectAll, RejectOnce, ToggleBurnMode, ToggleProfileSelector, }; #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -3144,7 +3144,7 @@ impl AcpThreadView { .text_ui_sm(cx) .h_full() .children(terminal_view.map(|terminal_view| { - if terminal_view + let element = if terminal_view .read(cx) .content_mode(window, cx) .is_scrollable() @@ -3152,7 +3152,15 @@ impl AcpThreadView { div().h_72().child(terminal_view).into_any_element() } else { terminal_view.into_any_element() - } + }; + + div() + .on_action(cx.listener(|_this, _: &NewTerminal, window, cx| { + window.dispatch_action(NewThread.boxed_clone(), cx); + cx.stop_propagation(); + })) + .child(element) + .into_any_element() })), ) }) From fd1494c31aad047cd86b9aa486d238861889faf5 Mon Sep 17 00:00:00 2001 From: localcc Date: Mon, 17 Nov 2025 19:07:49 +0100 Subject: [PATCH 0157/1030] Fix remote server completions not being queried from all LSP servers (#42723) Closes #41294 Release Notes: - Fixed remote LSPs not being queried --- crates/collab/src/tests/editor_tests.rs | 108 +++++++++++- crates/project/src/lsp_command.rs | 5 + crates/project/src/lsp_store.rs | 163 +++++++++++++++--- crates/proto/proto/lsp.proto | 1 + .../remote_server/src/remote_editing_tests.rs | 57 +++++- 5 files changed, 303 insertions(+), 31 deletions(-) diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index 5880d998925743d4cdd822574b647b53194e2116..e015550df9482c5850396b8bcf10e9cee24d5b76 100644 --- 
a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -288,7 +288,7 @@ async fn test_newline_above_or_below_does_not_move_guest_cursor( "}); } -#[gpui::test(iterations = 10)] +#[gpui::test] async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { let mut server = TestServer::start(cx_a.executor()).await; let client_a = server.create_client(cx_a, "user_a").await; @@ -307,17 +307,35 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu ..lsp::ServerCapabilities::default() }; client_a.language_registry().add(rust_lang()); - let mut fake_language_servers = client_a.language_registry().register_fake_lsp( + let mut fake_language_servers = [ + client_a.language_registry().register_fake_lsp( + "Rust", + FakeLspAdapter { + capabilities: capabilities.clone(), + ..FakeLspAdapter::default() + }, + ), + client_a.language_registry().register_fake_lsp( + "Rust", + FakeLspAdapter { + name: "fake-analyzer", + capabilities: capabilities.clone(), + ..FakeLspAdapter::default() + }, + ), + ]; + client_b.language_registry().add(rust_lang()); + client_b.language_registry().register_fake_lsp_adapter( "Rust", FakeLspAdapter { capabilities: capabilities.clone(), ..FakeLspAdapter::default() }, ); - client_b.language_registry().add(rust_lang()); client_b.language_registry().register_fake_lsp_adapter( "Rust", FakeLspAdapter { + name: "fake-analyzer", capabilities, ..FakeLspAdapter::default() }, @@ -352,7 +370,8 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu Editor::for_buffer(buffer_b.clone(), Some(project_b.clone()), window, cx) }); - let fake_language_server = fake_language_servers.next().await.unwrap(); + let fake_language_server = fake_language_servers[0].next().await.unwrap(); + let second_fake_language_server = fake_language_servers[1].next().await.unwrap(); cx_a.background_executor.run_until_parked(); buffer_b.read_with(cx_b, |buffer, _| { @@ -414,6 +433,11 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu .next() .await .unwrap(); + second_fake_language_server + .set_request_handler::(|_, _| async move { Ok(None) }) + .next() + .await + .unwrap(); cx_a.executor().finish_waiting(); // Open the buffer on the host. 
@@ -522,6 +546,10 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu ]))) }); + // Second language server also needs to handle the request (returns None) + let mut second_completion_response = second_fake_language_server + .set_request_handler::(|_, _| async move { Ok(None) }); + // The completion now gets a new `text_edit.new_text` when resolving the completion item let mut resolve_completion_response = fake_language_server .set_request_handler::(|params, _| async move { @@ -545,6 +573,7 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu cx_b.executor().run_until_parked(); completion_response.next().await.unwrap(); + second_completion_response.next().await.unwrap(); editor_b.update_in(cx_b, |editor, window, cx| { assert!(editor.context_menu_visible()); @@ -563,6 +592,77 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu "use d::SomeTrait;\nfn main() { a.first_method(); a.third_method(, , ) }" ); }); + + // Ensure buffer is synced before proceeding with the next test + cx_a.executor().run_until_parked(); + cx_b.executor().run_until_parked(); + + // Test completions from the second fake language server + // Add another completion trigger to test the second language server + editor_b.update_in(cx_b, |editor, window, cx| { + editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { + s.select_ranges([68..68]) + }); + editor.handle_input("; b", window, cx); + editor.handle_input(".", window, cx); + }); + + buffer_b.read_with(cx_b, |buffer, _| { + assert_eq!( + buffer.text(), + "use d::SomeTrait;\nfn main() { a.first_method(); a.third_method(, , ); b. }" + ); + }); + + // Set up completion handlers for both language servers + let mut first_lsp_completion = fake_language_server + .set_request_handler::(|_, _| async move { Ok(None) }); + + let mut second_lsp_completion = second_fake_language_server + .set_request_handler::(|params, _| async move { + assert_eq!( + params.text_document_position.text_document.uri, + lsp::Uri::from_file_path(path!("/a/main.rs")).unwrap(), + ); + assert_eq!( + params.text_document_position.position, + lsp::Position::new(1, 54), + ); + + Ok(Some(lsp::CompletionResponse::Array(vec![ + lsp::CompletionItem { + label: "analyzer_method(…)".into(), + detail: Some("fn(&self) -> Result".into()), + text_edit: Some(lsp::CompletionTextEdit::Edit(lsp::TextEdit { + new_text: "analyzer_method()".to_string(), + range: lsp::Range::new( + lsp::Position::new(1, 54), + lsp::Position::new(1, 54), + ), + })), + insert_text_format: Some(lsp::InsertTextFormat::SNIPPET), + ..Default::default() + }, + ]))) + }); + + cx_b.executor().run_until_parked(); + + // Await both language server responses + first_lsp_completion.next().await.unwrap(); + second_lsp_completion.next().await.unwrap(); + + cx_b.executor().run_until_parked(); + + // Confirm the completion from the second language server works + editor_b.update_in(cx_b, |editor, window, cx| { + assert!(editor.context_menu_visible()); + editor.confirm_completion(&ConfirmCompletion { item_ix: Some(0) }, window, cx); + assert_eq!( + editor.text(cx), + "use d::SomeTrait;\nfn main() { a.first_method(); a.third_method(, , ); b.analyzer_method() }" + ); + }); } #[gpui::test(iterations = 10)] diff --git a/crates/project/src/lsp_command.rs b/crates/project/src/lsp_command.rs index 89b3315272b137e507a65df19f98ac28aa194d6a..adea507f00eda72e715fe535da7016af44a4f723 100644 --- a/crates/project/src/lsp_command.rs +++ 
b/crates/project/src/lsp_command.rs @@ -218,6 +218,7 @@ pub(crate) struct GetHover { pub(crate) struct GetCompletions { pub position: PointUtf16, pub context: CompletionContext, + pub server_id: Option, } #[derive(Clone, Debug)] @@ -2395,6 +2396,7 @@ impl LspCommand for GetCompletions { buffer_id: buffer.remote_id().into(), position: Some(language::proto::serialize_anchor(&anchor)), version: serialize_version(&buffer.version()), + server_id: self.server_id.map(|id| id.to_proto()), } } @@ -2423,6 +2425,9 @@ impl LspCommand for GetCompletions { trigger_kind: CompletionTriggerKind::INVOKED, trigger_character: None, }, + server_id: message + .server_id + .map(|id| lsp::LanguageServerId::from_proto(id)), }) } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 069c12c75c44e790028f27abdc12ffd6d2b613ab..53c1db7ad4aa2a0a98f5aba740133fbde823cf17 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -3754,7 +3754,7 @@ impl LspStore { client.add_entity_request_handler(Self::handle_register_buffer_with_language_servers); client.add_entity_request_handler(Self::handle_rename_project_entry); client.add_entity_request_handler(Self::handle_pull_workspace_diagnostics); - client.add_entity_request_handler(Self::handle_lsp_command::); + client.add_entity_request_handler(Self::handle_lsp_get_completions); client.add_entity_request_handler(Self::handle_lsp_command::); client.add_entity_request_handler(Self::handle_lsp_command::); client.add_entity_request_handler(Self::handle_lsp_command::); @@ -4463,6 +4463,41 @@ impl LspStore { .any(check) } + fn all_capable_for_proto_request( + &self, + buffer: &Entity, + mut check: F, + cx: &App, + ) -> Vec + where + F: FnMut(&lsp::LanguageServerName, &lsp::ServerCapabilities) -> bool, + { + let Some(language) = buffer.read(cx).language().cloned() else { + return Vec::default(); + }; + let relevant_language_servers = self + .languages + .lsp_adapters(&language.name()) + .into_iter() + .map(|lsp_adapter| lsp_adapter.name()) + .collect::>(); + self.language_server_statuses + .iter() + .filter_map(|(server_id, server_status)| { + relevant_language_servers + .contains(&server_status.name) + .then_some((server_id, &server_status.name)) + }) + .filter_map(|(server_id, server_name)| { + self.lsp_server_capabilities + .get(server_id) + .map(|c| (server_id, server_name, c)) + }) + .filter(|(_, server_name, capabilities)| check(server_name, capabilities)) + .map(|(server_id, _, _)| *server_id) + .collect() + } + pub fn request_lsp( &mut self, buffer: Entity, @@ -5902,17 +5937,24 @@ impl LspStore { let language_registry = self.languages.clone(); if let Some((upstream_client, project_id)) = self.upstream_client() { - let request = GetCompletions { position, context }; - if !self.is_capable_for_proto_request(buffer, &request, cx) { - return Task::ready(Ok(Vec::new())); - } - let task = self.send_lsp_proto_request( - buffer.clone(), - upstream_client, - project_id, - request, + let snapshot = buffer.read(cx).snapshot(); + let offset = position.to_offset(&snapshot); + let scope = snapshot.language_scope_at(offset); + let capable_lsps = self.all_capable_for_proto_request( + buffer, + |server_name, capabilities| { + capabilities.completion_provider.is_some() + && scope + .as_ref() + .map(|scope| scope.language_allowed(server_name)) + .unwrap_or(true) + }, cx, ); + if capable_lsps.is_empty() { + return Task::ready(Ok(Vec::new())); + } + let language = buffer.read(cx).language().cloned(); // In the future, we should provide 
project guests with the names of LSP adapters, @@ -5925,19 +5967,53 @@ impl LspStore { .cloned() }); - cx.foreground_executor().spawn(async move { - let completion_response = task.await?; - let completions = populate_labels_for_completions( - completion_response.completions, - language, - lsp_adapter, - ) - .await; - Ok(vec![CompletionResponse { - completions, - display_options: CompletionDisplayOptions::default(), - is_incomplete: completion_response.is_incomplete, - }]) + let buffer = buffer.clone(); + + cx.spawn(async move |this, cx| { + let requests = join_all( + capable_lsps + .into_iter() + .map(|id| { + let request = GetCompletions { + position, + context: context.clone(), + server_id: Some(id), + }; + let buffer = buffer.clone(); + let language = language.clone(); + let lsp_adapter = lsp_adapter.clone(); + let upstream_client = upstream_client.clone(); + let response = this + .update(cx, |this, cx| { + this.send_lsp_proto_request( + buffer, + upstream_client, + project_id, + request, + cx, + ) + }) + .log_err(); + async move { + let response = response?.await.log_err()?; + + let completions = populate_labels_for_completions( + response.completions, + language, + lsp_adapter, + ) + .await; + + Some(CompletionResponse { + completions, + display_options: CompletionDisplayOptions::default(), + is_incomplete: response.is_incomplete, + }) + } + }) + .collect::>(), + ); + Ok(requests.await.into_iter().flatten().collect::>()) }) } else if let Some(local) = self.as_local() { let snapshot = buffer.read(cx).snapshot(); @@ -5998,6 +6074,7 @@ impl LspStore { GetCompletions { position, context: context.clone(), + server_id: Some(server_id), }, cx, ).fuse(); @@ -8461,6 +8538,46 @@ impl LspStore { }) } + async fn handle_lsp_get_completions( + this: Entity, + envelope: TypedEnvelope, + mut cx: AsyncApp, + ) -> Result { + let sender_id = envelope.original_sender_id().unwrap_or_default(); + + let buffer_id = GetCompletions::buffer_id_from_proto(&envelope.payload)?; + let buffer_handle = this.update(&mut cx, |this, cx| { + this.buffer_store.read(cx).get_existing(buffer_id) + })??; + let request = GetCompletions::from_proto( + envelope.payload, + this.clone(), + buffer_handle.clone(), + cx.clone(), + ) + .await?; + + let server_to_query = match request.server_id { + Some(server_id) => LanguageServerToQuery::Other(server_id), + None => LanguageServerToQuery::FirstCapable, + }; + + let response = this + .update(&mut cx, |this, cx| { + this.request_lsp(buffer_handle.clone(), server_to_query, request, cx) + })? + .await?; + this.update(&mut cx, |this, cx| { + Ok(GetCompletions::response_to_proto( + response, + this, + sender_id, + &buffer_handle.read(cx).version(), + cx, + )) + })? 
+ } + async fn handle_lsp_command( this: Entity, envelope: TypedEnvelope, diff --git a/crates/proto/proto/lsp.proto b/crates/proto/proto/lsp.proto index 644e492ef6a5d639a99f75b18465ca93b0c0ef92..3bdd46c4572acbc570c198288ba5c79b93aa4286 100644 --- a/crates/proto/proto/lsp.proto +++ b/crates/proto/proto/lsp.proto @@ -703,6 +703,7 @@ message GetCompletions { uint64 buffer_id = 2; Anchor position = 3; repeated VectorClockEntry version = 4; + optional uint64 server_id = 5; } message CancelLanguageServerWork { diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index 4ceaf2048c5967b7fe1fceeb47c68efc6cc15678..4ff0c57d5571c5fb9e16df18078514e16ea12867 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -398,12 +398,17 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext json!({ "settings.json": r#" { - "languages": {"Rust":{"language_servers":["rust-analyzer"]}}, + "languages": {"Rust":{"language_servers":["rust-analyzer", "fake-analyzer"]}}, "lsp": { "rust-analyzer": { "binary": { "path": "~/.cargo/bin/rust-analyzer" } + }, + "fake-analyzer": { + "binary": { + "path": "~/.cargo/bin/rust-analyzer" + } } } }"# @@ -431,6 +436,18 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext }, ..FakeLspAdapter::default() }, + ); + project.languages().register_fake_lsp_adapter( + "Rust", + FakeLspAdapter { + name: "fake-analyzer", + capabilities: lsp::ServerCapabilities { + completion_provider: Some(lsp::CompletionOptions::default()), + rename_provider: Some(lsp::OneOf::Left(true)), + ..lsp::ServerCapabilities::default() + }, + ..FakeLspAdapter::default() + }, ) }); @@ -446,6 +463,30 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext ) }); + let mut fake_second_lsp = server_cx.update(|cx| { + headless.read(cx).languages.register_fake_lsp_adapter( + "Rust", + FakeLspAdapter { + name: "fake-analyzer", + capabilities: lsp::ServerCapabilities { + completion_provider: Some(lsp::CompletionOptions::default()), + rename_provider: Some(lsp::OneOf::Left(true)), + ..lsp::ServerCapabilities::default() + }, + ..FakeLspAdapter::default() + }, + ); + headless.read(cx).languages.register_fake_language_server( + LanguageServerName("fake-analyzer".into()), + lsp::ServerCapabilities { + completion_provider: Some(lsp::CompletionOptions::default()), + rename_provider: Some(lsp::OneOf::Left(true)), + ..lsp::ServerCapabilities::default() + }, + None, + ) + }); + cx.run_until_parked(); let worktree_id = project @@ -469,12 +510,13 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext cx.run_until_parked(); let fake_lsp = fake_lsp.next().await.unwrap(); + let fake_second_lsp = fake_second_lsp.next().await.unwrap(); cx.read(|cx| { let file = buffer.read(cx).file(); assert_eq!( language_settings(Some("Rust".into()), file, cx).language_servers, - ["rust-analyzer".to_string()] + ["rust-analyzer".to_string(), "fake-analyzer".to_string()] ) }); @@ -497,7 +539,7 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext server_cx.read(|cx| { let lsp_store = headless.read(cx).lsp_store.read(cx); - assert_eq!(lsp_store.as_local().unwrap().language_servers.len(), 1); + assert_eq!(lsp_store.as_local().unwrap().language_servers.len(), 2); }); fake_lsp.set_request_handler::(|_, _| async move { @@ -507,6 +549,13 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: 
&mut TestAppContext }]))) }); + fake_second_lsp.set_request_handler::(|_, _| async move { + Ok(Some(CompletionResponse::Array(vec![lsp::CompletionItem { + label: "beep".to_string(), + ..Default::default() + }]))) + }); + let result = project .update(cx, |project, cx| { project.completions( @@ -528,7 +577,7 @@ async fn test_remote_lsp(cx: &mut TestAppContext, server_cx: &mut TestAppContext .flat_map(|response| response.completions) .map(|c| c.label.text) .collect::>(), - vec!["boop".to_string()] + vec!["boop".to_string(), "beep".to_string()] ); fake_lsp.set_request_handler::(|_, _| async move { From b2f561165f4731f90a77df2fb43601ef3f4eb671 Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Mon, 17 Nov 2025 20:36:05 +0200 Subject: [PATCH 0158/1030] zeta2: Support qwen3-minimal prompt format (#42902) This prompt is for a fine-tuned model. It has the following changes, compared to `minimal`: - No instructions at all, except for one sentence at the beginning of the prompt. - Output is a simplified unified diff -- hunk headers have no line counts (e.g., `@@ -20 +20 @@`) - Qwen's FIM tokens are used where possible (`<|file_sep|>`, `<|fim_prefix|>`, `<|fim_suffix|>`, etc.) To evaluate this model: ``` ZED_ZETA2_MODEL=zeta2-exp [usual zeta-cli eval params ...] --prompt-format minimal-qwen ``` This will point to the most recent Baseten deployment of zeta2-exp (which may change in the future, so the prompt-format may get out of sync). Release Notes: - N/A --- .../cloud_llm_client/src/predict_edits_v3.rs | 3 + .../src/cloud_zeta2_prompt.rs | 89 ++++++++++++++++++- crates/zeta2/src/udiff.rs | 10 ++- crates/zeta2/src/zeta2.rs | 2 +- crates/zeta_cli/src/main.rs | 2 + 5 files changed, 98 insertions(+), 8 deletions(-) diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index e17a92387e68b5cf6e0993ec91f382f6c14cc765..2d7a1aec52ae9cb007238dbd61e58597a9e81666 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -78,6 +78,8 @@ pub enum PromptFormat { OnlySnippets, /// One-sentence instructions used in fine-tuned models Minimal, + /// One-sentence instructions + FIM-like template + MinimalQwen, } impl PromptFormat { @@ -105,6 +107,7 @@ impl std::fmt::Display for PromptFormat { PromptFormat::NumLinesUniDiff => write!(f, "Numbered Lines / Unified Diff"), PromptFormat::OldTextNewText => write!(f, "Old Text / New Text"), PromptFormat::Minimal => write!(f, "Minimal"), + PromptFormat::MinimalQwen => write!(f, "Minimal + Qwen FIM"), } } } diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index c84ba24ae3485f837278f61e1eeb8b40eb276840..48ab2097d4ca960c28f7edb498e57ded95e208f7 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -3,7 +3,8 @@ pub mod retrieval_prompt; use anyhow::{Context as _, Result, anyhow}; use cloud_llm_client::predict_edits_v3::{ - self, DiffPathFmt, Excerpt, Line, Point, PromptFormat, ReferencedDeclaration, + self, DiffPathFmt, Event, Excerpt, IncludedFile, Line, Point, PromptFormat, + ReferencedDeclaration, }; use indoc::indoc; use ordered_float::OrderedFloat; @@ -166,6 +167,21 @@ const OLD_TEXT_NEW_TEXT_REMINDER: &str = indoc! 
{r#" pub fn build_prompt( request: &predict_edits_v3::PredictEditsRequest, ) -> Result<(String, SectionLabels)> { + let mut section_labels = Default::default(); + + match request.prompt_format { + PromptFormat::MinimalQwen => { + let prompt = MinimalQwenPrompt { + events: request.events.clone(), + cursor_point: request.cursor_point, + cursor_path: request.excerpt_path.clone(), + included_files: request.included_files.clone(), + }; + return Ok((prompt.render(), section_labels)); + } + _ => (), + }; + let mut insertions = match request.prompt_format { PromptFormat::MarkedExcerpt => vec![ ( @@ -191,6 +207,7 @@ pub fn build_prompt( vec![(request.cursor_point, CURSOR_MARKER)] } PromptFormat::OnlySnippets => vec![], + PromptFormat::MinimalQwen => unreachable!(), }; let mut prompt = match request.prompt_format { @@ -200,6 +217,7 @@ pub fn build_prompt( PromptFormat::OldTextNewText => XML_TAGS_INSTRUCTIONS.to_string(), PromptFormat::OnlySnippets => String::new(), PromptFormat::Minimal => STUDENT_MODEL_INSTRUCTIONS.to_string(), + PromptFormat::MinimalQwen => unreachable!(), }; if request.events.is_empty() { @@ -251,8 +269,6 @@ pub fn build_prompt( prompt.push_str(excerpts_preamble); prompt.push('\n'); - let mut section_labels = Default::default(); - if !request.referenced_declarations.is_empty() || !request.signatures.is_empty() { let syntax_based_prompt = SyntaxBasedPrompt::populate(request)?; section_labels = syntax_based_prompt.write(&mut insertions, &mut prompt)?; @@ -769,6 +785,7 @@ impl<'a> SyntaxBasedPrompt<'a> { writeln!(output, "<|section_{}|>", section_index).ok(); } } + PromptFormat::MinimalQwen => unreachable!(), } let push_full_snippet = |output: &mut String| { @@ -878,3 +895,69 @@ fn declaration_size(declaration: &ReferencedDeclaration, style: DeclarationStyle DeclarationStyle::Declaration => declaration.text.len(), } } + +struct MinimalQwenPrompt { + events: Vec, + cursor_point: Point, + cursor_path: Arc, // TODO: make a common struct with cursor_point + included_files: Vec, +} + +impl MinimalQwenPrompt { + const INSTRUCTIONS: &str = "You are a code completion assistant that analyzes edit history to identify and systematically complete incomplete refactorings or patterns across the entire codebase.\n"; + + fn render(&self) -> String { + let edit_history = self.fmt_edit_history(); + let context = self.fmt_context(); + + format!( + "{instructions}\n\n{edit_history}\n\n{context}", + instructions = MinimalQwenPrompt::INSTRUCTIONS, + edit_history = edit_history, + context = context + ) + } + + fn fmt_edit_history(&self) -> String { + if self.events.is_empty() { + "(No edit history)\n\n".to_string() + } else { + let mut events_str = String::new(); + push_events(&mut events_str, &self.events); + format!( + "The following are the latest edits made by the user, from earlier to later.\n\n{}", + events_str + ) + } + } + + fn fmt_context(&self) -> String { + let mut context = String::new(); + let include_line_numbers = true; + + for related_file in &self.included_files { + writeln!(context, "<|file_sep|>{}", DiffPathFmt(&related_file.path)).unwrap(); + + if related_file.path == self.cursor_path { + write!(context, "<|fim_prefix|>").unwrap(); + write_excerpts( + &related_file.excerpts, + &[(self.cursor_point, "<|fim_suffix|>")], + related_file.max_row, + include_line_numbers, + &mut context, + ); + writeln!(context, "<|fim_middle|>").unwrap(); + } else { + write_excerpts( + &related_file.excerpts, + &[], + related_file.max_row, + include_line_numbers, + &mut context, + ); + } + } + context + } +} 
diff --git a/crates/zeta2/src/udiff.rs b/crates/zeta2/src/udiff.rs index d565fab1b0c2bbf1e27fe183df1c95e27cac871d..5ae029c6c16c2c6b6d0c2451cc961e8399a64a8f 100644 --- a/crates/zeta2/src/udiff.rs +++ b/crates/zeta2/src/udiff.rs @@ -391,10 +391,12 @@ impl<'a> DiffLine<'a> { return Some(Self::HunkHeader(None)); } - let (start_line_old, header) = header.strip_prefix('-')?.split_once(',')?; - let mut parts = header.split_ascii_whitespace(); - let count_old = parts.next()?; - let (start_line_new, count_new) = parts.next()?.strip_prefix('+')?.split_once(',')?; + let mut tokens = header.split_whitespace(); + let old_range = tokens.next()?.strip_prefix('-')?; + let new_range = tokens.next()?.strip_prefix('+')?; + + let (start_line_old, count_old) = old_range.split_once(',').unwrap_or((old_range, "1")); + let (start_line_new, count_new) = new_range.split_once(',').unwrap_or((new_range, "1")); Some(Self::HunkHeader(Some(HunkLocation { start_line_old: start_line_old.parse::().ok()?.saturating_sub(1), diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 1521fbd9291c7a69cc56152d193734f41cf0451e..881a7254f876e1b2df636513480115bf36489a24 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -1015,7 +1015,7 @@ impl Zeta { // TODO: Implement parsing of multi-file diffs crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? } - PromptFormat::Minimal => { + PromptFormat::Minimal | PromptFormat::MinimalQwen => { if output_text.contains("--- a/\n+++ b/\nNo edits") { let edits = vec![]; (&active_snapshot, edits) diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 7305d3bb2479452e0b8a54392a0a84cbea1be426..517deb6ec7482ca2712a347531b24eca5ed16796 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -176,6 +176,7 @@ enum PromptFormat { NumberedLines, OldTextNewText, Minimal, + MinimalQwen, } impl Into for PromptFormat { @@ -187,6 +188,7 @@ impl Into for PromptFormat { Self::NumberedLines => predict_edits_v3::PromptFormat::NumLinesUniDiff, Self::OldTextNewText => predict_edits_v3::PromptFormat::OldTextNewText, Self::Minimal => predict_edits_v3::PromptFormat::Minimal, + Self::MinimalQwen => predict_edits_v3::PromptFormat::MinimalQwen, } } } From 57e3bcfcf8a164bb6a1ee4b2c19b1d6cc1671537 Mon Sep 17 00:00:00 2001 From: Davis Vaughan Date: Mon, 17 Nov 2025 13:53:42 -0500 Subject: [PATCH 0159/1030] Revise R documentation - about Air in particular (#42755) Returning the favor from @rgbkrk in https://github.com/posit-dev/air/pull/445 I noticed the R docs around Air are a bit incorrect / out of date. I'll make a few more comments inline. Feel free to take over for any other edits. 
Release Notes: - Improved R language support documentation --- docs/src/languages/r.md | 70 +++++++++++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 16 deletions(-) diff --git a/docs/src/languages/r.md b/docs/src/languages/r.md index 4907d09c5e5daaa32d081ff0da618f5b26cd577b..a21afb997639ffcc418e8638c5451394c5f5e4a1 100644 --- a/docs/src/languages/r.md +++ b/docs/src/languages/r.md @@ -8,7 +8,7 @@ R support is available via multiple R Zed extensions: - Language-Server: [REditorSupport/languageserver](https://github.com/REditorSupport/languageserver) - [posit-dev/air](https://github.com/posit-dev/air/tree/main/editors/zed) - - Language-Server: [posit-dev/air](https://github.com/posit-dev/air) + - Formatter: [posit-dev/air](https://posit-dev.github.io/air/) ## Installation @@ -20,22 +20,11 @@ install.packages("languageserver") install.packages("lintr") ``` -3. Install the [ocsmit/zed-r](https://github.com/ocsmit/zed-r) through Zed's extensions manager. +3. Install the [R](https://github.com/ocsmit/zed-r) extension through Zed's extensions manager for basic R language support (syntax highlighting, tree-sitter support) and for [REditorSupport/languageserver](https://github.com/REditorSupport/languageserver) support. -For example on macOS: +4. Install the [Air](https://posit-dev.github.io/air/) extension through Zed's extensions manager for R code formatting via Air. -```sh -brew install --cask r -Rscript --version -Rscript -e 'options(repos = "https://cran.rstudio.com/"); install.packages("languageserver")' -Rscript -e 'options(repos = "https://cran.rstudio.com/"); install.packages("lintr")' -Rscript -e 'packageVersion("languageserver")' -Rscript -e 'packageVersion("lintr")' -``` - -## Configuration - -### Linting +## Linting `REditorSupport/languageserver` bundles support for [r-lib/lintr](https://github.com/r-lib/lintr) as a linter. This can be configured via the use of a `.lintr` inside your project (or in your home directory for global defaults). @@ -59,7 +48,56 @@ exclusions: list(".") See [Using lintr](https://lintr.r-lib.org/articles/lintr.html) for a complete list of options, -### Formatting +## Formatting + +### Air + +[Air](https://posit-dev.github.io/air/) provides code formatting for R, including support for format-on-save. The [Air documentation for Zed](https://posit-dev.github.io/air/editor-zed.html) contains the most up-to-date advice for running Air in Zed. + +Ensure that you have installed both the [ocsmit/zed-r](https://github.com/ocsmit/zed-r) extension (for general R language awareness in Zed) and the [Air](https://posit-dev.github.io/air/) extension. + +Enable Air in your `settings.json`: + +```json [settings] +{ + "languages": { + "R": { + "language_servers": ["air"] + } + } +} +``` + +If you use the `"r_language_server"` from `REditorSupport/languageserver`, but would still like to use Air for formatting, use the following configuration: + +```json [settings] +{ + "languages": { + "R": { + "language_servers": ["air", "r_language_server"], + "use_on_type_format": false + } + } +} +``` + +Note that `"air"` must come first in this list, otherwise [r-lib/styler](https://github.com/r-lib/styler) will be invoked via `"r_language_server"`. + +`"r_language_server"` provides on-type-formatting that differs from Air's formatting rules. To avoid this entirely and let Air be fully in charge of formatting your R files, also set `"use_on_type_format": false` as shown above. 
+ +#### Configuring Air + +Air is minimally configurable via an `air.toml` file placed in the root directory of your project: + +```toml +[format] +line-width = 80 +indent-width = 2 +``` + +For more details, refer to the Air documentation about [configuration](https://posit-dev.github.io/air/configuration.html). + +### Styler `REditorSupport/languageserver` bundles support for [r-lib/styler](https://github.com/r-lib/styler) as a formatter. See [Customizing Styler](https://cran.r-project.org/web/packages/styler/vignettes/customizing_styler.html) for more information on how to customize its behavior. From b0a7defd0990c315c27f51f82dbf13a736279eba Mon Sep 17 00:00:00 2001 From: ozzy <109994179+ddoemonn@users.noreply.github.com> Date: Mon, 17 Nov 2025 22:25:51 +0300 Subject: [PATCH 0160/1030] Fix track file renames in git panel (#42352) Closes #30549 Release Notes: - Fixed: Git renames now properly show as renamed files in the git panel instead of appearing as deleted + untracked files Screenshot 2025-11-10 at 17 39 44 Screenshot 2025-11-10 at 17 39 55 --- crates/collab/src/db/queries/projects.rs | 1 + crates/collab/src/db/queries/rooms.rs | 1 + crates/fs/src/fake_git_repo.rs | 1 + crates/git/src/repository.rs | 2 +- crates/git/src/status.rs | 95 +++++++++++++++++++----- crates/git_ui/src/git_panel.rs | 64 ++++++++++------ crates/git_ui/src/git_ui.rs | 5 ++ crates/project/src/git_store.rs | 24 ++++++ crates/proto/proto/git.proto | 1 + 9 files changed, 150 insertions(+), 44 deletions(-) diff --git a/crates/collab/src/db/queries/projects.rs b/crates/collab/src/db/queries/projects.rs index 51a0ef83323ec70675283d2fdec7ca1ad791b12d..c8651216434d404f7ab4a88fbb5fbb5f7d0aa3ee 100644 --- a/crates/collab/src/db/queries/projects.rs +++ b/crates/collab/src/db/queries/projects.rs @@ -1005,6 +1005,7 @@ impl Database { is_last_update: true, merge_message: db_repository_entry.merge_message, stash_entries: Vec::new(), + renamed_paths: Default::default(), }); } } diff --git a/crates/collab/src/db/queries/rooms.rs b/crates/collab/src/db/queries/rooms.rs index f020b99b5f1030cfe9391498512258e6db249bac..151e4c442bd7d0a25053e35b94d9e2ad9817a6a3 100644 --- a/crates/collab/src/db/queries/rooms.rs +++ b/crates/collab/src/db/queries/rooms.rs @@ -796,6 +796,7 @@ impl Database { is_last_update: true, merge_message: db_repository.merge_message, stash_entries: Vec::new(), + renamed_paths: Default::default(), }); } } diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index 97cd13d185817453c369356bdc60cbc1517bf1e1..de7c0561ebc9918a2686402fb9b62608566c7d9c 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -359,6 +359,7 @@ impl GitRepository for FakeGitRepository { entries.sort_by(|a, b| a.0.cmp(&b.0)); anyhow::Ok(GitStatus { entries: entries.into(), + renamed_paths: HashMap::default(), }) }); Task::ready(match result { diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 2c9189962492daa75dba86e9e2ebd247ad85254e..2eb37038cde2f4d0c4dc4903fdc06f86ab543827 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -2045,7 +2045,7 @@ fn git_status_args(path_prefixes: &[RepoPath]) -> Vec { OsString::from("status"), OsString::from("--porcelain=v1"), OsString::from("--untracked-files=all"), - OsString::from("--no-renames"), + OsString::from("--find-renames"), OsString::from("-z"), ]; args.extend( diff --git a/crates/git/src/status.rs b/crates/git/src/status.rs index 
2cf7cc7c1810620f1cf1aaea831fb337810c83d8..9b76fe75dd284c08c0f2e9b20116bc51dc4bc56c 100644 --- a/crates/git/src/status.rs +++ b/crates/git/src/status.rs @@ -203,6 +203,14 @@ impl FileStatus { matches!(self, FileStatus::Untracked) } + pub fn is_renamed(self) -> bool { + let FileStatus::Tracked(tracked) = self else { + return false; + }; + tracked.index_status == StatusCode::Renamed + || tracked.worktree_status == StatusCode::Renamed + } + pub fn summary(self) -> GitSummary { match self { FileStatus::Ignored => GitSummary::UNCHANGED, @@ -430,34 +438,79 @@ impl std::ops::Sub for GitSummary { #[derive(Clone, Debug)] pub struct GitStatus { pub entries: Arc<[(RepoPath, FileStatus)]>, + pub renamed_paths: HashMap, } impl FromStr for GitStatus { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - let mut entries = s - .split('\0') - .filter_map(|entry| { - let sep = entry.get(2..3)?; - if sep != " " { - return None; + let mut parts = s.split('\0').peekable(); + let mut entries = Vec::new(); + let mut renamed_paths = HashMap::default(); + + while let Some(entry) = parts.next() { + if entry.is_empty() { + continue; + } + + if !matches!(entry.get(2..3), Some(" ")) { + continue; + } + + let path_or_old_path = &entry[3..]; + + if path_or_old_path.ends_with('/') { + continue; + } + + let status = match entry.as_bytes()[0..2].try_into() { + Ok(bytes) => match FileStatus::from_bytes(bytes).log_err() { + Some(s) => s, + None => continue, + }, + Err(_) => continue, + }; + + let is_rename = matches!( + status, + FileStatus::Tracked(TrackedStatus { + index_status: StatusCode::Renamed | StatusCode::Copied, + .. + }) | FileStatus::Tracked(TrackedStatus { + worktree_status: StatusCode::Renamed | StatusCode::Copied, + .. + }) + ); + + let (old_path_str, new_path_str) = if is_rename { + let new_path = match parts.next() { + Some(new_path) if !new_path.is_empty() => new_path, + _ => continue, }; - let path = &entry[3..]; - // The git status output includes untracked directories as well as untracked files. - // We do our own processing to compute the "summary" status of each directory, - // so just skip any directories in the output, since they'll otherwise interfere - // with our handling of nested repositories. - if path.ends_with('/') { - return None; + (path_or_old_path, new_path) + } else { + (path_or_old_path, path_or_old_path) + }; + + if new_path_str.ends_with('/') { + continue; + } + + let new_path = match RelPath::unix(new_path_str).log_err() { + Some(p) => RepoPath::from_rel_path(p), + None => continue, + }; + + if is_rename { + if let Some(old_path_rel) = RelPath::unix(old_path_str).log_err() { + let old_path_repo = RepoPath::from_rel_path(old_path_rel); + renamed_paths.insert(new_path.clone(), old_path_repo); } - let status = entry.as_bytes()[0..2].try_into().unwrap(); - let status = FileStatus::from_bytes(status).log_err()?; - // git-status outputs `/`-delimited repo paths, even on Windows. 
- let path = RepoPath::from_rel_path(RelPath::unix(path).log_err()?); - Some((path, status)) - }) - .collect::>(); + } + + entries.push((new_path, status)); + } entries.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); // When a file exists in HEAD, is deleted in the index, and exists again in the working copy, // git produces two lines for it, one reading `D ` (deleted in index, unmodified in working copy) @@ -481,6 +534,7 @@ impl FromStr for GitStatus { }); Ok(Self { entries: entries.into(), + renamed_paths, }) } } @@ -489,6 +543,7 @@ impl Default for GitStatus { fn default() -> Self { Self { entries: Arc::new([]), + renamed_paths: HashMap::default(), } } } diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index e2a4a26b320284fed727a7f7e60acf807c39abf0..0691ba78560e38f5d3a297d033bd41459dff78c4 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3957,6 +3957,20 @@ impl GitPanel { let path_style = self.project.read(cx).path_style(cx); let display_name = entry.display_name(path_style); + let active_repo = self + .project + .read(cx) + .active_repository(cx) + .expect("active repository must be set"); + let repo = active_repo.read(cx); + let repo_snapshot = repo.snapshot(); + + let old_path = if entry.status.is_renamed() { + repo_snapshot.renamed_paths.get(&entry.repo_path) + } else { + None + }; + let selected = self.selected_entry == Some(ix); let marked = self.marked_entries.contains(&ix); let status_style = GitPanelSettings::get_global(cx).status_style; @@ -3965,15 +3979,16 @@ impl GitPanel { let has_conflict = status.is_conflicted(); let is_modified = status.is_modified(); let is_deleted = status.is_deleted(); + let is_renamed = status.is_renamed(); let label_color = if status_style == StatusStyle::LabelColor { if has_conflict { Color::VersionControlConflict - } else if is_modified { - Color::VersionControlModified } else if is_deleted { // We don't want a bunch of red labels in the list Color::Disabled + } else if is_renamed || is_modified { + Color::VersionControlModified } else { Color::VersionControlAdded } @@ -3993,12 +4008,6 @@ impl GitPanel { let checkbox_id: ElementId = ElementId::Name(format!("entry_{}_{}_checkbox", display_name, ix).into()); - let active_repo = self - .project - .read(cx) - .active_repository(cx) - .expect("active repository must be set"); - let repo = active_repo.read(cx); // Checking for current staged/unstaged file status is a chained operation: // 1. first, we check for any pending operation recorded in repository // 2. 
if there are no pending ops either running or finished, we then ask the repository @@ -4153,23 +4162,32 @@ impl GitPanel { .items_center() .flex_1() // .overflow_hidden() - .when_some(entry.parent_dir(path_style), |this, parent| { - if !parent.is_empty() { - this.child( - self.entry_label( - format!("{parent}{}", path_style.separator()), - path_color, + .when_some(old_path.as_ref(), |this, old_path| { + let new_display = old_path.display(path_style).to_string(); + let old_display = entry.repo_path.display(path_style).to_string(); + this.child(self.entry_label(old_display, Color::Muted).strikethrough()) + .child(self.entry_label(" → ", Color::Muted)) + .child(self.entry_label(new_display, label_color)) + }) + .when(old_path.is_none(), |this| { + this.when_some(entry.parent_dir(path_style), |this, parent| { + if !parent.is_empty() { + this.child( + self.entry_label( + format!("{parent}{}", path_style.separator()), + path_color, + ) + .when(status.is_deleted(), |this| this.strikethrough()), ) + } else { + this + } + }) + .child( + self.entry_label(display_name, label_color) .when(status.is_deleted(), |this| this.strikethrough()), - ) - } else { - this - } - }) - .child( - self.entry_label(display_name, label_color) - .when(status.is_deleted(), |this| this.strikethrough()), - ), + ) + }), ) .into_any_element() } diff --git a/crates/git_ui/src/git_ui.rs b/crates/git_ui/src/git_ui.rs index b4e833f7af72cf7843d3797b51ea349b24c7adc5..3a664b484a8ec6d31bd243917888d864280b281d 100644 --- a/crates/git_ui/src/git_ui.rs +++ b/crates/git_ui/src/git_ui.rs @@ -708,6 +708,11 @@ impl RenderOnce for GitStatusIcon { IconName::SquareMinus, cx.theme().colors().version_control_deleted, ) + } else if status.is_renamed() { + ( + IconName::ArrowRight, + cx.theme().colors().version_control_modified, + ) } else if status.is_modified() { ( IconName::SquareDot, diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 4cac71c6ae3e2eb3f3615821443db7c82e01d810..94af9859df1156d7a10286a843a31e8351fe050c 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -256,6 +256,7 @@ pub struct RepositorySnapshot { pub id: RepositoryId, pub statuses_by_path: SumTree, pub pending_ops_by_path: SumTree, + pub renamed_paths: HashMap, pub work_directory_abs_path: Arc, pub path_style: PathStyle, pub branch: Option, @@ -3063,6 +3064,7 @@ impl RepositorySnapshot { id, statuses_by_path: Default::default(), pending_ops_by_path: Default::default(), + renamed_paths: HashMap::default(), work_directory_abs_path, branch: None, head_commit: None, @@ -3104,6 +3106,11 @@ impl RepositorySnapshot { .iter() .map(stash_to_proto) .collect(), + renamed_paths: self + .renamed_paths + .iter() + .map(|(new_path, old_path)| (new_path.to_proto(), old_path.to_proto())) + .collect(), } } @@ -3173,6 +3180,11 @@ impl RepositorySnapshot { .iter() .map(stash_to_proto) .collect(), + renamed_paths: self + .renamed_paths + .iter() + .map(|(new_path, old_path)| (new_path.to_proto(), old_path.to_proto())) + .collect(), } } @@ -4968,6 +4980,17 @@ impl Repository { } self.snapshot.stash_entries = new_stash_entries; + self.snapshot.renamed_paths = update + .renamed_paths + .into_iter() + .filter_map(|(new_path_str, old_path_str)| { + Some(( + RepoPath::from_proto(&new_path_str).log_err()?, + RepoPath::from_proto(&old_path_str).log_err()?, + )) + }) + .collect(); + let edits = update .removed_statuses .into_iter() @@ -5743,6 +5766,7 @@ async fn compute_snapshot( id, statuses_by_path, pending_ops_by_path, + renamed_paths: 
statuses.renamed_paths, work_directory_abs_path, path_style: prev_snapshot.path_style, scan_id: prev_snapshot.scan_id + 1, diff --git a/crates/proto/proto/git.proto b/crates/proto/proto/git.proto index efbd7f616f9e75c4e0409f4dc73c67f9eb1836e0..8ed17864ec0c0403a4bb71f918d21b44a9b6cb13 100644 --- a/crates/proto/proto/git.proto +++ b/crates/proto/proto/git.proto @@ -124,6 +124,7 @@ message UpdateRepository { optional GitCommitDetails head_commit_details = 11; optional string merge_message = 12; repeated StashEntry stash_entries = 13; + map renamed_paths = 14; } message RemoveRepository { From 599a217ea5756ada9d1d26058f80ac04835fcd76 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 20:39:50 +0100 Subject: [PATCH 0161/1030] workspace: Fix logging of errors in `prompt_err` (#42908) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/workspace/src/notifications.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 70be040df7c3718ba903565100b8548dcfc8b785..6c1156b83396d1266bc46bca67f10f3f57adfec4 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -1071,9 +1071,9 @@ where window.spawn(cx, async move |cx| { let result = self.await; if let Err(err) = result.as_ref() { - log::error!("{err:?}"); + log::error!("{err:#}"); if let Ok(prompt) = cx.update(|window, cx| { - let mut display = format!("{err}"); + let mut display = format!("{err:#}"); if !display.ends_with('\n') { display.push('.'); display.push(' ') From 4bf3b9d62e8e7e1988aaae0f829c11733d34ebfe Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Mon, 17 Nov 2025 12:17:39 -0800 Subject: [PATCH 0162/1030] zeta2: Output `bucketed_analysis.md` (#42890) Closes #ISSUE Makes it so that a file named `bucketed_analysis.md` is written to the runs directory after an eval is ran with > 1 repetitions. This file buckets the predictions made by the model by comparing the edits made so that seeing how many times different failure modes were encountered becomes much easier. Release Notes: - N/A *or* Added/Fixed/Improved ... 
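As a rough sketch of the bucketing idea only (the `Prediction` struct and field names here are made up for illustration; the actual implementation is in `crates/zeta_cli/src/evaluate.rs` in the diff below), runs that produced byte-identical diffs are grouped and counted, so the most common edit patterns — including repeated failure modes — sort to the top:

```rust
use std::collections::HashMap;

// Made-up stand-in for one eval run; the real code also tracks
// reasoning text, scores, and error cases.
struct Prediction {
    execution_id: String,
    diff: String,
    is_correct: bool,
}

// Group runs by the exact diff they produced, so each distinct edit
// pattern is counted once with the executions that produced it.
fn bucket_by_diff(predictions: &[Prediction]) -> Vec<(String, bool, Vec<String>)> {
    let mut buckets: HashMap<String, (bool, Vec<String>)> = HashMap::new();
    for p in predictions {
        let entry = buckets
            .entry(p.diff.clone())
            .or_insert((p.is_correct, Vec::new()));
        entry.1.push(p.execution_id.clone());
    }
    let mut out: Vec<(String, bool, Vec<String>)> = buckets
        .into_iter()
        .map(|(diff, (is_correct, ids))| (diff, is_correct, ids))
        .collect();
    // Most frequent buckets first, so common failure modes surface immediately.
    out.sort_by_key(|(_, _, ids)| std::cmp::Reverse(ids.len()));
    out
}

fn main() {
    let runs = vec![
        Prediction { execution_id: "000".into(), diff: "+foo".into(), is_correct: true },
        Prediction { execution_id: "001".into(), diff: "+bar".into(), is_correct: false },
        Prediction { execution_id: "002".into(), diff: "+bar".into(), is_correct: false },
    ];
    for (diff, is_correct, ids) in bucket_by_diff(&runs) {
        println!("{} x{} {:?}:\n{}", if is_correct { "ok" } else { "bad" }, ids.len(), ids, diff);
    }
}
```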
--- crates/zeta_cli/src/evaluate.rs | 283 ++++++++++++++++++++++++++++++-- 1 file changed, 268 insertions(+), 15 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 8904a7fc5707a59ef5ce3ce7b3d246adf3a8e16b..9f087188b7f7a615398eaab19ae934cdcd5c64ff 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, io::{IsTerminal, Write}, path::PathBuf, sync::Arc, @@ -35,6 +36,13 @@ pub struct EvaluateArguments { skip_prediction: bool, } +#[derive(Debug)] +pub(crate) struct ExecutionData { + execution_id: String, + diff: String, + reasoning: String, +} + pub async fn run_evaluate( args: EvaluateArguments, app_state: &Arc, @@ -87,35 +95,35 @@ pub async fn run_evaluate( { write_aggregated_scores(&mut output_file, &all_results).log_err(); }; + + if args.repetitions > 1 { + if let Err(e) = write_bucketed_analysis(&all_results) { + eprintln!("Failed to write bucketed analysis: {:?}", e); + } + } + print_run_data_dir(args.repetitions == 1, std::io::stdout().is_terminal()); } fn write_aggregated_scores( w: &mut impl std::io::Write, - all_results: &Vec)>>>, + all_results: &Vec< + Vec)>>, + >, ) -> Result<()> { let mut successful = Vec::new(); let mut failed_count = 0; for result in all_results.iter().flatten() { match result { - Ok(eval_result) => successful.push(eval_result), + Ok((eval_result, _execution_data)) => successful.push(eval_result), Err((err, name, repetition_ix)) => { if failed_count == 0 { writeln!(w, "## Errors\n")?; } failed_count += 1; - let err = format!("{err:?}") - .replace("", "\n```"); - writeln!( - w, - "### ERROR {name}{}\n\n{err}\n", - repetition_ix - .map(|ix| format!(" [RUN {ix:03}]")) - .unwrap_or_default() - )?; + writeln!(w, "{}", fmt_evaluation_error(err, name, repetition_ix))?; } } } @@ -136,7 +144,6 @@ fn write_aggregated_scores( writeln!(w, "\n{}", "-".repeat(80))?; writeln!(w, "\n## TOTAL SCORES")?; - writeln!(w, "\n### Success Rate")?; writeln!(w, "{:#}", aggregated_result)?; } @@ -163,7 +170,7 @@ pub async fn run_evaluate_one( predict: bool, cache_mode: CacheMode, cx: &mut AsyncApp, -) -> Result { +) -> Result<(EvaluationResult, ExecutionData)> { let predict_result = zeta2_predict( example.clone(), project, @@ -203,7 +210,22 @@ pub async fn run_evaluate_one( .log_err(); } - anyhow::Ok(evaluation_result) + let execution_data = ExecutionData { + execution_id: if let Some(rep_ix) = repetition_ix { + format!("{:03}", rep_ix) + } else { + example.name.clone() + }, + diff: predict_result.diff.clone(), + reasoning: std::fs::read_to_string( + predict_result + .run_example_dir + .join("prediction_response.md"), + ) + .unwrap_or_default(), + }; + + anyhow::Ok((evaluation_result, execution_data)) } fn write_eval_result( @@ -507,3 +529,234 @@ pub fn compare_diffs(patch_a: &str, patch_b: &str, use_color: bool) -> String { annotated.join("\n") } + +fn write_bucketed_analysis( + all_results: &Vec< + Vec)>>, + >, +) -> Result<()> { + #[derive(Debug)] + struct EditBucket { + diff: String, + is_correct: bool, + execution_indices: Vec, + reasoning_samples: Vec, + } + + let mut total_executions = 0; + let mut empty_predictions = Vec::new(); + let mut errors = Vec::new(); + + let mut buckets: HashMap = HashMap::new(); + + for result in all_results.iter().flatten() { + total_executions += 1; + + let (evaluation_result, execution_data) = match result { + Ok((eval_result, execution_data)) => { + if execution_data.diff.is_empty() { + empty_predictions.push(execution_data); + 
continue; + } + (eval_result, execution_data) + } + Err(err) => { + errors.push(err); + continue; + } + }; + + buckets + .entry(execution_data.diff.clone()) + .and_modify(|bucket| { + bucket + .execution_indices + .push(execution_data.execution_id.clone()); + bucket + .reasoning_samples + .push(execution_data.reasoning.clone()); + }) + .or_insert_with(|| EditBucket { + diff: execution_data.diff.clone(), + is_correct: { + evaluation_result + .edit_prediction + .as_ref() + .map_or(false, |edit_prediction| { + edit_prediction.false_positives == 0 + && edit_prediction.false_negatives == 0 + && edit_prediction.true_positives > 0 + }) + }, + execution_indices: vec![execution_data.execution_id.clone()], + reasoning_samples: vec![execution_data.reasoning.clone()], + }); + } + + let mut sorted_buckets = buckets.into_values().collect::>(); + sorted_buckets.sort_by(|a, b| match (a.is_correct, b.is_correct) { + (true, false) => std::cmp::Ordering::Less, + (false, true) => std::cmp::Ordering::Greater, + _ => b.execution_indices.len().cmp(&a.execution_indices.len()), + }); + + let output_path = crate::paths::RUN_DIR.join("bucketed_analysis.md"); + let mut output = std::fs::File::create(&output_path)?; + + writeln!(output, "# Bucketed Edit Analysis\n")?; + + writeln!(output, "## Summary\n")?; + writeln!(output, "- **Total executions**: {}", total_executions)?; + + let correct_count: usize = sorted_buckets + .iter() + .filter(|b| b.is_correct) + .map(|b| b.execution_indices.len()) + .sum(); + + let incorrect_count: usize = sorted_buckets + .iter() + .filter(|b| !b.is_correct) + .map(|b| b.execution_indices.len()) + .sum(); + + writeln!( + output, + "- **Correct predictions**: {} ({:.1}%)", + correct_count, + (correct_count as f64 / total_executions as f64) * 100.0 + )?; + + writeln!( + output, + "- **Incorrect predictions**: {} ({:.1}%)", + incorrect_count, + (incorrect_count as f64 / total_executions as f64) * 100.0 + )?; + + writeln!( + output, + "- **No Predictions**: {} ({:.1}%)", + empty_predictions.len(), + (empty_predictions.len() as f64 / total_executions as f64) * 100.0 + )?; + + let unique_incorrect = sorted_buckets.iter().filter(|b| !b.is_correct).count(); + writeln!( + output, + "- **Unique incorrect edit patterns**: {}\n", + unique_incorrect + )?; + + writeln!(output, "---\n")?; + + for (idx, bucket) in sorted_buckets.iter().filter(|b| b.is_correct).enumerate() { + if idx == 0 { + writeln!( + output, + "## Correct Predictions ({} occurrences)\n", + bucket.execution_indices.len() + )?; + } + + writeln!(output, "**Predicted Edit:**\n")?; + writeln!(output, "```diff")?; + writeln!(output, "{}", bucket.diff)?; + writeln!(output, "```\n")?; + + writeln!( + output, + "**Executions:** {}\n", + bucket.execution_indices.join(", ") + )?; + writeln!(output, "---\n")?; + } + + for (idx, bucket) in sorted_buckets.iter().filter(|b| !b.is_correct).enumerate() { + writeln!( + output, + "## Incorrect Prediction #{} ({} occurrences)\n", + idx + 1, + bucket.execution_indices.len() + )?; + + writeln!(output, "**Predicted Edit:**\n")?; + writeln!(output, "```diff")?; + writeln!(output, "{}", bucket.diff)?; + writeln!(output, "```\n")?; + + writeln!( + output, + "**Executions:** {}\n", + bucket.execution_indices.join(", ") + )?; + + for (exec_id, reasoning) in bucket + .execution_indices + .iter() + .zip(bucket.reasoning_samples.iter()) + { + writeln!(output, "{}", fmt_execution(exec_id, reasoning))?; + } + + writeln!(output, "\n---\n")?; + } + + if !empty_predictions.is_empty() { + writeln!( + output, + "## No 
Predictions ({} occurrences)\n", + empty_predictions.len() + )?; + + for execution_data in &empty_predictions { + writeln!( + output, + "{}", + fmt_execution(&execution_data.execution_id, &execution_data.reasoning) + )?; + } + writeln!(output, "\n---\n")?; + } + + if !errors.is_empty() { + writeln!(output, "## Errors ({} occurrences)\n", errors.len())?; + + for (err, name, repetition_ix) in &errors { + writeln!(output, "{}", fmt_evaluation_error(err, name, repetition_ix))?; + } + writeln!(output, "\n---\n")?; + } + + fn fmt_execution(exec_id: &str, reasoning: &str) -> String { + let exec_content = format!( + "\n### Execution {} `{}/{}/prediction_response.md`{}", + exec_id, + crate::paths::RUN_DIR.display(), + exec_id, + indent_text(&format!("\n\n```\n{}\n```\n", reasoning,), 2) + ); + indent_text(&exec_content, 2) + } + + fn indent_text(text: &str, spaces: usize) -> String { + let indent = " ".repeat(spaces); + text.lines() + .collect::>() + .join(&format!("\n{}", indent)) + } + + Ok(()) +} + +fn fmt_evaluation_error(err: &anyhow::Error, name: &str, repetition_ix: &Option) -> String { + let err = format!("{err:?}") + .replace("", "\n```"); + format!( + "### ERROR {name}{}\n\n{err}\n", + repetition_ix + .map(|ix| format!(" [RUN {ix:03}]")) + .unwrap_or_default() + ) +} From f015368586fb1520cb587ee1eabe4b8f094b1245 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Mon, 17 Nov 2025 15:20:22 -0500 Subject: [PATCH 0163/1030] Update top-ranking issues script (#42911) - Added Windows category - Removed unused import - Fixed a type error reported by `ty` Release Notes: - N/A --- script/update_top_ranking_issues/main.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/script/update_top_ranking_issues/main.py b/script/update_top_ranking_issues/main.py index fb193d65643bd11180e5acf03425568cd153b6a4..c6ea1a6cde6ea7641d48dddfec3cf26c3b5175bb 100644 --- a/script/update_top_ranking_issues/main.py +++ b/script/update_top_ranking_issues/main.py @@ -1,5 +1,4 @@ import os -from collections import defaultdict from datetime import datetime, timedelta from typing import Optional @@ -106,7 +105,7 @@ def get_label_to_issue_data( repository: Repository, start_date: datetime | None = None, ) -> dict[str, list[IssueData]]: - common_filters = [ + common_queries = [ f"repo:{repository.full_name}", "is:open", "is:issue", @@ -119,9 +118,9 @@ def get_label_to_issue_data( ) if date_query: - common_filters.append(date_query) + common_queries.append(date_query) - common_filter_string = " ".join(common_filters) + common_query = " ".join(common_queries) # Because PyGithub doesn't seem to support logical operators `AND` and `OR` # that GitHub issue queries can use, we use lists as values, rather than @@ -135,16 +134,17 @@ def get_label_to_issue_data( "crash": ["label:crash", "type:Crash"], "feature": ["label:feature", "type:Feature"], "meta": ["type:Meta"], + "windows": ["label:windows"], "unlabeled": ["no:label no:type"], } label_to_issue_data: dict[str, list[IssueData]] = {} - for section, section_queries in section_queries.items(): + for section, queries in section_queries.items(): unique_issues = set() - for section_query in section_queries: - query: str = f"{common_filter_string} {section_query}" + for query in queries: + query: str = f"{common_query} {query}" issues = github.search_issues(query) for issue in issues: From 3f25d36b3c26eeb47d4dee43da08e7dc7a29c10f Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 22:04:50 +0100 Subject: [PATCH 0164/1030] agent_ui: Fix 
text pasting no longer working (#42914) Regressed in https://github.com/zed-industries/zed/pull/42908 Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/agent_ui/src/acp/message_editor.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 84896e7a74ab8f9514f0550bae1d28433650ce8d..120fdb26b3937e92312a63056d67cc2f35809068 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -916,7 +916,6 @@ impl MessageEditor { let Some(clipboard) = cx.read_from_clipboard() else { return; }; - cx.stop_propagation(); cx.spawn_in(window, async move |this, cx| { use itertools::Itertools; let (mut images, paths) = clipboard @@ -964,7 +963,10 @@ impl MessageEditor { } let replacement_text = MentionUri::PastedImage.as_link().to_string(); - let Ok(editor) = this.update(cx, |this, _| this.editor.clone()) else { + let Ok(editor) = this.update(cx, |this, cx| { + cx.stop_propagation(); + this.editor.clone() + }) else { return; }; for image in images { From d801d0950e91d04f907668f55f252b6001b8257d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Raz=20Guzm=C3=A1n=20Macedo?= Date: Mon, 17 Nov 2025 15:22:34 -0600 Subject: [PATCH 0165/1030] Add @miguelraz to reviewers and support sections (#42904) Release Notes: - N/A *or* Added/Fixed/Improved ... --- REVIEWERS.conl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/REVIEWERS.conl b/REVIEWERS.conl index 230e6e4d3cc6e7cf51339ea84bfd1dac5e065c12..ca39a5c28002f6851beaf6710c44ac04b88592a3 100644 --- a/REVIEWERS.conl +++ b/REVIEWERS.conl @@ -44,6 +44,7 @@ design docs = @probably-neb + = @miguelraz extension = @kubkon @@ -98,6 +99,9 @@ settings_ui = @danilo-leal = @probably-neb +support + = @miguelraz + tasks = @SomeoneToIgnore = @Veykril From ee420d530eb49781638b664f3d9807bcd34dd2ea Mon Sep 17 00:00:00 2001 From: Dino Date: Mon, 17 Nov 2025 21:34:37 +0000 Subject: [PATCH 0166/1030] vim: Change approach to fixing vim's temporary mode bug (#42894) The `Vim.exit_temporary_normal` method had been updated (https://github.com/zed-industries/zed/pull/42742) to expect and `Option<&Motion>` that would then be used to determine whether to move the cursor right in case the motion was `Some(EndOfLine { ..})`. Unfortunately this meant that all callers now had to provide this argument, even if just `None`. After merging those changes I remember that we could probably play around with `clip_at_line_ends` so this commit removes those intial changes in favor of updating the `vim::normal::Vim.move_cursor` method so that, if vim is in temporary mode and `EndOfLine` is used, it disables clipping at line ends so that the newline character can be selected. 
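A rough standalone sketch of that pattern (the `Editor` and `Motion` types below are illustrative stand-ins, not Zed's real API; the actual change is in `crates/vim/src/normal.rs` further down): save the current clip setting, disable it only for `EndOfLine` while in temporary normal mode, run the motion, then restore the saved value.

```rust
// Toy model of the save/disable/restore pattern; names are illustrative.
#[derive(Clone, Copy, PartialEq)]
enum Motion {
    EndOfLine,
    Right,
}

struct Editor {
    clip_at_line_ends: bool,
    cursor: usize,
    line_len: usize,
}

impl Editor {
    fn move_with_motion(&mut self, motion: Motion) {
        match motion {
            // With clipping on, the cursor stops on the last character;
            // with clipping off, it may sit past it (on the newline).
            Motion::EndOfLine => {
                self.cursor = if self.clip_at_line_ends {
                    self.line_len.saturating_sub(1)
                } else {
                    self.line_len
                }
            }
            Motion::Right => self.cursor = (self.cursor + 1).min(self.line_len),
        }
    }
}

fn move_cursor(editor: &mut Editor, motion: Motion, temp_mode: bool) {
    // Only disable clipping for `$` while in temporary normal mode,
    // and always put the previous setting back afterwards.
    let previous = editor.clip_at_line_ends;
    let disable_clip = temp_mode && motion == Motion::EndOfLine;
    if disable_clip {
        editor.clip_at_line_ends = false;
    }
    editor.move_with_motion(motion);
    if disable_clip {
        editor.clip_at_line_ends = previous;
    }
}

fn main() {
    let mut editor = Editor { clip_at_line_ends: true, cursor: 0, line_len: 10 };

    // `$` in temporary normal mode: the cursor may sit past the last character...
    move_cursor(&mut editor, Motion::EndOfLine, true);
    assert_eq!(editor.cursor, 10);
    assert!(editor.clip_at_line_ends); // ...and the original setting is restored.

    // Other motions leave the clip setting alone.
    move_cursor(&mut editor, Motion::Right, true);
    assert!(editor.clip_at_line_ends);
    println!("cursor = {}", editor.cursor);
}
```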
Closes [#42278](https://github.com/zed-industries/zed/issues/42278) Release Notes: - N/A --- crates/editor/src/editor.rs | 4 +++ crates/vim/src/normal.rs | 43 ++++++++++++++++----------------- crates/vim/src/normal/scroll.rs | 2 +- crates/vim/src/normal/yank.rs | 4 +-- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 339b0354c3fc0859cfe791fd69336535645c14c8..6476002396c35bd25d419013833e37b96a6c0395 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -3028,6 +3028,10 @@ impl Editor { range.clone() } + pub fn clip_at_line_ends(&mut self, cx: &mut Context) -> bool { + self.display_map.read(cx).clip_at_line_ends + } + pub fn set_clip_at_line_ends(&mut self, clip: bool, cx: &mut Context) { if self.display_map.read(cx).clip_at_line_ends != clip { self.display_map diff --git a/crates/vim/src/normal.rs b/crates/vim/src/normal.rs index fae810d64c587f96c587057615b138b4baabd227..e53e2306d6f14b1c230ea383103bdf67fe8196c0 100644 --- a/crates/vim/src/normal.rs +++ b/crates/vim/src/normal.rs @@ -386,8 +386,6 @@ impl Vim { window: &mut Window, cx: &mut Context, ) { - let temp_mode_motion = motion.clone(); - match operator { None => self.move_cursor(motion, times, window, cx), Some(Operator::Change) => self.change_motion(motion, times, forced_motion, window, cx), @@ -477,7 +475,7 @@ impl Vim { } } // Exit temporary normal mode (if active). - self.exit_temporary_normal(Some(&temp_mode_motion), window, cx); + self.exit_temporary_normal(window, cx); } pub fn normal_object( @@ -580,8 +578,21 @@ impl Vim { window: &mut Window, cx: &mut Context, ) { - self.update_editor(cx, |_, editor, cx| { + self.update_editor(cx, |vim, editor, cx| { let text_layout_details = editor.text_layout_details(window); + + // If vim is in temporary mode and the motion being used is + // `EndOfLine` ($), we'll want to disable clipping at line ends so + // that the newline character can be selected so that, when moving + // back to visual mode, the cursor will be placed after the last + // character and not before it. + let clip_at_line_ends = editor.clip_at_line_ends(cx); + let should_disable_clip = matches!(motion, Motion::EndOfLine { .. }) && vim.temp_mode; + + if should_disable_clip { + editor.set_clip_at_line_ends(false, cx) + }; + editor.change_selections( SelectionEffects::default().nav_history(motion.push_to_jump_list()), window, @@ -593,7 +604,11 @@ impl Vim { .unwrap_or((cursor, goal)) }) }, - ) + ); + + if should_disable_clip { + editor.set_clip_at_line_ends(clip_at_line_ends, cx); + }; }); } @@ -1054,25 +1069,9 @@ impl Vim { }); } - /// If temporary mode is enabled, switches back to insert mode, using the - /// provided `motion` to determine whether to move the cursor before - /// re-enabling insert mode, for example, when `EndOfLine` ($) is used. - fn exit_temporary_normal( - &mut self, - motion: Option<&Motion>, - window: &mut Window, - cx: &mut Context, - ) { + fn exit_temporary_normal(&mut self, window: &mut Window, cx: &mut Context) { if self.temp_mode { self.switch_mode(Mode::Insert, true, window, cx); - - // Since we're switching from `Normal` mode to `Insert` mode, we'll - // move the cursor one position to the right, to ensure that, for - // motions like `EndOfLine` ($), the cursor is actually at the end - // of line and not on the last character. - if matches!(motion, Some(Motion::EndOfLine { .. 
})) { - self.move_cursor(Motion::Right, Some(1), window, cx); - } } } } diff --git a/crates/vim/src/normal/scroll.rs b/crates/vim/src/normal/scroll.rs index 9346d76323c4fb6c181fb914587a710c94be4537..ff884e3b7393b39b86114338fe2af11e384e1fa0 100644 --- a/crates/vim/src/normal/scroll.rs +++ b/crates/vim/src/normal/scroll.rs @@ -96,7 +96,7 @@ impl Vim { ) { let amount = by(Vim::take_count(cx).map(|c| c as f32)); Vim::take_forced_motion(cx); - self.exit_temporary_normal(None, window, cx); + self.exit_temporary_normal(window, cx); self.update_editor(cx, |_, editor, cx| { scroll_editor(editor, move_cursor, amount, window, cx) }); diff --git a/crates/vim/src/normal/yank.rs b/crates/vim/src/normal/yank.rs index 4f1274dd88359fe8c3eb1b08ab3910c513b2d98d..d5a45fca544d61735f62a8f46e849db2c009847f 100644 --- a/crates/vim/src/normal/yank.rs +++ b/crates/vim/src/normal/yank.rs @@ -59,7 +59,7 @@ impl Vim { }); }); }); - self.exit_temporary_normal(None, window, cx); + self.exit_temporary_normal(window, cx); } pub fn yank_object( @@ -90,7 +90,7 @@ impl Vim { }); }); }); - self.exit_temporary_normal(None, window, cx); + self.exit_temporary_normal(window, cx); } pub fn yank_selections_content( From d9cfc2c8831bc2a51f0aeb67307026387fc002f1 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Mon, 17 Nov 2025 22:48:09 +0100 Subject: [PATCH 0167/1030] Fix formatting in various files (#42917) This fixes various issues where rustfmt failed to format code due to too long strings, most of which I stumbled across over the last week and some additonal ones I searched for whilst fixing the others. Release Notes: - N/A --- crates/extension_host/src/extension_host.rs | 29 ++- .../src/provider/copilot_chat.rs | 8 +- crates/languages/src/bash.rs | 6 +- crates/project_panel/src/project_panel.rs | 79 ++++--- crates/workspace/src/workspace.rs | 50 ++--- crates/zed/src/zed.rs | 98 +++++---- crates/zeta/src/rate_completion_modal.rs | 198 +++++++++++------- .../xtask/src/tasks/workflows/run_tests.rs | 31 +-- 8 files changed, 296 insertions(+), 203 deletions(-) diff --git a/crates/extension_host/src/extension_host.rs b/crates/extension_host/src/extension_host.rs index 7a2e13972e31091a22b0390a51732d68393c99a0..89ea83a905d75ae546125cd7479860bea9f21f9c 100644 --- a/crates/extension_host/src/extension_host.rs +++ b/crates/extension_host/src/extension_host.rs @@ -343,7 +343,7 @@ impl ExtensionStore { let index = this .update(cx, |this, cx| this.rebuild_extension_index(cx))? .await; - this.update( cx, |this, cx| this.extensions_updated(index, cx))? + this.update(cx, |this, cx| this.extensions_updated(index, cx))? .await; index_changed = false; } @@ -758,29 +758,28 @@ impl ExtensionStore { if let Some(content_length) = content_length { let actual_len = tar_gz_bytes.len(); if content_length != actual_len { - bail!("downloaded extension size {actual_len} does not match content length {content_length}"); + bail!(concat!( + "downloaded extension size {actual_len} ", + "does not match content length {content_length}" + )); } } let decompressed_bytes = GzipDecoder::new(BufReader::new(tar_gz_bytes.as_slice())); let archive = Archive::new(decompressed_bytes); archive.unpack(extension_dir).await?; - this.update( cx, |this, cx| { - this.reload(Some(extension_id.clone()), cx) - })? - .await; + this.update(cx, |this, cx| this.reload(Some(extension_id.clone()), cx))? 
+ .await; if let ExtensionOperation::Install = operation { - this.update( cx, |this, cx| { + this.update(cx, |this, cx| { cx.emit(Event::ExtensionInstalled(extension_id.clone())); if let Some(events) = ExtensionEvents::try_global(cx) - && let Some(manifest) = this.extension_manifest_for_id(&extension_id) { - events.update(cx, |this, cx| { - this.emit( - extension::Event::ExtensionInstalled(manifest.clone()), - cx, - ) - }); - } + && let Some(manifest) = this.extension_manifest_for_id(&extension_id) + { + events.update(cx, |this, cx| { + this.emit(extension::Event::ExtensionInstalled(manifest.clone()), cx) + }); + } }) .ok(); } diff --git a/crates/language_models/src/provider/copilot_chat.rs b/crates/language_models/src/provider/copilot_chat.rs index 0d95120322a592f1732aa53b3470108ccde76473..2f2469fa770821c208e037665c02d9ea8c20408f 100644 --- a/crates/language_models/src/provider/copilot_chat.rs +++ b/crates/language_models/src/provider/copilot_chat.rs @@ -143,9 +143,11 @@ impl LanguageModelProvider for CopilotChatLanguageModelProvider { }; let Some(copilot) = Copilot::global(cx) else { - return Task::ready( Err(anyhow!( - "Copilot must be enabled for Copilot Chat to work. Please enable Copilot and try again." - ).into())); + return Task::ready(Err(anyhow!(concat!( + "Copilot must be enabled for Copilot Chat to work. ", + "Please enable Copilot and try again." + )) + .into())); }; let err = match copilot.read(cx).status() { diff --git a/crates/languages/src/bash.rs b/crates/languages/src/bash.rs index 482556ba5f9ae7abe992c93fab34a7635004a51e..9720007d09a87132aaa063516039336cc0453e39 100644 --- a/crates/languages/src/bash.rs +++ b/crates/languages/src/bash.rs @@ -44,7 +44,11 @@ mod tests { let expect_indents_to = |buffer: &mut Buffer, cx: &mut Context, input: &str, expected: &str| { - buffer.edit( [(0..buffer.len(), input)], Some(AutoindentMode::EachLine), cx, ); + buffer.edit( + [(0..buffer.len(), input)], + Some(AutoindentMode::EachLine), + cx, + ); assert_eq!(buffer.text(), expected); }; diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index d1c3c96c0ccd02d9696f8bfcedfd5af6e6e1da45..410bea6b9c268a3009c7ff41ebf565eaa4a79bff 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1653,21 +1653,24 @@ impl ProjectPanel { match new_entry { Err(e) => { - project_panel.update_in( cx, |project_panel, window, cx| { - project_panel.marked_entries.clear(); - project_panel.update_visible_entries(None, false, false, window, cx); - }).ok(); + project_panel + .update_in(cx, |project_panel, window, cx| { + project_panel.marked_entries.clear(); + project_panel.update_visible_entries(None, false, false, window, cx); + }) + .ok(); Err(e)?; } Ok(CreatedEntry::Included(new_entry)) => { - project_panel.update_in( cx, |project_panel, window, cx| { + project_panel.update_in(cx, |project_panel, window, cx| { if let Some(selection) = &mut project_panel.state.selection - && selection.entry_id == edited_entry_id { - selection.worktree_id = worktree_id; - selection.entry_id = new_entry.id; - project_panel.marked_entries.clear(); - project_panel.expand_to_selection(cx); - } + && selection.entry_id == edited_entry_id + { + selection.worktree_id = worktree_id; + selection.entry_id = new_entry.id; + project_panel.marked_entries.clear(); + project_panel.expand_to_selection(cx); + } project_panel.update_visible_entries(None, false, false, window, cx); if is_new_entry && !is_dir { let settings = 
ProjectPanelSettings::get_global(cx); @@ -1688,7 +1691,14 @@ impl ProjectPanel { project_panel.project.update(cx, |_, cx| { cx.emit(project::Event::Toast { notification_id: "excluded-directory".into(), - message: format!("Created an excluded directory at {abs_path:?}.\nAlter `file_scan_exclusions` in the settings to show it in the panel") + message: format!( + concat!( + "Created an excluded directory at {:?}.\n", + "Alter `file_scan_exclusions` in the settings ", + "to show it in the panel" + ), + abs_path + ), }) }); None @@ -1696,7 +1706,15 @@ impl ProjectPanel { project_panel .workspace .update(cx, |workspace, cx| { - workspace.open_abs_path(abs_path, OpenOptions { visible: Some(OpenVisible::All), ..Default::default() }, window, cx) + workspace.open_abs_path( + abs_path, + OpenOptions { + visible: Some(OpenVisible::All), + ..Default::default() + }, + window, + cx, + ) }) .ok() } @@ -3600,32 +3618,44 @@ impl ProjectPanel { cx.spawn_in(window, async move |this, cx| { async move { for (filename, original_path) in &paths_to_replace { - let answer = cx.update(|window, cx| { - window - .prompt( + let prompt_message = format!( + concat!( + "A file or folder with name {} ", + "already exists in the destination folder. ", + "Do you want to replace it?" + ), + filename + ); + let answer = cx + .update(|window, cx| { + window.prompt( PromptLevel::Info, - format!("A file or folder with name {filename} already exists in the destination folder. Do you want to replace it?").as_str(), + &prompt_message, None, &["Replace", "Cancel"], cx, ) - })?.await?; + })? + .await?; if answer == 1 - && let Some(item_idx) = paths.iter().position(|p| p == original_path) { - paths.remove(item_idx); - } + && let Some(item_idx) = paths.iter().position(|p| p == original_path) + { + paths.remove(item_idx); + } } if paths.is_empty() { return Ok(()); } - let task = worktree.update( cx, |worktree, cx| { + let task = worktree.update(cx, |worktree, cx| { worktree.copy_external_entries(target_directory, paths, fs, cx) })?; - let opened_entries = task.await.with_context(|| "failed to copy external paths")?; + let opened_entries = task + .await + .with_context(|| "failed to copy external paths")?; this.update(cx, |this, cx| { if open_file_after_drop && !opened_entries.is_empty() { let settings = ProjectPanelSettings::get_global(cx); @@ -3635,7 +3665,8 @@ impl ProjectPanel { } }) } - .log_err().await + .log_err() + .await }) .detach(); } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 316969812ac34e84f4019a191fda225e255700f0..20139e3ae8104fc0d4c1bce98f265144ef344f0d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -7306,14 +7306,9 @@ pub fn join_channel( ) -> Task> { let active_call = ActiveCall::global(cx); cx.spawn(async move |cx| { - let result = join_channel_internal( - channel_id, - &app_state, - requesting_window, - &active_call, - cx, - ) - .await; + let result = + join_channel_internal(channel_id, &app_state, requesting_window, &active_call, cx) + .await; // join channel succeeded, and opened a window if matches!(result, Ok(true)) { @@ -7321,8 +7316,7 @@ pub fn join_channel( } // find an existing workspace to focus and show call controls - let mut active_window = - requesting_window.or_else(|| activate_any_workspace_window( cx)); + let mut active_window = requesting_window.or_else(|| activate_any_workspace_window(cx)); if active_window.is_none() { // no open workspaces, make one to show the error in (blergh) let (window_handle, _) = cx @@ 
-7334,7 +7328,8 @@ pub fn join_channel( if result.is_ok() { cx.update(|cx| { cx.dispatch_action(&OpenChannelNotes); - }).log_err(); + }) + .log_err(); } active_window = Some(window_handle); @@ -7346,19 +7341,25 @@ pub fn join_channel( active_window .update(cx, |_, window, cx| { let detail: SharedString = match err.error_code() { - ErrorCode::SignedOut => { - "Please sign in to continue.".into() - } - ErrorCode::UpgradeRequired => { - "Your are running an unsupported version of Zed. Please update to continue.".into() - } - ErrorCode::NoSuchChannel => { - "No matching channel was found. Please check the link and try again.".into() - } - ErrorCode::Forbidden => { - "This channel is private, and you do not have access. Please ask someone to add you and try again.".into() + ErrorCode::SignedOut => "Please sign in to continue.".into(), + ErrorCode::UpgradeRequired => concat!( + "Your are running an unsupported version of Zed. ", + "Please update to continue." + ) + .into(), + ErrorCode::NoSuchChannel => concat!( + "No matching channel was found. ", + "Please check the link and try again." + ) + .into(), + ErrorCode::Forbidden => concat!( + "This channel is private, and you do not have access. ", + "Please ask someone to add you and try again." + ) + .into(), + ErrorCode::Disconnected => { + "Please check your internet connection and try again.".into() } - ErrorCode::Disconnected => "Please check your internet connection and try again.".into(), _ => format!("{}\n\nPlease try again.", err).into(), }; window.prompt( @@ -7366,7 +7367,8 @@ pub fn join_channel( "Failed to join channel", Some(&detail), &["Ok"], - cx) + cx, + ) })? .await .ok(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 617534ee0f8a68f91e8e55d58d0d9550d265c6b2..83307818ac0ec73ca704e1dbb10fd551ecdd3a42 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -1918,53 +1918,67 @@ fn open_telemetry_log_file( window: &mut Window, cx: &mut Context, ) { - workspace.with_local_workspace(window, cx, move |workspace, window, cx| { - let app_state = workspace.app_state().clone(); - cx.spawn_in(window, async move |workspace, cx| { - async fn fetch_log_string(app_state: &Arc) -> Option { - let path = client::telemetry::Telemetry::log_file_path(); - app_state.fs.load(&path).await.log_err() - } + const HEADER: &str = concat!( + "// Zed collects anonymous usage data to help us understand how people are using the app.\n", + "// Telemetry can be disabled via the `settings.json` file.\n", + "// Here is the data that has been reported for the current session:\n", + ); + workspace + .with_local_workspace(window, cx, move |workspace, window, cx| { + let app_state = workspace.app_state().clone(); + cx.spawn_in(window, async move |workspace, cx| { + async fn fetch_log_string(app_state: &Arc) -> Option { + let path = client::telemetry::Telemetry::log_file_path(); + app_state.fs.load(&path).await.log_err() + } - let log = fetch_log_string(&app_state).await.unwrap_or_else(|| "// No data has been collected yet".to_string()); + let log = fetch_log_string(&app_state) + .await + .unwrap_or_else(|| "// No data has been collected yet".to_string()); - const MAX_TELEMETRY_LOG_LEN: usize = 5 * 1024 * 1024; - let mut start_offset = log.len().saturating_sub(MAX_TELEMETRY_LOG_LEN); - if let Some(newline_offset) = log[start_offset..].find('\n') { - start_offset += newline_offset + 1; - } - let log_suffix = &log[start_offset..]; - let header = concat!( - "// Zed collects anonymous usage data to help us understand how people are using the 
app.\n", - "// Telemetry can be disabled via the `settings.json` file.\n", - "// Here is the data that has been reported for the current session:\n", - ); - let content = format!("{}\n{}", header, log_suffix); - let json = app_state.languages.language_for_name("JSON").await.log_err(); - - workspace.update_in( cx, |workspace, window, cx| { - let project = workspace.project().clone(); - let buffer = project.update(cx, |project, cx| project.create_local_buffer(&content, json,false, cx)); - let buffer = cx.new(|cx| { - MultiBuffer::singleton(buffer, cx).with_title("Telemetry Log".into()) - }); - workspace.add_item_to_active_pane( - Box::new(cx.new(|cx| { - let mut editor = Editor::for_multibuffer(buffer, Some(project), window, cx); - editor.set_read_only(true); - editor.set_breadcrumb_header("Telemetry Log".into()); - editor - })), - None, - true, - window, cx, - ); - }).log_err()?; + const MAX_TELEMETRY_LOG_LEN: usize = 5 * 1024 * 1024; + let mut start_offset = log.len().saturating_sub(MAX_TELEMETRY_LOG_LEN); + if let Some(newline_offset) = log[start_offset..].find('\n') { + start_offset += newline_offset + 1; + } + let log_suffix = &log[start_offset..]; + let content = format!("{}\n{}", HEADER, log_suffix); + let json = app_state + .languages + .language_for_name("JSON") + .await + .log_err(); + + workspace + .update_in(cx, |workspace, window, cx| { + let project = workspace.project().clone(); + let buffer = project.update(cx, |project, cx| { + project.create_local_buffer(&content, json, false, cx) + }); + let buffer = cx.new(|cx| { + MultiBuffer::singleton(buffer, cx).with_title("Telemetry Log".into()) + }); + workspace.add_item_to_active_pane( + Box::new(cx.new(|cx| { + let mut editor = + Editor::for_multibuffer(buffer, Some(project), window, cx); + editor.set_read_only(true); + editor.set_breadcrumb_header("Telemetry Log".into()); + editor + })), + None, + true, + window, + cx, + ); + }) + .log_err()?; - Some(()) + Some(()) + }) + .detach(); }) .detach(); - }).detach(); } fn open_bundled_file( diff --git a/crates/zeta/src/rate_completion_modal.rs b/crates/zeta/src/rate_completion_modal.rs index cc1787ab01c6dd8f6429c3ac821a485355629462..a081538f5528946ea5b959981b7bd70d44b8b11b 100644 --- a/crates/zeta/src/rate_completion_modal.rs +++ b/crates/zeta/src/rate_completion_modal.rs @@ -410,10 +410,15 @@ impl RateCompletionModal { .overflow_hidden() .relative() .child(self.render_view_nav(cx)) - .when_some(match self.current_view { - RateCompletionView::SuggestedEdits => self.render_suggested_edits(cx), - RateCompletionView::RawInput => self.render_raw_input(cx), - }, |this, element| this.child(element)) + .when_some( + match self.current_view { + RateCompletionView::SuggestedEdits => { + self.render_suggested_edits(cx) + } + RateCompletionView::RawInput => self.render_raw_input(cx), + }, + |this, element| this.child(element), + ), ) .when(!rated, |this| { this.child( @@ -425,19 +430,18 @@ impl RateCompletionModal { .child( Icon::new(IconName::Info) .size(IconSize::XSmall) - .color(Color::Muted) + .color(Color::Muted), ) .child( - div() - .w_full() - .pr_2() - .flex_wrap() - .child( - Label::new("Explain why this completion is good or bad. If it's negative, describe what you expected instead.") - .size(LabelSize::Small) - .color(Color::Muted) - ) - ) + div().w_full().pr_2().flex_wrap().child( + Label::new(concat!( + "Explain why this completion is good or bad. ", + "If it's negative, describe what you expected instead." 
+ )) + .size(LabelSize::Small) + .color(Color::Muted), + ), + ), ) }) .when(!rated, |this| { @@ -446,7 +450,7 @@ impl RateCompletionModal { .h_40() .pt_1() .bg(bg_color) - .child(active_completion.feedback_editor.clone()) + .child(active_completion.feedback_editor.clone()), ) }) .child( @@ -491,18 +495,21 @@ impl RateCompletionModal { .icon_position(IconPosition::Start) .disabled(rated || feedback_empty) .when(feedback_empty, |this| { - this.tooltip(Tooltip::text("Explain what's bad about it before reporting it")) + this.tooltip(Tooltip::text( + "Explain what's bad about it before reporting it", + )) }) .key_binding(KeyBinding::for_action_in( &ThumbsDownActiveCompletion, focus_handle, - cx + cx, )) .on_click(cx.listener(move |this, _, window, cx| { if this.active_completion.is_some() { this.thumbs_down_active( &ThumbsDownActiveCompletion, - window, cx, + window, + cx, ); } })), @@ -516,11 +523,15 @@ impl RateCompletionModal { .key_binding(KeyBinding::for_action_in( &ThumbsUpActiveCompletion, focus_handle, - cx + cx, )) .on_click(cx.listener(move |this, _, window, cx| { if this.active_completion.is_some() { - this.thumbs_up_active(&ThumbsUpActiveCompletion, window, cx); + this.thumbs_up_active( + &ThumbsUpActiveCompletion, + window, + cx, + ); } })), ), @@ -528,6 +539,80 @@ impl RateCompletionModal { ), ) } + + fn render_shown_completions(&self, cx: &Context) -> impl Iterator { + self.zeta + .read(cx) + .shown_completions() + .cloned() + .enumerate() + .map(|(index, completion)| { + let selected = self + .active_completion + .as_ref() + .is_some_and(|selected| selected.completion.id == completion.id); + let rated = self.zeta.read(cx).is_completion_rated(completion.id); + + let (icon_name, icon_color, tooltip_text) = + match (rated, completion.edits.is_empty()) { + (true, _) => (IconName::Check, Color::Success, "Rated Completion"), + (false, true) => (IconName::File, Color::Muted, "No Edits Produced"), + (false, false) => (IconName::FileDiff, Color::Accent, "Edits Available"), + }; + + let file_name = completion + .path + .file_name() + .map(|f| f.to_string_lossy().into_owned()) + .unwrap_or("untitled".to_string()); + let file_path = completion + .path + .parent() + .map(|p| p.to_string_lossy().into_owned()); + + ListItem::new(completion.id) + .inset(true) + .spacing(ListItemSpacing::Sparse) + .focused(index == self.selected_index) + .toggle_state(selected) + .child( + h_flex() + .id("completion-content") + .gap_3() + .child(Icon::new(icon_name).color(icon_color).size(IconSize::Small)) + .child( + v_flex() + .child( + h_flex() + .gap_1() + .child(Label::new(file_name).size(LabelSize::Small)) + .when_some(file_path, |this, p| { + this.child( + Label::new(p) + .size(LabelSize::Small) + .color(Color::Muted), + ) + }), + ) + .child( + Label::new(format!( + "{} ago, {:.2?}", + format_time_ago( + completion.response_received_at.elapsed() + ), + completion.latency() + )) + .color(Color::Muted) + .size(LabelSize::XSmall), + ), + ), + ) + .tooltip(Tooltip::text(tooltip_text)) + .on_click(cx.listener(move |this, _, window, cx| { + this.select_completion(Some(completion.clone()), true, window, cx); + })) + }) + } } impl Render for RateCompletionModal { @@ -571,15 +656,12 @@ impl Render for RateCompletionModal { .justify_between() .border_b_1() .border_color(border_color) - .child( - Icon::new(IconName::ZedPredict) - .size(IconSize::Small) - ) + .child(Icon::new(IconName::ZedPredict).size(IconSize::Small)) .child( Label::new("From most recent to oldest") .color(Color::Muted) .size(LabelSize::Small), - ) 
+ ), ) .child( div() @@ -593,66 +675,20 @@ impl Render for RateCompletionModal { div() .p_2() .child( - Label::new("No completions yet. Use the editor to generate some, and make sure to rate them!") - .color(Color::Muted), + Label::new(concat!( + "No completions yet. ", + "Use the editor to generate some, ", + "and make sure to rate them!" + )) + .color(Color::Muted), ) .into_any_element(), ) - .children(self.zeta.read(cx).shown_completions().cloned().enumerate().map( - |(index, completion)| { - let selected = - self.active_completion.as_ref().is_some_and(|selected| { - selected.completion.id == completion.id - }); - let rated = - self.zeta.read(cx).is_completion_rated(completion.id); - - let (icon_name, icon_color, tooltip_text) = match (rated, completion.edits.is_empty()) { - (true, _) => (IconName::Check, Color::Success, "Rated Completion"), - (false, true) => (IconName::File, Color::Muted, "No Edits Produced"), - (false, false) => (IconName::FileDiff, Color::Accent, "Edits Available"), - }; - - let file_name = completion.path.file_name().map(|f| f.to_string_lossy().into_owned()).unwrap_or("untitled".to_string()); - let file_path = completion.path.parent().map(|p| p.to_string_lossy().into_owned()); - - ListItem::new(completion.id) - .inset(true) - .spacing(ListItemSpacing::Sparse) - .focused(index == self.selected_index) - .toggle_state(selected) - .child( - h_flex() - .id("completion-content") - .gap_3() - .child( - Icon::new(icon_name) - .color(icon_color) - .size(IconSize::Small) - ) - .child( - v_flex() - .child( - h_flex().gap_1() - .child(Label::new(file_name).size(LabelSize::Small)) - .when_some(file_path, |this, p| this.child(Label::new(p).size(LabelSize::Small).color(Color::Muted))) - ) - .child(Label::new(format!("{} ago, {:.2?}", format_time_ago(completion.response_received_at.elapsed()), completion.latency())) - .color(Color::Muted) - .size(LabelSize::XSmall) - ) - ) - ) - .tooltip(Tooltip::text(tooltip_text)) - .on_click(cx.listener(move |this, _, window, cx| { - this.select_completion(Some(completion.clone()), true, window, cx); - })) - }, - )), - ) + .children(self.render_shown_completions(cx)), + ), ), ) - .children(self.render_active_completion( cx)) + .children(self.render_active_completion(cx)) .on_mouse_down_out(cx.listener(|_, _, _, cx| cx.emit(DismissEvent))) } } diff --git a/tooling/xtask/src/tasks/workflows/run_tests.rs b/tooling/xtask/src/tasks/workflows/run_tests.rs index f8212fed243c1c3bccffe7240ee152fa203e14d2..826881ab8a1e248450a2f3c4a3a62f4d449a8117 100644 --- a/tooling/xtask/src/tasks/workflows/run_tests.rs +++ b/tooling/xtask/src/tasks/workflows/run_tests.rs @@ -76,21 +76,26 @@ pub(crate) fn run_tests() -> Workflow { jobs.push(should_run_tests.guard(check_postgres_and_protobuf_migrations())); // could be more specific here? 
named::workflow() - .add_event(Event::default() - .push( - Push::default() - .add_branch("main") - .add_branch("v[0-9]+.[0-9]+.x") - ) - .pull_request(PullRequest::default().add_branch("**")) + .add_event( + Event::default() + .push( + Push::default() + .add_branch("main") + .add_branch("v[0-9]+.[0-9]+.x"), + ) + .pull_request(PullRequest::default().add_branch("**")), ) - .concurrency(Concurrency::default() - .group("${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}") - .cancel_in_progress(true) + .concurrency( + Concurrency::default() + .group(concat!( + "${{ github.workflow }}-${{ github.ref_name }}-", + "${{ github.ref_name == 'main' && github.sha || 'anysha' }}" + )) + .cancel_in_progress(true), ) - .add_env(( "CARGO_TERM_COLOR", "always" )) - .add_env(( "RUST_BACKTRACE", 1 )) - .add_env(( "CARGO_INCREMENTAL", 0 )) + .add_env(("CARGO_TERM_COLOR", "always")) + .add_env(("RUST_BACKTRACE", 1)) + .add_env(("CARGO_INCREMENTAL", 0)) .map(|mut workflow| { for job in jobs { workflow = workflow.add_job(job.name, job.job) From 73d3f9611eddac5883950199e60dab3c1116ba56 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Mon, 17 Nov 2025 17:12:00 -0500 Subject: [PATCH 0168/1030] collab: Add `external_id` column to `billing_customers` table (#42923) This PR adds an `external_id` column to the `billing_customers` table. Release Notes: - N/A --- .../20251117215316_add_external_id_to_billing_customers.sql | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 crates/collab/migrations/20251117215316_add_external_id_to_billing_customers.sql diff --git a/crates/collab/migrations/20251117215316_add_external_id_to_billing_customers.sql b/crates/collab/migrations/20251117215316_add_external_id_to_billing_customers.sql new file mode 100644 index 0000000000000000000000000000000000000000..6add45d4ad5d83cea6d86d2edc1e06613bd25560 --- /dev/null +++ b/crates/collab/migrations/20251117215316_add_external_id_to_billing_customers.sql @@ -0,0 +1,4 @@ +alter table billing_customers + add column external_id text; + +create unique index uix_billing_customers_on_external_id on billing_customers (external_id); From 5f356d04ffd667889593a8f59958acd54f1e0d7c Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 19:12:26 -0300 Subject: [PATCH 0169/1030] agent_ui: Fix model name label truncation (#42921) Closes https://github.com/zed-industries/zed/issues/32739 Release Notes: - agent: Fixed an issue where the label for model names wouldn't use all the available space in the model picker. 
--- crates/agent_ui/src/acp/model_selector.rs | 14 +++++++------- crates/agent_ui/src/language_model_selector.rs | 12 +++++------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/crates/agent_ui/src/acp/model_selector.rs b/crates/agent_ui/src/acp/model_selector.rs index 45fec558720fc5e88548f6dd7bc24fe624a908f5..91aacde2aebcd0a2d4c8098119bbc43342d3ef74 100644 --- a/crates/agent_ui/src/acp/model_selector.rs +++ b/crates/agent_ui/src/acp/model_selector.rs @@ -251,17 +251,17 @@ impl PickerDelegate for AcpModelPickerDelegate { .inset(true) .spacing(ListItemSpacing::Sparse) .toggle_state(selected) - .start_slot::(model_info.icon.map(|icon| { - Icon::new(icon) - .color(model_icon_color) - .size(IconSize::Small) - })) .child( h_flex() .w_full() - .pl_0p5() .gap_1p5() - .w(px(240.)) + .when_some(model_info.icon, |this, icon| { + this.child( + Icon::new(icon) + .color(model_icon_color) + .size(IconSize::Small) + ) + }) .child(Label::new(model_info.name.clone()).truncate()), ) .end_slot(div().pr_3().when(is_selected, |this| { diff --git a/crates/agent_ui/src/language_model_selector.rs b/crates/agent_ui/src/language_model_selector.rs index 1de6bee791f782713d869bac7974ad3ec4e08b9f..996e6a19828c741adbf6f8f824470f9a66c2f049 100644 --- a/crates/agent_ui/src/language_model_selector.rs +++ b/crates/agent_ui/src/language_model_selector.rs @@ -492,17 +492,15 @@ impl PickerDelegate for LanguageModelPickerDelegate { .inset(true) .spacing(ListItemSpacing::Sparse) .toggle_state(selected) - .start_slot( - Icon::new(model_info.icon) - .color(model_icon_color) - .size(IconSize::Small), - ) .child( h_flex() .w_full() - .pl_0p5() .gap_1p5() - .w(px(240.)) + .child( + Icon::new(model_info.icon) + .color(model_icon_color) + .size(IconSize::Small), + ) .child(Label::new(model_info.model.name().0).truncate()), ) .end_slot(div().pr_3().when(is_selected, |this| { From 98a83b47e6ebf3a0527d32d13645bcde763a548b Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 17 Nov 2025 19:13:15 -0300 Subject: [PATCH 0170/1030] agent_ui: Make input fields in Bedrock settings keyboard navigable (#42916) Closes https://github.com/zed-industries/zed/issues/36587 This PR enables jumping from one input to the other, in the Bedrock settings section, with tab. 
Release Notes: - N/A --- .../language_models/src/provider/bedrock.rs | 74 +++++++++++++++---- crates/ui_input/src/input_field.rs | 27 +++++++ crates/zed/src/zed.rs | 1 + 3 files changed, 87 insertions(+), 15 deletions(-) diff --git a/crates/language_models/src/provider/bedrock.rs b/crates/language_models/src/provider/bedrock.rs index 14dd575f23952ee732c5d9714d2e091cf50d606f..61f36428d2e69af013103c8ca06b38d8d4a96e8d 100644 --- a/crates/language_models/src/provider/bedrock.rs +++ b/crates/language_models/src/provider/bedrock.rs @@ -24,7 +24,10 @@ use bedrock::{ use collections::{BTreeMap, HashMap}; use credentials_provider::CredentialsProvider; use futures::{FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream}; -use gpui::{AnyView, App, AsyncApp, Context, Entity, FontWeight, Subscription, Task}; +use gpui::{ + AnyView, App, AsyncApp, Context, Entity, FocusHandle, FontWeight, Subscription, Task, Window, + actions, +}; use gpui_tokio::Tokio; use http_client::HttpClient; use language_model::{ @@ -47,6 +50,8 @@ use util::ResultExt; use crate::AllLanguageModelSettings; +actions!(bedrock, [Tab, TabPrev]); + const PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("amazon-bedrock"); const PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("Amazon Bedrock"); @@ -1012,6 +1017,7 @@ struct ConfigurationView { region_editor: Entity, state: Entity, load_credentials_task: Option>, + focus_handle: FocusHandle, } impl ConfigurationView { @@ -1022,11 +1028,41 @@ impl ConfigurationView { const PLACEHOLDER_REGION: &'static str = "us-east-1"; fn new(state: Entity, window: &mut Window, cx: &mut Context) -> Self { + let focus_handle = cx.focus_handle(); + cx.observe(&state, |_, _, cx| { cx.notify(); }) .detach(); + let access_key_id_editor = cx.new(|cx| { + InputField::new(window, cx, Self::PLACEHOLDER_ACCESS_KEY_ID_TEXT) + .label("Access Key ID") + .tab_index(0) + .tab_stop(true) + }); + + let secret_access_key_editor = cx.new(|cx| { + InputField::new(window, cx, Self::PLACEHOLDER_SECRET_ACCESS_KEY_TEXT) + .label("Secret Access Key") + .tab_index(1) + .tab_stop(true) + }); + + let session_token_editor = cx.new(|cx| { + InputField::new(window, cx, Self::PLACEHOLDER_SESSION_TOKEN_TEXT) + .label("Session Token (Optional)") + .tab_index(2) + .tab_stop(true) + }); + + let region_editor = cx.new(|cx| { + InputField::new(window, cx, Self::PLACEHOLDER_REGION) + .label("Region") + .tab_index(3) + .tab_stop(true) + }); + let load_credentials_task = Some(cx.spawn({ let state = state.clone(); async move |this, cx| { @@ -1046,22 +1082,13 @@ impl ConfigurationView { })); Self { - access_key_id_editor: cx.new(|cx| { - InputField::new(window, cx, Self::PLACEHOLDER_ACCESS_KEY_ID_TEXT) - .label("Access Key ID") - }), - secret_access_key_editor: cx.new(|cx| { - InputField::new(window, cx, Self::PLACEHOLDER_SECRET_ACCESS_KEY_TEXT) - .label("Secret Access Key") - }), - session_token_editor: cx.new(|cx| { - InputField::new(window, cx, Self::PLACEHOLDER_SESSION_TOKEN_TEXT) - .label("Session Token (Optional)") - }), - region_editor: cx - .new(|cx| InputField::new(window, cx, Self::PLACEHOLDER_REGION).label("Region")), + access_key_id_editor, + secret_access_key_editor, + session_token_editor, + region_editor, state, load_credentials_task, + focus_handle, } } @@ -1141,6 +1168,19 @@ impl ConfigurationView { fn should_render_editor(&self, cx: &Context) -> bool { self.state.read(cx).is_authenticated() } + + fn on_tab(&mut self, _: &menu::SelectNext, window: &mut Window, _: &mut 
Context) { + window.focus_next(); + } + + fn on_tab_prev( + &mut self, + _: &menu::SelectPrevious, + window: &mut Window, + _: &mut Context, + ) { + window.focus_prev(); + } } impl Render for ConfigurationView { @@ -1190,6 +1230,9 @@ impl Render for ConfigurationView { v_flex() .size_full() + .track_focus(&self.focus_handle) + .on_action(cx.listener(Self::on_tab)) + .on_action(cx.listener(Self::on_tab_prev)) .on_action(cx.listener(ConfigurationView::save_credentials)) .child(Label::new("To use Zed's agent with Bedrock, you can set a custom authentication strategy through the settings.json, or use static credentials.")) .child(Label::new("But, to access models on AWS, you need to:").mt_1()) @@ -1234,6 +1277,7 @@ impl ConfigurationView { fn render_static_credentials_ui(&self) -> impl IntoElement { v_flex() .my_2() + .tab_group() .gap_1p5() .child( Label::new("Static Keys") diff --git a/crates/ui_input/src/input_field.rs b/crates/ui_input/src/input_field.rs index 82f7f0261facef8a7c6a422b2ff4ed335229aeb3..9e8c519ca9acc68c0d968f099f62ad336ee0754a 100644 --- a/crates/ui_input/src/input_field.rs +++ b/crates/ui_input/src/input_field.rs @@ -37,6 +37,10 @@ pub struct InputField { disabled: bool, /// The minimum width of for the input min_width: Length, + /// The tab index for keyboard navigation order. + tab_index: Option, + /// Whether this field is a tab stop (can be focused via Tab key). + tab_stop: bool, } impl Focusable for InputField { @@ -63,6 +67,8 @@ impl InputField { start_icon: None, disabled: false, min_width: px(192.).into(), + tab_index: None, + tab_stop: true, } } @@ -86,6 +92,16 @@ impl InputField { self } + pub fn tab_index(mut self, index: isize) -> Self { + self.tab_index = Some(index); + self + } + + pub fn tab_stop(mut self, tab_stop: bool) -> Self { + self.tab_stop = tab_stop; + self + } + pub fn set_disabled(&mut self, disabled: bool, cx: &mut Context) { self.disabled = disabled; self.editor @@ -151,6 +167,16 @@ impl Render for InputField { ..Default::default() }; + let focus_handle = self.editor.focus_handle(cx); + + let configured_handle = if let Some(tab_index) = self.tab_index { + focus_handle.tab_index(tab_index).tab_stop(self.tab_stop) + } else if !self.tab_stop { + focus_handle.tab_stop(false) + } else { + focus_handle + }; + v_flex() .id(self.placeholder.clone()) .w_full() @@ -168,6 +194,7 @@ impl Render for InputField { }) .child( h_flex() + .track_focus(&configured_handle) .min_w(self.min_width) .min_h_8() .w_full() diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 83307818ac0ec73ca704e1dbb10fd551ecdd3a42..998d1831a1b5e4179677d33a80fd36718e833511 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -4702,6 +4702,7 @@ mod tests { "assistant", "assistant2", "auto_update", + "bedrock", "branches", "buffer_search", "channel_modal", From 98da1ea169f3bc35248aa196904b73cad5927f6a Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Mon, 17 Nov 2025 14:13:58 -0800 Subject: [PATCH 0171/1030] Fix remote extension syncing (#42918) Closes #40906 Closes #39729 SFTP uploads weren't quoting the install directory which was causing extension syncing to fail. We were also only running `install_extension` once per remote-connection instead of once per project (thx @feeiyu for pointing this out) so extension weren't being loaded in subsequently opened remote projects. 
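For illustration, here is a minimal standalone sketch of the quoting problem (the paths are made up, not the real server paths): sftp batch commands split unquoted arguments on whitespace, so a destination directory containing a space was parsed as two separate words. Quoting both sides of the `put` line, as this patch does, keeps such paths intact (this sketch only covers spaces, not full shell escaping):

```
// Hypothetical helper mirroring the fix: quote both arguments of the sftp `put` batch line.
fn sftp_put_batch(src: &str, dest: &str) -> String {
    format!("put -r \"{src}\" \"{dest}\"\n")
}

fn main() {
    // A destination containing a space would previously be split into two arguments.
    let batch = sftp_put_batch("/tmp/zed-extension.tar.gz", "C:/Users/Jane Doe/.zed_server");
    assert_eq!(
        batch,
        "put -r \"/tmp/zed-extension.tar.gz\" \"C:/Users/Jane Doe/.zed_server\"\n"
    );
    print!("{batch}");
}
```

The other half of the fix, registering one client handle per project instead of deduplicating by connection options, is visible in the `register_remote_client` change in the diff below.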
Release Notes: - N/A --------- Co-authored-by: Conrad Irwin --- crates/extension_host/src/extension_host.rs | 56 ++++++++++++--------- crates/extension_host/src/headless_host.rs | 3 +- crates/remote/src/transport/ssh.rs | 2 +- 3 files changed, 35 insertions(+), 26 deletions(-) diff --git a/crates/extension_host/src/extension_host.rs b/crates/extension_host/src/extension_host.rs index 89ea83a905d75ae546125cd7479860bea9f21f9c..2c99acbc7f4028598ab110c54d7f0cb4714d2c67 100644 --- a/crates/extension_host/src/extension_host.rs +++ b/crates/extension_host/src/extension_host.rs @@ -11,7 +11,7 @@ use async_compression::futures::bufread::GzipDecoder; use async_tar::Archive; use client::ExtensionProvides; use client::{Client, ExtensionMetadata, GetExtensionsResponse, proto, telemetry::Telemetry}; -use collections::{BTreeMap, BTreeSet, HashMap, HashSet, btree_map}; +use collections::{BTreeMap, BTreeSet, HashSet, btree_map}; pub use extension::ExtensionManifest; use extension::extension_builder::{CompileExtensionOptions, ExtensionBuilder}; use extension::{ @@ -43,7 +43,7 @@ use language::{ use node_runtime::NodeRuntime; use project::ContextProviderWithTasks; use release_channel::ReleaseChannel; -use remote::{RemoteClient, RemoteConnectionOptions}; +use remote::RemoteClient; use semantic_version::SemanticVersion; use serde::{Deserialize, Serialize}; use settings::Settings; @@ -123,7 +123,7 @@ pub struct ExtensionStore { pub wasm_host: Arc, pub wasm_extensions: Vec<(Arc, WasmExtension)>, pub tasks: Vec>, - pub remote_clients: HashMap>, + pub remote_clients: Vec>, pub ssh_registered_tx: UnboundedSender<()>, } @@ -274,7 +274,7 @@ impl ExtensionStore { reload_tx, tasks: Vec::new(), - remote_clients: HashMap::default(), + remote_clients: Default::default(), ssh_registered_tx: connection_registered_tx, }; @@ -348,7 +348,7 @@ impl ExtensionStore { index_changed = false; } - Self::update_ssh_clients(&this, cx).await?; + Self::update_remote_clients(&this, cx).await?; } _ = connection_registered_rx.next() => { debounce_timer = cx @@ -1725,7 +1725,7 @@ impl ExtensionStore { }) } - async fn sync_extensions_over_ssh( + async fn sync_extensions_to_remotes( this: &WeakEntity, client: WeakEntity, cx: &mut AsyncApp, @@ -1778,7 +1778,11 @@ impl ExtensionStore { })?, path_style, ); - log::info!("Uploading extension {}", missing_extension.clone().id); + log::info!( + "Uploading extension {} to {:?}", + missing_extension.clone().id, + dest_dir + ); client .update(cx, |client, cx| { @@ -1791,27 +1795,35 @@ impl ExtensionStore { missing_extension.clone().id ); - client + let result = client .update(cx, |client, _cx| { client.proto_client().request(proto::InstallExtension { tmp_dir: dest_dir.to_proto(), - extension: Some(missing_extension), + extension: Some(missing_extension.clone()), }) })? 
- .await?; + .await; + + if let Err(e) = result { + log::error!( + "Failed to install extension {}: {}", + missing_extension.id, + e + ); + } } anyhow::Ok(()) } - pub async fn update_ssh_clients(this: &WeakEntity, cx: &mut AsyncApp) -> Result<()> { + pub async fn update_remote_clients(this: &WeakEntity, cx: &mut AsyncApp) -> Result<()> { let clients = this.update(cx, |this, _cx| { - this.remote_clients.retain(|_k, v| v.upgrade().is_some()); - this.remote_clients.values().cloned().collect::>() + this.remote_clients.retain(|v| v.upgrade().is_some()); + this.remote_clients.clone() })?; for client in clients { - Self::sync_extensions_over_ssh(this, client, cx) + Self::sync_extensions_to_remotes(this, client, cx) .await .log_err(); } @@ -1819,16 +1831,12 @@ impl ExtensionStore { anyhow::Ok(()) } - pub fn register_remote_client(&mut self, client: Entity, cx: &mut Context) { - let options = client.read(cx).connection_options(); - - if let Some(existing_client) = self.remote_clients.get(&options) - && existing_client.upgrade().is_some() - { - return; - } - - self.remote_clients.insert(options, client.downgrade()); + pub fn register_remote_client( + &mut self, + client: Entity, + _cx: &mut Context, + ) { + self.remote_clients.push(client.downgrade()); self.ssh_registered_tx.unbounded_send(()).ok(); } } diff --git a/crates/extension_host/src/headless_host.rs b/crates/extension_host/src/headless_host.rs index f14bb811a6742a60899ac4301cfac096bb41a07f..1fe1905e0b89a89fd8b7304b9a175a832f3ffc29 100644 --- a/crates/extension_host/src/headless_host.rs +++ b/crates/extension_host/src/headless_host.rs @@ -279,7 +279,8 @@ impl HeadlessExtensionStore { } fs.rename(&tmp_path, &path, RenameOptions::default()) - .await?; + .await + .context("Failed to rename {tmp_path:?} to {path:?}")?; Self::load_extension(this, extension, cx).await }) diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index cf8e6f3e9cc9599aa7d2d05ea204c550892ac4c4..8fed12a4cb9d594160732cc94637f8b53692f9cf 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -304,7 +304,7 @@ impl RemoteConnection for SshRemoteConnection { let mut child = sftp_command.spawn()?; if let Some(mut stdin) = child.stdin.take() { use futures::AsyncWriteExt; - let sftp_batch = format!("put -r {src_path_display} {dest_path_str}\n"); + let sftp_batch = format!("put -r \"{src_path_display}\" \"{dest_path_str}\"\n"); stdin.write_all(sftp_batch.as_bytes()).await?; stdin.flush().await?; } From 6bea23e990850adc7e29801cce086b8202d6d31f Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 23:20:45 +0100 Subject: [PATCH 0172/1030] text: Temporarily remove `assert_char_boundary` panics (#42919) As discussed in the first responders meeting. We have collected a lot of backtraces from these, but it's not quite clear yet what causes this. Removing these should ideally make things a bit more stable even if we may run into panics later one when the faulty anchor is used still. Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/multi_buffer/src/multi_buffer.rs | 11 ----------- crates/text/src/text.rs | 1 - 2 files changed, 12 deletions(-) diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 5be61d1efe153fcd6902b33e46f2f5b84d4055e6..43def73ae257e29f007ef56fb181e03432023edb 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -3828,21 +3828,10 @@ impl MultiBufferSnapshot { { let main_buffer_position = cursor.main_buffer_position()?; let buffer_snapshot = &cursor.excerpt()?.buffer; - // remove this assert once we figure out the cause of the panics for #40453 - buffer_snapshot - .text - .as_rope() - .assert_char_boundary(main_buffer_position); return Some((buffer_snapshot, main_buffer_position)); } else if buffer_offset > region.buffer.len() { return None; } - // remove this assert once we figure out the cause of the panics for #40453 - region - .buffer - .text - .as_rope() - .assert_char_boundary(buffer_offset); Some((region.buffer, buffer_offset)) } diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index d9a6b2bb26606c4192ef66351a5bd8f6bca667e7..316bdb59faed8438d9664a904c7900491c59376b 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -2447,7 +2447,6 @@ impl BufferSnapshot { if offset > self.visible_text.len() { panic!("offset {} is out of bounds", offset) } - self.visible_text.assert_char_boundary(offset); let (start, _, item) = self.fragments.find::(&None, &offset, bias); let fragment = item.unwrap(); let overshoot = offset - start; From 2c7bcfcb7b5612c84ece6bd6ce96c64f2cafb73e Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 17 Nov 2025 23:38:48 +0100 Subject: [PATCH 0173/1030] multi_buffer: Work around another panic bug in path_key (#42920) Fixes ZED-346 for now until I find the time to dig into this bug properly Release Notes: - Fixed a panic in the diagnostics pane --- crates/diagnostics/src/diagnostics.rs | 6 ++++-- crates/multi_buffer/src/path_key.rs | 8 +++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 92a4ba097f21d1f5894235bb2356c7ded9413359..eca7ae359a3ebafbbe13316bb757c1fbfc7f72ce 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -491,7 +491,7 @@ impl ProjectDiagnosticsEditor { cx: &mut Context, ) -> Task> { let was_empty = self.multibuffer.read(cx).is_empty(); - let mut buffer_snapshot = buffer.read(cx).snapshot(); + let buffer_snapshot = buffer.read(cx).snapshot(); let buffer_id = buffer_snapshot.remote_id(); let max_severity = if self.include_warnings { @@ -602,7 +602,6 @@ impl ProjectDiagnosticsEditor { cx, ) .await; - buffer_snapshot = cx.update(|_, cx| buffer.read(cx).snapshot())?; let initial_range = buffer_snapshot.anchor_after(b.initial_range.start) ..buffer_snapshot.anchor_before(b.initial_range.end); let excerpt_range = ExcerptRange { @@ -1010,11 +1009,14 @@ async fn heuristic_syntactic_expand( snapshot: BufferSnapshot, cx: &mut AsyncApp, ) -> Option> { + let start = snapshot.clip_point(input_range.start, Bias::Right); + let end = snapshot.clip_point(input_range.end, Bias::Left); let input_row_count = input_range.end.row - input_range.start.row; if input_row_count > max_row_count { return None; } + let input_range = start..end; // If the outline node contains the diagnostic and is small enough, just use that. 
let outline_range = snapshot.outline_range_containing(input_range.clone()); if let Some(outline_range) = outline_range.clone() { diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 56f28d26642439f7869bad714a8b3191dd4edbec..926ceff202837d13fe14350ee0334cbf4036bd89 100644 --- a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -288,7 +288,6 @@ impl MultiBuffer { .get(&path) .cloned() .unwrap_or_default(); - let mut new_iter = new.into_iter().peekable(); let mut existing_iter = existing.into_iter().peekable(); @@ -413,14 +412,17 @@ impl MultiBuffer { // todo(lw): There is a logic bug somewhere that causes the to_remove vector to be not ordered correctly to_remove.sort_by_cached_key(|&id| snapshot.excerpt_locator_for_id(id)); self.remove_excerpts(to_remove, cx); + if excerpt_ids.is_empty() { self.excerpts_by_path.remove(&path); } else { for excerpt_id in &excerpt_ids { self.paths_by_excerpt.insert(*excerpt_id, path.clone()); } - self.excerpts_by_path - .insert(path, excerpt_ids.iter().dedup().cloned().collect()); + let snapshot = &*self.snapshot.get_mut(); + let mut excerpt_ids: Vec<_> = excerpt_ids.iter().dedup().cloned().collect(); + excerpt_ids.sort_by_cached_key(|&id| snapshot.excerpt_locator_for_id(id)); + self.excerpts_by_path.insert(path, excerpt_ids); } (excerpt_ids, added_a_new_excerpt) From a39ba03bccff4eaf7e4fbc6496b2f4ee6faf7bb6 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Mon, 17 Nov 2025 16:44:59 -0700 Subject: [PATCH 0174/1030] Use metrics-id for sentry user id when we have it (#42931) This should make it easier to correlate Sentry reports with user reports and github issues (for users who have diagnostics enabled) Release Notes: - N/A --- Cargo.lock | 3 - crates/client/src/telemetry.rs | 9 +- .../telemetry_events/src/telemetry_events.rs | 63 ---- crates/zed/Cargo.toml | 3 - crates/zed/src/main.rs | 6 +- crates/zed/src/reliability.rs | 275 +++--------------- 6 files changed, 41 insertions(+), 318 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a68187705d129beee2384246a81a488a09cb6e6..9aa674cbb69aaa52df5466dda41d8b9c2d9be5b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21217,7 +21217,6 @@ dependencies = [ "audio", "auto_update", "auto_update_ui", - "backtrace", "bincode 1.3.3", "breadcrumbs", "call", @@ -21282,7 +21281,6 @@ dependencies = [ "mimalloc", "miniprofiler_ui", "nc", - "nix 0.29.0", "node_runtime", "notifications", "onboarding", @@ -21324,7 +21322,6 @@ dependencies = [ "task", "tasks_ui", "telemetry", - "telemetry_events", "terminal_view", "theme", "theme_extension", diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index 50bd4ace80341cd2616d5bc88d5ab2475e094b8e..018e81d3e3a9bc27670bf96f2de8896b16875b20 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -293,10 +293,11 @@ impl Telemetry { } pub fn metrics_enabled(self: &Arc) -> bool { - let state = self.state.lock(); - let enabled = state.settings.metrics; - drop(state); - enabled + self.state.lock().settings.metrics + } + + pub fn diagnostics_enabled(self: &Arc) -> bool { + self.state.lock().settings.diagnostics } pub fn set_authenticated_user_info( diff --git a/crates/telemetry_events/src/telemetry_events.rs b/crates/telemetry_events/src/telemetry_events.rs index 12d8d4c04b1da0b2483cc8bc60e1d94b5cbb9193..aeb6fe99b3d284f7f9599edf04966a4c9876cf42 100644 --- a/crates/telemetry_events/src/telemetry_events.rs +++ b/crates/telemetry_events/src/telemetry_events.rs @@ -124,66 +124,3 
@@ pub struct AssistantEventData { pub error_message: Option, pub language_name: Option, } - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct BacktraceFrame { - pub ip: usize, - pub symbol_addr: usize, - pub base: Option, - pub symbols: Vec, -} - -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct HangReport { - pub backtrace: Vec, - pub app_version: Option, - pub os_name: String, - pub os_version: Option, - pub architecture: String, - /// Identifier unique to each Zed installation (differs for stable, preview, dev) - pub installation_id: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct LocationData { - pub file: String, - pub line: u32, -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Panic { - /// The name of the thread that panicked - pub thread: String, - /// The panic message - pub payload: String, - /// The location of the panic (file, line number) - #[serde(skip_serializing_if = "Option::is_none")] - pub location_data: Option, - pub backtrace: Vec, - /// Zed version number - pub app_version: String, - /// The Git commit SHA that Zed was built at. - #[serde(skip_serializing_if = "Option::is_none")] - pub app_commit_sha: Option, - /// Zed release channel (stable, preview, dev) - pub release_channel: String, - pub target: Option, - pub os_name: String, - pub os_version: Option, - pub architecture: String, - /// The time the panic occurred (UNIX millisecond timestamp) - pub panicked_on: i64, - /// Identifier unique to each system Zed is installed on - #[serde(skip_serializing_if = "Option::is_none")] - pub system_id: Option, - /// Identifier unique to each Zed installation (differs for stable, preview, dev) - #[serde(skip_serializing_if = "Option::is_none")] - pub installation_id: Option, - /// Identifier unique to each Zed session (differs for each time you open Zed) - pub session_id: String, -} - -#[derive(Serialize, Deserialize)] -pub struct PanicRequest { - pub panic: Panic, -} diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 547855ec8f54ec8970a7470088bcc824a0a98148..ca81955e33b524fe27b8777566473e89a03a5558 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -29,7 +29,6 @@ assets.workspace = true audio.workspace = true auto_update.workspace = true auto_update_ui.workspace = true -backtrace = "0.3" bincode.workspace = true breadcrumbs.workspace = true call.workspace = true @@ -100,7 +99,6 @@ migrator.workspace = true miniprofiler_ui.workspace = true mimalloc = { version = "0.1", optional = true } nc.workspace = true -nix = { workspace = true, features = ["pthread", "signal"] } node_runtime.workspace = true notifications.workspace = true onboarding.workspace = true @@ -140,7 +138,6 @@ tab_switcher.workspace = true task.workspace = true tasks_ui.workspace = true telemetry.workspace = true -telemetry_events.workspace = true terminal_view.workspace = true theme.workspace = true theme_extension.workspace = true diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 422d8bbbf72a68668221ad3ee7ff2a9dc9947d45..9dba1b427d35db3e236e166b5a90984deca1747b 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -549,11 +549,7 @@ pub fn main() { auto_update::init(client.clone(), cx); dap_adapters::init(cx); auto_update_ui::init(cx); - reliability::init( - client.http_client(), - system_id.as_ref().map(|id| id.to_string()), - cx, - ); + reliability::init(client.clone(), cx); extension_host::init( extension_host_proxy.clone(), app_state.fs.clone(), diff --git 
a/crates/zed/src/reliability.rs b/crates/zed/src/reliability.rs index 5a45c0c558389cd9d2aff02efaed5753bdb5d1f2..9d2f7f5da021cda38cef5a205f2d2ec77eb2b386 100644 --- a/crates/zed/src/reliability.rs +++ b/crates/zed/src/reliability.rs @@ -1,46 +1,42 @@ use anyhow::{Context as _, Result}; -use client::{TelemetrySettings, telemetry::MINIDUMP_ENDPOINT}; +use client::{Client, telemetry::MINIDUMP_ENDPOINT}; use futures::AsyncReadExt; use gpui::{App, AppContext as _, SerializedThreadTaskTimings}; -use http_client::{self, HttpClient, HttpClientWithUrl}; +use http_client::{self, HttpClient}; use log::info; use project::Project; use proto::{CrashReport, GetCrashFilesResponse}; use reqwest::multipart::{Form, Part}; -use settings::Settings; use smol::stream::StreamExt; use std::{ffi::OsStr, fs, sync::Arc, thread::ThreadId, time::Duration}; use util::ResultExt; use crate::STARTUP_TIME; -pub fn init(http_client: Arc, installation_id: Option, cx: &mut App) { +pub fn init(client: Arc, cx: &mut App) { monitor_hangs(cx); - #[cfg(target_os = "macos")] - monitor_main_thread_hangs(http_client.clone(), installation_id.clone(), cx); - - if client::TelemetrySettings::get_global(cx).diagnostics { - let client = http_client.clone(); - let id = installation_id.clone(); + if client.telemetry().diagnostics_enabled() { + let client = client.clone(); cx.background_spawn(async move { - upload_previous_minidumps(client, id).await.warn_on_err(); + upload_previous_minidumps(client).await.warn_on_err(); }) .detach() } cx.observe_new(move |project: &mut Project, _, cx| { - let http_client = http_client.clone(); - let installation_id = installation_id.clone(); + let client = client.clone(); let Some(remote_client) = project.remote_client() else { return; }; - remote_client.update(cx, |client, cx| { - if !TelemetrySettings::get_global(cx).diagnostics { + remote_client.update(cx, |remote_client, cx| { + if !client.telemetry().diagnostics_enabled() { return; } - let request = client.proto_client().request(proto::GetCrashFiles {}); + let request = remote_client + .proto_client() + .request(proto::GetCrashFiles {}); cx.background_spawn(async move { let GetCrashFilesResponse { crashes } = request.await?; @@ -53,15 +49,9 @@ pub fn init(http_client: Arc, installation_id: Option } in crashes { if let Some(metadata) = serde_json::from_str(&metadata).log_err() { - upload_minidump( - http_client.clone(), - endpoint, - minidump_contents, - &metadata, - installation_id.clone(), - ) - .await - .log_err(); + upload_minidump(client.clone(), endpoint, minidump_contents, &metadata) + .await + .log_err(); } } @@ -73,210 +63,6 @@ pub fn init(http_client: Arc, installation_id: Option .detach(); } -#[cfg(target_os = "macos")] -pub fn monitor_main_thread_hangs( - http_client: Arc, - installation_id: Option, - cx: &App, -) { - // This is too noisy to ship to stable for now. 
- if !matches!( - ReleaseChannel::global(cx), - ReleaseChannel::Dev | ReleaseChannel::Nightly | ReleaseChannel::Preview - ) { - return; - } - - use nix::sys::signal::{ - SaFlags, SigAction, SigHandler, SigSet, - Signal::{self, SIGUSR2}, - sigaction, - }; - - use parking_lot::Mutex; - - use http_client::Method; - use release_channel::ReleaseChannel; - use std::{ - ffi::c_int, - sync::{OnceLock, mpsc}, - time::Duration, - }; - use telemetry_events::{BacktraceFrame, HangReport}; - - use nix::sys::pthread; - - let foreground_executor = cx.foreground_executor(); - let background_executor = cx.background_executor(); - let telemetry_settings = *client::TelemetrySettings::get_global(cx); - - // Initialize SIGUSR2 handler to send a backtrace to a channel. - let (backtrace_tx, backtrace_rx) = mpsc::channel(); - static BACKTRACE: Mutex> = Mutex::new(Vec::new()); - static BACKTRACE_SENDER: OnceLock> = OnceLock::new(); - BACKTRACE_SENDER.get_or_init(|| backtrace_tx); - BACKTRACE.lock().reserve(100); - - fn handle_backtrace_signal() { - unsafe { - extern "C" fn handle_sigusr2(_i: c_int) { - unsafe { - // ASYNC SIGNAL SAFETY: This lock is only accessed one other time, - // which can only be triggered by This signal handler. In addition, - // this signal handler is immediately removed by SA_RESETHAND, and this - // signal handler cannot be re-entrant due to the SIGUSR2 mask defined - // below - let mut bt = BACKTRACE.lock(); - bt.clear(); - backtrace::trace_unsynchronized(|frame| { - if bt.len() < bt.capacity() { - bt.push(frame.clone()); - true - } else { - false - } - }); - } - - BACKTRACE_SENDER.get().unwrap().send(()).ok(); - } - - let mut mask = SigSet::empty(); - mask.add(SIGUSR2); - sigaction( - Signal::SIGUSR2, - &SigAction::new( - SigHandler::Handler(handle_sigusr2), - SaFlags::SA_RESTART | SaFlags::SA_RESETHAND, - mask, - ), - ) - .log_err(); - } - } - - handle_backtrace_signal(); - let main_thread = pthread::pthread_self(); - - let (mut tx, mut rx) = futures::channel::mpsc::channel(3); - foreground_executor - .spawn(async move { while (rx.next().await).is_some() {} }) - .detach(); - - background_executor - .spawn({ - let background_executor = background_executor.clone(); - async move { - loop { - background_executor.timer(Duration::from_secs(1)).await; - match tx.try_send(()) { - Ok(_) => continue, - Err(e) => { - if e.into_send_error().is_full() { - pthread::pthread_kill(main_thread, SIGUSR2).log_err(); - } - // Only detect the first hang - break; - } - } - } - } - }) - .detach(); - - let app_version = release_channel::AppVersion::global(cx); - let os_name = client::telemetry::os_name(); - - background_executor - .clone() - .spawn(async move { - let os_version = client::telemetry::os_version(); - - loop { - while backtrace_rx.recv().is_ok() { - if !telemetry_settings.diagnostics { - return; - } - - // ASYNC SIGNAL SAFETY: This lock is only accessed _after_ - // the backtrace transmitter has fired, which itself is only done - // by the signal handler. And due to SA_RESETHAND the signal handler - // will not run again until `handle_backtrace_signal` is called. 
- let raw_backtrace = BACKTRACE.lock().drain(..).collect::>(); - let backtrace: Vec<_> = raw_backtrace - .into_iter() - .map(|frame| { - let mut btf = BacktraceFrame { - ip: frame.ip() as usize, - symbol_addr: frame.symbol_address() as usize, - base: frame.module_base_address().map(|addr| addr as usize), - symbols: vec![], - }; - - backtrace::resolve_frame(&frame, |symbol| { - if let Some(name) = symbol.name() { - btf.symbols.push(name.to_string()); - } - }); - - btf - }) - .collect(); - - // IMPORTANT: Don't move this to before `BACKTRACE.lock()` - handle_backtrace_signal(); - - log::error!( - "Suspected hang on main thread:\n{}", - backtrace - .iter() - .flat_map(|bt| bt.symbols.first().as_ref().map(|s| s.as_str())) - .collect::>() - .join("\n") - ); - - let report = HangReport { - backtrace, - app_version: Some(app_version), - os_name: os_name.clone(), - os_version: Some(os_version.clone()), - architecture: std::env::consts::ARCH.into(), - installation_id: installation_id.clone(), - }; - - let Some(json_bytes) = serde_json::to_vec(&report).log_err() else { - continue; - }; - - let Some(checksum) = client::telemetry::calculate_json_checksum(&json_bytes) - else { - continue; - }; - - let Ok(url) = http_client.build_zed_api_url("/telemetry/hangs", &[]) else { - continue; - }; - - let Ok(request) = http_client::Request::builder() - .method(Method::POST) - .uri(url.as_ref()) - .header("x-zed-checksum", checksum) - .body(json_bytes.into()) - else { - continue; - }; - - if let Some(response) = http_client.send(request).await.log_err() - && response.status() != 200 - { - log::error!("Failed to send hang report: HTTP {:?}", response.status()); - } - } - } - }) - .detach() -} - fn monitor_hangs(cx: &App) { let main_thread_id = std::thread::current().id(); @@ -365,10 +151,7 @@ fn save_hang_trace( ); } -pub async fn upload_previous_minidumps( - http: Arc, - installation_id: Option, -) -> anyhow::Result<()> { +pub async fn upload_previous_minidumps(client: Arc) -> anyhow::Result<()> { let Some(minidump_endpoint) = MINIDUMP_ENDPOINT.as_ref() else { log::warn!("Minidump endpoint not set"); return Ok(()); @@ -385,13 +168,12 @@ pub async fn upload_previous_minidumps( json_path.set_extension("json"); if let Ok(metadata) = serde_json::from_slice(&smol::fs::read(&json_path).await?) 
&& upload_minidump( - http.clone(), + client.clone(), minidump_endpoint, smol::fs::read(&child_path) .await .context("Failed to read minidump")?, &metadata, - installation_id.clone(), ) .await .log_err() @@ -405,11 +187,10 @@ pub async fn upload_previous_minidumps( } async fn upload_minidump( - http: Arc, + client: Arc, endpoint: &str, minidump: Vec, metadata: &crashes::CrashInfo, - installation_id: Option, ) -> Result<()> { let mut form = Form::new() .part( @@ -436,8 +217,19 @@ async fn upload_minidump( if let Some(minidump_error) = metadata.minidump_error.clone() { form = form.text("minidump_error", minidump_error); } - if let Some(id) = installation_id.clone() { - form = form.text("sentry[user][id]", id) + + if let Some(id) = client.telemetry().metrics_id() { + form = form.text("sentry[user][id]", id.to_string()); + form = form.text( + "sentry[user][is_staff]", + if client.telemetry().is_staff().unwrap_or_default() { + "true" + } else { + "false" + }, + ); + } else if let Some(id) = client.telemetry().installation_id() { + form = form.text("sentry[user][id]", format!("installation-{}", id)) } ::telemetry::event!( @@ -505,7 +297,10 @@ async fn upload_minidump( // TODO: feature-flag-context, and more of device-context like screen resolution, available ram, device model, etc let mut response_text = String::new(); - let mut response = http.send_multipart_form(endpoint, form).await?; + let mut response = client + .http_client() + .send_multipart_form(endpoint, form) + .await?; response .body_mut() .read_to_string(&mut response_text) From 0386f240a9f3ab463f91ab1fec3cba92dd77d097 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Mon, 17 Nov 2025 22:00:26 -0300 Subject: [PATCH 0175/1030] Add experimental Sweep edit prediction provider (#42927) Only for staff Release Notes: - N/A --------- Co-authored-by: Max Brunsfeld Co-authored-by: Ben Kunkle --- Cargo.lock | 29 + Cargo.toml | 3 + assets/icons/sweep_ai.svg | 1 + crates/agent_ui/src/agent_ui.rs | 4 +- crates/edit_prediction_button/Cargo.toml | 3 +- .../src/edit_prediction_button.rs | 18 +- crates/icons/src/icons.rs | 1 + crates/reqwest_client/src/reqwest_client.rs | 22 +- .../settings/src/settings_content/language.rs | 50 +- crates/sweep_ai/Cargo.toml | 43 + crates/sweep_ai/LICENSE-GPL | 1 + crates/sweep_ai/src/api.rs | 90 ++ crates/sweep_ai/src/sweep_ai.rs | 776 ++++++++++++++++++ crates/zed/Cargo.toml | 1 + .../zed/src/zed/edit_prediction_registry.rs | 35 +- 15 files changed, 1056 insertions(+), 21 deletions(-) create mode 100644 assets/icons/sweep_ai.svg create mode 100644 crates/sweep_ai/Cargo.toml create mode 120000 crates/sweep_ai/LICENSE-GPL create mode 100644 crates/sweep_ai/src/api.rs create mode 100644 crates/sweep_ai/src/sweep_ai.rs diff --git a/Cargo.lock b/Cargo.lock index 9aa674cbb69aaa52df5466dda41d8b9c2d9be5b1..c0f6ef03c296306a73264461a8767ccd1b346c20 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5314,6 +5314,7 @@ dependencies = [ "serde_json", "settings", "supermaven", + "sweep_ai", "telemetry", "theme", "ui", @@ -16590,6 +16591,33 @@ dependencies = [ "zeno", ] +[[package]] +name = "sweep_ai" +version = "0.1.0" +dependencies = [ + "anyhow", + "arrayvec", + "brotli", + "client", + "collections", + "edit_prediction", + "feature_flags", + "futures 0.3.31", + "gpui", + "http_client", + "indoc", + "language", + "project", + "release_channel", + "reqwest_client", + "serde", + "serde_json", + "tree-sitter-rust", + "util", + "workspace", + "zlog", +] + [[package]] name = "symphonia" version = "0.5.5" @@ -21316,6 +21344,7 @@ 
dependencies = [ "snippets_ui", "supermaven", "svg_preview", + "sweep_ai", "sysinfo 0.37.2", "system_specs", "tab_switcher", diff --git a/Cargo.toml b/Cargo.toml index be56964f753cded4b1e054583b989f798c3ca1e3..e74647c6320f149d8eadad08ff3624859fe76624 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,6 +165,7 @@ members = [ "crates/sum_tree", "crates/supermaven", "crates/supermaven_api", + "crates/sweep_ai", "crates/codestral", "crates/svg_preview", "crates/system_specs", @@ -398,6 +399,7 @@ streaming_diff = { path = "crates/streaming_diff" } sum_tree = { path = "crates/sum_tree" } supermaven = { path = "crates/supermaven" } supermaven_api = { path = "crates/supermaven_api" } +sweep_ai = { path = "crates/sweep_ai" } codestral = { path = "crates/codestral" } system_specs = { path = "crates/system_specs" } tab_switcher = { path = "crates/tab_switcher" } @@ -478,6 +480,7 @@ bitflags = "2.6.0" blade-graphics = { version = "0.7.0" } blade-macros = { version = "0.3.0" } blade-util = { version = "0.3.0" } +brotli = "8.0.2" bytes = "1.0" cargo_metadata = "0.19" cargo_toml = "0.21" diff --git a/assets/icons/sweep_ai.svg b/assets/icons/sweep_ai.svg new file mode 100644 index 0000000000000000000000000000000000000000..c78d12727d78ddcc2a86bdb3e46349752cadaf7d --- /dev/null +++ b/assets/icons/sweep_ai.svg @@ -0,0 +1 @@ + diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index da1543a2790599fbe590f4e29d3594588bd2f351..6396b68cbc5f805466618bd460f9ed46ce05d086 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -346,7 +346,9 @@ fn update_command_palette_filter(cx: &mut App) { filter.show_namespace("supermaven"); filter.show_action_types(edit_prediction_actions.iter()); } - EditPredictionProvider::Zed | EditPredictionProvider::Codestral => { + EditPredictionProvider::Zed + | EditPredictionProvider::Codestral + | EditPredictionProvider::Experimental(_) => { filter.show_namespace("edit_prediction"); filter.hide_namespace("copilot"); filter.hide_namespace("supermaven"); diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 189db7f7bac3eaea36a154424c4e7702f1387d24..3ed3d9411510ad2d978b221d8cb3412465a66879 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -18,18 +18,19 @@ client.workspace = true cloud_llm_client.workspace = true codestral.workspace = true copilot.workspace = true +edit_prediction.workspace = true editor.workspace = true feature_flags.workspace = true fs.workspace = true gpui.workspace = true indoc.workspace = true -edit_prediction.workspace = true language.workspace = true paths.workspace = true project.workspace = true regex.workspace = true settings.workspace = true supermaven.workspace = true +sweep_ai.workspace = true telemetry.workspace = true ui.workspace = true workspace.workspace = true diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 685d408205863e5ad110a5e57891c0695f998cfb..51f228db76aaee5e286dd950c17dd01b303d29b8 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -18,12 +18,15 @@ use language::{ }; use project::DisableAiSettings; use regex::Regex; -use settings::{Settings, SettingsStore, update_settings_file}; +use settings::{ + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, Settings, SettingsStore, update_settings_file, +}; use std::{ 
sync::{Arc, LazyLock}, time::Duration, }; use supermaven::{AccountStatus, Supermaven}; +use sweep_ai::SweepFeatureFlag; use ui::{ Clickable, ContextMenu, ContextMenuEntry, DocumentationEdge, DocumentationSide, IconButton, IconButtonShape, Indicator, PopoverMenu, PopoverMenuHandle, ProgressBar, Tooltip, prelude::*, @@ -78,7 +81,7 @@ impl Render for EditPredictionButton { let all_language_settings = all_language_settings(None, cx); - match all_language_settings.edit_predictions.provider { + match &all_language_settings.edit_predictions.provider { EditPredictionProvider::None => div().hidden(), EditPredictionProvider::Copilot => { @@ -297,6 +300,15 @@ impl Render for EditPredictionButton { .with_handle(self.popover_menu_handle.clone()), ) } + EditPredictionProvider::Experimental(provider_name) => { + if *provider_name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() + { + div().child(Icon::new(IconName::SweepAi)) + } else { + div() + } + } EditPredictionProvider::Zed => { let enabled = self.editor_enabled.unwrap_or(true); @@ -525,7 +537,7 @@ impl EditPredictionButton { set_completion_provider(fs.clone(), cx, provider); }) } - EditPredictionProvider::None => continue, + EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => continue, }; } } diff --git a/crates/icons/src/icons.rs b/crates/icons/src/icons.rs index a0865773ac394722c113a43fe323de218b2f145a..da3b298751d9c1921d14722490e3cbc680292099 100644 --- a/crates/icons/src/icons.rs +++ b/crates/icons/src/icons.rs @@ -217,6 +217,7 @@ pub enum IconName { SupermavenError, SupermavenInit, SwatchBook, + SweepAi, Tab, Terminal, TerminalAlt, diff --git a/crates/reqwest_client/src/reqwest_client.rs b/crates/reqwest_client/src/reqwest_client.rs index c2a58877b32ab6049edc5b50f7ad025f0c83f46e..4213a239ec813f255139a97770a74608371fb73e 100644 --- a/crates/reqwest_client/src/reqwest_client.rs +++ b/crates/reqwest_client/src/reqwest_client.rs @@ -80,20 +80,22 @@ impl ReqwestClient { } } +pub fn runtime() -> &'static tokio::runtime::Runtime { + RUNTIME.get_or_init(|| { + tokio::runtime::Builder::new_multi_thread() + // Since we now have two executors, let's try to keep our footprint small + .worker_threads(1) + .enable_all() + .build() + .expect("Failed to initialize HTTP client") + }) +} + impl From for ReqwestClient { fn from(client: reqwest::Client) -> Self { let handle = tokio::runtime::Handle::try_current().unwrap_or_else(|_| { log::debug!("no tokio runtime found, creating one for Reqwest..."); - let runtime = RUNTIME.get_or_init(|| { - tokio::runtime::Builder::new_multi_thread() - // Since we now have two executors, let's try to keep our footprint small - .worker_threads(1) - .enable_all() - .build() - .expect("Failed to initialize HTTP client") - }); - - runtime.handle().clone() + runtime().handle().clone() }); Self { client, diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index fc11dd4956a50906951af8fa43a7dacc61568f70..ed70116862bbda6af22d4027a406535ae0c19d67 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -3,7 +3,7 @@ use std::num::NonZeroU32; use collections::{HashMap, HashSet}; use gpui::{Modifiers, SharedString}; use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Serialize, de::Error as _}; use serde_with::skip_serializing_none; use settings_macros::MergeFrom; use std::sync::Arc; @@ -68,9 +68,7 @@ pub struct FeaturesContent { } /// 
The provider that supplies edit predictions. -#[derive( - Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom, -)] +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub enum EditPredictionProvider { None, @@ -79,6 +77,47 @@ pub enum EditPredictionProvider { Supermaven, Zed, Codestral, + Experimental(&'static str), +} + +pub const EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME: &str = "sweep"; + +impl<'de> Deserialize<'de> for EditPredictionProvider { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(rename_all = "snake_case")] + pub enum Content { + None, + Copilot, + Supermaven, + Zed, + Codestral, + Experimental(String), + } + + Ok(match Content::deserialize(deserializer)? { + Content::None => EditPredictionProvider::None, + Content::Copilot => EditPredictionProvider::Copilot, + Content::Supermaven => EditPredictionProvider::Supermaven, + Content::Zed => EditPredictionProvider::Zed, + Content::Codestral => EditPredictionProvider::Codestral, + Content::Experimental(name) => { + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME { + EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + ) + } else { + return Err(D::Error::custom(format!( + "Unknown experimental edit prediction provider: {}", + name + ))); + } + } + }) + } } impl EditPredictionProvider { @@ -88,7 +127,8 @@ impl EditPredictionProvider { EditPredictionProvider::None | EditPredictionProvider::Copilot | EditPredictionProvider::Supermaven - | EditPredictionProvider::Codestral => false, + | EditPredictionProvider::Codestral + | EditPredictionProvider::Experimental(_) => false, } } } diff --git a/crates/sweep_ai/Cargo.toml b/crates/sweep_ai/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..4edf7ea1bb6af9a6657ccfe310c0253b118ec2e7 --- /dev/null +++ b/crates/sweep_ai/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "sweep_ai" +version = "0.1.0" +edition.workspace = true +publish.workspace = true +license = "GPL-3.0-or-later" +exclude = ["fixtures"] + +[lints] +workspace = true + +[lib] +path = "src/sweep_ai.rs" +doctest = false + +[dependencies] +anyhow.workspace = true +arrayvec.workspace = true +brotli.workspace = true +client.workspace = true +collections.workspace = true +edit_prediction.workspace = true +feature_flags.workspace = true +futures.workspace = true +gpui.workspace = true +http_client.workspace = true +language.workspace = true +project.workspace = true +release_channel.workspace = true +serde.workspace = true +serde_json.workspace = true +util.workspace = true +workspace.workspace = true + +[dev-dependencies] +gpui = { workspace = true, features = ["test-support"] } +http_client = { workspace = true, features = ["test-support"] } +indoc.workspace = true +language = { workspace = true, features = ["test-support"] } +reqwest_client = { workspace = true, features = ["test-support"] } +tree-sitter-rust.workspace = true +workspace = { workspace = true, features = ["test-support"] } +zlog.workspace = true diff --git a/crates/sweep_ai/LICENSE-GPL b/crates/sweep_ai/LICENSE-GPL new file mode 120000 index 0000000000000000000000000000000000000000..89e542f750cd3860a0598eff0dc34b56d7336dc4 --- /dev/null +++ b/crates/sweep_ai/LICENSE-GPL @@ -0,0 +1 @@ +../../LICENSE-GPL \ No newline at end of file diff --git a/crates/sweep_ai/src/api.rs b/crates/sweep_ai/src/api.rs new file 
mode 100644 index 0000000000000000000000000000000000000000..edb392885e476e3924d285613af1f0a4e8be8599 --- /dev/null +++ b/crates/sweep_ai/src/api.rs @@ -0,0 +1,90 @@ +use std::{path::Path, sync::Arc}; + +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize)] +pub struct AutocompleteRequest { + pub debug_info: Arc, + pub repo_name: String, + pub branch: Option, + pub file_path: Arc, + pub file_contents: String, + pub recent_changes: String, + pub cursor_position: usize, + pub original_file_contents: String, + pub file_chunks: Vec, + pub retrieval_chunks: Vec, + pub recent_user_actions: Vec, + pub multiple_suggestions: bool, + pub privacy_mode_enabled: bool, + pub changes_above_cursor: bool, +} + +#[derive(Debug, Clone, Serialize)] +pub struct FileChunk { + pub file_path: String, + pub start_line: usize, + pub end_line: usize, + pub content: String, + pub timestamp: Option, +} + +#[derive(Debug, Clone, Serialize)] +pub struct RetrievalChunk { + pub file_path: String, + pub start_line: usize, + pub end_line: usize, + pub content: String, + pub timestamp: u64, +} + +#[derive(Debug, Clone, Serialize)] +pub struct UserAction { + pub action_type: ActionType, + pub line_number: usize, + pub offset: usize, + pub file_path: String, + pub timestamp: u64, +} + +#[allow(dead_code)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum ActionType { + CursorMovement, + InsertChar, + DeleteChar, + InsertSelection, + DeleteSelection, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct AutocompleteResponse { + pub autocomplete_id: String, + pub start_index: usize, + pub end_index: usize, + pub completion: String, + #[allow(dead_code)] + pub confidence: f64, + #[allow(dead_code)] + pub logprobs: Option, + #[allow(dead_code)] + pub finish_reason: Option, + #[allow(dead_code)] + pub elapsed_time_ms: u64, + #[allow(dead_code)] + #[serde(default, rename = "completions")] + pub additional_completions: Vec, +} + +#[allow(dead_code)] +#[derive(Debug, Clone, Deserialize)] +pub struct AdditionalCompletion { + pub start_index: usize, + pub end_index: usize, + pub completion: String, + pub confidence: f64, + pub autocomplete_id: String, + pub logprobs: Option, + pub finish_reason: Option, +} diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs new file mode 100644 index 0000000000000000000000000000000000000000..e8a2522c0b34896ad09fd8a8d346e2ba31c9a1e7 --- /dev/null +++ b/crates/sweep_ai/src/sweep_ai.rs @@ -0,0 +1,776 @@ +mod api; + +use anyhow::{Context as _, Result}; +use arrayvec::ArrayVec; +use client::telemetry; +use collections::HashMap; +use feature_flags::FeatureFlag; +use futures::AsyncReadExt as _; +use gpui::{App, AppContext, Context, Entity, EntityId, Global, Task, WeakEntity}; +use http_client::{AsyncBody, Method}; +use language::{Anchor, Buffer, BufferSnapshot, EditPreview, ToOffset as _, ToPoint, text_diff}; +use project::Project; +use release_channel::{AppCommitSha, AppVersion}; +use std::collections::{VecDeque, hash_map}; +use std::fmt::{self, Display}; +use std::mem; +use std::{ + cmp, + fmt::Write, + ops::Range, + path::Path, + sync::Arc, + time::{Duration, Instant}, +}; +use util::ResultExt; +use util::rel_path::RelPath; +use workspace::Workspace; + +use crate::api::{AutocompleteRequest, AutocompleteResponse, FileChunk}; + +const BUFFER_CHANGE_GROUPING_INTERVAL: Duration = Duration::from_secs(1); +const MAX_EVENT_COUNT: usize = 16; + +const SWEEP_API_URL: &str = 
"https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + +pub struct SweepFeatureFlag; + +impl FeatureFlag for SweepFeatureFlag { + const NAME: &str = "sweep-ai"; + + fn enabled_for_staff() -> bool { + false + } +} + +#[derive(Clone)] +struct SweepAiGlobal(Entity); + +impl Global for SweepAiGlobal {} + +#[derive(Clone)] +pub struct EditPrediction { + id: EditPredictionId, + path: Arc, + edits: Arc<[(Range, Arc)]>, + snapshot: BufferSnapshot, + edit_preview: EditPreview, +} + +impl EditPrediction { + fn interpolate(&self, new_snapshot: &BufferSnapshot) -> Option, Arc)>> { + edit_prediction::interpolate_edits(&self.snapshot, new_snapshot, &self.edits) + } +} + +impl fmt::Debug for EditPrediction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("EditPrediction") + .field("path", &self.path) + .field("edits", &self.edits) + .finish_non_exhaustive() + } +} + +#[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] +pub struct EditPredictionId(String); + +impl Display for EditPredictionId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +pub struct SweepAi { + projects: HashMap, + debug_info: Arc, +} + +struct SweepAiProject { + events: VecDeque, + registered_buffers: HashMap, +} + +impl SweepAi { + pub fn global(cx: &mut App) -> Option> { + cx.try_global::() + .map(|global| global.0.clone()) + } + + pub fn register(cx: &mut App) -> Entity { + Self::global(cx).unwrap_or_else(|| { + let entity = cx.new(|cx| Self::new(cx)); + cx.set_global(SweepAiGlobal(entity.clone())); + entity + }) + } + + pub fn clear_history(&mut self) { + for sweep_ai_project in self.projects.values_mut() { + sweep_ai_project.events.clear(); + } + } + + fn new(cx: &mut Context) -> Self { + Self { + projects: HashMap::default(), + debug_info: format!( + "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", + version = AppVersion::global(cx), + sha = AppCommitSha::try_global(cx).map_or("unknown".to_string(), |sha| sha.full()), + os = telemetry::os_name(), + ) + .into(), + } + } + + fn get_or_init_sweep_ai_project( + &mut self, + project: &Entity, + cx: &mut Context, + ) -> &mut SweepAiProject { + let project_id = project.entity_id(); + match self.projects.entry(project_id) { + hash_map::Entry::Occupied(entry) => entry.into_mut(), + hash_map::Entry::Vacant(entry) => { + cx.observe_release(project, move |this, _, _cx| { + this.projects.remove(&project_id); + }) + .detach(); + entry.insert(SweepAiProject { + events: VecDeque::with_capacity(MAX_EVENT_COUNT), + registered_buffers: HashMap::default(), + }) + } + } + } + + fn push_event(sweep_ai_project: &mut SweepAiProject, event: Event) { + let events = &mut sweep_ai_project.events; + + if let Some(Event::BufferChange { + new_snapshot: last_new_snapshot, + timestamp: last_timestamp, + .. + }) = events.back_mut() + { + // Coalesce edits for the same buffer when they happen one after the other. + let Event::BufferChange { + old_snapshot, + new_snapshot, + timestamp, + } = &event; + + if timestamp.duration_since(*last_timestamp) <= BUFFER_CHANGE_GROUPING_INTERVAL + && old_snapshot.remote_id() == last_new_snapshot.remote_id() + && old_snapshot.version == last_new_snapshot.version + { + *last_new_snapshot = new_snapshot.clone(); + *last_timestamp = *timestamp; + return; + } + } + + if events.len() >= MAX_EVENT_COUNT { + // These are halved instead of popping to improve prompt caching. 
+ events.drain(..MAX_EVENT_COUNT / 2); + } + + events.push_back(event); + } + + pub fn register_buffer( + &mut self, + buffer: &Entity, + project: &Entity, + cx: &mut Context, + ) { + let sweep_ai_project = self.get_or_init_sweep_ai_project(project, cx); + Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); + } + + fn register_buffer_impl<'a>( + sweep_ai_project: &'a mut SweepAiProject, + buffer: &Entity, + project: &Entity, + cx: &mut Context, + ) -> &'a mut RegisteredBuffer { + let buffer_id = buffer.entity_id(); + match sweep_ai_project.registered_buffers.entry(buffer_id) { + hash_map::Entry::Occupied(entry) => entry.into_mut(), + hash_map::Entry::Vacant(entry) => { + let snapshot = buffer.read(cx).snapshot(); + let project_entity_id = project.entity_id(); + entry.insert(RegisteredBuffer { + snapshot, + _subscriptions: [ + cx.subscribe(buffer, { + let project = project.downgrade(); + move |this, buffer, event, cx| { + if let language::BufferEvent::Edited = event + && let Some(project) = project.upgrade() + { + this.report_changes_for_buffer(&buffer, &project, cx); + } + } + }), + cx.observe_release(buffer, move |this, _buffer, _cx| { + let Some(sweep_ai_project) = this.projects.get_mut(&project_entity_id) + else { + return; + }; + sweep_ai_project.registered_buffers.remove(&buffer_id); + }), + ], + }) + } + } + } + + pub fn request_completion( + &mut self, + workspace: &WeakEntity, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, + cx: &mut Context, + ) -> Task>> { + let snapshot = active_buffer.read(cx).snapshot(); + let debug_info = self.debug_info.clone(); + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let project_state = self.get_or_init_sweep_ai_project(project, cx); + let events = project_state.events.clone(); + let http_client = cx.http_client(); + + let Some(recent_buffers) = workspace + .read_with(cx, |workspace, cx| { + workspace + .recent_navigation_history_iter(cx) + .filter_map(|(project_path, _)| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>() + }) + .log_err() + else { + return Task::ready(Ok(None)); + }; + + let result = cx.background_spawn({ + let full_path = full_path.clone(); + async move { + let text = snapshot.text(); + + let mut recent_changes = String::new(); + + for event in events { + writeln!(&mut recent_changes, "{event}")?; + } + + let file_chunks = recent_buffers + .into_iter() + .map(|snapshot| { + let end_point = language::Point::new(30, 0).min(snapshot.max_point()); + FileChunk { + content: snapshot + .text_for_range(language::Point::zero()..end_point) + .collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? 
+ .0, + ) + }), + } + }) + .collect(); + + let request_body = AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header( + "Authorization", + format!("Bearer {}", std::env::var("SWEEP_TOKEN").unwrap()), + ) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok((response.autocomplete_id, edits, snapshot)) + } + }); + + let buffer = active_buffer.clone(); + + cx.spawn(async move |_, cx| { + let (id, edits, old_snapshot) = result.await?; + + if edits.is_empty() { + return anyhow::Ok(None); + } + + let Some((edits, new_snapshot, preview_task)) = + buffer.read_with(cx, |buffer, cx| { + let new_snapshot = buffer.snapshot(); + + let edits: Arc<[(Range, Arc)]> = + edit_prediction::interpolate_edits(&old_snapshot, &new_snapshot, &edits)? + .into(); + let preview_task = buffer.preview_edits(edits.clone(), cx); + + Some((edits, new_snapshot, preview_task)) + })? 
+ else { + return anyhow::Ok(None); + }; + + let prediction = EditPrediction { + id: EditPredictionId(id), + path: full_path, + edits, + snapshot: new_snapshot, + edit_preview: preview_task.await, + }; + + anyhow::Ok(Some(prediction)) + }) + } + + fn report_changes_for_buffer( + &mut self, + buffer: &Entity, + project: &Entity, + cx: &mut Context, + ) -> BufferSnapshot { + let sweep_ai_project = self.get_or_init_sweep_ai_project(project, cx); + let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); + + let new_snapshot = buffer.read(cx).snapshot(); + if new_snapshot.version != registered_buffer.snapshot.version { + let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); + Self::push_event( + sweep_ai_project, + Event::BufferChange { + old_snapshot, + new_snapshot: new_snapshot.clone(), + timestamp: Instant::now(), + }, + ); + } + + new_snapshot + } +} + +struct RegisteredBuffer { + snapshot: BufferSnapshot, + _subscriptions: [gpui::Subscription; 2], +} + +#[derive(Clone)] +pub enum Event { + BufferChange { + old_snapshot: BufferSnapshot, + new_snapshot: BufferSnapshot, + timestamp: Instant, + }, +} + +impl Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Event::BufferChange { + old_snapshot, + new_snapshot, + .. + } => { + let old_path = old_snapshot + .file() + .map(|f| f.path().as_ref()) + .unwrap_or(RelPath::unix("untitled").unwrap()); + let new_path = new_snapshot + .file() + .map(|f| f.path().as_ref()) + .unwrap_or(RelPath::unix("untitled").unwrap()); + if old_path != new_path { + // TODO confirm how to do this for sweep + // writeln!(f, "User renamed {:?} to {:?}\n", old_path, new_path)?; + } + + let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); + if !diff.is_empty() { + write!( + f, + "File: {}:\n{}\n", + new_path.display(util::paths::PathStyle::Posix), + diff + )? 
+ } + + fmt::Result::Ok(()) + } + } + } +} + +#[derive(Debug, Clone)] +struct CurrentEditPrediction { + buffer_id: EntityId, + completion: EditPrediction, +} + +impl CurrentEditPrediction { + fn should_replace_completion(&self, old_completion: &Self, snapshot: &BufferSnapshot) -> bool { + if self.buffer_id != old_completion.buffer_id { + return true; + } + + let Some(old_edits) = old_completion.completion.interpolate(snapshot) else { + return true; + }; + let Some(new_edits) = self.completion.interpolate(snapshot) else { + return false; + }; + + if old_edits.len() == 1 && new_edits.len() == 1 { + let (old_range, old_text) = &old_edits[0]; + let (new_range, new_text) = &new_edits[0]; + new_range == old_range && new_text.starts_with(old_text.as_ref()) + } else { + true + } + } +} + +struct PendingCompletion { + id: usize, + _task: Task<()>, +} + +pub struct SweepAiEditPredictionProvider { + workspace: WeakEntity, + sweep_ai: Entity, + pending_completions: ArrayVec, + next_pending_completion_id: usize, + current_completion: Option, + last_request_timestamp: Instant, + project: Entity, +} + +impl SweepAiEditPredictionProvider { + pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + + pub fn new( + sweep_ai: Entity, + workspace: WeakEntity, + project: Entity, + ) -> Self { + Self { + sweep_ai, + pending_completions: ArrayVec::new(), + next_pending_completion_id: 0, + current_completion: None, + last_request_timestamp: Instant::now(), + project, + workspace, + } + } +} + +impl edit_prediction::EditPredictionProvider for SweepAiEditPredictionProvider { + fn name() -> &'static str { + "zed-predict" + } + + fn display_name() -> &'static str { + "Zed's Edit Predictions" + } + + fn show_completions_in_menu() -> bool { + true + } + + fn show_tab_accept_marker() -> bool { + true + } + + fn is_enabled( + &self, + _buffer: &Entity, + _cursor_position: language::Anchor, + _cx: &App, + ) -> bool { + true + } + + fn is_refreshing(&self) -> bool { + !self.pending_completions.is_empty() + } + + fn refresh( + &mut self, + buffer: Entity, + position: language::Anchor, + _debounce: bool, + cx: &mut Context, + ) { + if let Some(current_completion) = self.current_completion.as_ref() { + let snapshot = buffer.read(cx).snapshot(); + if current_completion + .completion + .interpolate(&snapshot) + .is_some() + { + return; + } + } + + let pending_completion_id = self.next_pending_completion_id; + self.next_pending_completion_id += 1; + let last_request_timestamp = self.last_request_timestamp; + + let project = self.project.clone(); + let workspace = self.workspace.clone(); + let task = cx.spawn(async move |this, cx| { + if let Some(timeout) = (last_request_timestamp + Self::THROTTLE_TIMEOUT) + .checked_duration_since(Instant::now()) + { + cx.background_executor().timer(timeout).await; + } + + let completion_request = this.update(cx, |this, cx| { + this.last_request_timestamp = Instant::now(); + this.sweep_ai.update(cx, |sweep_ai, cx| { + sweep_ai.request_completion(&workspace, &project, &buffer, position, cx) + }) + }); + + let completion = match completion_request { + Ok(completion_request) => { + let completion_request = completion_request.await; + completion_request.map(|c| { + c.map(|completion| CurrentEditPrediction { + buffer_id: buffer.entity_id(), + completion, + }) + }) + } + Err(error) => Err(error), + }; + + let Some(new_completion) = completion + .context("edit prediction failed") + .log_err() + .flatten() + else { + this.update(cx, |this, cx| { + if this.pending_completions[0].id == 
pending_completion_id { + this.pending_completions.remove(0); + } else { + this.pending_completions.clear(); + } + + cx.notify(); + }) + .ok(); + return; + }; + + this.update(cx, |this, cx| { + if this.pending_completions[0].id == pending_completion_id { + this.pending_completions.remove(0); + } else { + this.pending_completions.clear(); + } + + if let Some(old_completion) = this.current_completion.as_ref() { + let snapshot = buffer.read(cx).snapshot(); + if new_completion.should_replace_completion(old_completion, &snapshot) { + this.current_completion = Some(new_completion); + } + } else { + this.current_completion = Some(new_completion); + } + + cx.notify(); + }) + .ok(); + }); + + // We always maintain at most two pending completions. When we already + // have two, we replace the newest one. + if self.pending_completions.len() <= 1 { + self.pending_completions.push(PendingCompletion { + id: pending_completion_id, + _task: task, + }); + } else if self.pending_completions.len() == 2 { + self.pending_completions.pop(); + self.pending_completions.push(PendingCompletion { + id: pending_completion_id, + _task: task, + }); + } + } + + fn cycle( + &mut self, + _buffer: Entity, + _cursor_position: language::Anchor, + _direction: edit_prediction::Direction, + _cx: &mut Context, + ) { + // Right now we don't support cycling. + } + + fn accept(&mut self, _cx: &mut Context) { + self.pending_completions.clear(); + } + + fn discard(&mut self, _cx: &mut Context) { + self.pending_completions.clear(); + self.current_completion.take(); + } + + fn suggest( + &mut self, + buffer: &Entity, + cursor_position: language::Anchor, + cx: &mut Context, + ) -> Option { + let CurrentEditPrediction { + buffer_id, + completion, + .. + } = self.current_completion.as_mut()?; + + // Invalidate previous completion if it was generated for a different buffer. + if *buffer_id != buffer.entity_id() { + self.current_completion.take(); + return None; + } + + let buffer = buffer.read(cx); + let Some(edits) = completion.interpolate(&buffer.snapshot()) else { + self.current_completion.take(); + return None; + }; + + let cursor_row = cursor_position.to_point(buffer).row; + let (closest_edit_ix, (closest_edit_range, _)) = + edits.iter().enumerate().min_by_key(|(_, (range, _))| { + let distance_from_start = cursor_row.abs_diff(range.start.to_point(buffer).row); + let distance_from_end = cursor_row.abs_diff(range.end.to_point(buffer).row); + cmp::min(distance_from_start, distance_from_end) + })?; + + let mut edit_start_ix = closest_edit_ix; + for (range, _) in edits[..edit_start_ix].iter().rev() { + let distance_from_closest_edit = + closest_edit_range.start.to_point(buffer).row - range.end.to_point(buffer).row; + if distance_from_closest_edit <= 1 { + edit_start_ix -= 1; + } else { + break; + } + } + + let mut edit_end_ix = closest_edit_ix + 1; + for (range, _) in &edits[edit_end_ix..] 
{ + let distance_from_closest_edit = + range.start.to_point(buffer).row - closest_edit_range.end.to_point(buffer).row; + if distance_from_closest_edit <= 1 { + edit_end_ix += 1; + } else { + break; + } + } + + Some(edit_prediction::EditPrediction::Local { + id: Some(completion.id.to_string().into()), + edits: edits[edit_start_ix..edit_end_ix].to_vec(), + edit_preview: Some(completion.edit_preview.clone()), + }) + } +} diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index ca81955e33b524fe27b8777566473e89a03a5558..79892fefdd7776e2fd7f99cbfa6caf24bb174a4b 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -133,6 +133,7 @@ snippet_provider.workspace = true snippets_ui.workspace = true supermaven.workspace = true svg_preview.workspace = true +sweep_ai.workspace = true sysinfo.workspace = true tab_switcher.workspace = true task.workspace = true diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index 74b6687f62c641ce4076778efa4369a45529f4f9..1723ca91f143c8529e14e24e0bdd85dc7b1c14d4 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -7,9 +7,10 @@ use feature_flags::FeatureFlagAppExt; use gpui::{AnyWindowHandle, App, AppContext as _, Context, Entity, WeakEntity}; use language::language_settings::{EditPredictionProvider, all_language_settings}; use language_models::MistralLanguageModelProvider; -use settings::SettingsStore; +use settings::{EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore}; use std::{cell::RefCell, rc::Rc, sync::Arc}; use supermaven::{Supermaven, SupermavenCompletionProvider}; +use sweep_ai::{SweepAiEditPredictionProvider, SweepFeatureFlag}; use ui::Window; use zeta::ZetaEditPredictionProvider; use zeta2::Zeta2FeatureFlag; @@ -202,6 +203,38 @@ fn assign_edit_prediction_provider( let provider = cx.new(|_| CodestralCompletionProvider::new(http_client)); editor.set_edit_prediction_provider(Some(provider), window, cx); } + EditPredictionProvider::Experimental(name) => { + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() + { + if let Some(project) = editor.project() + && let Some(workspace) = editor.workspace() + { + let sweep_ai = sweep_ai::SweepAi::register(cx); + + if let Some(buffer) = &singleton_buffer + && buffer.read(cx).file().is_some() + { + sweep_ai.update(cx, |sweep_ai, cx| { + sweep_ai.register_buffer(buffer, project, cx); + }); + } + + let provider = cx.new(|_| { + sweep_ai::SweepAiEditPredictionProvider::new( + sweep_ai, + workspace.downgrade(), + project.clone(), + ) + }); + editor.set_edit_prediction_provider(Some(provider), window, cx); + } + } else { + editor.set_edit_prediction_provider::( + None, window, cx, + ); + } + } EditPredictionProvider::Zed => { if user_store.read(cx).current_user().is_some() { let mut worktree = None; From 10efbd5eb4258bb6d157e352bb1b5e7669ed6aca Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Tue, 18 Nov 2025 01:53:37 -0300 Subject: [PATCH 0176/1030] agent_ui: Show the "new thread" keybinding for the currently active agent (#42939) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR's goal is to improve discoverability of how Zed "remembers" the currently selected agent when hitting `cmd-n` (or `ctrl-n`). Hitting that binding starts a new thread with whatever agent is currently selected. 
In the example below, I am in a Claude Code thread and if I hit `cmd-n`, a new, fresh CC thread will be started: Screenshot 2025-11-18 at 1  13@2x Release Notes: - agent: Improved discoverability of the `cmd-n` keybinding to create a new thread with the currently selected agent. --- assets/keymaps/default-macos.json | 2 +- crates/agent_ui/src/agent_panel.rs | 50 ++++++++++++++++++++++++------ 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index 9d23eeb8cde071e20e5d3e4d7f873b1f668501b2..fe65a53aa70522ff48728d3eaded16ac3312f2e0 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -313,7 +313,7 @@ "use_key_equivalents": true, "bindings": { "cmd-n": "agent::NewTextThread", - "cmd-alt-t": "agent::NewThread" + "cmd-alt-n": "agent::NewExternalAgentThread" } }, { diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index d4138aa5bdf8f3731df7508ab8d6476455aca11b..dfc4d27e1153c61dc07c00a807c958f69db77b5a 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -1892,6 +1892,9 @@ impl AgentPanel { .anchor(Corner::TopRight) .with_handle(self.new_thread_menu_handle.clone()) .menu({ + let selected_agent = self.selected_agent.clone(); + let is_agent_selected = move |agent_type: AgentType| selected_agent == agent_type; + let workspace = self.workspace.clone(); let is_via_collab = workspace .update(cx, |workspace, cx| { @@ -1929,7 +1932,9 @@ impl AgentPanel { }) .item( ContextMenuEntry::new("Zed Agent") - .action(NewThread.boxed_clone()) + .when(is_agent_selected(AgentType::NativeAgent) | is_agent_selected(AgentType::TextThread) , |this| { + this.action(Box::new(NewExternalAgentThread { agent: None })) + }) .icon(IconName::ZedAgent) .icon_color(Color::Muted) .handler({ @@ -1955,9 +1960,9 @@ impl AgentPanel { ) .item( ContextMenuEntry::new("Text Thread") + .action(NewTextThread.boxed_clone()) .icon(IconName::TextThread) .icon_color(Color::Muted) - .action(NewTextThread.boxed_clone()) .handler({ let workspace = workspace.clone(); move |window, cx| { @@ -1983,6 +1988,9 @@ impl AgentPanel { .header("External Agents") .item( ContextMenuEntry::new("Claude Code") + .when(is_agent_selected(AgentType::ClaudeCode), |this| { + this.action(Box::new(NewExternalAgentThread { agent: None })) + }) .icon(IconName::AiClaude) .disabled(is_via_collab) .icon_color(Color::Muted) @@ -2009,6 +2017,9 @@ impl AgentPanel { ) .item( ContextMenuEntry::new("Codex CLI") + .when(is_agent_selected(AgentType::Codex), |this| { + this.action(Box::new(NewExternalAgentThread { agent: None })) + }) .icon(IconName::AiOpenAi) .disabled(is_via_collab) .icon_color(Color::Muted) @@ -2035,6 +2046,9 @@ impl AgentPanel { ) .item( ContextMenuEntry::new("Gemini CLI") + .when(is_agent_selected(AgentType::Gemini), |this| { + this.action(Box::new(NewExternalAgentThread { agent: None })) + }) .icon(IconName::AiGemini) .icon_color(Color::Muted) .disabled(is_via_collab) @@ -2060,8 +2074,8 @@ impl AgentPanel { }), ) .map(|mut menu| { - let agent_server_store_read = agent_server_store.read(cx); - let agent_names = agent_server_store_read + let agent_server_store = agent_server_store.read(cx); + let agent_names = agent_server_store .external_agents() .filter(|name| { name.0 != GEMINI_NAME @@ -2070,21 +2084,38 @@ impl AgentPanel { }) .cloned() .collect::>(); + let custom_settings = cx .global::() .get::(None) .custom .clone(); + for agent_name in agent_names { - let icon_path 
= agent_server_store_read.agent_icon(&agent_name); - let mut entry = - ContextMenuEntry::new(format!("{}", agent_name)); + let icon_path = agent_server_store.agent_icon(&agent_name); + + let mut entry = ContextMenuEntry::new(agent_name.clone()); + + let command = custom_settings + .get(&agent_name.0) + .map(|settings| settings.command.clone()) + .unwrap_or(placeholder_command()); + if let Some(icon_path) = icon_path { entry = entry.custom_icon_svg(icon_path); } else { entry = entry.icon(IconName::Terminal); } entry = entry + .when( + is_agent_selected(AgentType::Custom { + name: agent_name.0.clone(), + command: command.clone(), + }), + |this| { + this.action(Box::new(NewExternalAgentThread { agent: None })) + }, + ) .icon_color(Color::Muted) .disabled(is_via_collab) .handler({ @@ -2124,6 +2155,7 @@ impl AgentPanel { } } }); + menu = menu.item(entry); } @@ -2156,7 +2188,7 @@ impl AgentPanel { .id("selected_agent_icon") .when_some(selected_agent_custom_icon, |this, icon_path| { let label = selected_agent_label.clone(); - this.px(DynamicSpacing::Base02.rems(cx)) + this.px_1() .child(Icon::from_external_svg(icon_path).color(Color::Muted)) .tooltip(move |_window, cx| { Tooltip::with_meta(label.clone(), None, "Selected Agent", cx) @@ -2165,7 +2197,7 @@ impl AgentPanel { .when(!has_custom_icon, |this| { this.when_some(self.selected_agent.icon(), |this, icon| { let label = selected_agent_label.clone(); - this.px(DynamicSpacing::Base02.rems(cx)) + this.px_1() .child(Icon::new(icon).color(Color::Muted)) .tooltip(move |_window, cx| { Tooltip::with_meta(label.clone(), None, "Selected Agent", cx) From 5c70f8391f42a2d8dff25b214c183285a9c184ae Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 17 Nov 2025 22:23:14 -0800 Subject: [PATCH 0177/1030] Fix panic when using sweep AI without token env var (#42940) Release Notes: - N/A --- crates/sweep_ai/src/sweep_ai.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs index e8a2522c0b34896ad09fd8a8d346e2ba31c9a1e7..c5c0fe31dd8ff530a685b5074b28dac945e97c1c 100644 --- a/crates/sweep_ai/src/sweep_ai.rs +++ b/crates/sweep_ai/src/sweep_ai.rs @@ -84,6 +84,7 @@ impl Display for EditPredictionId { pub struct SweepAi { projects: HashMap, debug_info: Arc, + api_token: Option, } struct SweepAiProject { @@ -113,6 +114,7 @@ impl SweepAi { fn new(cx: &mut Context) -> Self { Self { + api_token: std::env::var("SWEEP_AI_TOKEN").ok(), projects: HashMap::default(), debug_info: format!( "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", @@ -237,6 +239,9 @@ impl SweepAi { ) -> Task>> { let snapshot = active_buffer.read(cx).snapshot(); let debug_info = self.debug_info.clone(); + let Some(api_token) = self.api_token.clone() else { + return Task::ready(Ok(None)); + }; let full_path: Arc = snapshot .file() .map(|file| file.full_path(cx)) @@ -339,10 +344,7 @@ impl SweepAi { let request = http_client::Request::builder() .uri(SWEEP_API_URL) .header("Content-Type", "application/json") - .header( - "Authorization", - format!("Bearer {}", std::env::var("SWEEP_TOKEN").unwrap()), - ) + .header("Authorization", format!("Bearer {}", api_token)) .header("Connection", "keep-alive") .header("Content-Encoding", "br") .method(Method::POST) @@ -579,9 +581,9 @@ impl edit_prediction::EditPredictionProvider for SweepAiEditPredictionProvider { &self, _buffer: &Entity, _cursor_position: language::Anchor, - _cx: &App, + cx: &App, ) -> bool { - true + self.sweep_ai.read(cx).api_token.is_some() } fn 
is_refreshing(&self) -> bool { From 5225a84affc55167645a2f31677bc5c9915e2844 Mon Sep 17 00:00:00 2001 From: aleanon <104034507+aleanon@users.noreply.github.com> Date: Tue, 18 Nov 2025 09:11:36 +0100 Subject: [PATCH 0178/1030] For and await highlighting rust (#42924) Closes #42922 Release Notes: - Fixed Correctly highlighting the 'for' keyword in Rust as keyword.control only in for loops. - Fixed Highlighting the 'await' keyword in Rust as keyword.control --- crates/languages/src/rust/highlights.scm | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/rust/highlights.scm b/crates/languages/src/rust/highlights.scm index ea3935257648b31f666ea64c7c302644ab3eb24e..1a08126b55d2084e39611a9e73cde83b6cfc9999 100644 --- a/crates/languages/src/rust/highlights.scm +++ b/crates/languages/src/rust/highlights.scm @@ -85,7 +85,6 @@ [ "as" "async" - "await" "const" "default" "dyn" @@ -102,6 +101,7 @@ "ref" "static" "struct" + "for" "trait" "type" "union" @@ -114,10 +114,10 @@ ] @keyword [ + "await" "break" "continue" "else" - "for" "if" "in" "loop" @@ -127,6 +127,9 @@ "yield" ] @keyword.control +(for_expression + ("for" @keyword.control)) + [ (string_literal) (raw_string_literal) From 2a3bcbfe0f948f4411b95ae459e826b0f906ed37 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 18 Nov 2025 11:13:32 +0200 Subject: [PATCH 0179/1030] Properly check chunk version on lsp store update (#42951) Release Notes: - N/A Co-authored-by: Lukas Wirth --- crates/project/src/lsp_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 53c1db7ad4aa2a0a98f5aba740133fbde823cf17..4c6ed0b7c535504de7ea63f8196e35553bd7d829 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -6732,6 +6732,7 @@ impl LspStore { let buffer_snapshot = buffer.read(cx).snapshot(); let next_hint_id = self.next_hint_id.clone(); let lsp_data = self.latest_lsp_data(&buffer, cx); + let query_version = lsp_data.buffer_version.clone(); let mut lsp_refresh_requested = false; let for_server = if let InvalidationStrategy::RefreshRequested { server_id, @@ -6834,6 +6835,7 @@ impl LspStore { for (chunk, range_to_query) in ranges_to_query.into_iter().flatten() { let next_hint_id = next_hint_id.clone(); let buffer = buffer.clone(); + let query_version = query_version.clone(); let new_inlay_hints = cx .spawn(async move |lsp_store, cx| { let new_fetch_task = lsp_store.update(cx, |lsp_store, cx| { @@ -6844,9 +6846,7 @@ impl LspStore { .and_then(|new_hints_by_server| { lsp_store.update(cx, |lsp_store, cx| { let lsp_data = lsp_store.latest_lsp_data(&buffer, cx); - let update_cache = !lsp_data - .buffer_version - .changed_since(&buffer.read(cx).version()); + let update_cache = lsp_data.buffer_version == query_version; if new_hints_by_server.is_empty() { if update_cache { lsp_data.inlay_hints.invalidate_for_chunk(chunk); From 980f8bff2aa011dcb0f595db2da33ed1a5be190d Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Tue, 18 Nov 2025 10:15:09 +0100 Subject: [PATCH 0180/1030] Add a github issue label to shoo the stalebot away (#42950) Labeling an issue with "never stale" will keep the stalebot away; the bot can get annoying in some situations otherwise. 
Release Notes: - N/A --- .github/workflows/community_close_stale_issues.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/community_close_stale_issues.yml b/.github/workflows/community_close_stale_issues.yml index faa42669d0206965ba99ab683fea7165ef351c8f..f1359b72a6afe6008472ec3d59d90091b6779a59 100644 --- a/.github/workflows/community_close_stale_issues.yml +++ b/.github/workflows/community_close_stale_issues.yml @@ -26,3 +26,4 @@ jobs: ascending: true enable-statistics: true stale-issue-label: "stale" + exempt-issue-labels: "never stale" From 696fdd8fed12da625357dc75939b0787c171b5d7 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 18 Nov 2025 11:22:34 +0100 Subject: [PATCH 0181/1030] git: Remove JobStatus from PendingOp in favour of in-flight pruning (#42955) The idea is that we only store running (`!self.finished`) or finished (`self.finished`) pending ops, while everything else (skipped, errored) jobs are pruned out immediately. We don't really need them in the grand scheme of things anyway. Release Notes: - N/A --- crates/git_ui/src/git_panel.rs | 2 +- crates/project/src/git_store.rs | 25 ++++++++---- crates/project/src/git_store/pending_op.rs | 28 ++----------- crates/project/src/project_tests.rs | 46 +++++++++------------- 4 files changed, 39 insertions(+), 62 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 0691ba78560e38f5d3a297d033bd41459dff78c4..85b4b67a5586f157d4337a0564051712279258d6 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2662,7 +2662,7 @@ impl GitPanel { && pending .ops .iter() - .any(|op| op.git_status == pending_op::GitStatus::Reverted && op.finished()) + .any(|op| op.git_status == pending_op::GitStatus::Reverted && op.finished) { continue; } diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 94af9859df1156d7a10286a843a31e8351fe050c..dda60a2482b7bb08f0f3d73366e96b59bc7fe636 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -5404,18 +5404,27 @@ impl Repository { let ids = self.new_pending_ops_for_paths(paths, git_status); cx.spawn(async move |this, cx| { - let (job_status, result) = match f(this.clone(), cx).await { - Ok(()) => (pending_op::JobStatus::Finished, Ok(())), - Err(err) if err.is::() => (pending_op::JobStatus::Skipped, Ok(())), - Err(err) => (pending_op::JobStatus::Error, Err(err)), + let (finished, result) = match f(this.clone(), cx).await { + Ok(()) => (true, Ok(())), + Err(err) if err.is::() => (false, Ok(())), + Err(err) => (false, Err(err)), }; this.update(cx, |this, _| { let mut edits = Vec::with_capacity(ids.len()); for (id, entry) in ids { if let Some(mut ops) = this.snapshot.pending_ops_for_path(&entry) { - if let Some(op) = ops.op_by_id_mut(id) { - op.job_status = job_status; + if finished { + if let Some(op) = ops.op_by_id_mut(id) { + op.finished = true; + } + } else { + let idx = ops + .ops + .iter() + .position(|op| op.id == id) + .expect("pending operation must exist"); + ops.ops.remove(idx); } edits.push(sum_tree::Edit::Insert(ops)); } @@ -5443,7 +5452,7 @@ impl Repository { ops.ops.push(PendingOp { id, git_status, - job_status: pending_op::JobStatus::Running, + finished: false, }); edits.push(sum_tree::Edit::Insert(ops)); ids.push((id, path)); @@ -5721,7 +5730,7 @@ async fn compute_snapshot( let pending_ops_by_path = SumTree::from_iter( prev_snapshot.pending_ops_by_path.iter().filter_map(|ops| { let inner_ops: Vec = - ops.ops.iter().filter(|op| 
op.running()).cloned().collect(); + ops.ops.iter().filter(|op| !op.finished).cloned().collect(); if inner_ops.is_empty() { None } else { diff --git a/crates/project/src/git_store/pending_op.rs b/crates/project/src/git_store/pending_op.rs index 1991eed407833d47fd35f6f573fbb46c692aed91..b5c7d9f00ec639576d498aaa3107c83758085c2e 100644 --- a/crates/project/src/git_store/pending_op.rs +++ b/crates/project/src/git_store/pending_op.rs @@ -11,14 +11,6 @@ pub enum GitStatus { Unchanged, } -#[derive(Clone, Copy, Debug, PartialEq, Eq)] -pub enum JobStatus { - Running, - Finished, - Skipped, - Error, -} - #[derive(Clone, Debug, PartialEq, Eq)] pub struct PendingOps { pub repo_path: RepoPath, @@ -29,7 +21,7 @@ pub struct PendingOps { pub struct PendingOp { pub id: PendingOpId, pub git_status: GitStatus, - pub job_status: JobStatus, + pub finished: bool, } #[derive(Clone, Debug)] @@ -114,7 +106,7 @@ impl PendingOps { /// File is staged if the last job is finished and has status Staged. pub fn staged(&self) -> bool { if let Some(last) = self.ops.last() { - if last.git_status == GitStatus::Staged && last.job_status == JobStatus::Finished { + if last.git_status == GitStatus::Staged && last.finished { return true; } } @@ -124,24 +116,10 @@ impl PendingOps { /// File is staged if the last job is not finished and has status Staged. pub fn staging(&self) -> bool { if let Some(last) = self.ops.last() { - if last.git_status == GitStatus::Staged && last.job_status != JobStatus::Finished { + if last.git_status == GitStatus::Staged && !last.finished { return true; } } false } } - -impl PendingOp { - pub fn running(&self) -> bool { - self.job_status == JobStatus::Running - } - - pub fn finished(&self) -> bool { - matches!(self.job_status, JobStatus::Finished | JobStatus::Skipped) - } - - pub fn error(&self) -> bool { - self.job_status == JobStatus::Error - } -} diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index f3c935f3390305c8c78074439084f20b4d1562b2..f65f28c6045e55c669b35592738c08f6abbe26d4 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8538,11 +8538,8 @@ fn merge_pending_ops_snapshots( .find_map(|(op, idx)| if op.id == s_op.id { Some(idx) } else { None }) { let t_op = &mut t_ops.ops[op_idx]; - match (s_op.job_status, t_op.job_status) { - (pending_op::JobStatus::Running, _) => {} - (s_st, pending_op::JobStatus::Running) => t_op.job_status = s_st, - (s_st, t_st) if s_st == t_st => {} - _ => unreachable!(), + if s_op.finished { + t_op.finished = true; } } else { t_ops.ops.push(s_op); @@ -8634,7 +8631,7 @@ async fn test_repository_pending_ops_staging( Some(&pending_op::PendingOp { id: id.into(), git_status, - job_status: pending_op::JobStatus::Running + finished: false, }) ); task @@ -8649,7 +8646,7 @@ async fn test_repository_pending_ops_staging( Some(&pending_op::PendingOp { id: id.into(), git_status, - job_status: pending_op::JobStatus::Finished + finished: true, }) ); }); @@ -8675,27 +8672,27 @@ async fn test_repository_pending_ops_staging( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 3u16.into(), git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 
4u16.into(), git_status: pending_op::GitStatus::Unstaged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 5u16.into(), git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished + finished: true, } ], ); @@ -8792,18 +8789,11 @@ async fn test_repository_pending_ops_long_running_staging( .get(&worktree::PathKey(repo_path("a.txt").as_ref().clone()), ()) .unwrap() .ops, - vec![ - pending_op::PendingOp { - id: 1u16.into(), - git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Skipped - }, - pending_op::PendingOp { - id: 2u16.into(), - git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished - } - ], + vec![pending_op::PendingOp { + id: 2u16.into(), + git_status: pending_op::GitStatus::Staged, + finished: true, + }], ); repo.update(cx, |repo, _cx| { @@ -8904,12 +8894,12 @@ async fn test_repository_pending_ops_stage_all( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - job_status: pending_op::JobStatus::Finished + finished: true, }, ], ); @@ -8923,12 +8913,12 @@ async fn test_repository_pending_ops_stage_all( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - job_status: pending_op::JobStatus::Finished + finished: true, }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - job_status: pending_op::JobStatus::Finished + finished: true, }, ], ); From c1d9dc369c1edcae0b3f84bdabc2bdf73d5c2e72 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Tue, 18 Nov 2025 12:00:02 +0100 Subject: [PATCH 0182/1030] Try reducing flakiness of fs-event tests by bumping timeout to 4s on CI (#42960) Release Notes: - N/A --- crates/fsevent/src/fsevent.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/crates/fsevent/src/fsevent.rs b/crates/fsevent/src/fsevent.rs index e4060f3ae06a8d9412baf1cd75a9503c1b6d359b..0004b7521019f4d563cd568f2875fe1edf1ac207 100644 --- a/crates/fsevent/src/fsevent.rs +++ b/crates/fsevent/src/fsevent.rs @@ -395,19 +395,19 @@ mod tests { thread::spawn(move || stream.run(move |events| tx.send(events.to_vec()).is_ok())); fs::write(path.join("new-file"), "").unwrap(); - let events = rx.recv_timeout(Duration::from_secs(2)).unwrap(); + let events = rx.recv_timeout(timeout()).unwrap(); let event = events.last().unwrap(); assert_eq!(event.path, path.join("new-file")); assert!(event.flags.contains(StreamFlags::ITEM_CREATED)); fs::remove_file(path.join("existing-file-5")).unwrap(); - let mut events = rx.recv_timeout(Duration::from_secs(2)).unwrap(); + let mut events = rx.recv_timeout(timeout()).unwrap(); let mut event = events.last().unwrap(); // we see this duplicate about 1/100 test runs. 
if event.path == path.join("new-file") && event.flags.contains(StreamFlags::ITEM_CREATED) { - events = rx.recv_timeout(Duration::from_secs(2)).unwrap(); + events = rx.recv_timeout(timeout()).unwrap(); event = events.last().unwrap(); } assert_eq!(event.path, path.join("existing-file-5")); @@ -440,13 +440,13 @@ mod tests { }); fs::write(path.join("new-file"), "").unwrap(); - let events = rx.recv_timeout(Duration::from_secs(2)).unwrap(); + let events = rx.recv_timeout(timeout()).unwrap(); let event = events.last().unwrap(); assert_eq!(event.path, path.join("new-file")); assert!(event.flags.contains(StreamFlags::ITEM_CREATED)); fs::remove_file(path.join("existing-file-5")).unwrap(); - let events = rx.recv_timeout(Duration::from_secs(2)).unwrap(); + let events = rx.recv_timeout(timeout()).unwrap(); let event = events.last().unwrap(); assert_eq!(event.path, path.join("existing-file-5")); assert!(event.flags.contains(StreamFlags::ITEM_REMOVED)); @@ -477,11 +477,11 @@ mod tests { }); fs::write(path.join("new-file"), "").unwrap(); - assert_eq!(rx.recv_timeout(Duration::from_secs(2)).unwrap(), "running"); + assert_eq!(rx.recv_timeout(timeout()).unwrap(), "running"); // Dropping the handle causes `EventStream::run` to return. drop(handle); - assert_eq!(rx.recv_timeout(Duration::from_secs(2)).unwrap(), "stopped"); + assert_eq!(rx.recv_timeout(timeout()).unwrap(), "stopped"); } #[test] @@ -500,11 +500,14 @@ mod tests { } fn flush_historical_events() { - let duration = if std::env::var("CI").is_ok() { - Duration::from_secs(2) + thread::sleep(timeout()); + } + + fn timeout() -> Duration { + if std::env::var("CI").is_ok() { + Duration::from_secs(4) } else { Duration::from_millis(500) - }; - thread::sleep(duration); + } } } From f17d2c92b6f8982d3be9d5858edb008efe1112f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Engin=20A=C3=A7=C4=B1kg=C3=B6z?= Date: Tue, 18 Nov 2025 15:37:48 +0300 Subject: [PATCH 0183/1030] terminal_view: Fix terminal opening in root directory when editing single file worktree (#42953) Fixes #42945 ## Problem When opening a single file via the command line (e.g., `zed ~/Downloads/file.txt`), the terminal panel was opening in the root directory (`/`) instead of the file's directory. ## Root Cause The code only checked the active project directory, which returns `None` when a single file is opened. Additionally, file worktrees weren't handling the parent directory lookup. ## Solution Added fallback logic to use the first project directory when there's no active entry, and made file worktrees return their parent directory instead of `None`. ## Testing - All existing tests pass - Added test coverage for file worktree scenarios - Manually tested with `zed ~/Downloads/file.txt` - the terminal now opens in the correct directory This improves the user experience for users who frequently open single files from the command line.
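As a quick illustration of the fallback described above, here is a minimal standalone sketch (not the actual Zed code — see the `crates/terminal_view/src/terminal_view.rs` diff below; the function name and the use of `std::path` here are assumptions made only for this example):

```
use std::path::{Path, PathBuf};

// Illustrative only: choose a terminal working directory for an opened path.
// A directory is used directly; a single file falls back to its parent
// directory instead of yielding nothing.
fn sketch_working_directory(opened: &Path) -> Option<PathBuf> {
    if opened.is_dir() {
        Some(opened.to_path_buf())
    } else {
        opened.parent().map(Path::to_path_buf)
    }
}

fn main() {
    // For a single file such as ~/Downloads/file.txt, this yields ~/Downloads.
    let dir = sketch_working_directory(Path::new("/home/user/Downloads/file.txt"));
    println!("{:?}", dir);
}
```

The real change additionally falls back to the first project directory when there is no active entry, as shown in the diff below.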
## Release Notes - Fixed terminal opening in root directory when editing single files from the command line --- crates/terminal_view/src/terminal_view.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index b9dabee7e82064ebe055893306241f995654b82b..66e6c605f9b560dc36db3dde16e84c2ee8c0c5b5 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1556,7 +1556,8 @@ pub(crate) fn default_working_directory(workspace: &Workspace, cx: &App) -> Opti .read(cx) .active_project_directory(cx) .as_deref() - .map(Path::to_path_buf), + .map(Path::to_path_buf) + .or_else(|| first_project_directory(workspace, cx)), WorkingDirectory::FirstProjectDirectory => first_project_directory(workspace, cx), WorkingDirectory::AlwaysHome => None, WorkingDirectory::Always { directory } => { @@ -1570,10 +1571,13 @@ pub(crate) fn default_working_directory(workspace: &Workspace, cx: &App) -> Opti ///Gets the first project's home directory, or the home directory fn first_project_directory(workspace: &Workspace, cx: &App) -> Option { let worktree = workspace.worktrees(cx).next()?.read(cx); - if !worktree.root_entry()?.is_dir() { - return None; + let worktree_path = worktree.abs_path(); + if worktree.root_entry()?.is_dir() { + Some(worktree_path.to_path_buf()) + } else { + // If worktree is a file, return its parent directory + worktree_path.parent().map(|p| p.to_path_buf()) } - Some(worktree.abs_path().to_path_buf()) } #[cfg(test)] @@ -1606,7 +1610,7 @@ mod tests { }); } - // No active entry, but a worktree, worktree is a file -> home_dir() + // No active entry, but a worktree, worktree is a file -> parent directory #[gpui::test] async fn no_active_entry_worktree_is_file(cx: &mut TestAppContext) { let (project, workspace) = init_test(cx).await; @@ -1621,9 +1625,9 @@ mod tests { assert!(workspace.worktrees(cx).next().is_some()); let res = default_working_directory(workspace, cx); - assert_eq!(res, None); + assert_eq!(res, Some(Path::new("/").to_path_buf())); let res = first_project_directory(workspace, cx); - assert_eq!(res, None); + assert_eq!(res, Some(Path::new("/").to_path_buf())); }); } From d2988ffc779e74afc8b03e83d51284afb165aa88 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 18 Nov 2025 14:02:40 +0100 Subject: [PATCH 0184/1030] vim: Fix snapshot out of bounds indexing (#42969) Fixes ZED-38X Release Notes: - N/A --- crates/vim/src/normal/increment.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/vim/src/normal/increment.rs b/crates/vim/src/normal/increment.rs index 888d9ff25b63fad2e7fc0cf6cf534bfb1a7aaf76..7eadf5053a15f33946031a332cb8a7f2dcb8ed52 100644 --- a/crates/vim/src/normal/increment.rs +++ b/crates/vim/src/normal/increment.rs @@ -211,7 +211,7 @@ fn find_target( let mut pre_char = String::new(); // Backward scan to find the start of the number, but stop at start_offset - for ch in snapshot.reversed_chars_at(offset + 1) { + for ch in snapshot.reversed_chars_at(offset + if offset < snapshot.len() { 1 } else { 0 }) { // Search boundaries if offset == 0 || ch.is_whitespace() || (need_range && offset <= start_offset) { break; From ea120dfe18b476e0255127fc951e30af252817b9 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 18 Nov 2025 14:30:40 +0100 Subject: [PATCH 0185/1030] =?UTF-8?q?Revert=20"git:=20Remove=20JobStatus?= =?UTF-8?q?=20from=20PendingOp=20in=20favour=20of=20in-flight=20p=E2=80=A6?= 
=?UTF-8?q?=20(#42970)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …runing (#42955)" This reverts commit 696fdd8fed12da625357dc75939b0787c171b5d7. Release Notes: - N/A --- crates/git_ui/src/git_panel.rs | 2 +- crates/project/src/git_store.rs | 25 ++++-------- crates/project/src/git_store/pending_op.rs | 28 +++++++++++-- crates/project/src/project_tests.rs | 46 +++++++++++++--------- 4 files changed, 62 insertions(+), 39 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 85b4b67a5586f157d4337a0564051712279258d6..0691ba78560e38f5d3a297d033bd41459dff78c4 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2662,7 +2662,7 @@ impl GitPanel { && pending .ops .iter() - .any(|op| op.git_status == pending_op::GitStatus::Reverted && op.finished) + .any(|op| op.git_status == pending_op::GitStatus::Reverted && op.finished()) { continue; } diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index dda60a2482b7bb08f0f3d73366e96b59bc7fe636..94af9859df1156d7a10286a843a31e8351fe050c 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -5404,27 +5404,18 @@ impl Repository { let ids = self.new_pending_ops_for_paths(paths, git_status); cx.spawn(async move |this, cx| { - let (finished, result) = match f(this.clone(), cx).await { - Ok(()) => (true, Ok(())), - Err(err) if err.is::() => (false, Ok(())), - Err(err) => (false, Err(err)), + let (job_status, result) = match f(this.clone(), cx).await { + Ok(()) => (pending_op::JobStatus::Finished, Ok(())), + Err(err) if err.is::() => (pending_op::JobStatus::Skipped, Ok(())), + Err(err) => (pending_op::JobStatus::Error, Err(err)), }; this.update(cx, |this, _| { let mut edits = Vec::with_capacity(ids.len()); for (id, entry) in ids { if let Some(mut ops) = this.snapshot.pending_ops_for_path(&entry) { - if finished { - if let Some(op) = ops.op_by_id_mut(id) { - op.finished = true; - } - } else { - let idx = ops - .ops - .iter() - .position(|op| op.id == id) - .expect("pending operation must exist"); - ops.ops.remove(idx); + if let Some(op) = ops.op_by_id_mut(id) { + op.job_status = job_status; } edits.push(sum_tree::Edit::Insert(ops)); } @@ -5452,7 +5443,7 @@ impl Repository { ops.ops.push(PendingOp { id, git_status, - finished: false, + job_status: pending_op::JobStatus::Running, }); edits.push(sum_tree::Edit::Insert(ops)); ids.push((id, path)); @@ -5730,7 +5721,7 @@ async fn compute_snapshot( let pending_ops_by_path = SumTree::from_iter( prev_snapshot.pending_ops_by_path.iter().filter_map(|ops| { let inner_ops: Vec = - ops.ops.iter().filter(|op| !op.finished).cloned().collect(); + ops.ops.iter().filter(|op| op.running()).cloned().collect(); if inner_ops.is_empty() { None } else { diff --git a/crates/project/src/git_store/pending_op.rs b/crates/project/src/git_store/pending_op.rs index b5c7d9f00ec639576d498aaa3107c83758085c2e..1991eed407833d47fd35f6f573fbb46c692aed91 100644 --- a/crates/project/src/git_store/pending_op.rs +++ b/crates/project/src/git_store/pending_op.rs @@ -11,6 +11,14 @@ pub enum GitStatus { Unchanged, } +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum JobStatus { + Running, + Finished, + Skipped, + Error, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct PendingOps { pub repo_path: RepoPath, @@ -21,7 +29,7 @@ pub struct PendingOps { pub struct PendingOp { pub id: PendingOpId, pub git_status: GitStatus, - pub finished: bool, + pub job_status: JobStatus, } 
#[derive(Clone, Debug)] @@ -106,7 +114,7 @@ impl PendingOps { /// File is staged if the last job is finished and has status Staged. pub fn staged(&self) -> bool { if let Some(last) = self.ops.last() { - if last.git_status == GitStatus::Staged && last.finished { + if last.git_status == GitStatus::Staged && last.job_status == JobStatus::Finished { return true; } } @@ -116,10 +124,24 @@ impl PendingOps { /// File is staged if the last job is not finished and has status Staged. pub fn staging(&self) -> bool { if let Some(last) = self.ops.last() { - if last.git_status == GitStatus::Staged && !last.finished { + if last.git_status == GitStatus::Staged && last.job_status != JobStatus::Finished { return true; } } false } } + +impl PendingOp { + pub fn running(&self) -> bool { + self.job_status == JobStatus::Running + } + + pub fn finished(&self) -> bool { + matches!(self.job_status, JobStatus::Finished | JobStatus::Skipped) + } + + pub fn error(&self) -> bool { + self.job_status == JobStatus::Error + } +} diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index f65f28c6045e55c669b35592738c08f6abbe26d4..f3c935f3390305c8c78074439084f20b4d1562b2 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8538,8 +8538,11 @@ fn merge_pending_ops_snapshots( .find_map(|(op, idx)| if op.id == s_op.id { Some(idx) } else { None }) { let t_op = &mut t_ops.ops[op_idx]; - if s_op.finished { - t_op.finished = true; + match (s_op.job_status, t_op.job_status) { + (pending_op::JobStatus::Running, _) => {} + (s_st, pending_op::JobStatus::Running) => t_op.job_status = s_st, + (s_st, t_st) if s_st == t_st => {} + _ => unreachable!(), } } else { t_ops.ops.push(s_op); @@ -8631,7 +8634,7 @@ async fn test_repository_pending_ops_staging( Some(&pending_op::PendingOp { id: id.into(), git_status, - finished: false, + job_status: pending_op::JobStatus::Running }) ); task @@ -8646,7 +8649,7 @@ async fn test_repository_pending_ops_staging( Some(&pending_op::PendingOp { id: id.into(), git_status, - finished: true, + job_status: pending_op::JobStatus::Finished }) ); }); @@ -8672,27 +8675,27 @@ async fn test_repository_pending_ops_staging( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 3u16.into(), git_status: pending_op::GitStatus::Staged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 4u16.into(), git_status: pending_op::GitStatus::Unstaged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 5u16.into(), git_status: pending_op::GitStatus::Staged, - finished: true, + job_status: pending_op::JobStatus::Finished } ], ); @@ -8789,11 +8792,18 @@ async fn test_repository_pending_ops_long_running_staging( .get(&worktree::PathKey(repo_path("a.txt").as_ref().clone()), ()) .unwrap() .ops, - vec![pending_op::PendingOp { - id: 2u16.into(), - git_status: pending_op::GitStatus::Staged, - finished: true, - }], + vec![ + pending_op::PendingOp { + id: 1u16.into(), + git_status: pending_op::GitStatus::Staged, + job_status: pending_op::JobStatus::Skipped + }, + pending_op::PendingOp { + id: 2u16.into(), + git_status: pending_op::GitStatus::Staged, + job_status: pending_op::JobStatus::Finished + } + ], ); 
repo.update(cx, |repo, _cx| { @@ -8894,12 +8904,12 @@ async fn test_repository_pending_ops_stage_all( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - finished: true, + job_status: pending_op::JobStatus::Finished }, ], ); @@ -8913,12 +8923,12 @@ async fn test_repository_pending_ops_stage_all( pending_op::PendingOp { id: 1u16.into(), git_status: pending_op::GitStatus::Staged, - finished: true, + job_status: pending_op::JobStatus::Finished }, pending_op::PendingOp { id: 2u16.into(), git_status: pending_op::GitStatus::Unstaged, - finished: true, + job_status: pending_op::JobStatus::Finished }, ], ); From f1c2afdee08a5d6942a54ad95045e77c055264e7 Mon Sep 17 00:00:00 2001 From: Ben Brandt Date: Tue, 18 Nov 2025 13:50:59 +0000 Subject: [PATCH 0186/1030] Update codex docs to include configuration for third-party providers (#42973) Release Notes: - N/A --- docs/src/ai/external-agents.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/ai/external-agents.md b/docs/src/ai/external-agents.md index ade14c393dc5155d5b3cc68fa3597c643fbfe982..d396a9b72bf1b51e4fd3994805c7b0d5268a0cd0 100644 --- a/docs/src/ai/external-agents.md +++ b/docs/src/ai/external-agents.md @@ -168,6 +168,8 @@ To ensure you're using your billing method of choice, [open a new Codex thread]( If you are already logged in and want to change your authentication method, type `/logout` in the thread and authenticate again. +If you want to use a third-party provider with Codex, you can configure that with your [Codex config.toml](https://github.com/openai/codex/blob/main/docs/config.md#model-selection) or pass extra [args/env variables](https://github.com/openai/codex/blob/main/docs/config.md#model-selection) to your Codex agent servers settings. + #### Installation The first time you create a Codex thread, Zed will install [codex-acp](https://github.com/zed-industries/codex-acp). This installation is only available to Zed and is kept up to date as you use the agent. From 097024d46ffc12756c6451517f55d41c14849866 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 18 Nov 2025 15:31:39 +0100 Subject: [PATCH 0187/1030] util: Use process spawn helpers in more places (#42976) Release Notes: - N/A *or* Added/Fixed/Improved ... 
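The mechanical change at each call site is the same: instead of constructing `std::process::Command` or `smol::process::Command` directly, construction goes through the `new_std_command` / `new_smol_command` helpers from `util::command`, so process setup stays in one place. A minimal sketch of the before/after shape, assuming only what the diff below shows (the helper names and their builder-style return values); `spawn_rsync` and `open_path` are hypothetical names used for illustration:

```
use util::command::{new_smol_command, new_std_command};

// Hypothetical async call site showing the pattern applied throughout this patch.
async fn spawn_rsync(
    from: &std::path::Path,
    to: &std::path::Path,
) -> anyhow::Result<std::process::Output> {
    // Before: smol::process::Command::new("rsync").args(["-av", "--delete"]).arg(from).arg(to).output().await
    let output = new_smol_command("rsync")
        .args(["-av", "--delete"])
        .arg(from)
        .arg(to)
        .output()
        .await?;
    Ok(output)
}

// Blocking call sites swap std::process::Command::new for the std helper the same way.
fn open_path(path: &std::path::Path) -> std::io::Result<std::process::Child> {
    new_std_command("xdg-open").arg(path).spawn()
}
```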
--- crates/auto_update/src/auto_update.rs | 15 +++++++------- crates/gpui/src/platform/linux/platform.rs | 6 +++--- crates/gpui/src/platform/mac/platform.rs | 10 ++++++---- crates/languages/src/rust.rs | 9 +++++++-- crates/project/src/debugger/locators/cargo.rs | 9 +++------ crates/project/src/environment.rs | 4 ++-- crates/project/src/terminals.rs | 6 +++--- crates/remote/src/transport.rs | 20 +++++++++++++------ crates/remote_server/src/unix.rs | 7 ++++--- crates/util/src/shell_env.rs | 4 +++- 10 files changed, 53 insertions(+), 37 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index accda1b1ce4b09db0bc4cc0fb5824290725cb8ee..010e011526ffc6d0332a5c22da107f380ff37c91 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -10,8 +10,8 @@ use paths::remote_servers_dir; use release_channel::{AppCommitSha, ReleaseChannel}; use serde::{Deserialize, Serialize}; use settings::{RegisterSetting, Settings, SettingsStore}; +use smol::fs::File; use smol::{fs, io::AsyncReadExt}; -use smol::{fs::File, process::Command}; use std::mem; use std::{ env::{ @@ -23,6 +23,7 @@ use std::{ sync::Arc, time::Duration, }; +use util::command::new_smol_command; use workspace::Workspace; const SHOULD_SHOW_UPDATE_NOTIFICATION_KEY: &str = "auto-updater-should-show-updated-notification"; @@ -121,7 +122,7 @@ impl Drop for MacOsUnmounter<'_> { let mount_path = mem::take(&mut self.mount_path); self.background_executor .spawn(async move { - let unmount_output = Command::new("hdiutil") + let unmount_output = new_smol_command("hdiutil") .args(["detach", "-force"]) .arg(&mount_path) .output() @@ -799,7 +800,7 @@ async fn install_release_linux( .await .context("failed to create directory into which to extract update")?; - let output = Command::new("tar") + let output = new_smol_command("tar") .arg("-xzf") .arg(&downloaded_tar_gz) .arg("-C") @@ -834,7 +835,7 @@ async fn install_release_linux( to = PathBuf::from(prefix); } - let output = Command::new("rsync") + let output = new_smol_command("rsync") .args(["-av", "--delete"]) .arg(&from) .arg(&to) @@ -866,7 +867,7 @@ async fn install_release_macos( let mut mounted_app_path: OsString = mount_path.join(running_app_filename).into(); mounted_app_path.push("/"); - let output = Command::new("hdiutil") + let output = new_smol_command("hdiutil") .args(["attach", "-nobrowse"]) .arg(&downloaded_dmg) .arg("-mountroot") @@ -886,7 +887,7 @@ async fn install_release_macos( background_executor: cx.background_executor(), }; - let output = Command::new("rsync") + let output = new_smol_command("rsync") .args(["-av", "--delete"]) .arg(&mounted_app_path) .arg(&running_app_path) @@ -917,7 +918,7 @@ async fn cleanup_windows() -> Result<()> { } async fn install_release_windows(downloaded_installer: PathBuf) -> Result> { - let output = Command::new(downloaded_installer) + let output = new_smol_command(downloaded_installer) .arg("/verysilent") .arg("/update=true") .arg("!desktopicon") diff --git a/crates/gpui/src/platform/linux/platform.rs b/crates/gpui/src/platform/linux/platform.rs index 6c2d13d2e78f003a950e5c1dc135b503ae6d4087..21468370c499058af207a7e7b02ca1bbd7563ec1 100644 --- a/crates/gpui/src/platform/linux/platform.rs +++ b/crates/gpui/src/platform/linux/platform.rs @@ -1,7 +1,6 @@ use std::{ env, path::{Path, PathBuf}, - process::Command, rc::Rc, sync::Arc, }; @@ -18,6 +17,7 @@ use anyhow::{Context as _, anyhow}; use calloop::{LoopSignal, channel::Channel}; use futures::channel::oneshot; use 
util::ResultExt as _; +use util::command::{new_smol_command, new_std_command}; #[cfg(any(feature = "wayland", feature = "x11"))] use xkbcommon::xkb::{self, Keycode, Keysym, State}; @@ -215,7 +215,7 @@ impl Platform for P { clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" )] - let restart_process = Command::new("/usr/bin/env") + let restart_process = new_std_command("/usr/bin/env") .arg("bash") .arg("-c") .arg(script) @@ -422,7 +422,7 @@ impl Platform for P { let path = path.to_owned(); self.background_executor() .spawn(async move { - let _ = smol::process::Command::new("xdg-open") + let _ = new_smol_command("xdg-open") .arg(path) .spawn() .context("invoking xdg-open") diff --git a/crates/gpui/src/platform/mac/platform.rs b/crates/gpui/src/platform/mac/platform.rs index cc6390cdb887a6c08fbb4520c9ab9fac4b50f9cf..46477045722e132a275f926140915dbb9bd6cd5c 100644 --- a/crates/gpui/src/platform/mac/platform.rs +++ b/crates/gpui/src/platform/mac/platform.rs @@ -53,14 +53,16 @@ use std::{ ffi::{CStr, OsStr, c_void}, os::{raw::c_char, unix::ffi::OsStrExt}, path::{Path, PathBuf}, - process::Command, ptr, rc::Rc, slice, str, sync::{Arc, OnceLock}, }; use strum::IntoEnumIterator; -use util::ResultExt; +use util::{ + ResultExt, + command::{new_smol_command, new_std_command}, +}; #[allow(non_upper_case_globals)] const NSUTF8StringEncoding: NSUInteger = 4; @@ -552,7 +554,7 @@ impl Platform for MacPlatform { clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" )] - let restart_process = Command::new("/bin/bash") + let restart_process = new_std_command("/bin/bash") .arg("-c") .arg(script) .arg(app_pid) @@ -867,7 +869,7 @@ impl Platform for MacPlatform { .lock() .background_executor .spawn(async move { - if let Some(mut child) = smol::process::Command::new("open") + if let Some(mut child) = new_smol_command("open") .arg(path) .spawn() .context("invoking open command") diff --git a/crates/languages/src/rust.rs b/crates/languages/src/rust.rs index c2b0845940d1639629d59b634e9ece73c9e4cf3a..0b29ee93f3d4d1f1bc79e9ff93d99f80a24f473b 100644 --- a/crates/languages/src/rust.rs +++ b/crates/languages/src/rust.rs @@ -67,10 +67,15 @@ impl RustLspAdapter { #[cfg(target_os = "linux")] async fn determine_libc_type() -> LibcType { use futures::pin_mut; - use smol::process::Command; async fn from_ldd_version() -> Option { - let ldd_output = Command::new("ldd").arg("--version").output().await.ok()?; + use util::command::new_smol_command; + + let ldd_output = new_smol_command("ldd") + .arg("--version") + .output() + .await + .ok()?; let ldd_version = String::from_utf8_lossy(&ldd_output.stdout); if ldd_version.contains("GNU libc") || ldd_version.contains("GLIBC") { diff --git a/crates/project/src/debugger/locators/cargo.rs b/crates/project/src/debugger/locators/cargo.rs index 72aafff51609c8e55ac15f671a7b7f553b34d44f..0633f4881b33c096da1dddb59f1cfa11a39e4849 100644 --- a/crates/project/src/debugger/locators/cargo.rs +++ b/crates/project/src/debugger/locators/cargo.rs @@ -3,13 +3,10 @@ use async_trait::async_trait; use dap::{DapLocator, DebugRequest, adapters::DebugAdapterName}; use gpui::SharedString; use serde_json::{Value, json}; -use smol::{ - Timer, - io::AsyncReadExt, - process::{Command, Stdio}, -}; +use smol::{Timer, io::AsyncReadExt, process::Stdio}; use std::time::Duration; use task::{BuildTaskDefinition, DebugScenario, ShellBuilder, SpawnInTerminal, TaskTemplate}; +use util::command::new_smol_command; pub(crate) struct 
CargoLocator; @@ -18,7 +15,7 @@ async fn find_best_executable(executables: &[String], test_name: &str) -> Option return executables.first().cloned(); } for executable in executables { - let Some(mut child) = Command::new(&executable) + let Some(mut child) = new_smol_command(&executable) .arg("--list") .stdout(Stdio::piped()) .spawn() diff --git a/crates/project/src/environment.rs b/crates/project/src/environment.rs index 478ba6dd17f2ac0dfcedbd5f42ed12856b413ea3..32ed2f87668979802a44dcc233a1e87dc3c2e958 100644 --- a/crates/project/src/environment.rs +++ b/crates/project/src/environment.rs @@ -6,7 +6,7 @@ use rpc::proto::{self, REMOTE_SERVER_PROJECT_ID}; use std::{collections::VecDeque, path::Path, sync::Arc}; use task::{Shell, shell_to_proto}; use terminal::terminal_settings::TerminalSettings; -use util::{ResultExt, rel_path::RelPath}; +use util::{ResultExt, command::new_smol_command, rel_path::RelPath}; use worktree::Worktree; use collections::HashMap; @@ -389,7 +389,7 @@ async fn load_direnv_environment( }; let args = &["export", "json"]; - let direnv_output = smol::process::Command::new(&direnv_path) + let direnv_output = new_smol_command(&direnv_path) .args(args) .envs(env) .env("TERM", "dumb") diff --git a/crates/project/src/terminals.rs b/crates/project/src/terminals.rs index 769e75de3e8a001bf140708dd7443a48c55ed280..ef21c97f8178181493968c984e6534772eac9beb 100644 --- a/crates/project/src/terminals.rs +++ b/crates/project/src/terminals.rs @@ -16,7 +16,7 @@ use task::{Shell, ShellBuilder, ShellKind, SpawnInTerminal}; use terminal::{ TaskState, TaskStatus, Terminal, TerminalBuilder, terminal_settings::TerminalSettings, }; -use util::{get_default_system_shell, maybe, rel_path::RelPath}; +use util::{command::new_std_command, get_default_system_shell, maybe, rel_path::RelPath}; use crate::{Project, ProjectPath}; @@ -505,13 +505,13 @@ impl Project { None, None, )?; - let mut command = std::process::Command::new(command_template.program); + let mut command = new_std_command(command_template.program); command.args(command_template.args); command.envs(command_template.env); Ok(command) } None => { - let mut command = std::process::Command::new(command); + let mut command = new_std_command(command); command.args(args); command.envs(env); if let Some(path) = path { diff --git a/crates/remote/src/transport.rs b/crates/remote/src/transport.rs index 14a23257ce0bffbe138567f7aa27fc6a6d63d817..211851c0629c13f1f79ce425cafc582899d1b58f 100644 --- a/crates/remote/src/transport.rs +++ b/crates/remote/src/transport.rs @@ -124,6 +124,7 @@ async fn build_remote_server_from_source( use smol::process::{Command, Stdio}; use std::env::VarError; use std::path::Path; + use util::command::new_smol_command; // By default, we make building remote server from source opt-out and we do not force artifact compression // for quicker builds. 
@@ -189,7 +190,7 @@ async fn build_remote_server_from_source( delegate.set_status(Some("Building remote server binary from source"), cx); log::info!("building remote server binary from source"); run_cmd( - Command::new("cargo") + new_smol_command("cargo") .current_dir(concat!(env!("CARGO_MANIFEST_DIR"), "/../..")) .args([ "build", @@ -219,12 +220,18 @@ async fn build_remote_server_from_source( .context("rustup not found on $PATH, install rustup (see https://rustup.rs/)")?; delegate.set_status(Some("Adding rustup target for cross-compilation"), cx); log::info!("adding rustup target"); - run_cmd(Command::new(rustup).args(["target", "add"]).arg(&triple)).await?; + run_cmd( + new_smol_command(rustup) + .args(["target", "add"]) + .arg(&triple), + ) + .await?; if which("cargo-zigbuild", cx).await?.is_none() { delegate.set_status(Some("Installing cargo-zigbuild for cross-compilation"), cx); log::info!("installing cargo-zigbuild"); - run_cmd(Command::new("cargo").args(["install", "--locked", "cargo-zigbuild"])).await?; + run_cmd(new_smol_command("cargo").args(["install", "--locked", "cargo-zigbuild"])) + .await?; } delegate.set_status( @@ -235,7 +242,7 @@ async fn build_remote_server_from_source( ); log::info!("building remote binary from source for {triple} with Zig"); run_cmd( - Command::new("cargo") + new_smol_command("cargo") .args([ "zigbuild", "--package", @@ -262,12 +269,13 @@ async fn build_remote_server_from_source( #[cfg(not(target_os = "windows"))] { - run_cmd(Command::new("gzip").args(["-f", &bin_path.to_string_lossy()])).await?; + run_cmd(new_smol_command("gzip").args(["-f", &bin_path.to_string_lossy()])).await?; } #[cfg(target_os = "windows")] { // On Windows, we use 7z to compress the binary + let seven_zip = which("7z.exe",cx) .await? .context("7z.exe not found on $PATH, install it (e.g. 
with `winget install -e --id 7zip.7zip`) or, if you don't want this behaviour, set $env:ZED_BUILD_REMOTE_SERVER=\"nocompress\"")?; @@ -275,7 +283,7 @@ async fn build_remote_server_from_source( if smol::fs::metadata(&gz_path).await.is_ok() { smol::fs::remove_file(&gz_path).await?; } - run_cmd(Command::new(seven_zip).args([ + run_cmd(new_smol_command(seven_zip).args([ "a", "-tgzip", &gz_path, diff --git a/crates/remote_server/src/unix.rs b/crates/remote_server/src/unix.rs index c51b93d4554aca86d13cefeb9dd4aaadacda399c..29e5ef735f5a001c23e3215c1a3fc5d291830282 100644 --- a/crates/remote_server/src/unix.rs +++ b/crates/remote_server/src/unix.rs @@ -16,6 +16,7 @@ use language::LanguageRegistry; use node_runtime::{NodeBinaryOptions, NodeRuntime}; use paths::logs_dir; use project::project_settings::ProjectSettings; +use util::command::new_smol_command; use proto::CrashReport; use release_channel::{AppVersion, RELEASE_CHANNEL, ReleaseChannel}; @@ -656,7 +657,7 @@ pub(crate) fn execute_proxy( async fn kill_running_server(pid: u32, paths: &ServerPaths) -> Result<(), ExecuteProxyError> { log::info!("killing existing server with PID {}", pid); - smol::process::Command::new("kill") + new_smol_command("kill") .arg(pid.to_string()) .output() .await @@ -707,7 +708,7 @@ async fn spawn_server(paths: &ServerPaths) -> Result<(), SpawnServerError> { } let binary_name = std::env::current_exe().map_err(SpawnServerError::CurrentExe)?; - let mut server_process = smol::process::Command::new(binary_name); + let mut server_process = new_smol_command(binary_name); server_process .arg("run") .arg("--log-file") @@ -772,7 +773,7 @@ async fn check_pid_file(path: &Path) -> Result, CheckPidError> { }; log::debug!("Checking if process with PID {} exists...", pid); - match smol::process::Command::new("kill") + match new_smol_command("kill") .arg("-0") .arg(pid.to_string()) .output() diff --git a/crates/util/src/shell_env.rs b/crates/util/src/shell_env.rs index 7b8239007980158bb7e5d5956bebb4c5bfb576dd..02b262298795f8a50d9612f65479c0299e7e6a3e 100644 --- a/crates/util/src/shell_env.rs +++ b/crates/util/src/shell_env.rs @@ -34,11 +34,13 @@ async fn capture_unix( ) -> Result> { use std::os::unix::process::CommandExt; + use crate::command::new_std_command; + let shell_kind = ShellKind::new(shell_path, false); let zed_path = super::get_shell_safe_zed_path(shell_kind)?; let mut command_string = String::new(); - let mut command = std::process::Command::new(shell_path); + let mut command = new_std_command(shell_path); command.args(args); // In some shells, file descriptors greater than 2 cannot be used in interactive mode, // so file descriptor 0 (stdin) is used instead. This impacts zsh, old bash; perhaps others. From b4e4e0d3ac6b7b6252394270a4d83cf8ab177101 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 18 Nov 2025 16:14:52 +0100 Subject: [PATCH 0188/1030] remote: Fix up incorrect logs (#42979) Release Notes: - N/A *or* Added/Fixed/Improved ... 
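Both call sites had the same slip: the remote platform was detected, but the log statement still interpolated an unrelated value (the shell string on the SSH path, `this.shell` on the WSL path). The corrected shape logs the value that was actually discovered, switching to `{:?}` since the platform value is only printed via its `Debug` form here (an inference from the formatter the diff moves to):

```
// Before: platform detected, but something else gets logged.
// log::info!("Remote platform discovered: {}", ssh_shell);

// After: log the detected platform itself.
log::info!("Remote platform discovered: {:?}", ssh_platform);
```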
--- crates/remote/src/transport/ssh.rs | 2 +- crates/remote/src/transport/wsl.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index 8fed12a4cb9d594160732cc94637f8b53692f9cf..6244045d1eba3a3f267fbdaf1cb906c58a3d48b2 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -489,7 +489,7 @@ impl SshRemoteConnection { let ssh_shell = socket.shell().await; log::info!("Remote shell discovered: {}", ssh_shell); let ssh_platform = socket.platform(ShellKind::new(&ssh_shell, false)).await?; - log::info!("Remote platform discovered: {}", ssh_shell); + log::info!("Remote platform discovered: {:?}", ssh_platform); let ssh_path_style = match ssh_platform.os { "windows" => PathStyle::Windows, _ => PathStyle::Posix, diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index d9a7c7ea36f2c620bf0ba01d7735537b09883f08..c075e4fb1213512792191cb3b5ff5eefc423b339 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -92,7 +92,7 @@ impl WslRemoteConnection { .detect_platform() .await .context("failed detecting platform")?; - log::info!("Remote platform discovered: {}", this.shell); + log::info!("Remote platform discovered: {:?}", this.platform); this.remote_binary_path = Some( this.ensure_server_binary(&delegate, release_channel, version, commit, cx) .await From c44d93745a3c444699bfe4912f7fbd194039bcdb Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Tue, 18 Nov 2025 12:28:14 -0300 Subject: [PATCH 0189/1030] agent_ui: Improve the modal to add LLM providers (#42983) Closes https://github.com/zed-industries/zed/issues/42807 This PR makes the modal to add LLM providers a bit better to interact with: 1. Added a scrollbar 2. Made the inputs navigable with tab 3. Added some responsiveness to ensure it resizes on shorter windows https://github.com/user-attachments/assets/758ea5f0-6bcc-4a2b-87ea-114982f37caf Release Notes: - agent: Improved the modal to add LLM providers by making it responsive and keyboard navigable. 
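The tab order introduced in the diff is positional: the three provider-level fields take tab indices 1–3, and each model row then claims the next four consecutive slots, so focus walks the form top to bottom even as rows are added or removed. A small sketch of that arithmetic (illustrative only; `model_row_tab_indices` is a hypothetical helper mirroring the math in `ModelInput::new` below):

```
/// Tab indices for the four inputs of the model row at `model_index`,
/// continuing after the three provider-level fields (indices 1, 2, 3).
fn model_row_tab_indices(model_index: usize) -> [isize; 4] {
    let base = (3 + model_index * 4) as isize;
    [base + 1, base + 2, base + 3, base + 4]
}

#[test]
fn tab_order_is_stable_per_row() {
    assert_eq!(model_row_tab_indices(0), [4, 5, 6, 7]); // first model row
    assert_eq!(model_row_tab_indices(1), [8, 9, 10, 11]); // second model row
}
```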
--- .../add_llm_provider_modal.rs | 144 +++++++++++++----- crates/ui/src/components/modal.rs | 2 +- 2 files changed, 103 insertions(+), 43 deletions(-) diff --git a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs index f41b59132d8c0dfe5ed39fe84775e61b8131cc0a..3427dab0d22c6900a3078f1dcb4cc7e892cce7db 100644 --- a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs +++ b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs @@ -3,16 +3,42 @@ use std::sync::Arc; use anyhow::Result; use collections::HashSet; use fs::Fs; -use gpui::{DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, Render, Task}; +use gpui::{ + DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, Render, ScrollHandle, Task, +}; use language_model::LanguageModelRegistry; use language_models::provider::open_ai_compatible::{AvailableModel, ModelCapabilities}; use settings::{OpenAiCompatibleSettingsContent, update_settings_file}; use ui::{ - Banner, Checkbox, KeyBinding, Modal, ModalFooter, ModalHeader, Section, ToggleState, prelude::*, + Banner, Checkbox, KeyBinding, Modal, ModalFooter, ModalHeader, Section, ToggleState, + WithScrollbar, prelude::*, }; use ui_input::InputField; use workspace::{ModalView, Workspace}; +fn single_line_input( + label: impl Into, + placeholder: impl Into, + text: Option<&str>, + tab_index: isize, + window: &mut Window, + cx: &mut App, +) -> Entity { + cx.new(|cx| { + let input = InputField::new(window, cx, placeholder) + .label(label) + .tab_index(tab_index) + .tab_stop(true); + + if let Some(text) = text { + input + .editor() + .update(cx, |editor, cx| editor.set_text(text, window, cx)); + } + input + }) +} + #[derive(Clone, Copy)] pub enum LlmCompatibleProvider { OpenAi, @@ -41,12 +67,14 @@ struct AddLlmProviderInput { impl AddLlmProviderInput { fn new(provider: LlmCompatibleProvider, window: &mut Window, cx: &mut App) -> Self { - let provider_name = single_line_input("Provider Name", provider.name(), None, window, cx); - let api_url = single_line_input("API URL", provider.api_url(), None, window, cx); + let provider_name = + single_line_input("Provider Name", provider.name(), None, 1, window, cx); + let api_url = single_line_input("API URL", provider.api_url(), None, 2, window, cx); let api_key = single_line_input( "API Key", "000000000000000000000000000000000000000000000000", None, + 3, window, cx, ); @@ -55,12 +83,13 @@ impl AddLlmProviderInput { provider_name, api_url, api_key, - models: vec![ModelInput::new(window, cx)], + models: vec![ModelInput::new(0, window, cx)], } } fn add_model(&mut self, window: &mut Window, cx: &mut App) { - self.models.push(ModelInput::new(window, cx)); + let model_index = self.models.len(); + self.models.push(ModelInput::new(model_index, window, cx)); } fn remove_model(&mut self, index: usize) { @@ -84,11 +113,14 @@ struct ModelInput { } impl ModelInput { - fn new(window: &mut Window, cx: &mut App) -> Self { + fn new(model_index: usize, window: &mut Window, cx: &mut App) -> Self { + let base_tab_index = (3 + (model_index * 4)) as isize; + let model_name = single_line_input( "Model Name", "e.g. 
gpt-4o, claude-opus-4, gemini-2.5-pro", None, + base_tab_index + 1, window, cx, ); @@ -96,6 +128,7 @@ impl ModelInput { "Max Completion Tokens", "200000", Some("200000"), + base_tab_index + 2, window, cx, ); @@ -103,16 +136,26 @@ impl ModelInput { "Max Output Tokens", "Max Output Tokens", Some("32000"), + base_tab_index + 3, window, cx, ); - let max_tokens = single_line_input("Max Tokens", "Max Tokens", Some("200000"), window, cx); + let max_tokens = single_line_input( + "Max Tokens", + "Max Tokens", + Some("200000"), + base_tab_index + 4, + window, + cx, + ); + let ModelCapabilities { tools, images, parallel_tool_calls, prompt_cache_key, } = ModelCapabilities::default(); + Self { name: model_name, max_completion_tokens, @@ -165,24 +208,6 @@ impl ModelInput { } } -fn single_line_input( - label: impl Into, - placeholder: impl Into, - text: Option<&str>, - window: &mut Window, - cx: &mut App, -) -> Entity { - cx.new(|cx| { - let input = InputField::new(window, cx, placeholder).label(label); - if let Some(text) = text { - input - .editor() - .update(cx, |editor, cx| editor.set_text(text, window, cx)); - } - input - }) -} - fn save_provider_to_settings( input: &AddLlmProviderInput, cx: &mut App, @@ -258,6 +283,7 @@ fn save_provider_to_settings( pub struct AddLlmProviderModal { provider: LlmCompatibleProvider, input: AddLlmProviderInput, + scroll_handle: ScrollHandle, focus_handle: FocusHandle, last_error: Option, } @@ -278,6 +304,7 @@ impl AddLlmProviderModal { provider, last_error: None, focus_handle: cx.focus_handle(), + scroll_handle: ScrollHandle::new(), } } @@ -418,6 +445,19 @@ impl AddLlmProviderModal { ) }) } + + fn on_tab(&mut self, _: &menu::SelectNext, window: &mut Window, _: &mut Context) { + window.focus_next(); + } + + fn on_tab_prev( + &mut self, + _: &menu::SelectPrevious, + window: &mut Window, + _: &mut Context, + ) { + window.focus_prev(); + } } impl EventEmitter for AddLlmProviderModal {} @@ -431,15 +471,27 @@ impl Focusable for AddLlmProviderModal { impl ModalView for AddLlmProviderModal {} impl Render for AddLlmProviderModal { - fn render(&mut self, _window: &mut ui::Window, cx: &mut ui::Context) -> impl IntoElement { + fn render(&mut self, window: &mut ui::Window, cx: &mut ui::Context) -> impl IntoElement { let focus_handle = self.focus_handle(cx); - div() + let window_size = window.viewport_size(); + let rem_size = window.rem_size(); + let is_large_window = window_size.height / rem_size > rems_from_px(600.).0; + + let modal_max_height = if is_large_window { + rems_from_px(450.) + } else { + rems_from_px(200.) 
+ }; + + v_flex() .id("add-llm-provider-modal") .key_context("AddLlmProviderModal") .w(rems(34.)) .elevation_3(cx) .on_action(cx.listener(Self::cancel)) + .on_action(cx.listener(Self::on_tab)) + .on_action(cx.listener(Self::on_tab_prev)) .capture_any_mouse_down(cx.listener(|this, _, window, cx| { this.focus_handle(cx).focus(window); })) @@ -462,17 +514,25 @@ impl Render for AddLlmProviderModal { ) }) .child( - v_flex() - .id("modal_content") + div() .size_full() - .max_h_128() - .overflow_y_scroll() - .px(DynamicSpacing::Base12.rems(cx)) - .gap(DynamicSpacing::Base04.rems(cx)) - .child(self.input.provider_name.clone()) - .child(self.input.api_url.clone()) - .child(self.input.api_key.clone()) - .child(self.render_model_section(cx)), + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .child( + v_flex() + .id("modal_content") + .size_full() + .tab_group() + .max_h(modal_max_height) + .pl_3() + .pr_4() + .gap_2() + .overflow_y_scroll() + .track_scroll(&self.scroll_handle) + .child(self.input.provider_name.clone()) + .child(self.input.api_url.clone()) + .child(self.input.api_key.clone()) + .child(self.render_model_section(cx)), + ), ) .footer( ModalFooter::new().end_slot( @@ -642,7 +702,7 @@ mod tests { let cx = setup_test(cx).await; cx.update(|window, cx| { - let model_input = ModelInput::new(window, cx); + let model_input = ModelInput::new(0, window, cx); model_input.name.update(cx, |input, cx| { input.editor().update(cx, |editor, cx| { editor.set_text("somemodel", window, cx); @@ -678,7 +738,7 @@ mod tests { let cx = setup_test(cx).await; cx.update(|window, cx| { - let mut model_input = ModelInput::new(window, cx); + let mut model_input = ModelInput::new(0, window, cx); model_input.name.update(cx, |input, cx| { input.editor().update(cx, |editor, cx| { editor.set_text("somemodel", window, cx); @@ -703,7 +763,7 @@ mod tests { let cx = setup_test(cx).await; cx.update(|window, cx| { - let mut model_input = ModelInput::new(window, cx); + let mut model_input = ModelInput::new(0, window, cx); model_input.name.update(cx, |input, cx| { input.editor().update(cx, |editor, cx| { editor.set_text("somemodel", window, cx); @@ -767,7 +827,7 @@ mod tests { models.iter().enumerate() { if i >= input.models.len() { - input.models.push(ModelInput::new(window, cx)); + input.models.push(ModelInput::new(i, window, cx)); } let model = &mut input.models[i]; set_text(&model.name, name, window, cx); diff --git a/crates/ui/src/components/modal.rs b/crates/ui/src/components/modal.rs index a70f5e1ea5a53a043086f3e102878f3614990d6e..85565f54885a06ddf2bc84f3639ca00fc4acc50e 100644 --- a/crates/ui/src/components/modal.rs +++ b/crates/ui/src/components/modal.rs @@ -77,6 +77,7 @@ impl RenderOnce for Modal { .w_full() .flex_1() .gap(DynamicSpacing::Base08.rems(cx)) + .when(self.footer.is_some(), |this| this.pb_4()) .when_some( self.container_scroll_handler, |this, container_scroll_handle| { @@ -276,7 +277,6 @@ impl RenderOnce for ModalFooter { fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { h_flex() .w_full() - .mt_4() .p(DynamicSpacing::Base08.rems(cx)) .flex_none() .justify_between() From 7e591a7e9ac6272a4fb2387c860138d94c035ad0 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Tue, 18 Nov 2025 12:33:03 -0300 Subject: [PATCH 0190/1030] Fix sweep icon spacing (#42986) Release Notes: - N/A --- assets/icons/sweep_ai.svg | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/assets/icons/sweep_ai.svg b/assets/icons/sweep_ai.svg index 
c78d12727d78ddcc2a86bdb3e46349752cadaf7d..bf3459c7ea9896bc6c1d2297d1f7671cfc8a4d46 100644 --- a/assets/icons/sweep_ai.svg +++ b/assets/icons/sweep_ai.svg @@ -1 +1,32 @@ - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 1c66c3991dadd4faa23e0c03344c4dab05bbb0e1 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Tue, 18 Nov 2025 12:39:27 -0300 Subject: [PATCH 0191/1030] Enable sweep flag for staff (#42987) Release Notes: - N/A --- crates/sweep_ai/src/sweep_ai.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs index c5c0fe31dd8ff530a685b5074b28dac945e97c1c..41e8f1b2932c49c577ccfa8c4a420366a0c02cb9 100644 --- a/crates/sweep_ai/src/sweep_ai.rs +++ b/crates/sweep_ai/src/sweep_ai.rs @@ -37,10 +37,6 @@ pub struct SweepFeatureFlag; impl FeatureFlag for SweepFeatureFlag { const NAME: &str = "sweep-ai"; - - fn enabled_for_staff() -> bool { - false - } } #[derive(Clone)] From c0fadae8813669e0166ab3fa524f1b43559b4d53 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 18 Nov 2025 10:41:19 -0500 Subject: [PATCH 0192/1030] Thought signatures (#42915) Implement Gemini API's [thought signatures](https://ai.google.dev/gemini-api/docs/thinking#signatures) Release Notes: - Added thought signatures for Gemini tool calls --- crates/agent/src/db.rs | 1 + crates/agent/src/edit_agent/evals.rs | 1 + crates/agent/src/tests/mod.rs | 16 + crates/google_ai/src/google_ai.rs | 110 +++++ crates/language_model/src/language_model.rs | 84 ++++ .../language_models/src/provider/anthropic.rs | 2 + .../language_models/src/provider/bedrock.rs | 1 + .../src/provider/copilot_chat.rs | 2 + .../language_models/src/provider/deepseek.rs | 1 + crates/language_models/src/provider/google.rs | 431 ++++++++++++++++++ .../language_models/src/provider/lmstudio.rs | 1 + .../language_models/src/provider/mistral.rs | 1 + crates/language_models/src/provider/ollama.rs | 1 + .../language_models/src/provider/open_ai.rs | 1 + .../src/provider/open_router.rs | 1 + 15 files changed, 654 insertions(+) diff --git a/crates/agent/src/db.rs b/crates/agent/src/db.rs index c72e20571e2761788157a5fd10df147c2b414e4a..84d080ff48107e7173226df81a419b90603d82fd 100644 --- a/crates/agent/src/db.rs +++ b/crates/agent/src/db.rs @@ -150,6 +150,7 @@ impl DbThread { .unwrap_or_default(), input: tool_use.input, is_input_complete: true, + thought_signature: None, }, )); } diff --git a/crates/agent/src/edit_agent/evals.rs b/crates/agent/src/edit_agent/evals.rs index 54aa6ae5c95022ee1ef022aed78d46533de356be..ddb9052b84b986229720efa89b9e912452411d86 100644 --- a/crates/agent/src/edit_agent/evals.rs +++ b/crates/agent/src/edit_agent/evals.rs @@ -1108,6 +1108,7 @@ fn tool_use( raw_input: serde_json::to_string_pretty(&input).unwrap(), input: serde_json::to_value(input).unwrap(), is_input_complete: true, + thought_signature: None, }) } diff --git a/crates/agent/src/tests/mod.rs b/crates/agent/src/tests/mod.rs index 5d4bdce27cc05d1cf46a4b73821f0a97878fd6f4..ffc5dbc6d30e58b5d819c3778b063951b0ed0861 100644 --- a/crates/agent/src/tests/mod.rs +++ b/crates/agent/src/tests/mod.rs @@ -274,6 +274,7 @@ async fn test_prompt_caching(cx: &mut TestAppContext) { raw_input: json!({"text": "test"}).to_string(), input: json!({"text": "test"}), is_input_complete: true, + thought_signature: None, }; fake_model .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone())); @@ -461,6 +462,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) { raw_input: "{}".into(), 
input: json!({}), is_input_complete: true, + thought_signature: None, }, )); fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse( @@ -470,6 +472,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) { raw_input: "{}".into(), input: json!({}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -520,6 +523,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) { raw_input: "{}".into(), input: json!({}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -554,6 +558,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) { raw_input: "{}".into(), input: json!({}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -592,6 +597,7 @@ async fn test_tool_hallucination(cx: &mut TestAppContext) { raw_input: "{}".into(), input: json!({}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -621,6 +627,7 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) { raw_input: "{}".into(), input: serde_json::to_value(&EchoToolInput { text: "def".into() }).unwrap(), is_input_complete: true, + thought_signature: None, }; fake_model .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone())); @@ -731,6 +738,7 @@ async fn test_send_after_tool_use_limit(cx: &mut TestAppContext) { raw_input: "{}".into(), input: serde_json::to_value(&EchoToolInput { text: "def".into() }).unwrap(), is_input_complete: true, + thought_signature: None, }; let tool_result = LanguageModelToolResult { tool_use_id: "tool_id_1".into(), @@ -1037,6 +1045,7 @@ async fn test_mcp_tools(cx: &mut TestAppContext) { raw_input: json!({"text": "test"}).to_string(), input: json!({"text": "test"}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -1080,6 +1089,7 @@ async fn test_mcp_tools(cx: &mut TestAppContext) { raw_input: json!({"text": "mcp"}).to_string(), input: json!({"text": "mcp"}), is_input_complete: true, + thought_signature: None, }, )); fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse( @@ -1089,6 +1099,7 @@ async fn test_mcp_tools(cx: &mut TestAppContext) { raw_input: json!({"text": "native"}).to_string(), input: json!({"text": "native"}), is_input_complete: true, + thought_signature: None, }, )); fake_model.end_last_completion_stream(); @@ -1788,6 +1799,7 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) { raw_input: "{}".into(), input: json!({}), is_input_complete: true, + thought_signature: None, }; let echo_tool_use = LanguageModelToolUse { id: "tool_id_2".into(), @@ -1795,6 +1807,7 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) { raw_input: json!({"text": "test"}).to_string(), input: json!({"text": "test"}), is_input_complete: true, + thought_signature: None, }; fake_model.send_last_completion_stream_text_chunk("Hi!"); fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse( @@ -2000,6 +2013,7 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) { raw_input: input.to_string(), input, is_input_complete: false, + thought_signature: None, }, )); @@ -2012,6 +2026,7 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) { raw_input: input.to_string(), input, is_input_complete: true, + thought_signature: None, }, )); 
fake_model.end_last_completion_stream(); @@ -2214,6 +2229,7 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) { raw_input: json!({"text": "test"}).to_string(), input: json!({"text": "test"}), is_input_complete: true, + thought_signature: None, }; fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse( tool_use_1.clone(), diff --git a/crates/google_ai/src/google_ai.rs b/crates/google_ai/src/google_ai.rs index 9b7e5ec8d1c42fc846d131cfd063de5bba8287ae..84f8e8ef8dbaac1d55f73515f625b670a4a52709 100644 --- a/crates/google_ai/src/google_ai.rs +++ b/crates/google_ai/src/google_ai.rs @@ -229,6 +229,10 @@ pub struct GenerativeContentBlob { #[serde(rename_all = "camelCase")] pub struct FunctionCallPart { pub function_call: FunctionCall, + /// Thought signature returned by the model for function calls. + /// Only present on the first function call in parallel call scenarios. + #[serde(skip_serializing_if = "Option::is_none")] + pub thought_signature: Option, } #[derive(Debug, Serialize, Deserialize)] @@ -636,3 +640,109 @@ impl std::fmt::Display for Model { write!(f, "{}", self.id()) } } + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn test_function_call_part_with_signature_serializes_correctly() { + let part = FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some("test_signature".to_string()), + }; + + let serialized = serde_json::to_value(&part).unwrap(); + + assert_eq!(serialized["functionCall"]["name"], "test_function"); + assert_eq!(serialized["functionCall"]["args"]["arg"], "value"); + assert_eq!(serialized["thoughtSignature"], "test_signature"); + } + + #[test] + fn test_function_call_part_without_signature_omits_field() { + let part = FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: None, + }; + + let serialized = serde_json::to_value(&part).unwrap(); + + assert_eq!(serialized["functionCall"]["name"], "test_function"); + assert_eq!(serialized["functionCall"]["args"]["arg"], "value"); + // thoughtSignature field should not be present when None + assert!(serialized.get("thoughtSignature").is_none()); + } + + #[test] + fn test_function_call_part_deserializes_with_signature() { + let json = json!({ + "functionCall": { + "name": "test_function", + "args": {"arg": "value"} + }, + "thoughtSignature": "test_signature" + }); + + let part: FunctionCallPart = serde_json::from_value(json).unwrap(); + + assert_eq!(part.function_call.name, "test_function"); + assert_eq!(part.thought_signature, Some("test_signature".to_string())); + } + + #[test] + fn test_function_call_part_deserializes_without_signature() { + let json = json!({ + "functionCall": { + "name": "test_function", + "args": {"arg": "value"} + } + }); + + let part: FunctionCallPart = serde_json::from_value(json).unwrap(); + + assert_eq!(part.function_call.name, "test_function"); + assert_eq!(part.thought_signature, None); + } + + #[test] + fn test_function_call_part_round_trip() { + let original = FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value", "nested": {"key": "val"}}), + }, + thought_signature: Some("round_trip_signature".to_string()), + }; + + let serialized = serde_json::to_value(&original).unwrap(); + let deserialized: FunctionCallPart = serde_json::from_value(serialized).unwrap(); + + 
assert_eq!(deserialized.function_call.name, original.function_call.name); + assert_eq!(deserialized.function_call.args, original.function_call.args); + assert_eq!(deserialized.thought_signature, original.thought_signature); + } + + #[test] + fn test_function_call_part_with_empty_signature_serializes() { + let part = FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some("".to_string()), + }; + + let serialized = serde_json::to_value(&part).unwrap(); + + // Empty string should still be serialized (normalization happens at a higher level) + assert_eq!(serialized["thoughtSignature"], ""); + } +} diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index 606b0921b29f056ddea22947f08b2686af37d639..785bb0dbdc7b6bb82d052cce16eb1c4b2fd66a48 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -515,6 +515,9 @@ pub struct LanguageModelToolUse { pub raw_input: String, pub input: serde_json::Value, pub is_input_complete: bool, + /// Thought signature the model sent us. Some models require that this + /// signature be preserved and sent back in conversation history for validation. + pub thought_signature: Option, } pub struct LanguageModelTextStream { @@ -921,4 +924,85 @@ mod tests { ), } } + + #[test] + fn test_language_model_tool_use_serializes_with_signature() { + use serde_json::json; + + let tool_use = LanguageModelToolUse { + id: LanguageModelToolUseId::from("test_id"), + name: "test_tool".into(), + raw_input: json!({"arg": "value"}).to_string(), + input: json!({"arg": "value"}), + is_input_complete: true, + thought_signature: Some("test_signature".to_string()), + }; + + let serialized = serde_json::to_value(&tool_use).unwrap(); + + assert_eq!(serialized["id"], "test_id"); + assert_eq!(serialized["name"], "test_tool"); + assert_eq!(serialized["thought_signature"], "test_signature"); + } + + #[test] + fn test_language_model_tool_use_deserializes_with_missing_signature() { + use serde_json::json; + + let json = json!({ + "id": "test_id", + "name": "test_tool", + "raw_input": "{\"arg\":\"value\"}", + "input": {"arg": "value"}, + "is_input_complete": true + }); + + let tool_use: LanguageModelToolUse = serde_json::from_value(json).unwrap(); + + assert_eq!(tool_use.id, LanguageModelToolUseId::from("test_id")); + assert_eq!(tool_use.name.as_ref(), "test_tool"); + assert_eq!(tool_use.thought_signature, None); + } + + #[test] + fn test_language_model_tool_use_round_trip_with_signature() { + use serde_json::json; + + let original = LanguageModelToolUse { + id: LanguageModelToolUseId::from("round_trip_id"), + name: "round_trip_tool".into(), + raw_input: json!({"key": "value"}).to_string(), + input: json!({"key": "value"}), + is_input_complete: true, + thought_signature: Some("round_trip_sig".to_string()), + }; + + let serialized = serde_json::to_value(&original).unwrap(); + let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap(); + + assert_eq!(deserialized.id, original.id); + assert_eq!(deserialized.name, original.name); + assert_eq!(deserialized.thought_signature, original.thought_signature); + } + + #[test] + fn test_language_model_tool_use_round_trip_without_signature() { + use serde_json::json; + + let original = LanguageModelToolUse { + id: LanguageModelToolUseId::from("no_sig_id"), + name: "no_sig_tool".into(), + raw_input: json!({"key": "value"}).to_string(), + input: 
json!({"key": "value"}), + is_input_complete: true, + thought_signature: None, + }; + + let serialized = serde_json::to_value(&original).unwrap(); + let deserialized: LanguageModelToolUse = serde_json::from_value(serialized).unwrap(); + + assert_eq!(deserialized.id, original.id); + assert_eq!(deserialized.name, original.name); + assert_eq!(deserialized.thought_signature, None); + } } diff --git a/crates/language_models/src/provider/anthropic.rs b/crates/language_models/src/provider/anthropic.rs index 287c76fc6dfea530ce53b48178024ef185b98134..2491e8277a8b2632f6835af13736c23e94966c4c 100644 --- a/crates/language_models/src/provider/anthropic.rs +++ b/crates/language_models/src/provider/anthropic.rs @@ -711,6 +711,7 @@ impl AnthropicEventMapper { is_input_complete: false, raw_input: tool_use.input_json.clone(), input, + thought_signature: None, }, ))]; } @@ -734,6 +735,7 @@ impl AnthropicEventMapper { is_input_complete: true, input, raw_input: tool_use.input_json.clone(), + thought_signature: None, }, )), Err(json_parse_err) => { diff --git a/crates/language_models/src/provider/bedrock.rs b/crates/language_models/src/provider/bedrock.rs index 61f36428d2e69af013103c8ca06b38d8d4a96e8d..9672d61f90512be62ea58e77682d63cc8553710f 100644 --- a/crates/language_models/src/provider/bedrock.rs +++ b/crates/language_models/src/provider/bedrock.rs @@ -970,6 +970,7 @@ pub fn map_to_language_model_completion_events( is_input_complete: true, raw_input: tool_use.input_json, input, + thought_signature: None, }, )) }), diff --git a/crates/language_models/src/provider/copilot_chat.rs b/crates/language_models/src/provider/copilot_chat.rs index 2f2469fa770821c208e037665c02d9ea8c20408f..f62b899318ae56452509f8d9e7cca05f8859cf27 100644 --- a/crates/language_models/src/provider/copilot_chat.rs +++ b/crates/language_models/src/provider/copilot_chat.rs @@ -458,6 +458,7 @@ pub fn map_to_language_model_completion_events( is_input_complete: true, input, raw_input: tool_call.arguments, + thought_signature: None, }, )), Err(error) => Ok( @@ -560,6 +561,7 @@ impl CopilotResponsesEventMapper { is_input_complete: true, input, raw_input: arguments.clone(), + thought_signature: None, }, ))), Err(error) => { diff --git a/crates/language_models/src/provider/deepseek.rs b/crates/language_models/src/provider/deepseek.rs index 1d573fd964d0f183393bb766c492566f622a4901..4bc7164f421bfbaa075c72faff7f731c0defcdba 100644 --- a/crates/language_models/src/provider/deepseek.rs +++ b/crates/language_models/src/provider/deepseek.rs @@ -501,6 +501,7 @@ impl DeepSeekEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments.clone(), + thought_signature: None, }, )), Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { diff --git a/crates/language_models/src/provider/google.rs b/crates/language_models/src/provider/google.rs index e33b118e30fca60e147bd2f311e844626da9b368..68b6f976418b2125027e5800527f73cc49e5a1bb 100644 --- a/crates/language_models/src/provider/google.rs +++ b/crates/language_models/src/provider/google.rs @@ -439,11 +439,15 @@ pub fn into_google( })] } language_model::MessageContent::ToolUse(tool_use) => { + // Normalize empty string signatures to None + let thought_signature = tool_use.thought_signature.filter(|s| !s.is_empty()); + vec![Part::FunctionCallPart(google_ai::FunctionCallPart { function_call: google_ai::FunctionCall { name: tool_use.name.to_string(), args: tool_use.input, }, + thought_signature, })] } language_model::MessageContent::ToolResult(tool_result) => { @@ -655,6 +659,11 @@ impl 
GoogleEventMapper { let id: LanguageModelToolUseId = format!("{}-{}", name, next_tool_id).into(); + // Normalize empty string signatures to None + let thought_signature = function_call_part + .thought_signature + .filter(|s| !s.is_empty()); + events.push(Ok(LanguageModelCompletionEvent::ToolUse( LanguageModelToolUse { id, @@ -662,6 +671,7 @@ impl GoogleEventMapper { is_input_complete: true, raw_input: function_call_part.function_call.args.to_string(), input: function_call_part.function_call.args, + thought_signature, }, ))); } @@ -891,3 +901,424 @@ impl Render for ConfigurationView { } } } + +#[cfg(test)] +mod tests { + use super::*; + use google_ai::{ + Content, FunctionCall, FunctionCallPart, GenerateContentCandidate, GenerateContentResponse, + Part, Role as GoogleRole, TextPart, + }; + use language_model::{LanguageModelToolUseId, MessageContent, Role}; + use serde_json::json; + + #[test] + fn test_function_call_with_signature_creates_tool_use_with_signature() { + let mut mapper = GoogleEventMapper::new(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some("test_signature_123".to_string()), + })], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + assert_eq!(events.len(), 2); // ToolUse event + Stop event + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + assert_eq!(tool_use.name.as_ref(), "test_function"); + assert_eq!( + tool_use.thought_signature.as_deref(), + Some("test_signature_123") + ); + } else { + panic!("Expected ToolUse event"); + } + } + + #[test] + fn test_function_call_without_signature_has_none() { + let mut mapper = GoogleEventMapper::new(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: None, + })], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + assert_eq!(tool_use.thought_signature, None); + } else { + panic!("Expected ToolUse event"); + } + } + + #[test] + fn test_empty_string_signature_normalized_to_none() { + let mut mapper = GoogleEventMapper::new(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some("".to_string()), + })], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + if let 
Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + assert_eq!(tool_use.thought_signature, None); + } else { + panic!("Expected ToolUse event"); + } + } + + #[test] + fn test_parallel_function_calls_preserve_signatures() { + let mut mapper = GoogleEventMapper::new(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![ + Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "function_1".to_string(), + args: json!({"arg": "value1"}), + }, + thought_signature: Some("signature_1".to_string()), + }), + Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "function_2".to_string(), + args: json!({"arg": "value2"}), + }, + thought_signature: None, + }), + ], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + assert_eq!(events.len(), 3); // 2 ToolUse events + Stop event + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + assert_eq!(tool_use.name.as_ref(), "function_1"); + assert_eq!(tool_use.thought_signature.as_deref(), Some("signature_1")); + } else { + panic!("Expected ToolUse event for function_1"); + } + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[1] { + assert_eq!(tool_use.name.as_ref(), "function_2"); + assert_eq!(tool_use.thought_signature, None); + } else { + panic!("Expected ToolUse event for function_2"); + } + } + + #[test] + fn test_tool_use_with_signature_converts_to_function_call_part() { + let tool_use = language_model::LanguageModelToolUse { + id: LanguageModelToolUseId::from("test_id"), + name: "test_function".into(), + raw_input: json!({"arg": "value"}).to_string(), + input: json!({"arg": "value"}), + is_input_complete: true, + thought_signature: Some("test_signature_456".to_string()), + }; + + let request = super::into_google( + LanguageModelRequest { + messages: vec![language_model::LanguageModelRequestMessage { + role: Role::Assistant, + content: vec![MessageContent::ToolUse(tool_use)], + cache: false, + }], + ..Default::default() + }, + "gemini-2.5-flash".to_string(), + GoogleModelMode::Default, + ); + + assert_eq!(request.contents[0].parts.len(), 1); + if let Part::FunctionCallPart(fc_part) = &request.contents[0].parts[0] { + assert_eq!(fc_part.function_call.name, "test_function"); + assert_eq!( + fc_part.thought_signature.as_deref(), + Some("test_signature_456") + ); + } else { + panic!("Expected FunctionCallPart"); + } + } + + #[test] + fn test_tool_use_without_signature_omits_field() { + let tool_use = language_model::LanguageModelToolUse { + id: LanguageModelToolUseId::from("test_id"), + name: "test_function".into(), + raw_input: json!({"arg": "value"}).to_string(), + input: json!({"arg": "value"}), + is_input_complete: true, + thought_signature: None, + }; + + let request = super::into_google( + LanguageModelRequest { + messages: vec![language_model::LanguageModelRequestMessage { + role: Role::Assistant, + content: vec![MessageContent::ToolUse(tool_use)], + cache: false, + }], + ..Default::default() + }, + "gemini-2.5-flash".to_string(), + GoogleModelMode::Default, + ); + + assert_eq!(request.contents[0].parts.len(), 1); + if let Part::FunctionCallPart(fc_part) = &request.contents[0].parts[0] { + assert_eq!(fc_part.thought_signature, None); + } else { + 
panic!("Expected FunctionCallPart"); + } + } + + #[test] + fn test_empty_signature_in_tool_use_normalized_to_none() { + let tool_use = language_model::LanguageModelToolUse { + id: LanguageModelToolUseId::from("test_id"), + name: "test_function".into(), + raw_input: json!({"arg": "value"}).to_string(), + input: json!({"arg": "value"}), + is_input_complete: true, + thought_signature: Some("".to_string()), + }; + + let request = super::into_google( + LanguageModelRequest { + messages: vec![language_model::LanguageModelRequestMessage { + role: Role::Assistant, + content: vec![MessageContent::ToolUse(tool_use)], + cache: false, + }], + ..Default::default() + }, + "gemini-2.5-flash".to_string(), + GoogleModelMode::Default, + ); + + if let Part::FunctionCallPart(fc_part) = &request.contents[0].parts[0] { + assert_eq!(fc_part.thought_signature, None); + } else { + panic!("Expected FunctionCallPart"); + } + } + + #[test] + fn test_round_trip_preserves_signature() { + let mut mapper = GoogleEventMapper::new(); + + // Simulate receiving a response from Google with a signature + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some("round_trip_sig".to_string()), + })], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + let tool_use = if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + tool_use.clone() + } else { + panic!("Expected ToolUse event"); + }; + + // Convert back to Google format + let request = super::into_google( + LanguageModelRequest { + messages: vec![language_model::LanguageModelRequestMessage { + role: Role::Assistant, + content: vec![MessageContent::ToolUse(tool_use)], + cache: false, + }], + ..Default::default() + }, + "gemini-2.5-flash".to_string(), + GoogleModelMode::Default, + ); + + // Verify signature is preserved + if let Part::FunctionCallPart(fc_part) = &request.contents[0].parts[0] { + assert_eq!(fc_part.thought_signature.as_deref(), Some("round_trip_sig")); + } else { + panic!("Expected FunctionCallPart"); + } + } + + #[test] + fn test_mixed_text_and_function_call_with_signature() { + let mut mapper = GoogleEventMapper::new(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![ + Part::TextPart(TextPart { + text: "I'll help with that.".to_string(), + }), + Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "helper_function".to_string(), + args: json!({"query": "help"}), + }, + thought_signature: Some("mixed_sig".to_string()), + }), + ], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + assert_eq!(events.len(), 3); // Text event + ToolUse event + Stop event + + if let Ok(LanguageModelCompletionEvent::Text(text)) = &events[0] { + assert_eq!(text, "I'll help with that."); + } else { + panic!("Expected Text event"); + } + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[1] 
{ + assert_eq!(tool_use.name.as_ref(), "helper_function"); + assert_eq!(tool_use.thought_signature.as_deref(), Some("mixed_sig")); + } else { + panic!("Expected ToolUse event"); + } + } + + #[test] + fn test_special_characters_in_signature_preserved() { + let mut mapper = GoogleEventMapper::new(); + + let signature_with_special_chars = "sig<>\"'&%$#@!{}[]".to_string(); + + let response = GenerateContentResponse { + candidates: Some(vec![GenerateContentCandidate { + index: Some(0), + content: Content { + parts: vec![Part::FunctionCallPart(FunctionCallPart { + function_call: FunctionCall { + name: "test_function".to_string(), + args: json!({"arg": "value"}), + }, + thought_signature: Some(signature_with_special_chars.clone()), + })], + role: GoogleRole::Model, + }, + finish_reason: None, + finish_message: None, + safety_ratings: None, + citation_metadata: None, + }]), + prompt_feedback: None, + usage_metadata: None, + }; + + let events = mapper.map_event(response); + + if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = &events[0] { + assert_eq!( + tool_use.thought_signature.as_deref(), + Some(signature_with_special_chars.as_str()) + ); + } else { + panic!("Expected ToolUse event"); + } + } +} diff --git a/crates/language_models/src/provider/lmstudio.rs b/crates/language_models/src/provider/lmstudio.rs index c0b3509c0e2c9636ca48cdb0de0cc6ed32a2b792..a16bd351a9d779bcba5b2a4111fc62e0dc9dc639 100644 --- a/crates/language_models/src/provider/lmstudio.rs +++ b/crates/language_models/src/provider/lmstudio.rs @@ -569,6 +569,7 @@ impl LmStudioEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments, + thought_signature: None, }, )), Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { diff --git a/crates/language_models/src/provider/mistral.rs b/crates/language_models/src/provider/mistral.rs index 2d30dfca21d8cbc4fd1be3575801919148f705b3..0c45913bea83e32c508daa6c6579ecd0382b3dc0 100644 --- a/crates/language_models/src/provider/mistral.rs +++ b/crates/language_models/src/provider/mistral.rs @@ -720,6 +720,7 @@ impl MistralEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments, + thought_signature: None, }, ))), Err(error) => { diff --git a/crates/language_models/src/provider/ollama.rs b/crates/language_models/src/provider/ollama.rs index b6870f5f72b08d2ca4decc101deae59b6a56c224..8345db3cce9fc51c487ec039c4257bfb39b162c3 100644 --- a/crates/language_models/src/provider/ollama.rs +++ b/crates/language_models/src/provider/ollama.rs @@ -592,6 +592,7 @@ fn map_to_language_model_completion_events( raw_input: function.arguments.to_string(), input: function.arguments, is_input_complete: true, + thought_signature: None, }); events.push(Ok(event)); state.used_tools = true; diff --git a/crates/language_models/src/provider/open_ai.rs b/crates/language_models/src/provider/open_ai.rs index 792d280950ceafa24cdf5e4104b80dd49bd45f3f..ee62522882c214dfa1384f75ced6eba46c9ec35f 100644 --- a/crates/language_models/src/provider/open_ai.rs +++ b/crates/language_models/src/provider/open_ai.rs @@ -586,6 +586,7 @@ impl OpenAiEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments.clone(), + thought_signature: None, }, )), Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { diff --git a/crates/language_models/src/provider/open_router.rs b/crates/language_models/src/provider/open_router.rs index 6326968a916a7d6a21811ee22c328564e1ec4682..c98ee02efd7b7af32ea6c649f29eef685753ba7d 100644 --- 
a/crates/language_models/src/provider/open_router.rs +++ b/crates/language_models/src/provider/open_router.rs @@ -635,6 +635,7 @@ impl OpenRouterEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments.clone(), + thought_signature: None, }, )), Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { From 03132921c7ad0c6e9d5f210c1a10ed64f3d53a54 Mon Sep 17 00:00:00 2001 From: Artur Shirokov Date: Tue, 18 Nov 2025 16:39:08 +0000 Subject: [PATCH 0193/1030] Add HTTP transport support for MCP servers (#39021) ### What this solves This PR adds support for HTTP and SSE (Server-Sent Events) transports to Zed's context server implementation, enabling communication with remote MCP servers. Currently, Zed only supports local MCP servers via stdio transport. This limitation prevents users from: - Connecting to cloud-hosted MCP servers - Using MCP servers running in containers or on remote machines - Leveraging MCP servers that are designed to work over HTTP/SSE ### Why it's important The MCP (Model Context Protocol) specification includes HTTP/SSE as standard transport options, and many MCP server implementations are being built with these transports in mind. Without this support, Zed users are limited to a subset of the MCP ecosystem. This is particularly important for: - Enterprise users who need to connect to centralized MCP services - Developers working with MCP servers that require network isolation - Users wanting to leverage cloud-based context providers (e.g., knowledge bases, API integrations) ### Implementation approach The implementation follows Zed's existing architectural patterns: - **Transports**: Added `HttpTransport` and `SseTransport` to the `context_server` crate, built on top of the existing `http_client` crate - **Async handling**: Uses `gpui::spawn` for network operations instead of introducing a new Tokio runtime - **Settings**: Extended `ContextServerSettings` enum with a `Remote` variant to support URL-based configuration - **UI**: Updated the agent configuration UI with an "Add Remote Server" option and dedicated modal for remote server management ### Changes included - [x] HTTP transport implementation with request/response handling - [x] SSE transport for server-sent events streaming - [x] `build_transport` function to construct appropriate transport based on URL scheme - [x] Settings system updates to support remote server configuration - [x] UI updates for adding/editing remote servers - [x] Unit tests using `FakeHttpClient` for both transports - [x] Integration tests (WIP) - [x] Documentation updates (WIP) ### Testing - Unit tests for both `HttpTransport` and `SseTransport` using mocked HTTP client - Manual testing with example MCP servers over HTTP/SSE - Settings validation and UI interaction testing ### Screenshots/Recordings [TODO: Add screenshots of the new "Add Remote Server" UI and configuration modal] ### Example configuration Users can now configure remote MCP servers in their `settings.json`: ```json { "context_servers": { "my-remote-server": { "enabled": true, "url": "http://localhost:3000/mcp" } } } ``` ### AI assistance disclosure I used AI to help with: - Understanding the MCP protocol specification and how HTTP/SSE transports should work - Reviewing Zed's existing patterns for async operations and suggesting consistent approaches - Generating boilerplate for test cases - Debugging SSE streaming issues All code has been manually reviewed, tested, and adapted to fit Zed's architecture. 
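As a further illustration of the settings shape, a remote server that needs authentication could be configured roughly like this (this extends the example configuration above; the header name and token value are placeholders):

```json
{
  "context_servers": {
    "my-remote-server": {
      "enabled": true,
      "url": "http://localhost:3000/mcp",
      "headers": {
        "Authorization": "Bearer YOUR_TOKEN"
      }
    }
  }
}
```

Headers are passed through to the remote endpoint verbatim, so whatever scheme the server expects can be supplied this way.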
The core logic, architectural decisions, and integration with Zed's systems were done with human understanding of the codebase. AI was primarily used as a reference tool and for getting unstuck on specific technical issues. Release notes: * You can now configure MCP Servers that connect over HTTP in your settings file. These are not yet available in the extensions API. ``` { "context_servers": { "my-remote-server": { "enabled": true, "url": "http://localhost:3000/mcp" } } } ``` --------- Co-authored-by: Conrad Irwin --- Cargo.lock | 1 + crates/agent_servers/src/acp.rs | 81 ++++-- crates/agent_ui/src/agent_configuration.rs | 45 ++- .../configure_context_server_modal.rs | 165 ++++++++++- crates/context_server/Cargo.toml | 6 +- crates/context_server/src/context_server.rs | 24 ++ crates/context_server/src/transport.rs | 5 +- crates/context_server/src/transport/http.rs | 259 ++++++++++++++++++ .../src/wasm_host/wit/since_v0_6_0.rs | 3 + crates/project/src/context_server_store.rs | 175 ++++++++---- crates/project/src/project_settings.rs | 30 ++ .../settings/src/settings_content/project.rs | 21 +- crates/zed/src/zed.rs | 127 --------- docs/src/ai/mcp.md | 7 +- 14 files changed, 709 insertions(+), 240 deletions(-) create mode 100644 crates/context_server/src/transport/http.rs diff --git a/Cargo.lock b/Cargo.lock index c0f6ef03c296306a73264461a8767ccd1b346c20..09ee945d1c34cf1ae93a3cc538d62860ad3a1c78 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3690,6 +3690,7 @@ dependencies = [ "collections", "futures 0.3.31", "gpui", + "http_client", "log", "net", "parking_lot", diff --git a/crates/agent_servers/src/acp.rs b/crates/agent_servers/src/acp.rs index 15f56bf2ed4ee100fd22dc0d7df73f2e8a3274ea..2ec9beb71bf08c90ea85b8752410405714d31537 100644 --- a/crates/agent_servers/src/acp.rs +++ b/crates/agent_servers/src/acp.rs @@ -247,37 +247,58 @@ impl AgentConnection for AcpConnection { let default_mode = self.default_mode.clone(); let cwd = cwd.to_path_buf(); let context_server_store = project.read(cx).context_server_store().read(cx); - let mcp_servers = if project.read(cx).is_local() { - context_server_store - .configured_server_ids() - .iter() - .filter_map(|id| { - let configuration = context_server_store.configuration_for_server(id)?; - let command = configuration.command(); - Some(acp::McpServer::Stdio { - name: id.0.to_string(), - command: command.path.clone(), - args: command.args.clone(), - env: if let Some(env) = command.env.as_ref() { - env.iter() - .map(|(name, value)| acp::EnvVariable { - name: name.clone(), - value: value.clone(), - meta: None, - }) - .collect() - } else { - vec![] - }, + let mcp_servers = + if project.read(cx).is_local() { + context_server_store + .configured_server_ids() + .iter() + .filter_map(|id| { + let configuration = context_server_store.configuration_for_server(id)?; + match &*configuration { + project::context_server_store::ContextServerConfiguration::Custom { + command, + .. + } + | project::context_server_store::ContextServerConfiguration::Extension { + command, + .. 
+ } => Some(acp::McpServer::Stdio { + name: id.0.to_string(), + command: command.path.clone(), + args: command.args.clone(), + env: if let Some(env) = command.env.as_ref() { + env.iter() + .map(|(name, value)| acp::EnvVariable { + name: name.clone(), + value: value.clone(), + meta: None, + }) + .collect() + } else { + vec![] + }, + }), + project::context_server_store::ContextServerConfiguration::Http { + url, + headers, + } => Some(acp::McpServer::Http { + name: id.0.to_string(), + url: url.to_string(), + headers: headers.iter().map(|(name, value)| acp::HttpHeader { + name: name.clone(), + value: value.clone(), + meta: None, + }).collect(), + }), + } }) - }) - .collect() - } else { - // In SSH projects, the external agent is running on the remote - // machine, and currently we only run MCP servers on the local - // machine. So don't pass any MCP servers to the agent in that case. - Vec::new() - }; + .collect() + } else { + // In SSH projects, the external agent is running on the remote + // machine, and currently we only run MCP servers on the local + // machine. So don't pass any MCP servers to the agent in that case. + Vec::new() + }; cx.spawn(async move |cx| { let response = conn diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index 125dc223796f6d9b7e96bee452bee25a2409adb1..60f8606baf7bcbd55a7e4bd9ee6dc44f394319bc 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -1,5 +1,5 @@ mod add_llm_provider_modal; -mod configure_context_server_modal; +pub mod configure_context_server_modal; mod configure_context_server_tools_modal; mod manage_profiles_modal; mod tool_picker; @@ -46,9 +46,8 @@ pub(crate) use configure_context_server_modal::ConfigureContextServerModal; pub(crate) use configure_context_server_tools_modal::ConfigureContextServerToolsModal; pub(crate) use manage_profiles_modal::ManageProfilesModal; -use crate::{ - AddContextServer, - agent_configuration::add_llm_provider_modal::{AddLlmProviderModal, LlmCompatibleProvider}, +use crate::agent_configuration::add_llm_provider_modal::{ + AddLlmProviderModal, LlmCompatibleProvider, }; pub struct AgentConfiguration { @@ -553,7 +552,9 @@ impl AgentConfiguration { move |window, cx| { Some(ContextMenu::build(window, cx, |menu, _window, _cx| { menu.entry("Add Custom Server", None, { - |window, cx| window.dispatch_action(AddContextServer.boxed_clone(), cx) + |window, cx| { + window.dispatch_action(crate::AddContextServer.boxed_clone(), cx) + } }) .entry("Install from Extensions", None, { |window, cx| { @@ -651,7 +652,7 @@ impl AgentConfiguration { let is_running = matches!(server_status, ContextServerStatus::Running); let item_id = SharedString::from(context_server_id.0.clone()); // Servers without a configuration can only be provided by extensions. - let provided_by_extension = server_configuration.is_none_or(|config| { + let provided_by_extension = server_configuration.as_ref().is_none_or(|config| { matches!( config.as_ref(), ContextServerConfiguration::Extension { .. } @@ -707,7 +708,10 @@ impl AgentConfiguration { "Server is stopped.", ), }; - + let is_remote = server_configuration + .as_ref() + .map(|config| matches!(config.as_ref(), ContextServerConfiguration::Http { .. 
})) + .unwrap_or(false); let context_server_configuration_menu = PopoverMenu::new("context-server-config-menu") .trigger_with_tooltip( IconButton::new("context-server-config-menu", IconName::Settings) @@ -730,14 +734,25 @@ impl AgentConfiguration { let language_registry = language_registry.clone(); let workspace = workspace.clone(); move |window, cx| { - ConfigureContextServerModal::show_modal_for_existing_server( - context_server_id.clone(), - language_registry.clone(), - workspace.clone(), - window, - cx, - ) - .detach_and_log_err(cx); + if is_remote { + crate::agent_configuration::configure_context_server_modal::ConfigureContextServerModal::show_modal_for_existing_server( + context_server_id.clone(), + language_registry.clone(), + workspace.clone(), + window, + cx, + ) + .detach(); + } else { + ConfigureContextServerModal::show_modal_for_existing_server( + context_server_id.clone(), + language_registry.clone(), + workspace.clone(), + window, + cx, + ) + .detach(); + } } }).when(tool_count > 0, |this| this.entry("View Tools", None, { let context_server_id = context_server_id.clone(); diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index ed1e8afd1b3b3220d31119f7292b6b0934cd2ba7..ebea8c25fb68a8a5055d4ccaa8b9068583c4b91c 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -4,6 +4,7 @@ use std::{ }; use anyhow::{Context as _, Result}; +use collections::HashMap; use context_server::{ContextServerCommand, ContextServerId}; use editor::{Editor, EditorElement, EditorStyle}; use gpui::{ @@ -20,6 +21,7 @@ use project::{ project_settings::{ContextServerSettings, ProjectSettings}, worktree_store::WorktreeStore, }; +use serde::Deserialize; use settings::{Settings as _, update_settings_file}; use theme::ThemeSettings; use ui::{ @@ -37,6 +39,11 @@ enum ConfigurationTarget { id: ContextServerId, command: ContextServerCommand, }, + ExistingHttp { + id: ContextServerId, + url: String, + headers: HashMap, + }, Extension { id: ContextServerId, repository_url: Option, @@ -47,9 +54,11 @@ enum ConfigurationTarget { enum ConfigurationSource { New { editor: Entity, + is_http: bool, }, Existing { editor: Entity, + is_http: bool, }, Extension { id: ContextServerId, @@ -97,6 +106,7 @@ impl ConfigurationSource { match target { ConfigurationTarget::New => ConfigurationSource::New { editor: create_editor(context_server_input(None), jsonc_language, window, cx), + is_http: false, }, ConfigurationTarget::Existing { id, command } => ConfigurationSource::Existing { editor: create_editor( @@ -105,6 +115,20 @@ impl ConfigurationSource { window, cx, ), + is_http: false, + }, + ConfigurationTarget::ExistingHttp { + id, + url, + headers: auth, + } => ConfigurationSource::Existing { + editor: create_editor( + context_server_http_input(Some((id, url, auth))), + jsonc_language, + window, + cx, + ), + is_http: true, }, ConfigurationTarget::Extension { id, @@ -141,16 +165,30 @@ impl ConfigurationSource { fn output(&self, cx: &mut App) -> Result<(ContextServerId, ContextServerSettings)> { match self { - ConfigurationSource::New { editor } | ConfigurationSource::Existing { editor } => { - parse_input(&editor.read(cx).text(cx)).map(|(id, command)| { - ( - id, - ContextServerSettings::Custom { - enabled: true, - command, - }, - ) - }) + ConfigurationSource::New { editor, is_http } + | 
ConfigurationSource::Existing { editor, is_http } => { + if *is_http { + parse_http_input(&editor.read(cx).text(cx)).map(|(id, url, auth)| { + ( + id, + ContextServerSettings::Http { + enabled: true, + url, + headers: auth, + }, + ) + }) + } else { + parse_input(&editor.read(cx).text(cx)).map(|(id, command)| { + ( + id, + ContextServerSettings::Custom { + enabled: true, + command, + }, + ) + }) + } } ConfigurationSource::Extension { id, @@ -212,6 +250,66 @@ fn context_server_input(existing: Option<(ContextServerId, ContextServerCommand) ) } +fn context_server_http_input( + existing: Option<(ContextServerId, String, HashMap)>, +) -> String { + let (name, url, headers) = match existing { + Some((id, url, headers)) => { + let header = if headers.is_empty() { + r#"// "Authorization": "Bearer "#.to_string() + } else { + let json = serde_json::to_string_pretty(&headers).unwrap(); + let mut lines = json.split("\n").collect::>(); + if lines.len() > 1 { + lines.remove(0); + lines.pop(); + } + lines + .into_iter() + .map(|line| format!(" {}", line)) + .collect::() + }; + (id.0.to_string(), url, header) + } + None => ( + "some-remote-server".to_string(), + "https://example.com/mcp".to_string(), + r#"// "Authorization": "Bearer "#.to_string(), + ), + }; + + format!( + r#"{{ + /// The name of your remote MCP server + "{name}": {{ + /// The URL of the remote MCP server + "url": "{url}", + "headers": {{ + /// Any headers to send along + {headers} + }} + }} +}}"# + ) +} + +fn parse_http_input(text: &str) -> Result<(ContextServerId, String, HashMap)> { + #[derive(Deserialize)] + struct Temp { + url: String, + #[serde(default)] + headers: HashMap, + } + let value: HashMap = serde_json_lenient::from_str(text)?; + if value.len() != 1 { + anyhow::bail!("Expected exactly one context server configuration"); + } + + let (key, value) = value.into_iter().next().unwrap(); + + Ok((ContextServerId(key.into()), value.url, value.headers)) +} + fn resolve_context_server_extension( id: ContextServerId, worktree_store: Entity, @@ -312,6 +410,15 @@ impl ConfigureContextServerModal { id: server_id, command, }), + ContextServerSettings::Http { + enabled: _, + url, + headers, + } => Some(ConfigurationTarget::ExistingHttp { + id: server_id, + url, + headers, + }), ContextServerSettings::Extension { .. } => { match workspace .update(cx, |workspace, cx| { @@ -353,6 +460,7 @@ impl ConfigureContextServerModal { state: State::Idle, original_server_id: match &target { ConfigurationTarget::Existing { id, .. } => Some(id.clone()), + ConfigurationTarget::ExistingHttp { id, .. } => Some(id.clone()), ConfigurationTarget::Extension { id, .. } => Some(id.clone()), ConfigurationTarget::New => None, }, @@ -481,7 +589,7 @@ impl ModalView for ConfigureContextServerModal {} impl Focusable for ConfigureContextServerModal { fn focus_handle(&self, cx: &App) -> FocusHandle { match &self.source { - ConfigurationSource::New { editor } => editor.focus_handle(cx), + ConfigurationSource::New { editor, .. } => editor.focus_handle(cx), ConfigurationSource::Existing { editor, .. } => editor.focus_handle(cx), ConfigurationSource::Extension { editor, .. } => editor .as_ref() @@ -527,9 +635,10 @@ impl ConfigureContextServerModal { } fn render_modal_content(&self, cx: &App) -> AnyElement { + // All variants now use single editor approach let editor = match &self.source { - ConfigurationSource::New { editor } => editor, - ConfigurationSource::Existing { editor } => editor, + ConfigurationSource::New { editor, .. 
} => editor,
+            ConfigurationSource::Existing { editor, .. } => editor,
             ConfigurationSource::Extension { editor, .. } => {
                 let Some(editor) = editor else {
                     return div().into_any_element();
@@ -601,6 +710,36 @@ impl ConfigureContextServerModal {
                         move |_, _, cx| cx.open_url(&repository_url)
                     }),
                 )
+            } else if let ConfigurationSource::New { is_http, .. } = &self.source {
+                let label = if *is_http {
+                    "Run command"
+                } else {
+                    "Connect via HTTP"
+                };
+                let tooltip = if *is_http {
+                    "Configure an MCP server that runs on stdin/stdout."
+                } else {
+                    "Configure an MCP server that you connect to over HTTP"
+                };
+
+                Some(
+                    Button::new("toggle-kind", label)
+                        .tooltip(Tooltip::text(tooltip))
+                        .on_click(cx.listener(|this, _, window, cx| match &mut this.source {
+                            ConfigurationSource::New { editor, is_http } => {
+                                *is_http = !*is_http;
+                                let new_text = if *is_http {
+                                    context_server_http_input(None)
+                                } else {
+                                    context_server_input(None)
+                                };
+                                editor.update(cx, |editor, cx| {
+                                    editor.set_text(new_text, window, cx);
+                                })
+                            }
+                            _ => {}
+                        })),
+                )
             } else {
                 None
             },
diff --git a/crates/context_server/Cargo.toml b/crates/context_server/Cargo.toml
index 846a53fde4b6f87493ec2b75da6c08d2b081df47..f73e6a9bab011c5d675040d1ee3a05dfa708dc45 100644
--- a/crates/context_server/Cargo.toml
+++ b/crates/context_server/Cargo.toml
@@ -12,7 +12,7 @@ workspace = true
 path = "src/context_server.rs"
 
 [features]
-test-support = []
+test-support = ["gpui/test-support"]
 
 [dependencies]
 anyhow.workspace = true
@@ -20,6 +20,7 @@ async-trait.workspace = true
 collections.workspace = true
 futures.workspace = true
 gpui.workspace = true
+http_client = { workspace = true, features = ["test-support"] }
 log.workspace = true
 net.workspace = true
 parking_lot.workspace = true
@@ -32,3 +33,6 @@ smol.workspace = true
 tempfile.workspace = true
 url = { workspace = true, features = ["serde"] }
 util.workspace = true
+
+[dev-dependencies]
+gpui = { workspace = true, features = ["test-support"] }
diff --git a/crates/context_server/src/context_server.rs b/crates/context_server/src/context_server.rs
index 52ed524220947430df3e63fced367ca4eb223fff..553e845df87a2fec30b1afbffa05b970d5d672f6 100644
--- a/crates/context_server/src/context_server.rs
+++ b/crates/context_server/src/context_server.rs
@@ -6,6 +6,8 @@ pub mod test;
 pub mod transport;
 pub mod types;
 
+use collections::HashMap;
+use http_client::HttpClient;
 use std::path::Path;
 use std::sync::Arc;
 use std::{fmt::Display, path::PathBuf};
@@ -15,6 +17,9 @@ use client::Client;
 use gpui::AsyncApp;
 use parking_lot::RwLock;
 pub use settings::ContextServerCommand;
+use url::Url;
+
+use crate::transport::HttpTransport;
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct ContextServerId(pub Arc<str>);
@@ -52,6 +57,25 @@ impl ContextServer {
         }
     }
 
+    pub fn http(
+        id: ContextServerId,
+        endpoint: &Url,
+        headers: HashMap<String, String>,
+        http_client: Arc<dyn HttpClient>,
+        executor: gpui::BackgroundExecutor,
+    ) -> Result<Self> {
+        let transport = match endpoint.scheme() {
+            "http" | "https" => {
+                log::info!("Using HTTP transport for {}", endpoint);
+                let transport =
+                    HttpTransport::new(http_client, endpoint.to_string(), headers, executor);
+                Arc::new(transport) as _
+            }
+            _ => anyhow::bail!("unsupported MCP url scheme {}", endpoint.scheme()),
+        };
+        Ok(Self::new(id, transport))
+    }
+
     pub fn new(id: ContextServerId, transport: Arc<dyn Transport>) -> Self {
         Self {
             id,
diff --git a/crates/context_server/src/transport.rs b/crates/context_server/src/transport.rs
index b4f56b0ef03ac6adf4ee81f878818ec3fecc5ef9..a3d6f998d49872c44513da00c506b68534c36b65 100644
--- a/crates/context_server/src/transport.rs
+++ b/crates/context_server/src/transport.rs
@@ -1,11 +1,12 @@
+pub mod http;
 mod stdio_transport;
 
-use std::pin::Pin;
-
 use anyhow::Result;
 use async_trait::async_trait;
 use futures::Stream;
+use std::pin::Pin;
 
+pub use http::*;
 pub use stdio_transport::*;
 
 #[async_trait]
diff --git a/crates/context_server/src/transport/http.rs b/crates/context_server/src/transport/http.rs
new file mode 100644
index 0000000000000000000000000000000000000000..70248f0278fcf80024d75d7f78cae5c29f26cc43
--- /dev/null
+++ b/crates/context_server/src/transport/http.rs
@@ -0,0 +1,259 @@
+use anyhow::{Result, anyhow};
+use async_trait::async_trait;
+use collections::HashMap;
+use futures::{Stream, StreamExt};
+use gpui::BackgroundExecutor;
+use http_client::{AsyncBody, HttpClient, Request, Response, http::Method};
+use parking_lot::Mutex as SyncMutex;
+use smol::channel;
+use std::{pin::Pin, sync::Arc};
+
+use crate::transport::Transport;
+
+// Constants from MCP spec
+const HEADER_SESSION_ID: &str = "Mcp-Session-Id";
+const EVENT_STREAM_MIME_TYPE: &str = "text/event-stream";
+const JSON_MIME_TYPE: &str = "application/json";
+
+/// HTTP Transport with session management and SSE support
+pub struct HttpTransport {
+    http_client: Arc<dyn HttpClient>,
+    endpoint: String,
+    session_id: Arc<SyncMutex<Option<String>>>,
+    executor: BackgroundExecutor,
+    response_tx: channel::Sender<String>,
+    response_rx: channel::Receiver<String>,
+    error_tx: channel::Sender<String>,
+    error_rx: channel::Receiver<String>,
+    // Authentication headers to include in requests
+    headers: HashMap<String, String>,
+}
+
+impl HttpTransport {
+    pub fn new(
+        http_client: Arc<dyn HttpClient>,
+        endpoint: String,
+        headers: HashMap<String, String>,
+        executor: BackgroundExecutor,
+    ) -> Self {
+        let (response_tx, response_rx) = channel::unbounded();
+        let (error_tx, error_rx) = channel::unbounded();
+
+        Self {
+            http_client,
+            executor,
+            endpoint,
+            session_id: Arc::new(SyncMutex::new(None)),
+            response_tx,
+            response_rx,
+            error_tx,
+            error_rx,
+            headers,
+        }
+    }
+
+    /// Send a message and handle the response based on content type
+    async fn send_message(&self, message: String) -> Result<()> {
+        let is_notification =
+            !message.contains("\"id\":") || message.contains("notifications/initialized");
+
+        let mut request_builder = Request::builder()
+            .method(Method::POST)
+            .uri(&self.endpoint)
+            .header("Content-Type", JSON_MIME_TYPE)
+            .header(
+                "Accept",
+                format!("{}, {}", JSON_MIME_TYPE, EVENT_STREAM_MIME_TYPE),
+            );
+
+        for (key, value) in &self.headers {
+            request_builder = request_builder.header(key.as_str(), value.as_str());
+        }
+
+        // Add session ID if we have one (except for initialize)
+        if let Some(ref session_id) = *self.session_id.lock() {
+            request_builder = request_builder.header(HEADER_SESSION_ID, session_id.as_str());
+        }
+
+        let request = request_builder.body(AsyncBody::from(message.into_bytes()))?;
+        let mut response = self.http_client.send(request).await?;
+
+        // Handle different response types based on status and content-type
+        match response.status() {
+            status if status.is_success() => {
+                // Check content type
+                let content_type = response
+                    .headers()
+                    .get("content-type")
+                    .and_then(|v| v.to_str().ok());
+
+                // Extract session ID from response headers if present
+                if let Some(session_id) = response
+                    .headers()
+                    .get(HEADER_SESSION_ID)
+                    .and_then(|v| v.to_str().ok())
+                {
+                    *self.session_id.lock() = Some(session_id.to_string());
+                    log::debug!("Session ID set: {}", session_id);
+                }
+
+                match content_type {
+                    Some(ct) if ct.starts_with(JSON_MIME_TYPE) => {
+                        // JSON response - read and forward immediately
+                        let mut body = String::new();
+                        futures::AsyncReadExt::read_to_string(response.body_mut(), &mut body)
+                            .await?;
+
+                        // Only send non-empty responses
+                        if !body.is_empty() {
+                            self.response_tx
+                                .send(body)
+                                .await
+                                .map_err(|_| anyhow!("Failed to send JSON response"))?;
+                        }
+                    }
+                    Some(ct) if ct.starts_with(EVENT_STREAM_MIME_TYPE) => {
+                        // SSE stream - set up streaming
+                        self.setup_sse_stream(response).await?;
+                    }
+                    _ => {
+                        // For notifications, 202 Accepted with no content type is ok
+                        if is_notification && status.as_u16() == 202 {
+                            log::debug!("Notification accepted");
+                        } else {
+                            return Err(anyhow!("Unexpected content type: {:?}", content_type));
+                        }
+                    }
+                }
+            }
+            status if status.as_u16() == 202 => {
+                // Accepted - notification acknowledged, no response needed
+                log::debug!("Notification accepted");
+            }
+            _ => {
+                let mut error_body = String::new();
+                futures::AsyncReadExt::read_to_string(response.body_mut(), &mut error_body).await?;
+
+                self.error_tx
+                    .send(format!("HTTP {}: {}", response.status(), error_body))
+                    .await
+                    .map_err(|_| anyhow!("Failed to send error"))?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Set up SSE streaming from the response
+    async fn setup_sse_stream(&self, mut response: Response<AsyncBody>) -> Result<()> {
+        let response_tx = self.response_tx.clone();
+        let error_tx = self.error_tx.clone();
+
+        // Spawn a task to handle the SSE stream
+        smol::spawn(async move {
+            let reader = futures::io::BufReader::new(response.body_mut());
+            let mut lines = futures::AsyncBufReadExt::lines(reader);
+
+            let mut data_buffer = Vec::new();
+            let mut in_message = false;
+
+            while let Some(line_result) = lines.next().await {
+                match line_result {
+                    Ok(line) => {
+                        if line.is_empty() {
+                            // Empty line signals end of event
+                            if !data_buffer.is_empty() {
+                                let message = data_buffer.join("\n");
+
+                                // Filter out ping messages and empty data
+                                if !message.trim().is_empty() && message != "ping" {
+                                    if let Err(e) = response_tx.send(message).await {
+                                        log::error!("Failed to send SSE message: {}", e);
+                                        break;
+                                    }
+                                }
+                                data_buffer.clear();
+                            }
+                            in_message = false;
+                        } else if let Some(data) = line.strip_prefix("data: ") {
+                            // Handle data lines
+                            let data = data.trim();
+                            if !data.is_empty() {
+                                // Check if this is a ping message
+                                if data == "ping" {
+                                    log::trace!("Received SSE ping");
+                                    continue;
+                                }
+                                data_buffer.push(data.to_string());
+                                in_message = true;
+                            }
+                        } else if line.starts_with("event:")
+                            || line.starts_with("id:")
+                            || line.starts_with("retry:")
+                        {
+                            // Ignore other SSE fields
+                            continue;
+                        } else if in_message {
+                            // Continuation of data
+                            data_buffer.push(line);
+                        }
+                    }
+                    Err(e) => {
+                        let _ = error_tx.send(format!("SSE stream error: {}", e)).await;
+                        break;
+                    }
+                }
+            }
+        })
+        .detach();
+
+        Ok(())
+    }
+}
+
+#[async_trait]
+impl Transport for HttpTransport {
+    async fn send(&self, message: String) -> Result<()> {
+        self.send_message(message).await
+    }
+
+    fn receive(&self) -> Pin<Box<dyn Stream<Item = String> + Send>> {
+        Box::pin(self.response_rx.clone())
+    }
+
+    fn receive_err(&self) -> Pin<Box<dyn Stream<Item = String> + Send>> {
+        Box::pin(self.error_rx.clone())
+    }
+}
+
+impl Drop for HttpTransport {
+    fn drop(&mut self) {
+        // Try to cleanup session on drop
+        let http_client = self.http_client.clone();
+        let endpoint = self.endpoint.clone();
+        let session_id = self.session_id.lock().clone();
+        let headers = self.headers.clone();
+
+        if let Some(session_id) = session_id {
+            self.executor
+                .spawn(async move {
+                    let mut request_builder = Request::builder()
+                        .method(Method::DELETE)
+                        .uri(&endpoint)
.header(HEADER_SESSION_ID, &session_id); + + // Add authentication headers if present + for (key, value) in headers { + request_builder = request_builder.header(key.as_str(), value.as_str()); + } + + let request = request_builder.body(AsyncBody::empty()); + + if let Ok(request) = request { + let _ = http_client.send(request).await; + } + }) + .detach(); + } + } +} diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs index 8b44efdfb196d93df0a609983c2b97147bbe38a8..bb16ab879eac90b7a943b02f5f97dfc004167ea0 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs @@ -990,6 +990,9 @@ impl ExtensionImports for WasmState { command: None, settings: Some(settings), })?), + project::project_settings::ContextServerSettings::Http { .. } => { + bail!("remote context server settings not supported in 0.6.0") + } } } _ => { diff --git a/crates/project/src/context_server_store.rs b/crates/project/src/context_server_store.rs index 8316bd7466d113c9211d79afb6e4d1a325e32e52..efc2bbf686a273fe18ca3a34f071176d07532981 100644 --- a/crates/project/src/context_server_store.rs +++ b/crates/project/src/context_server_store.rs @@ -99,13 +99,18 @@ pub enum ContextServerConfiguration { command: ContextServerCommand, settings: serde_json::Value, }, + Http { + url: url::Url, + headers: HashMap, + }, } impl ContextServerConfiguration { - pub fn command(&self) -> &ContextServerCommand { + pub fn command(&self) -> Option<&ContextServerCommand> { match self { - ContextServerConfiguration::Custom { command } => command, - ContextServerConfiguration::Extension { command, .. } => command, + ContextServerConfiguration::Custom { command } => Some(command), + ContextServerConfiguration::Extension { command, .. } => Some(command), + ContextServerConfiguration::Http { .. 
} => None, } } @@ -142,6 +147,14 @@ impl ContextServerConfiguration { } } } + ContextServerSettings::Http { + enabled: _, + url, + headers: auth, + } => { + let url = url::Url::parse(&url).log_err()?; + Some(ContextServerConfiguration::Http { url, headers: auth }) + } } } } @@ -207,7 +220,7 @@ impl ContextServerStore { #[cfg(any(test, feature = "test-support"))] pub fn test_maintain_server_loop( - context_server_factory: ContextServerFactory, + context_server_factory: Option, registry: Entity, worktree_store: Entity, weak_project: WeakEntity, @@ -215,7 +228,7 @@ impl ContextServerStore { ) -> Self { Self::new_internal( true, - Some(context_server_factory), + context_server_factory, registry, worktree_store, weak_project, @@ -385,17 +398,6 @@ impl ContextServerStore { result } - pub fn restart_server(&mut self, id: &ContextServerId, cx: &mut Context) -> Result<()> { - if let Some(state) = self.servers.get(id) { - let configuration = state.configuration(); - - self.stop_server(&state.server().id(), cx)?; - let new_server = self.create_context_server(id.clone(), configuration.clone(), cx); - self.run_server(new_server, configuration, cx); - } - Ok(()) - } - fn run_server( &mut self, server: Arc, @@ -479,33 +481,42 @@ impl ContextServerStore { id: ContextServerId, configuration: Arc, cx: &mut Context, - ) -> Arc { - let project = self.project.upgrade(); - let mut root_path = None; - if let Some(project) = project { - let project = project.read(cx); - if project.is_local() { - if let Some(path) = project.active_project_directory(cx) { - root_path = Some(path); - } else { - for worktree in self.worktree_store.read(cx).visible_worktrees(cx) { - if let Some(path) = worktree.read(cx).root_dir() { - root_path = Some(path); - break; - } - } - } - } - }; - + ) -> Result> { if let Some(factory) = self.context_server_factory.as_ref() { - factory(id, configuration) - } else { - Arc::new(ContextServer::stdio( + return Ok(factory(id, configuration)); + } + + match configuration.as_ref() { + ContextServerConfiguration::Http { url, headers } => Ok(Arc::new(ContextServer::http( id, - configuration.command().clone(), - root_path, - )) + url, + headers.clone(), + cx.http_client(), + cx.background_executor().clone(), + )?)), + _ => { + let root_path = self + .project + .read_with(cx, |project, cx| project.active_project_directory(cx)) + .ok() + .flatten() + .or_else(|| { + self.worktree_store.read_with(cx, |store, cx| { + store.visible_worktrees(cx).fold(None, |acc, item| { + if acc.is_none() { + item.read(cx).root_dir() + } else { + acc + } + }) + }) + }); + Ok(Arc::new(ContextServer::stdio( + id, + configuration.command().unwrap().clone(), + root_path, + ))) + } } } @@ -621,14 +632,16 @@ impl ContextServerStore { let existing_config = state.as_ref().map(|state| state.configuration()); if existing_config.as_deref() != Some(&config) || is_stopped { let config = Arc::new(config); - let server = this.create_context_server(id.clone(), config.clone(), cx); + let server = this.create_context_server(id.clone(), config.clone(), cx)?; servers_to_start.push((server, config)); if this.servers.contains_key(&id) { servers_to_stop.insert(id); } } } - })?; + + anyhow::Ok(()) + })??; this.update(cx, |this, cx| { for id in servers_to_stop { @@ -654,6 +667,7 @@ mod tests { }; use context_server::test::create_fake_transport; use gpui::{AppContext, TestAppContext, UpdateGlobal as _}; + use http_client::{FakeHttpClient, Response}; use serde_json::json; use std::{cell::RefCell, path::PathBuf, rc::Rc}; use util::path; @@ -894,12 
+908,12 @@ mod tests { }); let store = cx.new(|cx| { ContextServerStore::test_maintain_server_loop( - Box::new(move |id, _| { + Some(Box::new(move |id, _| { Arc::new(ContextServer::new( id.clone(), Arc::new(create_fake_transport(id.0.to_string(), executor.clone())), )) - }), + })), registry.clone(), project.read(cx).worktree_store(), project.downgrade(), @@ -1130,12 +1144,12 @@ mod tests { let registry = cx.new(|_| ContextServerDescriptorRegistry::new()); let store = cx.new(|cx| { ContextServerStore::test_maintain_server_loop( - Box::new(move |id, _| { + Some(Box::new(move |id, _| { Arc::new(ContextServer::new( id.clone(), Arc::new(create_fake_transport(id.0.to_string(), executor.clone())), )) - }), + })), registry.clone(), project.read(cx).worktree_store(), project.downgrade(), @@ -1228,6 +1242,73 @@ mod tests { }); } + #[gpui::test] + async fn test_remote_context_server(cx: &mut TestAppContext) { + const SERVER_ID: &str = "remote-server"; + let server_id = ContextServerId(SERVER_ID.into()); + let server_url = "http://example.com/api"; + + let (_fs, project) = setup_context_server_test( + cx, + json!({ "code.rs": "" }), + vec![( + SERVER_ID.into(), + ContextServerSettings::Http { + enabled: true, + url: server_url.to_string(), + headers: Default::default(), + }, + )], + ) + .await; + + let client = FakeHttpClient::create(|_| async move { + use http_client::AsyncBody; + + let response = Response::builder() + .status(200) + .header("Content-Type", "application/json") + .body(AsyncBody::from( + serde_json::to_string(&json!({ + "jsonrpc": "2.0", + "id": 0, + "result": { + "protocolVersion": "2024-11-05", + "capabilities": {}, + "serverInfo": { + "name": "test-server", + "version": "1.0.0" + } + } + })) + .unwrap(), + )) + .unwrap(); + Ok(response) + }); + cx.update(|cx| cx.set_http_client(client)); + let registry = cx.new(|_| ContextServerDescriptorRegistry::new()); + let store = cx.new(|cx| { + ContextServerStore::test_maintain_server_loop( + None, + registry.clone(), + project.read(cx).worktree_store(), + project.downgrade(), + cx, + ) + }); + + let _server_events = assert_server_events( + &store, + vec![ + (server_id.clone(), ContextServerStatus::Starting), + (server_id.clone(), ContextServerStatus::Running), + ], + cx, + ); + cx.run_until_parked(); + } + struct ServerEvents { received_event_count: Rc>, expected_event_count: usize, diff --git a/crates/project/src/project_settings.rs b/crates/project/src/project_settings.rs index 10ffc986fa798011c28261d2ff899da66261669a..1bfd44957b2b0d75f8fda2b42a875c92e37d63f4 100644 --- a/crates/project/src/project_settings.rs +++ b/crates/project/src/project_settings.rs @@ -135,6 +135,16 @@ pub enum ContextServerSettings { /// are supported. settings: serde_json::Value, }, + Http { + /// Whether the context server is enabled. + #[serde(default = "default_true")] + enabled: bool, + /// The URL of the remote context server. + url: String, + /// Optional authentication configuration for the remote server. 
+ #[serde(skip_serializing_if = "HashMap::is_empty", default)] + headers: HashMap, + }, } impl From for ContextServerSettings { @@ -146,6 +156,15 @@ impl From for ContextServerSettings { settings::ContextServerSettingsContent::Extension { enabled, settings } => { ContextServerSettings::Extension { enabled, settings } } + settings::ContextServerSettingsContent::Http { + enabled, + url, + headers, + } => ContextServerSettings::Http { + enabled, + url, + headers, + }, } } } @@ -158,6 +177,15 @@ impl Into for ContextServerSettings { ContextServerSettings::Extension { enabled, settings } => { settings::ContextServerSettingsContent::Extension { enabled, settings } } + ContextServerSettings::Http { + enabled, + url, + headers, + } => settings::ContextServerSettingsContent::Http { + enabled, + url, + headers, + }, } } } @@ -174,6 +202,7 @@ impl ContextServerSettings { match self { ContextServerSettings::Custom { enabled, .. } => *enabled, ContextServerSettings::Extension { enabled, .. } => *enabled, + ContextServerSettings::Http { enabled, .. } => *enabled, } } @@ -181,6 +210,7 @@ impl ContextServerSettings { match self { ContextServerSettings::Custom { enabled: e, .. } => *e = enabled, ContextServerSettings::Extension { enabled: e, .. } => *e = enabled, + ContextServerSettings::Http { enabled: e, .. } => *e = enabled, } } } diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index b6bebd76e28a316f19c400db2877219aeb2c7cc8..6c450bc8384d61acf9d6f894f2ae3de500611618 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -196,7 +196,7 @@ pub struct SessionSettingsContent { } #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, MergeFrom, Debug)] -#[serde(tag = "source", rename_all = "snake_case")] +#[serde(untagged, rename_all = "snake_case")] pub enum ContextServerSettingsContent { Custom { /// Whether the context server is enabled. @@ -206,6 +206,16 @@ pub enum ContextServerSettingsContent { #[serde(flatten)] command: ContextServerCommand, }, + Http { + /// Whether the context server is enabled. + #[serde(default = "default_true")] + enabled: bool, + /// The URL of the remote context server. + url: String, + /// Optional headers to send. + #[serde(skip_serializing_if = "HashMap::is_empty", default)] + headers: HashMap, + }, Extension { /// Whether the context server is enabled. #[serde(default = "default_true")] @@ -217,19 +227,24 @@ pub enum ContextServerSettingsContent { settings: serde_json::Value, }, } + impl ContextServerSettingsContent { pub fn set_enabled(&mut self, enabled: bool) { match self { ContextServerSettingsContent::Custom { enabled: custom_enabled, - command: _, + .. } => { *custom_enabled = enabled; } ContextServerSettingsContent::Extension { enabled: ext_enabled, - settings: _, + .. } => *ext_enabled = enabled, + ContextServerSettingsContent::Http { + enabled: remote_enabled, + .. + } => *remote_enabled = enabled, } } } diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 998d1831a1b5e4179677d33a80fd36718e833511..92b78704163c7852867df8fefc018eaf4135210b 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -4657,133 +4657,6 @@ mod tests { }); } - /// Checks that action namespaces are the expected set. The purpose of this is to prevent typos - /// and let you know when introducing a new namespace. 
- #[gpui::test] - async fn test_action_namespaces(cx: &mut gpui::TestAppContext) { - use itertools::Itertools; - - init_keymap_test(cx); - cx.update(|cx| { - let all_actions = cx.all_action_names(); - - let mut actions_without_namespace = Vec::new(); - let all_namespaces = all_actions - .iter() - .filter_map(|action_name| { - let namespace = action_name - .split("::") - .collect::>() - .into_iter() - .rev() - .skip(1) - .rev() - .join("::"); - if namespace.is_empty() { - actions_without_namespace.push(*action_name); - } - if &namespace == "test_only" || &namespace == "stories" { - None - } else { - Some(namespace) - } - }) - .sorted() - .dedup() - .collect::>(); - assert_eq!(actions_without_namespace, Vec::<&str>::new()); - - let expected_namespaces = vec![ - "action", - "activity_indicator", - "agent", - #[cfg(not(target_os = "macos"))] - "app_menu", - "assistant", - "assistant2", - "auto_update", - "bedrock", - "branches", - "buffer_search", - "channel_modal", - "cli", - "client", - "collab", - "collab_panel", - "command_palette", - "console", - "context_server", - "copilot", - "debug_panel", - "debugger", - "dev", - "diagnostics", - "edit_prediction", - "editor", - "feedback", - "file_finder", - "git", - "git_onboarding", - "git_panel", - "go_to_line", - "icon_theme_selector", - "journal", - "keymap_editor", - "keystroke_input", - "language_selector", - "line_ending_selector", - "lsp_tool", - "markdown", - "menu", - "notebook", - "notification_panel", - "onboarding", - "outline", - "outline_panel", - "pane", - "panel", - "picker", - "project_panel", - "project_search", - "project_symbols", - "projects", - "repl", - "rules_library", - "search", - "settings_editor", - "settings_profile_selector", - "snippets", - "stash_picker", - "supermaven", - "svg", - "syntax_tree_view", - "tab_switcher", - "task", - "terminal", - "terminal_panel", - "theme_selector", - "toast", - "toolchain", - "variable_list", - "vim", - "window", - "workspace", - "zed", - "zed_actions", - "zed_predict_onboarding", - "zeta", - ]; - assert_eq!( - all_namespaces, - expected_namespaces - .into_iter() - .map(|namespace| namespace.to_string()) - .sorted() - .collect::>() - ); - }); - } - #[gpui::test] fn test_bundled_settings_and_themes(cx: &mut App) { cx.text_system() diff --git a/docs/src/ai/mcp.md b/docs/src/ai/mcp.md index 8fa36675ec46ed6ae1830dd32196815c34ab587f..d8d2de2a014459ddeed0f2a0fe92c2cbe84045e4 100644 --- a/docs/src/ai/mcp.md +++ b/docs/src/ai/mcp.md @@ -40,11 +40,14 @@ You can connect them by adding their commands directly to your `settings.json`, ```json [settings] { "context_servers": { - "your-mcp-server": { - "source": "custom", + "run-command": { "command": "some-command", "args": ["arg-1", "arg-2"], "env": {} + }, + "over-http": { + "url": "custom", + "headers": { "Authorization": "Bearer " } } } } From 3c4ca3f37205c51704801b6dbb027493e0167d4e Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Tue, 18 Nov 2025 10:23:16 -0700 Subject: [PATCH 0194/1030] Remove `settings::Maybe` (#42933) It's unclear how this would ever be useful cc @probably-neb Release Notes: - N/A --- crates/settings/src/settings_content.rs | 215 ------------------ .../settings/src/settings_content/project.rs | 6 +- crates/settings/src/vscode_import.rs | 2 +- crates/settings_ui/src/page_data.rs | 4 +- crates/settings_ui/src/settings_ui.rs | 1 - crates/worktree/src/worktree_settings.rs | 2 +- 6 files changed, 7 insertions(+), 223 deletions(-) diff --git a/crates/settings/src/settings_content.rs b/crates/settings/src/settings_content.rs 
index 42b88bd3654159ca3ad55dfecffbe3d4e2b547d0..9cd8ff46e8bea5be69bd5415b5668f21dc71f13a 100644 --- a/crates/settings/src/settings_content.rs +++ b/crates/settings/src/settings_content.rs @@ -1039,218 +1039,3 @@ impl std::fmt::Display for DelayMs { write!(f, "{}ms", self.0) } } - -/// A wrapper type that distinguishes between an explicitly set value (including null) and an unset value. -/// -/// This is useful for configuration where you need to differentiate between: -/// - A field that is not present in the configuration file (`Maybe::Unset`) -/// - A field that is explicitly set to `null` (`Maybe::Set(None)`) -/// - A field that is explicitly set to a value (`Maybe::Set(Some(value))`) -/// -/// # Examples -/// -/// In JSON: -/// - `{}` (field missing) deserializes to `Maybe::Unset` -/// - `{"field": null}` deserializes to `Maybe::Set(None)` -/// - `{"field": "value"}` deserializes to `Maybe::Set(Some("value"))` -/// -/// WARN: This type should not be wrapped in an option inside of settings, otherwise the default `serde_json` behavior -/// of treating `null` and missing as the `Option::None` will be used -#[derive(Debug, Clone, PartialEq, Eq, strum::EnumDiscriminants, Default)] -#[strum_discriminants(derive(strum::VariantArray, strum::VariantNames, strum::FromRepr))] -pub enum Maybe { - /// An explicitly set value, which may be `None` (representing JSON `null`) or `Some(value)`. - Set(Option), - /// A value that was not present in the configuration. - #[default] - Unset, -} - -impl merge_from::MergeFrom for Maybe { - fn merge_from(&mut self, other: &Self) { - if self.is_unset() { - *self = other.clone(); - } - } -} - -impl From>> for Maybe { - fn from(value: Option>) -> Self { - match value { - Some(value) => Maybe::Set(value), - None => Maybe::Unset, - } - } -} - -impl Maybe { - pub fn is_set(&self) -> bool { - matches!(self, Maybe::Set(_)) - } - - pub fn is_unset(&self) -> bool { - matches!(self, Maybe::Unset) - } - - pub fn into_inner(self) -> Option { - match self { - Maybe::Set(value) => value, - Maybe::Unset => None, - } - } - - pub fn as_ref(&self) -> Option<&Option> { - match self { - Maybe::Set(value) => Some(value), - Maybe::Unset => None, - } - } -} - -impl serde::Serialize for Maybe { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - match self { - Maybe::Set(value) => value.serialize(serializer), - Maybe::Unset => serializer.serialize_none(), - } - } -} - -impl<'de, T: serde::Deserialize<'de>> serde::Deserialize<'de> for Maybe { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - Option::::deserialize(deserializer).map(Maybe::Set) - } -} - -impl JsonSchema for Maybe { - fn schema_name() -> std::borrow::Cow<'static, str> { - format!("Nullable<{}>", T::schema_name()).into() - } - - fn json_schema(generator: &mut schemars::generate::SchemaGenerator) -> schemars::Schema { - let mut schema = generator.subschema_for::>(); - // Add description explaining that null is an explicit value - let description = if let Some(existing_desc) = - schema.get("description").and_then(|desc| desc.as_str()) - { - format!( - "{}. Note: `null` is treated as an explicit value, different from omitting the field entirely.", - existing_desc - ) - } else { - "This field supports explicit `null` values. 
Omitting the field is different from setting it to `null`.".to_string() - }; - - schema.insert("description".to_string(), description.into()); - - schema - } -} - -#[cfg(test)] -mod tests { - use super::*; - use serde_json; - - #[test] - fn test_maybe() { - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct TestStruct { - #[serde(default)] - #[serde(skip_serializing_if = "Maybe::is_unset")] - field: Maybe, - } - - #[derive(Debug, PartialEq, Serialize, Deserialize)] - struct NumericTest { - #[serde(default)] - value: Maybe, - } - - let json = "{}"; - let result: TestStruct = serde_json::from_str(json).unwrap(); - assert!(result.field.is_unset()); - assert_eq!(result.field, Maybe::Unset); - - let json = r#"{"field": null}"#; - let result: TestStruct = serde_json::from_str(json).unwrap(); - assert!(result.field.is_set()); - assert_eq!(result.field, Maybe::Set(None)); - - let json = r#"{"field": "hello"}"#; - let result: TestStruct = serde_json::from_str(json).unwrap(); - assert!(result.field.is_set()); - assert_eq!(result.field, Maybe::Set(Some("hello".to_string()))); - - let test = TestStruct { - field: Maybe::Unset, - }; - let json = serde_json::to_string(&test).unwrap(); - assert_eq!(json, "{}"); - - let test = TestStruct { - field: Maybe::Set(None), - }; - let json = serde_json::to_string(&test).unwrap(); - assert_eq!(json, r#"{"field":null}"#); - - let test = TestStruct { - field: Maybe::Set(Some("world".to_string())), - }; - let json = serde_json::to_string(&test).unwrap(); - assert_eq!(json, r#"{"field":"world"}"#); - - let default_maybe: Maybe = Maybe::default(); - assert!(default_maybe.is_unset()); - - let unset: Maybe = Maybe::Unset; - assert!(unset.is_unset()); - assert!(!unset.is_set()); - - let set_none: Maybe = Maybe::Set(None); - assert!(set_none.is_set()); - assert!(!set_none.is_unset()); - - let set_some: Maybe = Maybe::Set(Some("value".to_string())); - assert!(set_some.is_set()); - assert!(!set_some.is_unset()); - - let original = TestStruct { - field: Maybe::Set(Some("test".to_string())), - }; - let json = serde_json::to_string(&original).unwrap(); - let deserialized: TestStruct = serde_json::from_str(&json).unwrap(); - assert_eq!(original, deserialized); - - let json = r#"{"value": 42}"#; - let result: NumericTest = serde_json::from_str(json).unwrap(); - assert_eq!(result.value, Maybe::Set(Some(42))); - - let json = r#"{"value": null}"#; - let result: NumericTest = serde_json::from_str(json).unwrap(); - assert_eq!(result.value, Maybe::Set(None)); - - let json = "{}"; - let result: NumericTest = serde_json::from_str(json).unwrap(); - assert_eq!(result.value, Maybe::Unset); - - // Test JsonSchema implementation - use schemars::schema_for; - let schema = schema_for!(Maybe); - let schema_json = serde_json::to_value(&schema).unwrap(); - - // Verify the description mentions that null is an explicit value - let description = schema_json["description"].as_str().unwrap(); - assert!( - description.contains("null") && description.contains("explicit"), - "Schema description should mention that null is an explicit value. 
Got: {}", - description - ); - } -} diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index 6c450bc8384d61acf9d6f894f2ae3de500611618..83e0537940870bd944cb75f20e35cc522059570c 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -8,7 +8,7 @@ use settings_macros::MergeFrom; use util::serde::default_true; use crate::{ - AllLanguageSettingsContent, DelayMs, ExtendingVec, Maybe, ProjectTerminalSettingsContent, + AllLanguageSettingsContent, DelayMs, ExtendingVec, ProjectTerminalSettingsContent, SlashCommandSettings, }; @@ -61,8 +61,8 @@ pub struct WorktreeSettingsContent { /// /// Default: null #[serde(default)] - #[serde(skip_serializing_if = "Maybe::is_unset")] - pub project_name: Maybe, + #[serde(skip_serializing_if = "Option::is_none")] + pub project_name: Option, /// Whether to prevent this project from being shared in public channels. /// diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 4b87d6f5f30c075f90a6da698396bc4576a56b92..5644cd7a1a8463a1f072d838a0a1b16bd7ad991b 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -870,7 +870,7 @@ impl VsCodeSettings { fn worktree_settings_content(&self) -> WorktreeSettingsContent { WorktreeSettingsContent { - project_name: crate::Maybe::Unset, + project_name: None, prevent_sharing_in_public_channels: false, file_scan_exclusions: self .read_value("files.watcherExclude") diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index d776b9787eb804a77f2d4a6b6c605846eb6604ea..a6baaf94842955a323f348dcbae8130dcfd060c6 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -33,10 +33,10 @@ pub(crate) fn settings_data(cx: &App) -> Vec { SettingField { json_path: Some("project_name"), pick: |settings_content| { - settings_content.project.worktree.project_name.as_ref()?.as_ref().or(DEFAULT_EMPTY_STRING) + settings_content.project.worktree.project_name.as_ref().or(DEFAULT_EMPTY_STRING) }, write: |settings_content, value| { - settings_content.project.worktree.project_name = settings::Maybe::Set(value.filter(|name| !name.is_empty())); + settings_content.project.worktree.project_name = value.filter(|name| !name.is_empty()); }, } ), diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 1f32716f0639197cf6391e377b2619cc3843605f..7b90464633c47caf7a2b11421fdbc6ac5aefe129 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -507,7 +507,6 @@ fn init_renderers(cx: &mut App) { .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) - .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) diff --git a/crates/worktree/src/worktree_settings.rs b/crates/worktree/src/worktree_settings.rs index 94e83a16decd6b5d68498944e26ddcabecd27eed..97723829dd78a3dab517634971f8d0753500aa4b 100644 --- a/crates/worktree/src/worktree_settings.rs +++ b/crates/worktree/src/worktree_settings.rs @@ -66,7 +66,7 @@ impl Settings for WorktreeSettings { .collect(); Self { - project_name: worktree.project_name.into_inner(), + project_name: worktree.project_name, prevent_sharing_in_public_channels: worktree.prevent_sharing_in_public_channels, file_scan_exclusions: 
path_matchers(file_scan_exclusions, "file_scan_exclusions") .log_err() From bf0dd4057ce262f6213a685fb73ae5239494673b Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Tue, 18 Nov 2025 09:36:37 -0800 Subject: [PATCH 0195/1030] zeta2: Make `new_text`/`old_text` parsing more robust (#42997) Closes #ISSUE The model often uses the wrong closing tag, or has spaces around the closing tag name. This PR makes it so that opening tags are treated as authoritative and any closing tag with the name `new_text` `old_text` or `edits` is accepted based on depth. This has the additional benefit that the parsing is more robust with contents that contain `new_text` `old_text` or `edits. I.e. the following test passes ```rust #[test] fn test_extract_xml_edits_with_conflicting_content() { let input = indoc! {r#" "#}; let result = extract_xml_replacements(input).unwrap(); assert_eq!(result.file_path, "component.tsx"); assert_eq!(result.replacements.len(), 1); assert_eq!(result.replacements[0].0, ""); assert_eq!(result.replacements[0].1, ""); } ``` Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/zeta2/src/xml_edits.rs | 318 +++++++++++++++++++++++++++------- 1 file changed, 257 insertions(+), 61 deletions(-) diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta2/src/xml_edits.rs index 468efa8b202141c4cca04459233ea91c5bff9d44..ee8dd47cb25ad3dcd2c3d7d172b62e724b41c22d 100644 --- a/crates/zeta2/src/xml_edits.rs +++ b/crates/zeta2/src/xml_edits.rs @@ -2,6 +2,11 @@ use anyhow::{Context as _, Result}; use language::{Anchor, BufferSnapshot, OffsetRangeExt as _, Point}; use std::{cmp, ops::Range, path::Path, sync::Arc}; +const EDITS_TAG_NAME: &'static str = "edits"; +const OLD_TEXT_TAG_NAME: &'static str = "old_text"; +const NEW_TEXT_TAG_NAME: &'static str = "new_text"; +const XML_TAGS: &[&str] = &[EDITS_TAG_NAME, OLD_TEXT_TAG_NAME, NEW_TEXT_TAG_NAME]; + pub async fn parse_xml_edits<'a>( input: &'a str, get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range])> + Send, @@ -12,38 +17,22 @@ pub async fn parse_xml_edits<'a>( } async fn parse_xml_edits_inner<'a>( - mut input: &'a str, + input: &'a str, get_buffer: impl Fn(&Path) -> Option<(&'a BufferSnapshot, &'a [Range])> + Send, ) -> Result<(&'a BufferSnapshot, Vec<(Range, Arc)>)> { - let edits_tag = parse_tag(&mut input, "edits")?.context("No edits tag")?; - - input = edits_tag.body; - - let file_path = edits_tag - .attributes - .trim_start() - .strip_prefix("path") - .context("no file attribute on edits tag")? - .trim_end() - .strip_prefix('=') - .context("no value for path attribute")? - .trim() - .trim_start_matches('"') - .trim_end_matches('"'); + let xml_edits = extract_xml_replacements(input)?; - let (buffer, context_ranges) = get_buffer(file_path.as_ref()) - .with_context(|| format!("no buffer for file {file_path}"))?; + let (buffer, context_ranges) = get_buffer(xml_edits.file_path.as_ref()) + .with_context(|| format!("no buffer for file {}", xml_edits.file_path))?; - let mut edits = vec![]; - while let Some(old_text_tag) = parse_tag(&mut input, "old_text")? 
{ - let new_text_tag = - parse_tag(&mut input, "new_text")?.context("no new_text tag following old_text")?; - let match_range = fuzzy_match_in_ranges(old_text_tag.body, buffer, context_ranges)?; - let old_text = buffer + let mut all_edits = vec![]; + for (old_text, new_text) in xml_edits.replacements { + let match_range = fuzzy_match_in_ranges(old_text, buffer, context_ranges)?; + let matched_old_text = buffer .text_for_range(match_range.clone()) .collect::(); - let edits_within_hunk = language::text_diff(&old_text, &new_text_tag.body); - edits.extend( + let edits_within_hunk = language::text_diff(&matched_old_text, new_text); + all_edits.extend( edits_within_hunk .into_iter() .map(move |(inner_range, inner_text)| { @@ -56,7 +45,7 @@ async fn parse_xml_edits_inner<'a>( ); } - Ok((buffer, edits)) + Ok((buffer, all_edits)) } fn fuzzy_match_in_ranges( @@ -110,32 +99,128 @@ fn fuzzy_match_in_ranges( ); } -struct ParsedTag<'a> { - attributes: &'a str, - body: &'a str, +#[derive(Debug)] +struct XmlEdits<'a> { + file_path: &'a str, + /// Vec of (old_text, new_text) pairs + replacements: Vec<(&'a str, &'a str)>, } -fn parse_tag<'a>(input: &mut &'a str, tag: &str) -> Result>> { - let open_tag = format!("<{}", tag); - let close_tag = format!("", tag); - let Some(start_ix) = input.find(&open_tag) else { - return Ok(None); - }; - let start_ix = start_ix + open_tag.len(); - let closing_bracket_ix = start_ix - + input[start_ix..] +fn extract_xml_replacements(input: &str) -> Result> { + let mut cursor = 0; + + let (edits_body_start, edits_attrs) = + find_tag_open(input, &mut cursor, EDITS_TAG_NAME)?.context("No edits tag found")?; + + let file_path = edits_attrs + .trim_start() + .strip_prefix("path") + .context("no path attribute on edits tag")? + .trim_end() + .strip_prefix('=') + .context("no value for path attribute")? + .trim() + .trim_start_matches('"') + .trim_end_matches('"'); + + cursor = edits_body_start; + let mut edits_list = Vec::new(); + + while let Some((old_body_start, _)) = find_tag_open(input, &mut cursor, OLD_TEXT_TAG_NAME)? { + let old_body_end = find_tag_close(input, &mut cursor)?; + let old_text = trim_surrounding_newlines(&input[old_body_start..old_body_end]); + + let (new_body_start, _) = find_tag_open(input, &mut cursor, NEW_TEXT_TAG_NAME)? + .context("no new_text tag following old_text")?; + let new_body_end = find_tag_close(input, &mut cursor)?; + let new_text = trim_surrounding_newlines(&input[new_body_start..new_body_end]); + + edits_list.push((old_text, new_text)); + } + + Ok(XmlEdits { + file_path, + replacements: edits_list, + }) +} + +/// Trims a single leading and trailing newline +fn trim_surrounding_newlines(input: &str) -> &str { + let start = input.strip_prefix('\n').unwrap_or(input); + let end = start.strip_suffix('\n').unwrap_or(start); + end +} + +fn find_tag_open<'a>( + input: &'a str, + cursor: &mut usize, + expected_tag: &str, +) -> Result> { + let mut search_pos = *cursor; + + while search_pos < input.len() { + let Some(tag_start) = input[search_pos..].find("<") else { + break; + }; + let tag_start = search_pos + tag_start; + if !input[tag_start + 1..].starts_with(expected_tag) { + search_pos = search_pos + tag_start + 1; + continue; + }; + + let after_tag_name = tag_start + expected_tag.len() + 1; + let close_bracket = input[after_tag_name..] .find('>') - .with_context(|| format!("missing > after {tag}"))?; - let attributes = &input[start_ix..closing_bracket_ix].trim(); - let end_ix = closing_bracket_ix - + input[closing_bracket_ix..] 
- .find(&close_tag) - .with_context(|| format!("no `{close_tag}` tag"))?; - let body = &input[closing_bracket_ix + '>'.len_utf8()..end_ix]; - let body = body.strip_prefix('\n').unwrap_or(body); - let body = body.strip_suffix('\n').unwrap_or(body); - *input = &input[end_ix + close_tag.len()..]; - Ok(Some(ParsedTag { attributes, body })) + .with_context(|| format!("missing > after <{}", expected_tag))?; + let attrs_end = after_tag_name + close_bracket; + let body_start = attrs_end + 1; + + let attributes = input[after_tag_name..attrs_end].trim(); + *cursor = body_start; + + return Ok(Some((body_start, attributes))); + } + + Ok(None) +} + +fn find_tag_close(input: &str, cursor: &mut usize) -> Result { + let mut depth = 1; + let mut search_pos = *cursor; + + while search_pos < input.len() && depth > 0 { + let Some(bracket_offset) = input[search_pos..].find('<') else { + break; + }; + let bracket_pos = search_pos + bracket_offset; + + if input[bracket_pos..].starts_with("') + { + let close_start = bracket_pos + 2; + let tag_name = input[close_start..close_start + close_end].trim(); + + if XML_TAGS.contains(&tag_name) { + depth -= 1; + if depth == 0 { + *cursor = close_start + close_end + 1; + return Ok(bracket_pos); + } + } + search_pos = close_start + close_end + 1; + continue; + } else if let Some(close_bracket_offset) = input[bracket_pos..].find('>') { + let close_bracket_pos = bracket_pos + close_bracket_offset; + let tag_name = &input[bracket_pos + 1..close_bracket_pos].trim(); + if XML_TAGS.contains(&tag_name) { + depth += 1; + } + } + + search_pos = bracket_pos + 1; + } + + anyhow::bail!("no closing tag found") } const REPLACEMENT_COST: u32 = 1; @@ -357,17 +442,128 @@ mod tests { use util::path; #[test] - fn test_parse_tags() { - let mut input = indoc! {r#" - Prelude - - tag value - - "# }; - let parsed = parse_tag(&mut input, "tag").unwrap().unwrap(); - assert_eq!(parsed.attributes, "attr=\"foo\""); - assert_eq!(parsed.body, "tag value"); - assert_eq!(input, "\n"); + fn test_extract_xml_edits() { + let input = indoc! {r#" + + + old content + + + new content + + + "#}; + + let result = extract_xml_replacements(input).unwrap(); + assert_eq!(result.file_path, "test.rs"); + assert_eq!(result.replacements.len(), 1); + assert_eq!(result.replacements[0].0, "old content"); + assert_eq!(result.replacements[0].1, "new content"); + } + + #[test] + fn test_extract_xml_edits_with_wrong_closing_tags() { + let input = indoc! {r#" + + + old content + + + new content + + + "#}; + + let result = extract_xml_replacements(input).unwrap(); + assert_eq!(result.file_path, "test.rs"); + assert_eq!(result.replacements.len(), 1); + assert_eq!(result.replacements[0].0, "old content"); + assert_eq!(result.replacements[0].1, "new content"); + } + + #[test] + fn test_extract_xml_edits_with_xml_like_content() { + let input = indoc! {r#" + + + + + + + + + "#}; + + let result = extract_xml_replacements(input).unwrap(); + assert_eq!(result.file_path, "component.tsx"); + assert_eq!(result.replacements.len(), 1); + assert_eq!(result.replacements[0].0, ""); + assert_eq!( + result.replacements[0].1, + "" + ); + } + + #[test] + fn test_extract_xml_edits_with_conflicting_content() { + let input = indoc! 
{r#" + + + + + + + + + "#}; + + let result = extract_xml_replacements(input).unwrap(); + assert_eq!(result.file_path, "component.tsx"); + assert_eq!(result.replacements.len(), 1); + assert_eq!(result.replacements[0].0, ""); + assert_eq!(result.replacements[0].1, ""); + } + + #[test] + fn test_extract_xml_edits_multiple_pairs() { + let input = indoc! {r#" + Some reasoning before edits. Lots of thinking going on here + + + + first old + + + first new + + + second old + + + second new + + + "#}; + + let result = extract_xml_replacements(input).unwrap(); + assert_eq!(result.file_path, "test.rs"); + assert_eq!(result.replacements.len(), 2); + assert_eq!(result.replacements[0].0, "first old"); + assert_eq!(result.replacements[0].1, "first new"); + assert_eq!(result.replacements[1].0, "second old"); + assert_eq!(result.replacements[1].1, "second new"); + } + + #[test] + fn test_extract_xml_edits_unexpected_eof() { + let input = indoc! {r#" + + + first old + Date: Tue, 18 Nov 2025 19:20:34 +0100 Subject: [PATCH 0196/1030] chore: Fix build graph - again (#42999) 11.3s -> 10.0s for silly stuff like extracting actions from crates. project panel still depends on git_ui though.. Release Notes: - N/A --- Cargo.lock | 1 - crates/keymap_editor/Cargo.toml | 1 - crates/keymap_editor/src/keymap_editor.rs | 2 +- crates/project_panel/src/project_panel.rs | 4 +--- crates/vim/Cargo.toml | 1 - crates/vim/src/vim.rs | 6 ++---- crates/zed/src/zed.rs | 2 +- crates/zed/src/zed/app_menus.rs | 2 +- crates/zed_actions/src/lib.rs | 23 +++++++++++++++++++++++ 9 files changed, 29 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 09ee945d1c34cf1ae93a3cc538d62860ad3a1c78..a39ff712e3a5b9cefe42bcec8359fdf297d55f71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8729,7 +8729,6 @@ dependencies = [ "ui", "ui_input", "util", - "vim", "workspace", "zed_actions", ] diff --git a/crates/keymap_editor/Cargo.toml b/crates/keymap_editor/Cargo.toml index b6086566c3be01b60527d497b836fc53d101e467..33ba95ddd6d8df7efe2f551451af0340d83369c7 100644 --- a/crates/keymap_editor/Cargo.toml +++ b/crates/keymap_editor/Cargo.toml @@ -41,7 +41,6 @@ tree-sitter-rust.workspace = true ui_input.workspace = true ui.workspace = true util.workspace = true -vim.workspace = true workspace.workspace = true zed_actions.workspace = true diff --git a/crates/keymap_editor/src/keymap_editor.rs b/crates/keymap_editor/src/keymap_editor.rs index 5e39e80b4bd7e9b91c9419290a48b39d060aa267..ce78a1d60ac610bbf10383377fef667c0a4eaa36 100644 --- a/crates/keymap_editor/src/keymap_editor.rs +++ b/crates/keymap_editor/src/keymap_editor.rs @@ -1769,7 +1769,7 @@ impl Render for KeymapEditor { ) .action( "Vim Bindings", - vim::OpenDefaultKeymap.boxed_clone(), + zed_actions::vim::OpenDefaultKeymap.boxed_clone(), ) })) }) diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 410bea6b9c268a3009c7ff41ebf565eaa4a79bff..b21fd02a05c7fca93f09b436488318fdc3bd33c4 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -67,7 +67,7 @@ use workspace::{ notifications::{DetachAndPromptErr, NotifyResultExt, NotifyTaskExt}, }; use worktree::CreatedEntry; -use zed_actions::workspace::OpenWithSystem; +use zed_actions::{project_panel::ToggleFocus, workspace::OpenWithSystem}; const PROJECT_PANEL_KEY: &str = "ProjectPanel"; const NEW_ENTRY_ID: ProjectEntryId = ProjectEntryId::MAX; @@ -306,8 +306,6 @@ actions!( OpenSplitVertical, /// Opens the selected file in a horizontal split. 
OpenSplitHorizontal, - /// Toggles focus on the project panel. - ToggleFocus, /// Toggles visibility of git-ignored files. ToggleHideGitIgnore, /// Toggles visibility of hidden files. diff --git a/crates/vim/Cargo.toml b/crates/vim/Cargo.toml index 4cea29508f437d6753a78155965b94259a2d7884..c935fd76dba79d24d6637ef5acd70f50870f82e7 100644 --- a/crates/vim/Cargo.toml +++ b/crates/vim/Cargo.toml @@ -35,7 +35,6 @@ multi_buffer.workspace = true nvim-rs = { git = "https://github.com/KillTheMule/nvim-rs", rev = "764dd270c642f77f10f3e19d05cc178a6cbe69f3", features = ["use_tokio"], optional = true } picker.workspace = true project.workspace = true -project_panel.workspace = true regex.workspace = true schemars.workspace = true search.workspace = true diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index a0efd1ee29a3c72793c331cf4ccbeb38444bd55b..ce359b6b1eea24d862f68813b97e23ea27829435 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -183,8 +183,6 @@ actions!( InnerObject, /// Maximizes the current pane. MaximizePane, - /// Opens the default keymap file. - OpenDefaultKeymap, /// Resets all pane sizes to default. ResetPaneSizes, /// Resizes the pane to the right. @@ -314,7 +312,7 @@ pub fn init(cx: &mut App) { workspace.register_action(|_, _: &ToggleProjectPanelFocus, window, cx| { if Vim::take_count(cx).is_none() { - window.dispatch_action(project_panel::ToggleFocus.boxed_clone(), cx); + window.dispatch_action(zed_actions::project_panel::ToggleFocus.boxed_clone(), cx); } }); @@ -343,7 +341,7 @@ pub fn init(cx: &mut App) { }; }); - workspace.register_action(|_, _: &OpenDefaultKeymap, _, cx| { + workspace.register_action(|_, _: &zed_actions::vim::OpenDefaultKeymap, _, cx| { cx.emit(workspace::Event::OpenBundledFile { text: settings::vim_keymap(), title: "Default Vim Bindings", diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 92b78704163c7852867df8fefc018eaf4135210b..18f4a12b5d4abf8a11ae825bed2ad44feb1563ec 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -1002,7 +1002,7 @@ fn register_actions( .register_action(open_project_debug_tasks_file) .register_action( |workspace: &mut Workspace, - _: &project_panel::ToggleFocus, + _: &zed_actions::project_panel::ToggleFocus, window: &mut Window, cx: &mut Context| { workspace.toggle_panel_focus::(window, cx); diff --git a/crates/zed/src/zed/app_menus.rs b/crates/zed/src/zed/app_menus.rs index 20a4f8be3b25991b4b22e4fcabd6008c7e502b65..9f91b237101044a870dda66d38edaeee08bc733d 100644 --- a/crates/zed/src/zed/app_menus.rs +++ b/crates/zed/src/zed/app_menus.rs @@ -39,7 +39,7 @@ pub fn app_menus(cx: &mut App) -> Vec { ], }), MenuItem::separator(), - MenuItem::action("Project Panel", project_panel::ToggleFocus), + MenuItem::action("Project Panel", zed_actions::project_panel::ToggleFocus), MenuItem::action("Outline Panel", outline_panel::ToggleFocus), MenuItem::action("Collab Panel", collab_panel::ToggleFocus), MenuItem::action("Terminal Panel", terminal_panel::ToggleFocus), diff --git a/crates/zed_actions/src/lib.rs b/crates/zed_actions/src/lib.rs index b9fde1f34402c56becd811a76a1d33da93413c50..803fde3f8787b4f6489bd6390d289c35b1c96199 100644 --- a/crates/zed_actions/src/lib.rs +++ b/crates/zed_actions/src/lib.rs @@ -250,6 +250,17 @@ pub mod command_palette { ); } +pub mod project_panel { + use gpui::actions; + + actions!( + project_panel, + [ + /// Toggles focus on the project panel. 
+ ToggleFocus + ] + ); +} pub mod feedback { use gpui::actions; @@ -532,6 +543,18 @@ actions!( ] ); +pub mod vim { + use gpui::actions; + + actions!( + vim, + [ + /// Opens the default keymap file. + OpenDefaultKeymap + ] + ); +} + #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct WslConnectionOptions { pub distro_name: String, From 917148c5ce6b4801dee7ab191b6fb5144f84d0ea Mon Sep 17 00:00:00 2001 From: Barani S Date: Wed, 19 Nov 2025 04:50:32 +0530 Subject: [PATCH 0197/1030] gpui: Use DWM API for backdrop effects and add Mica/Mica Alt support (#41842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR updates window background rendering to use the **official DWM backdrop API** (`DwmSetWindowAttribute`) instead of the legacy `SetWindowCompositionAttribute`. It also adds **Mica** and **Mica Alt** options to `WindowBackgroundAppearance` for native Windows 11 effects. ### Motivation Enables modern, stable, and GPU-accelerated backdrops consistent with Windows 11’s Fluent Design. Removes reliance on undocumented APIs while maintaining backward compatibility with older Windows versions. ### Changes * Added `MicaBackdrop` and `MicaAltBackdrop` variants. * Switched to DWM API for applying backdrop effects. * Verified fallback behavior on Windows 10. ### Release Notes: - Added `WindowBackgroundAppearance::MicaBackdrop` and `WindowBackgroundAppearance::MicaAltBackdrop` for Windows 11 Mica and Mica Alt window backdrops. ### Screenshots - `WindowBackgroundAppearance::Blurred` image - `WindowBackgroundAppearance::MicaBackdrop` image - `WindowBackgroundAppearance::MicaAltBackdrop` image --------- Co-authored-by: John Tur --- crates/gpui/src/platform.rs | 4 +++ crates/gpui/src/platform/windows/window.rs | 40 +++++++++++++++++++--- 2 files changed, 40 insertions(+), 4 deletions(-) diff --git a/crates/gpui/src/platform.rs b/crates/gpui/src/platform.rs index 7168d0179424028e7f823d39df0f6f51f45095ac..caedf0317f21b9bceb548b31543da2f33bfac254 100644 --- a/crates/gpui/src/platform.rs +++ b/crates/gpui/src/platform.rs @@ -1389,6 +1389,10 @@ pub enum WindowBackgroundAppearance { /// /// Not always supported. Blurred, + /// The Mica backdrop material, supported on Windows 11. + MicaBackdrop, + /// The Mica Alt backdrop material, supported on Windows 11. + MicaAltBackdrop, } /// The options that can be configured for a file dialog prompt diff --git a/crates/gpui/src/platform/windows/window.rs b/crates/gpui/src/platform/windows/window.rs index fe6e6ff664a6c8f9b9524501ca1e875b5023169e..334f0519f15a608a8b36b3610c88fb456a4a8f5b 100644 --- a/crates/gpui/src/platform/windows/window.rs +++ b/crates/gpui/src/platform/windows/window.rs @@ -18,6 +18,7 @@ use smallvec::SmallVec; use windows::{ Win32::{ Foundation::*, + Graphics::Dwm::*, Graphics::Gdi::*, System::{Com::*, LibraryLoader::*, Ole::*, SystemServices::*}, UI::{Controls::*, HiDpi::*, Input::KeyboardAndMouse::*, Shell::*, WindowsAndMessaging::*}, @@ -773,20 +774,26 @@ impl PlatformWindow for WindowsWindow { fn set_background_appearance(&self, background_appearance: WindowBackgroundAppearance) { let hwnd = self.0.hwnd; + // using Dwm APIs for Mica and MicaAlt backdrops. 
+ // others follow the set_window_composition_attribute approach match background_appearance { WindowBackgroundAppearance::Opaque => { - // ACCENT_DISABLED set_window_composition_attribute(hwnd, None, 0); } WindowBackgroundAppearance::Transparent => { - // Use ACCENT_ENABLE_TRANSPARENTGRADIENT for transparent background set_window_composition_attribute(hwnd, None, 2); } WindowBackgroundAppearance::Blurred => { - // Enable acrylic blur - // ACCENT_ENABLE_ACRYLICBLURBEHIND set_window_composition_attribute(hwnd, Some((0, 0, 0, 0)), 4); } + WindowBackgroundAppearance::MicaBackdrop => { + // DWMSBT_MAINWINDOW => MicaBase + dwm_set_window_composition_attribute(hwnd, 2); + } + WindowBackgroundAppearance::MicaAltBackdrop => { + // DWMSBT_TABBEDWINDOW => MicaAlt + dwm_set_window_composition_attribute(hwnd, 4); + } } } @@ -1330,9 +1337,34 @@ fn retrieve_window_placement( Ok(placement) } +fn dwm_set_window_composition_attribute(hwnd: HWND, backdrop_type: u32) { + let mut version = unsafe { std::mem::zeroed() }; + let status = unsafe { windows::Wdk::System::SystemServices::RtlGetVersion(&mut version) }; + + // DWMWA_SYSTEMBACKDROP_TYPE is available only on version 22621 or later + // using SetWindowCompositionAttributeType as a fallback + if !status.is_ok() || version.dwBuildNumber < 22621 { + return; + } + + unsafe { + let result = DwmSetWindowAttribute( + hwnd, + DWMWA_SYSTEMBACKDROP_TYPE, + &backdrop_type as *const _ as *const _, + std::mem::size_of_val(&backdrop_type) as u32, + ); + + if !result.is_ok() { + return; + } + } +} + fn set_window_composition_attribute(hwnd: HWND, color: Option, state: u32) { let mut version = unsafe { std::mem::zeroed() }; let status = unsafe { windows::Wdk::System::SystemServices::RtlGetVersion(&mut version) }; + if !status.is_ok() || version.dwBuildNumber < 17763 { return; } From 16b24e892ebbb22da8538a673352d99b407f7902 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Tue, 18 Nov 2025 15:25:12 -0800 Subject: [PATCH 0198/1030] Increase error verbosity (#43013) Closes #42288 This will actually print the parsing error that prevented the vscode settings file from being loaded which should make it easier for users to self help when they have an invalid config. 
Release Notes: - N/A --- crates/onboarding/src/onboarding.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/onboarding/src/onboarding.rs b/crates/onboarding/src/onboarding.rs index 562dea8748eaddad415d7098f6c34f0bea7b5169..404af2c74f9524aa1d52db39de2354bbe4564240 100644 --- a/crates/onboarding/src/onboarding.rs +++ b/crates/onboarding/src/onboarding.rs @@ -449,7 +449,7 @@ pub async fn handle_import_vscode_settings( match settings::VsCodeSettings::load_user_settings(source, fs.clone()).await { Ok(vscode_settings) => vscode_settings, Err(err) => { - zlog::error!("{err}"); + zlog::error!("{err:?}"); let _ = cx.prompt( gpui::PromptLevel::Info, &format!("Could not find or load a {source} settings file"), From d7c340c739ec89e29c3cda3ab95c7eaeadb89ee1 Mon Sep 17 00:00:00 2001 From: Tom Zaspel <40226087+tzabbi@users.noreply.github.com> Date: Wed, 19 Nov 2025 00:40:09 +0100 Subject: [PATCH 0199/1030] docs: Add documenation for OpenTofu support (#42448) Closes - Release Notes: - N/A Signed-off-by: Tom Zaspel <40226087+tzabbi@users.noreply.github.com> --- docs/src/SUMMARY.md | 1 + docs/src/languages/opentofu.md | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 docs/src/languages/opentofu.md diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index c57802afa61cde6bc0ae6998c95c7980e7c60d64..dc42cfbdbb89d06162016f8ec2548ad630d20bc9 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -126,6 +126,7 @@ - [Markdown](./languages/markdown.md) - [Nim](./languages/nim.md) - [OCaml](./languages/ocaml.md) +- [OpenTofu](./languages/opentofu.md) - [PHP](./languages/php.md) - [PowerShell](./languages/powershell.md) - [Prisma](./languages/prisma.md) diff --git a/docs/src/languages/opentofu.md b/docs/src/languages/opentofu.md new file mode 100644 index 0000000000000000000000000000000000000000..dfe8fa7b8185ad9950b6102dad19df4aab45a3dd --- /dev/null +++ b/docs/src/languages/opentofu.md @@ -0,0 +1,20 @@ +# OpenTofu + +OpenTofu support is available through the [OpenTofu extension](https://github.com/ashpool37/zed-extension-opentofu). + +- Tree-sitter: [MichaHoffmann/tree-sitter-hcl](https://github.com/MichaHoffmann/tree-sitter-hcl) +- Language Server: [opentofu/tofu-ls](https://github.com/opentofu/tofu-ls) + +## Configuration + +In order to automatically use the OpenTofu extension and language server when editing .tf and .tfvars files, +either uninstall the Terraform extension or add this to your settings.json: + +```json +"file_types": { + "OpenTofu": ["tf"], + "OpenTofu Vars": ["tfvars"] +}, +``` + +See the [full list of server settings here](https://github.com/opentofu/tofu-ls/blob/main/docs/SETTINGS.md). From e8e0707256547ca5c3601dc2ce0479a406b5197b Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Tue, 18 Nov 2025 15:46:29 -0800 Subject: [PATCH 0200/1030] zeta2: Improve queries parsing (#43012) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... 
--------- Co-authored-by: Agus Co-authored-by: Max --- Cargo.lock | 1 + crates/cloud_zeta2_prompt/Cargo.toml | 1 + .../src/retrieval_prompt.rs | 150 ++++++++++++++++++ 3 files changed, 152 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index a39ff712e3a5b9cefe42bcec8359fdf297d55f71..f076630a2e36c2fcca70db8cbdbf20c606b7e2c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3211,6 +3211,7 @@ dependencies = [ "rustc-hash 2.1.1", "schemars 1.0.4", "serde", + "serde_json", "strum 0.27.2", ] diff --git a/crates/cloud_zeta2_prompt/Cargo.toml b/crates/cloud_zeta2_prompt/Cargo.toml index 8be10265cb23e7dd0983c52e7c2d6984b62c4be4..fa8246950f8d03029388e0276954de946efc2346 100644 --- a/crates/cloud_zeta2_prompt/Cargo.toml +++ b/crates/cloud_zeta2_prompt/Cargo.toml @@ -19,4 +19,5 @@ ordered-float.workspace = true rustc-hash.workspace = true schemars.workspace = true serde.workspace = true +serde_json.workspace = true strum.workspace = true diff --git a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs index e334674ef8004b485608e3864cf1e4e8d4c97cdb..fd35f63f03ff967491a28d817852f6622e4919ca 100644 --- a/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/retrieval_prompt.rs @@ -40,9 +40,47 @@ pub fn build_prompt(request: predict_edits_v3::PlanContextRetrievalRequest) -> R pub struct SearchToolInput { /// An array of queries to run for gathering context relevant to the next prediction #[schemars(length(max = 3))] + #[serde(deserialize_with = "deserialize_queries")] pub queries: Box<[SearchToolQuery]>, } +fn deserialize_queries<'de, D>(deserializer: D) -> Result, D::Error> +where + D: serde::Deserializer<'de>, +{ + use serde::de::Error; + + #[derive(Deserialize)] + #[serde(untagged)] + enum QueryCollection { + Array(Box<[SearchToolQuery]>), + DoubleArray(Box<[Box<[SearchToolQuery]>]>), + Single(SearchToolQuery), + } + + #[derive(Deserialize)] + #[serde(untagged)] + enum MaybeDoubleEncoded { + SingleEncoded(QueryCollection), + DoubleEncoded(String), + } + + let result = MaybeDoubleEncoded::deserialize(deserializer)?; + + let normalized = match result { + MaybeDoubleEncoded::SingleEncoded(value) => value, + MaybeDoubleEncoded::DoubleEncoded(value) => { + serde_json::from_str(&value).map_err(D::Error::custom)? + } + }; + + Ok(match normalized { + QueryCollection::Array(items) => items, + QueryCollection::Single(search_tool_query) => Box::new([search_tool_query]), + QueryCollection::DoubleArray(double_array) => double_array.into_iter().flatten().collect(), + }) +} + /// Search for relevant code by path, syntax hierarchy, and content. #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, Hash)] pub struct SearchToolQuery { @@ -92,3 +130,115 @@ const TOOL_USE_REMINDER: &str = indoc! {" -- Analyze the user's intent in one to two sentences, then call the `search` tool. "}; + +#[cfg(test)] +mod tests { + use serde_json::json; + + use super::*; + + #[test] + fn test_deserialize_queries() { + let single_query_json = indoc! {r#"{ + "queries": { + "glob": "**/*.rs", + "syntax_node": ["fn test"], + "content": "assert" + } + }"#}; + + let flat_input: SearchToolInput = serde_json::from_str(single_query_json).unwrap(); + assert_eq!(flat_input.queries.len(), 1); + assert_eq!(flat_input.queries[0].glob, "**/*.rs"); + assert_eq!(flat_input.queries[0].syntax_node, vec!["fn test"]); + assert_eq!(flat_input.queries[0].content, Some("assert".to_string())); + + let flat_json = indoc! 
{r#"{ + "queries": [ + { + "glob": "**/*.rs", + "syntax_node": ["fn test"], + "content": "assert" + }, + { + "glob": "**/*.ts", + "syntax_node": [], + "content": null + } + ] + }"#}; + + let flat_input: SearchToolInput = serde_json::from_str(flat_json).unwrap(); + assert_eq!(flat_input.queries.len(), 2); + assert_eq!(flat_input.queries[0].glob, "**/*.rs"); + assert_eq!(flat_input.queries[0].syntax_node, vec!["fn test"]); + assert_eq!(flat_input.queries[0].content, Some("assert".to_string())); + assert_eq!(flat_input.queries[1].glob, "**/*.ts"); + assert_eq!(flat_input.queries[1].syntax_node.len(), 0); + assert_eq!(flat_input.queries[1].content, None); + + let nested_json = indoc! {r#"{ + "queries": [ + [ + { + "glob": "**/*.rs", + "syntax_node": ["fn test"], + "content": "assert" + } + ], + [ + { + "glob": "**/*.ts", + "syntax_node": [], + "content": null + } + ] + ] + }"#}; + + let nested_input: SearchToolInput = serde_json::from_str(nested_json).unwrap(); + + assert_eq!(nested_input.queries.len(), 2); + + assert_eq!(nested_input.queries[0].glob, "**/*.rs"); + assert_eq!(nested_input.queries[0].syntax_node, vec!["fn test"]); + assert_eq!(nested_input.queries[0].content, Some("assert".to_string())); + assert_eq!(nested_input.queries[1].glob, "**/*.ts"); + assert_eq!(nested_input.queries[1].syntax_node.len(), 0); + assert_eq!(nested_input.queries[1].content, None); + + let double_encoded_queries = serde_json::to_string(&json!({ + "queries": serde_json::to_string(&json!([ + { + "glob": "**/*.rs", + "syntax_node": ["fn test"], + "content": "assert" + }, + { + "glob": "**/*.ts", + "syntax_node": [], + "content": null + } + ])).unwrap() + })) + .unwrap(); + + let double_encoded_input: SearchToolInput = + serde_json::from_str(&double_encoded_queries).unwrap(); + + assert_eq!(double_encoded_input.queries.len(), 2); + + assert_eq!(double_encoded_input.queries[0].glob, "**/*.rs"); + assert_eq!(double_encoded_input.queries[0].syntax_node, vec!["fn test"]); + assert_eq!( + double_encoded_input.queries[0].content, + Some("assert".to_string()) + ); + assert_eq!(double_encoded_input.queries[1].glob, "**/*.ts"); + assert_eq!(double_encoded_input.queries[1].syntax_node.len(), 0); + assert_eq!(double_encoded_input.queries[1].content, None); + + // ### ERROR Switching from var declarations to lexical declarations [RUN 073] + // invalid search json {"queries": ["express/lib/response.js", "var\\s+[a-zA-Z_][a-zA-Z0-9_]*\\s*=.*;", "function.*\\(.*\\).*\\{.*\\}"]} + } +} From 94a43dc73a905f690627812dceb615fcea212900 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 19 Nov 2025 00:49:22 +0100 Subject: [PATCH 0201/1030] extension_host: Fix `IS_WASM_THREAD` being set for wrong threads (#43005) https://github.com/zed-industries/zed/pull/40883 implemented this incorrectly. It was marking a random background thread as a wasm thread (whatever thread picked up the wasm epoch timer background task), instead of marking the threads that actually run the wasm extension. This has two implications: 1. it didn't prevent extension panics from tearing down as planned 2. Worse, it actually made us hide legit panics in sentry for one of our background workers. Now 2 still technically applies for all tokio threads after this, but we basically only use these for wasm extensions in the main zed binary. 
Release Notes: - Fixed extension panics crashing Zed on Linux --- crates/crashes/src/crashes.rs | 8 ++------ crates/extension_host/src/wasm_host.rs | 11 ++++++++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/crashes/src/crashes.rs b/crates/crashes/src/crashes.rs index 560ca5a009d5ddf8f3866591ebd9e6247bc98942..f1d39afdd9a88eea70466594d04df09e034fc928 100644 --- a/crates/crashes/src/crashes.rs +++ b/crates/crashes/src/crashes.rs @@ -289,15 +289,11 @@ impl minidumper::ServerHandler for CrashServer { pub fn panic_hook(info: &PanicHookInfo) { // Don't handle a panic on threads that are not relevant to the main execution. if extension_host::wasm_host::IS_WASM_THREAD.with(|v| v.load(Ordering::Acquire)) { + log::error!("wasm thread panicked!"); return; } - let message = info - .payload() - .downcast_ref::<&str>() - .map(|s| s.to_string()) - .or_else(|| info.payload().downcast_ref::().cloned()) - .unwrap_or_else(|| "Box".to_string()); + let message = info.payload_as_str().unwrap_or("Box").to_owned(); let span = info .location() diff --git a/crates/extension_host/src/wasm_host.rs b/crates/extension_host/src/wasm_host.rs index 1e4bed7a50b44c710384f19c901e4e74854df0e2..1fe8d0117f36b25a5a83fe574006adeb7f7ef035 100644 --- a/crates/extension_host/src/wasm_host.rs +++ b/crates/extension_host/src/wasm_host.rs @@ -537,7 +537,6 @@ fn wasm_engine(executor: &BackgroundExecutor) -> wasmtime::Engine { let engine_ref = engine.weak(); executor .spawn(async move { - IS_WASM_THREAD.with(|v| v.store(true, Ordering::Release)); // Somewhat arbitrary interval, as it isn't a guaranteed interval. // But this is a rough upper bound for how long the extension execution can block on // `Future::poll`. @@ -643,6 +642,12 @@ impl WasmHost { let (tx, mut rx) = mpsc::unbounded::(); let extension_task = async move { + // note: Setting the thread local here will slowly "poison" all tokio threads + // causing us to not record their panics any longer. + // + // This is fine though, the main zed binary only uses tokio for livekit and wasm extensions. + // Livekit seldom (if ever) panics 🤞 so the likelihood of us missing a panic in sentry is very low. + IS_WASM_THREAD.with(|v| v.store(true, Ordering::Release)); while let Some(call) = rx.next().await { (call)(&mut extension, &mut store).await; } @@ -659,8 +664,8 @@ impl WasmHost { cx.spawn(async move |cx| { let (extension_task, manifest, work_dir, tx, zed_api_version) = cx.background_executor().spawn(load_extension_task).await?; - // we need to run run the task in an extension context as wasmtime_wasi may - // call into tokio, accessing its runtime handle + // we need to run run the task in a tokio context as wasmtime_wasi may + // call into tokio, accessing its runtime handle when we trigger the `engine.increment_epoch()` above. 
let task = Arc::new(gpui_tokio::Tokio::spawn(cx, extension_task)?); Ok(WasmExtension { From 7c0663b825c434c493f2d158f8448fb4eff9e9b2 Mon Sep 17 00:00:00 2001 From: Martin Bergo Date: Wed, 19 Nov 2025 00:51:32 +0100 Subject: [PATCH 0202/1030] google_ai: Add gemini-3-pro-preview model (#43015) Release Notes: - Added the newly released Gemini 3 Pro Preview Model https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/3-pro --- crates/google_ai/src/google_ai.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/google_ai/src/google_ai.rs b/crates/google_ai/src/google_ai.rs index 84f8e8ef8dbaac1d55f73515f625b670a4a52709..941f58fc13372114fee7731b2b9bbf69c8e10619 100644 --- a/crates/google_ai/src/google_ai.rs +++ b/crates/google_ai/src/google_ai.rs @@ -521,6 +521,8 @@ pub enum Model { alias = "gemini-2.5-pro-preview-06-05" )] Gemini25Pro, + #[serde(rename = "gemini-3-pro-preview")] + Gemini3ProPreview, #[serde(rename = "custom")] Custom { name: String, @@ -547,6 +549,7 @@ impl Model { Self::Gemini25FlashLitePreview => "gemini-2.5-flash-lite-preview", Self::Gemini25Flash => "gemini-2.5-flash", Self::Gemini25Pro => "gemini-2.5-pro", + Self::Gemini3ProPreview => "gemini-3-pro-preview", Self::Custom { name, .. } => name, } } @@ -560,6 +563,7 @@ impl Model { Self::Gemini25FlashLitePreview => "gemini-2.5-flash-lite-preview-06-17", Self::Gemini25Flash => "gemini-2.5-flash", Self::Gemini25Pro => "gemini-2.5-pro", + Self::Gemini3ProPreview => "gemini-3-pro-preview", Self::Custom { name, .. } => name, } } @@ -574,6 +578,7 @@ impl Model { Self::Gemini25FlashLitePreview => "Gemini 2.5 Flash-Lite Preview", Self::Gemini25Flash => "Gemini 2.5 Flash", Self::Gemini25Pro => "Gemini 2.5 Pro", + Self::Gemini3ProPreview => "Gemini 3 Pro Preview", Self::Custom { name, display_name, .. } => display_name.as_ref().unwrap_or(name), @@ -590,6 +595,7 @@ impl Model { Self::Gemini25FlashLitePreview => 1_000_000, Self::Gemini25Flash => 1_048_576, Self::Gemini25Pro => 1_048_576, + Self::Gemini3ProPreview => 1_048_576, Self::Custom { max_tokens, .. } => *max_tokens, } } @@ -604,6 +610,7 @@ impl Model { Model::Gemini25FlashLitePreview => Some(64_000), Model::Gemini25Flash => Some(65_536), Model::Gemini25Pro => Some(65_536), + Model::Gemini3ProPreview => Some(65_536), Model::Custom { .. } => None, } } @@ -623,7 +630,10 @@ impl Model { | Self::Gemini15Flash | Self::Gemini20FlashLite | Self::Gemini20Flash => GoogleModelMode::Default, - Self::Gemini25FlashLitePreview | Self::Gemini25Flash | Self::Gemini25Pro => { + Self::Gemini25FlashLitePreview + | Self::Gemini25Flash + | Self::Gemini25Pro + | Self::Gemini3ProPreview => { GoogleModelMode::Thinking { // By default these models are set to "auto", so we preserve that behavior // but indicate they are capable of thinking mode From 1e2f15a3d70258ab366e9ac9309605749d5b0a27 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Tue, 18 Nov 2025 16:38:19 -0800 Subject: [PATCH 0203/1030] Disable phpactor by default on windows (#43011) We install phpactor by default, but on windows it doesn't work out of the box (see [here](https://github.com/phpactor/phpactor/discussions/2579) for details). For now we'll default to using intelephense, but in the future we'd like to switch back if phpactor lands windows support given that it's open source. 
Release Notes: - N/A --- assets/settings/default.json | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/assets/settings/default.json b/assets/settings/default.json index 6a04adf88e4593b4e04eda9a0bf64525293b2b0f..36c140dce7f8949ea73c163b9786b63ebeed0869 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -2059,6 +2059,18 @@ "dev": { // "theme": "Andromeda" }, + // Settings overrides to use when using linux + "linux": {}, + // Settings overrides to use when using macos + "macos": {}, + // Settings overrides to use when using windows + "windows": { + "languages": { + "PHP": { + "language_servers": ["intelephense", "!phpactor", "..."] + } + } + }, // Whether to show full labels in line indicator or short ones // // Values: From 24c1617e74abcc94ddbd6a667b13a7f87a974bb3 Mon Sep 17 00:00:00 2001 From: Ben Heimberg Date: Wed, 19 Nov 2025 04:57:30 +0200 Subject: [PATCH 0204/1030] git_ui: Dismiss pickers only on active window (#41320) Small QOL improvement for branch picker to only dismiss when focus lost in active window. This can benefit those who need to switch windows mid branch creation to fetch correct jira ticket number or etc. Added `window.is_active_window()` guard in `picker.rs` -> `cancel` event Release Notes: - (Let's Git Together) Fixed a behavior where pickers would automatically close upon the window becoming inactive. --- crates/picker/src/picker.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/picker/src/picker.rs b/crates/picker/src/picker.rs index 1a2c6509f24843210014c8c868f7eec6c7918d91..4e7dba59ad39399b9edab30f553bdc17545540dd 100644 --- a/crates/picker/src/picker.rs +++ b/crates/picker/src/picker.rs @@ -607,7 +607,7 @@ impl Picker { self.update_matches(query, window, cx); } editor::EditorEvent::Blurred => { - if self.is_modal { + if self.is_modal && window.is_window_active() { self.cancel(&menu::Cancel, window, cx); } } @@ -619,7 +619,9 @@ impl Picker { let Head::Empty(_) = &self.head else { panic!("unexpected call"); }; - self.cancel(&menu::Cancel, window, cx); + if window.is_window_active() { + self.cancel(&menu::Cancel, window, cx); + } } pub fn refresh_placeholder(&mut self, window: &mut Window, cx: &mut App) { From 785b81aa3a74e527d2e33037b07233459f7d118b Mon Sep 17 00:00:00 2001 From: Cole Miller Date: Tue, 18 Nov 2025 22:56:45 -0500 Subject: [PATCH 0205/1030] Revert "Fix track file renames in git panel (#42352)" (#43030) This reverts commit b0a7defd0990c315c27f51f82dbf13a736279eba. It looks like this doesn't interact correctly with the project diff or with staging, let's revert and reland with bugs fixed. 
Release Notes: - N/A --- crates/collab/src/db/queries/projects.rs | 1 - crates/collab/src/db/queries/rooms.rs | 1 - crates/fs/src/fake_git_repo.rs | 1 - crates/git/src/repository.rs | 2 +- crates/git/src/status.rs | 95 +++++------------------- crates/git_ui/src/git_panel.rs | 64 ++++++---------- crates/git_ui/src/git_ui.rs | 5 -- crates/project/src/git_store.rs | 24 ------ crates/proto/proto/git.proto | 1 - 9 files changed, 44 insertions(+), 150 deletions(-) diff --git a/crates/collab/src/db/queries/projects.rs b/crates/collab/src/db/queries/projects.rs index c8651216434d404f7ab4a88fbb5fbb5f7d0aa3ee..51a0ef83323ec70675283d2fdec7ca1ad791b12d 100644 --- a/crates/collab/src/db/queries/projects.rs +++ b/crates/collab/src/db/queries/projects.rs @@ -1005,7 +1005,6 @@ impl Database { is_last_update: true, merge_message: db_repository_entry.merge_message, stash_entries: Vec::new(), - renamed_paths: Default::default(), }); } } diff --git a/crates/collab/src/db/queries/rooms.rs b/crates/collab/src/db/queries/rooms.rs index 151e4c442bd7d0a25053e35b94d9e2ad9817a6a3..f020b99b5f1030cfe9391498512258e6db249bac 100644 --- a/crates/collab/src/db/queries/rooms.rs +++ b/crates/collab/src/db/queries/rooms.rs @@ -796,7 +796,6 @@ impl Database { is_last_update: true, merge_message: db_repository.merge_message, stash_entries: Vec::new(), - renamed_paths: Default::default(), }); } } diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index de7c0561ebc9918a2686402fb9b62608566c7d9c..97cd13d185817453c369356bdc60cbc1517bf1e1 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -359,7 +359,6 @@ impl GitRepository for FakeGitRepository { entries.sort_by(|a, b| a.0.cmp(&b.0)); anyhow::Ok(GitStatus { entries: entries.into(), - renamed_paths: HashMap::default(), }) }); Task::ready(match result { diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 2eb37038cde2f4d0c4dc4903fdc06f86ab543827..2c9189962492daa75dba86e9e2ebd247ad85254e 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -2045,7 +2045,7 @@ fn git_status_args(path_prefixes: &[RepoPath]) -> Vec { OsString::from("status"), OsString::from("--porcelain=v1"), OsString::from("--untracked-files=all"), - OsString::from("--find-renames"), + OsString::from("--no-renames"), OsString::from("-z"), ]; args.extend( diff --git a/crates/git/src/status.rs b/crates/git/src/status.rs index 9b76fe75dd284c08c0f2e9b20116bc51dc4bc56c..2cf7cc7c1810620f1cf1aaea831fb337810c83d8 100644 --- a/crates/git/src/status.rs +++ b/crates/git/src/status.rs @@ -203,14 +203,6 @@ impl FileStatus { matches!(self, FileStatus::Untracked) } - pub fn is_renamed(self) -> bool { - let FileStatus::Tracked(tracked) = self else { - return false; - }; - tracked.index_status == StatusCode::Renamed - || tracked.worktree_status == StatusCode::Renamed - } - pub fn summary(self) -> GitSummary { match self { FileStatus::Ignored => GitSummary::UNCHANGED, @@ -438,79 +430,34 @@ impl std::ops::Sub for GitSummary { #[derive(Clone, Debug)] pub struct GitStatus { pub entries: Arc<[(RepoPath, FileStatus)]>, - pub renamed_paths: HashMap, } impl FromStr for GitStatus { type Err = anyhow::Error; fn from_str(s: &str) -> Result { - let mut parts = s.split('\0').peekable(); - let mut entries = Vec::new(); - let mut renamed_paths = HashMap::default(); - - while let Some(entry) = parts.next() { - if entry.is_empty() { - continue; - } - - if !matches!(entry.get(2..3), Some(" ")) { - continue; - } - - let path_or_old_path = 
&entry[3..]; - - if path_or_old_path.ends_with('/') { - continue; - } - - let status = match entry.as_bytes()[0..2].try_into() { - Ok(bytes) => match FileStatus::from_bytes(bytes).log_err() { - Some(s) => s, - None => continue, - }, - Err(_) => continue, - }; - - let is_rename = matches!( - status, - FileStatus::Tracked(TrackedStatus { - index_status: StatusCode::Renamed | StatusCode::Copied, - .. - }) | FileStatus::Tracked(TrackedStatus { - worktree_status: StatusCode::Renamed | StatusCode::Copied, - .. - }) - ); - - let (old_path_str, new_path_str) = if is_rename { - let new_path = match parts.next() { - Some(new_path) if !new_path.is_empty() => new_path, - _ => continue, + let mut entries = s + .split('\0') + .filter_map(|entry| { + let sep = entry.get(2..3)?; + if sep != " " { + return None; }; - (path_or_old_path, new_path) - } else { - (path_or_old_path, path_or_old_path) - }; - - if new_path_str.ends_with('/') { - continue; - } - - let new_path = match RelPath::unix(new_path_str).log_err() { - Some(p) => RepoPath::from_rel_path(p), - None => continue, - }; - - if is_rename { - if let Some(old_path_rel) = RelPath::unix(old_path_str).log_err() { - let old_path_repo = RepoPath::from_rel_path(old_path_rel); - renamed_paths.insert(new_path.clone(), old_path_repo); + let path = &entry[3..]; + // The git status output includes untracked directories as well as untracked files. + // We do our own processing to compute the "summary" status of each directory, + // so just skip any directories in the output, since they'll otherwise interfere + // with our handling of nested repositories. + if path.ends_with('/') { + return None; } - } - - entries.push((new_path, status)); - } + let status = entry.as_bytes()[0..2].try_into().unwrap(); + let status = FileStatus::from_bytes(status).log_err()?; + // git-status outputs `/`-delimited repo paths, even on Windows. 
+ let path = RepoPath::from_rel_path(RelPath::unix(path).log_err()?); + Some((path, status)) + }) + .collect::>(); entries.sort_unstable_by(|(a, _), (b, _)| a.cmp(b)); // When a file exists in HEAD, is deleted in the index, and exists again in the working copy, // git produces two lines for it, one reading `D ` (deleted in index, unmodified in working copy) @@ -534,7 +481,6 @@ impl FromStr for GitStatus { }); Ok(Self { entries: entries.into(), - renamed_paths, }) } } @@ -543,7 +489,6 @@ impl Default for GitStatus { fn default() -> Self { Self { entries: Arc::new([]), - renamed_paths: HashMap::default(), } } } diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 0691ba78560e38f5d3a297d033bd41459dff78c4..e2a4a26b320284fed727a7f7e60acf807c39abf0 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3957,20 +3957,6 @@ impl GitPanel { let path_style = self.project.read(cx).path_style(cx); let display_name = entry.display_name(path_style); - let active_repo = self - .project - .read(cx) - .active_repository(cx) - .expect("active repository must be set"); - let repo = active_repo.read(cx); - let repo_snapshot = repo.snapshot(); - - let old_path = if entry.status.is_renamed() { - repo_snapshot.renamed_paths.get(&entry.repo_path) - } else { - None - }; - let selected = self.selected_entry == Some(ix); let marked = self.marked_entries.contains(&ix); let status_style = GitPanelSettings::get_global(cx).status_style; @@ -3979,16 +3965,15 @@ impl GitPanel { let has_conflict = status.is_conflicted(); let is_modified = status.is_modified(); let is_deleted = status.is_deleted(); - let is_renamed = status.is_renamed(); let label_color = if status_style == StatusStyle::LabelColor { if has_conflict { Color::VersionControlConflict + } else if is_modified { + Color::VersionControlModified } else if is_deleted { // We don't want a bunch of red labels in the list Color::Disabled - } else if is_renamed || is_modified { - Color::VersionControlModified } else { Color::VersionControlAdded } @@ -4008,6 +3993,12 @@ impl GitPanel { let checkbox_id: ElementId = ElementId::Name(format!("entry_{}_{}_checkbox", display_name, ix).into()); + let active_repo = self + .project + .read(cx) + .active_repository(cx) + .expect("active repository must be set"); + let repo = active_repo.read(cx); // Checking for current staged/unstaged file status is a chained operation: // 1. first, we check for any pending operation recorded in repository // 2. 
if there are no pending ops either running or finished, we then ask the repository @@ -4162,32 +4153,23 @@ impl GitPanel { .items_center() .flex_1() // .overflow_hidden() - .when_some(old_path.as_ref(), |this, old_path| { - let new_display = old_path.display(path_style).to_string(); - let old_display = entry.repo_path.display(path_style).to_string(); - this.child(self.entry_label(old_display, Color::Muted).strikethrough()) - .child(self.entry_label(" → ", Color::Muted)) - .child(self.entry_label(new_display, label_color)) - }) - .when(old_path.is_none(), |this| { - this.when_some(entry.parent_dir(path_style), |this, parent| { - if !parent.is_empty() { - this.child( - self.entry_label( - format!("{parent}{}", path_style.separator()), - path_color, - ) - .when(status.is_deleted(), |this| this.strikethrough()), + .when_some(entry.parent_dir(path_style), |this, parent| { + if !parent.is_empty() { + this.child( + self.entry_label( + format!("{parent}{}", path_style.separator()), + path_color, ) - } else { - this - } - }) - .child( - self.entry_label(display_name, label_color) .when(status.is_deleted(), |this| this.strikethrough()), - ) - }), + ) + } else { + this + } + }) + .child( + self.entry_label(display_name, label_color) + .when(status.is_deleted(), |this| this.strikethrough()), + ), ) .into_any_element() } diff --git a/crates/git_ui/src/git_ui.rs b/crates/git_ui/src/git_ui.rs index 3a664b484a8ec6d31bd243917888d864280b281d..b4e833f7af72cf7843d3797b51ea349b24c7adc5 100644 --- a/crates/git_ui/src/git_ui.rs +++ b/crates/git_ui/src/git_ui.rs @@ -708,11 +708,6 @@ impl RenderOnce for GitStatusIcon { IconName::SquareMinus, cx.theme().colors().version_control_deleted, ) - } else if status.is_renamed() { - ( - IconName::ArrowRight, - cx.theme().colors().version_control_modified, - ) } else if status.is_modified() { ( IconName::SquareDot, diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 94af9859df1156d7a10286a843a31e8351fe050c..4cac71c6ae3e2eb3f3615821443db7c82e01d810 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -256,7 +256,6 @@ pub struct RepositorySnapshot { pub id: RepositoryId, pub statuses_by_path: SumTree, pub pending_ops_by_path: SumTree, - pub renamed_paths: HashMap, pub work_directory_abs_path: Arc, pub path_style: PathStyle, pub branch: Option, @@ -3064,7 +3063,6 @@ impl RepositorySnapshot { id, statuses_by_path: Default::default(), pending_ops_by_path: Default::default(), - renamed_paths: HashMap::default(), work_directory_abs_path, branch: None, head_commit: None, @@ -3106,11 +3104,6 @@ impl RepositorySnapshot { .iter() .map(stash_to_proto) .collect(), - renamed_paths: self - .renamed_paths - .iter() - .map(|(new_path, old_path)| (new_path.to_proto(), old_path.to_proto())) - .collect(), } } @@ -3180,11 +3173,6 @@ impl RepositorySnapshot { .iter() .map(stash_to_proto) .collect(), - renamed_paths: self - .renamed_paths - .iter() - .map(|(new_path, old_path)| (new_path.to_proto(), old_path.to_proto())) - .collect(), } } @@ -4980,17 +4968,6 @@ impl Repository { } self.snapshot.stash_entries = new_stash_entries; - self.snapshot.renamed_paths = update - .renamed_paths - .into_iter() - .filter_map(|(new_path_str, old_path_str)| { - Some(( - RepoPath::from_proto(&new_path_str).log_err()?, - RepoPath::from_proto(&old_path_str).log_err()?, - )) - }) - .collect(); - let edits = update .removed_statuses .into_iter() @@ -5766,7 +5743,6 @@ async fn compute_snapshot( id, statuses_by_path, pending_ops_by_path, - renamed_paths: 
statuses.renamed_paths, work_directory_abs_path, path_style: prev_snapshot.path_style, scan_id: prev_snapshot.scan_id + 1, diff --git a/crates/proto/proto/git.proto b/crates/proto/proto/git.proto index 8ed17864ec0c0403a4bb71f918d21b44a9b6cb13..efbd7f616f9e75c4e0409f4dc73c67f9eb1836e0 100644 --- a/crates/proto/proto/git.proto +++ b/crates/proto/proto/git.proto @@ -124,7 +124,6 @@ message UpdateRepository { optional GitCommitDetails head_commit_details = 11; optional string merge_message = 12; repeated StashEntry stash_entries = 13; - map renamed_paths = 14; } message RemoveRepository { From 19d2532cf8d74ebb114c3100817274f39ada0eb6 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 18 Nov 2025 21:41:24 -0800 Subject: [PATCH 0206/1030] Update google_ai.rs (#43034) Release Notes: - N/A --- crates/google_ai/src/google_ai.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/google_ai/src/google_ai.rs b/crates/google_ai/src/google_ai.rs index 941f58fc13372114fee7731b2b9bbf69c8e10619..465dab402598594473e9c1adaf06945dba7247bb 100644 --- a/crates/google_ai/src/google_ai.rs +++ b/crates/google_ai/src/google_ai.rs @@ -578,7 +578,7 @@ impl Model { Self::Gemini25FlashLitePreview => "Gemini 2.5 Flash-Lite Preview", Self::Gemini25Flash => "Gemini 2.5 Flash", Self::Gemini25Pro => "Gemini 2.5 Pro", - Self::Gemini3ProPreview => "Gemini 3 Pro Preview", + Self::Gemini3ProPreview => "Gemini 3 Pro", Self::Custom { name, display_name, .. } => display_name.as_ref().unwrap_or(name), From a910c594d6c5f6a808aa3c45ca4f8e5bba66ae75 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Wed, 19 Nov 2025 09:49:56 +0100 Subject: [PATCH 0207/1030] agent_ui: Add mode_id to telemetry (#43045) Release Notes: - N/A --- crates/agent_ui/src/acp/mode_selector.rs | 4 ++++ crates/agent_ui/src/acp/thread_view.rs | 15 ++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/crates/agent_ui/src/acp/mode_selector.rs b/crates/agent_ui/src/acp/mode_selector.rs index aed151de728ce2e802154a73d4add9681a410933..83ab9c299976848b973af28192462fda4eb69409 100644 --- a/crates/agent_ui/src/acp/mode_selector.rs +++ b/crates/agent_ui/src/acp/mode_selector.rs @@ -56,6 +56,10 @@ impl ModeSelector { self.set_mode(all_modes[next_index].id.clone(), cx); } + pub fn mode(&self) -> acp::SessionModeId { + self.connection.current_mode() + } + pub fn set_mode(&mut self, mode: acp::SessionModeId, cx: &mut Context) { let task = self.connection.set_mode(mode, cx); self.setting_mode = true; diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 82237d86ba9f66b5d68321e03092660dea29d65d..312ca136a2bf6f8b134a0dae0ab01bb71497a3b2 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -1135,6 +1135,7 @@ impl AcpThreadView { self.is_loading_contents = true; let model_id = self.current_model_id(cx); + let mode_id = self.current_mode_id(cx); let guard = cx.new(|_| ()); cx.observe_release(&guard, |this, _guard, cx| { this.is_loading_contents = false; @@ -1169,7 +1170,8 @@ impl AcpThreadView { "Agent Message Sent", agent = agent_telemetry_id, session = session_id, - model = model_id + model = model_id, + mode = mode_id ); thread.send(contents, cx) @@ -1182,6 +1184,7 @@ impl AcpThreadView { agent = agent_telemetry_id, session = session_id, model = model_id, + mode = mode_id, status, turn_time_ms, ); @@ -5405,6 +5408,16 @@ impl AcpThreadView { ) } + fn current_mode_id(&self, cx: &App) -> Option> { + if let Some(thread) = 
self.as_native_thread(cx) { + Some(thread.read(cx).profile().0.clone()) + } else if let Some(mode_selector) = self.mode_selector() { + Some(mode_selector.read(cx).mode().0) + } else { + None + } + } + fn current_model_id(&self, cx: &App) -> Option { self.model_selector .as_ref() From 5ccbe945a606e28579b164203e970f87c47fc523 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 19 Nov 2025 10:26:49 +0100 Subject: [PATCH 0208/1030] util: Check whether discovered powershell is actually executable (#43044) Closes https://github.com/zed-industries/zed/issues/42944 The powershell we discovered might be in a directory with higher permission requirements which will cause us to fail using it. Release Notes: - Fixed powershell discovery disregarding admin requirements --- crates/askpass/src/askpass.rs | 1 + crates/gpui/src/platform/windows/platform.rs | 10 ++- crates/util/src/shell.rs | 80 ++++++++++++-------- 3 files changed, 55 insertions(+), 36 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index 0974409477d452958df13893e316845a919723c5..f7d81641f47f8be62adb9606ffd3e47e1d89ca73 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -250,6 +250,7 @@ impl PasswordProxy { .await .with_context(|| format!("creating askpass script at {askpass_script_path:?}"))?; make_file_executable(&askpass_script_path).await?; + // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( "powershell.exe -ExecutionPolicy Bypass -File {}", diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b7f13f1fab495b1040d1be8e7b86376c450b5f7e..110bc02633515b417edc6707347fbae77e2888e4 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -390,10 +390,12 @@ impl Platform for WindowsPlatform { clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" )] - let restart_process = util::command::new_std_command("powershell.exe") - .arg("-command") - .arg(script) - .spawn(); + // todo(shell): There might be no powershell on the system + let restart_process = + util::command::new_std_command(util::shell::get_windows_system_shell()) + .arg("-command") + .arg(script) + .spawn(); match restart_process { Ok(_) => self.quit(), diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index ba54f7b7784b45613b28067afe2748339e6b6c64..1eeb483defbe6f21d3018b3ce0cbdc8e4109a367 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -2,6 +2,8 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{borrow::Cow, fmt, path::Path, sync::LazyLock}; +use crate::command::new_std_command; + /// Shell configuration to open the terminal with. 
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq, JsonSchema, Hash)] #[serde(rename_all = "snake_case")] @@ -108,16 +110,12 @@ pub fn get_windows_system_shell() -> String { use std::path::PathBuf; fn find_pwsh_in_programfiles(find_alternate: bool, find_preview: bool) -> Option { - #[cfg(target_pointer_width = "64")] - let env_var = if find_alternate { - "ProgramFiles(x86)" - } else { - "ProgramFiles" - }; - - #[cfg(target_pointer_width = "32")] let env_var = if find_alternate { - "ProgramW6432" + if cfg!(target_pointer_width = "64") { + "ProgramFiles(x86)" + } else { + "ProgramW6432" + } } else { "ProgramFiles" }; @@ -165,23 +163,19 @@ pub fn get_windows_system_shell() -> String { } else { "Microsoft.PowerShell_" }; - msix_app_dir - .read_dir() - .ok()? - .filter_map(|entry| { - let entry = entry.ok()?; - if !matches!(entry.file_type(), Ok(ft) if ft.is_dir()) { - return None; - } + msix_app_dir.read_dir().ok()?.find_map(|entry| { + let entry = entry.ok()?; + if !matches!(entry.file_type(), Ok(ft) if ft.is_dir()) { + return None; + } - if !entry.file_name().to_string_lossy().starts_with(prefix) { - return None; - } + if !entry.file_name().to_string_lossy().starts_with(prefix) { + return None; + } - let exe_path = entry.path().join("pwsh.exe"); - exe_path.exists().then_some(exe_path) - }) - .next() + let exe_path = entry.path().join("pwsh.exe"); + exe_path.exists().then_some(exe_path) + }) } fn find_pwsh_in_scoop() -> Option { @@ -190,15 +184,37 @@ pub fn get_windows_system_shell() -> String { pwsh_exe.exists().then_some(pwsh_exe) } + // check whether the found powershell is executable for us static SYSTEM_SHELL: LazyLock = LazyLock::new(|| { - find_pwsh_in_programfiles(false, false) - .or_else(|| find_pwsh_in_programfiles(true, false)) - .or_else(|| find_pwsh_in_msix(false)) - .or_else(|| find_pwsh_in_programfiles(false, true)) - .or_else(|| find_pwsh_in_msix(true)) - .or_else(|| find_pwsh_in_programfiles(true, true)) - .or_else(find_pwsh_in_scoop) - .map(|p| p.to_string_lossy().into_owned()) + let can_execute_pwsh = |p: &PathBuf| { + #[allow(clippy::disallowed_methods)] + let status = new_std_command(p).arg("-NoProfile").arg("-Help").status(); + let success = status.as_ref().is_ok_and(|status| status.success()); + if !success { + log::warn!( + "Powershell found at `{}` is not executable: {status:?}", + p.display() + ); + } + success + }; + + let locations = [ + || find_pwsh_in_programfiles(false, false), + || find_pwsh_in_programfiles(true, false), + || find_pwsh_in_msix(false), + || find_pwsh_in_programfiles(false, true), + || find_pwsh_in_msix(true), + || find_pwsh_in_programfiles(true, true), + || find_pwsh_in_scoop(), + || which::which_global("pwsh.exe").ok(), + || which::which_global("powershell.exe").ok(), + ]; + locations + .into_iter() + .filter_map(|f| f()) + .find(|p| can_execute_pwsh(&p)) + .map(|p| p.to_string_lossy().trim().to_owned()) .inspect(|shell| log::info!("Found powershell in: {}", shell)) .unwrap_or_else(|| { log::warn!("Powershell not found, falling back to `cmd`"); From 9feb2602163a8dee91954184926f1b1a674c5dc2 Mon Sep 17 00:00:00 2001 From: xdBronch <51252236+xdBronch@users.noreply.github.com> Date: Wed, 19 Nov 2025 06:19:58 -0500 Subject: [PATCH 0209/1030] lsp: Support deprecated completion item tag and advertise capability (#43000) Release Notes: - N/A --- crates/editor/src/code_context_menus.rs | 12 +++++++++++- crates/lsp/src/lsp.rs | 4 ++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/code_context_menus.rs 
b/crates/editor/src/code_context_menus.rs index f220cadee5acca5c7c1d3c91b9350380bc0bf10e..9a2b8c385689f284fc42e49a5c7451b3774fe018 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -8,6 +8,7 @@ use gpui::{ use itertools::Itertools; use language::CodeLabel; use language::{Buffer, LanguageName, LanguageRegistry}; +use lsp::CompletionItemTag; use markdown::{Markdown, MarkdownElement}; use multi_buffer::{Anchor, ExcerptId}; use ordered_float::OrderedFloat; @@ -840,7 +841,16 @@ impl CompletionsMenu { if completion .source .lsp_completion(false) - .and_then(|lsp_completion| lsp_completion.deprecated) + .and_then(|lsp_completion| { + match (lsp_completion.deprecated, &lsp_completion.tags) + { + (Some(true), _) => Some(true), + (_, Some(tags)) => Some( + tags.contains(&CompletionItemTag::DEPRECATED), + ), + _ => None, + } + }) .unwrap_or(false) { highlight.strikethrough = Some(StrikethroughStyle { diff --git a/crates/lsp/src/lsp.rs b/crates/lsp/src/lsp.rs index 84e5a95ed80e75bf7d338b589f5b1c1c6495a616..af6760a36817ed4857ad070768c645832f053ca8 100644 --- a/crates/lsp/src/lsp.rs +++ b/crates/lsp/src/lsp.rs @@ -764,6 +764,10 @@ impl LanguageServer { // "textEdit".to_string(), ], }), + deprecated_support: Some(true), + tag_support: Some(TagSupport { + value_set: vec![CompletionItemTag::DEPRECATED], + }), insert_replace_support: Some(true), label_details_support: Some(true), insert_text_mode_support: Some(InsertTextModeSupport { From 40dd4e2270871941695778fe35e7006a1698b6d7 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 19 Nov 2025 12:25:53 +0100 Subject: [PATCH 0210/1030] zeta: Add stats about context lines from patch that were retrieved during context retrieval (#43053) A.K.A: Eval: Expect lines necessary to uniquely target every change in "Expected Patch" to be included as context Release Notes: - N/A --- crates/zeta_cli/src/evaluate.rs | 54 ++++++++++++++++++++++++++++----- 1 file changed, 47 insertions(+), 7 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 9f087188b7f7a615398eaab19ae934cdcd5c64ff..14dc0f6c0c105919b822b9077211a1e1d9686d04 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,5 +1,5 @@ use std::{ - collections::HashMap, + collections::{BTreeSet, HashMap}, io::{IsTerminal, Write}, path::PathBuf, sync::Arc, @@ -140,6 +140,16 @@ fn write_aggregated_scores( prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), generated_len: successful.iter().map(|r| r.generated_len).sum::() / successful.len(), + context_lines_found_in_context: successful + .iter() + .map(|r| r.context_lines_found_in_context) + .sum::() + / successful.len(), + context_lines_in_expected_patch: successful + .iter() + .map(|r| r.context_lines_in_expected_patch) + .sum::() + / successful.len(), }; writeln!(w, "\n{}", "-".repeat(80))?; @@ -268,6 +278,8 @@ pub struct EvaluationResult { pub context: Scores, pub prompt_len: usize, pub generated_len: usize, + pub context_lines_in_expected_patch: usize, + pub context_lines_found_in_context: usize, } #[derive(Default, Debug)] @@ -389,15 +401,17 @@ impl EvaluationResult { writeln!(f, "### Scores\n")?; writeln!( f, - " Prompt Generated TP FP FN Precision Recall F1" + " Prompt Generated RetrievedContext PatchContext TP FP FN Precision Recall F1" )?; writeln!( f, - "────────────────────────────────────────────────────────────────────────────────────" + 
"─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────" )?; writeln!( f, - "Context Retrieval {:<7} {:<10} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Context Retrieval {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "", + "", "", "", self.context.true_positives, @@ -410,9 +424,11 @@ impl EvaluationResult { if let Some(edit_prediction) = &self.edit_prediction { writeln!( f, - "Edit Prediction {:<7} {:<10} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Edit Prediction {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", self.prompt_len, self.generated_len, + self.context_lines_found_in_context, + self.context_lines_in_expected_patch, edit_prediction.true_positives, edit_prediction.false_positives, edit_prediction.false_negatives, @@ -425,7 +441,7 @@ impl EvaluationResult { } } -pub fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> EvaluationResult { +fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> EvaluationResult { let mut eval_result = EvaluationResult { prompt_len: preds.prompt_len, generated_len: preds.generated_len, @@ -481,13 +497,35 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> if predict { // todo: alternatives for patches - let expected_patch_lines = example + let expected_patch = example .expected_patch .lines() .map(DiffLine::parse) + .collect::>(); + let expected_patch_lines = expected_patch + .iter() .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) .map(|line| line.to_string()) .collect(); + let expected_context_lines = expected_patch + .iter() + .filter_map(|line| { + if let DiffLine::Context(str) = line { + Some(String::from(*str)) + } else { + None + } + }) + .collect::>(); + let actual_context_lines = preds + .excerpts + .iter() + .flat_map(|excerpt| excerpt.text.lines().map(ToOwned::to_owned)) + .collect::>(); + + let matched = expected_context_lines + .intersection(&actual_context_lines) + .count(); let actual_patch_lines = preds .diff @@ -498,6 +536,8 @@ pub fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> .collect(); eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); + eval_result.context_lines_in_expected_patch = expected_context_lines.len(); + eval_result.context_lines_found_in_context = matched; } eval_result From 74d61aad7fe771adcf1e737f70fccd7ee835e321 Mon Sep 17 00:00:00 2001 From: Vasyl Protsiv Date: Wed, 19 Nov 2025 13:34:41 +0200 Subject: [PATCH 0211/1030] util: Fix zip extraction (#42714) I was trying to use Zed for Rust debugging on windows, but was getting this warning in debugger console: "Could not initialize Python interpreter - some features will be unavailable (e.g. debug visualizers)." As the warning suggests this led to bad debugging experience where the variables were not visualized properly in the "Variables" panel. After some investigation I found that the problem is that Zed silently failed to extract all files from the debug adapter package (https://github.com/vadimcn/codelldb/releases/download/v1.11.8/codelldb-win32-x64.vsix). Particularly `python-lldb` folder was missing, which caused the warning. 
The error occurred here:
https://github.com/zed-industries/zed/blob/cf7c64d77f1806cdd34b3812bbf27681fb3cb905/crates/util/src/archive.rs#L47
and it then gets ignored here:
https://github.com/zed-industries/zed/blob/cf7c64d77f1806cdd34b3812bbf27681fb3cb905/crates/dap/src/adapters.rs#L323-L326

The simple fix is to update the `async_zip` crate to version 0.0.18, where this issue appears to be fixed. I also added logging instead of silently ignoring the error, as I believe that would have helped catch it earlier.

To reproduce the original issue, follow these steps:

0. (Optional) Remove or rename the old CodeLLDB adapter at `%localappdata%\Zed\debug_adapters\CodeLLDB`, then restart Zed.
1. Create a simple Rust project. Make sure you use the GNU toolchain (target `x86_64-pc-windows-gnu`):

```rust
fn world() -> String {
    "world".into()
}

fn main() {
    let w = world();
    println!("hello {}", w);
}
```

2. Put a breakpoint on line 7 (`println`).
3. In the command palette choose "debugger: start" and then select "run *crate name*".

Screenshot before the fix:
Console before the fix ``` Checking latest version of CodeLLDB... Downloading from https://github.com/vadimcn/codelldb/releases/download/v1.11.8/codelldb-win32-x64.vsix... Download complete Could not initialize Python interpreter - some features will be unavailable (e.g. debug visualizers). Console is in 'commands' mode, prefix expressions with '?'. warning: (x86_64) D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe unable to locate separate debug file (dwo, dwp). Debugging will be degraded. Launching: D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe Launched process 13836 from 'D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe' error: repro.exe [0x0000000000002074]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x000000000000001a) attribute, but range extraction failed (invalid range list offset 0x1a), please file a bug and attach the file at the start of this error message error: repro.exe [0x000000000000208c]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000025) attribute, but range extraction failed (invalid range list offset 0x25), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020af]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000030) attribute, but range extraction failed (invalid range list offset 0x30), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020c4]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x000000000000003b) attribute, but range extraction failed (invalid range list offset 0x3b), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020fc]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000046) attribute, but range extraction failed (invalid range list offset 0x46), please file a bug and attach the file at the start of this error message error: repro.exe [0x0000000000002130]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000046) attribute, but range extraction failed (invalid range list offset 0x46), please file a bug and attach the file at the start of this error message > ? w < {...} ```
Screenshot after the fix:
Console after the fix ``` Checking latest version of CodeLLDB... Downloading from https://github.com/vadimcn/codelldb/releases/download/v1.11.8/codelldb-win32-x64.vsix... Download complete Console is in 'commands' mode, prefix expressions with '?'. Loading Rust formatters from C:\Users\Vasyl\.rustup\toolchains\1.91.1-x86_64-pc-windows-msvc\lib/rustlib/etc warning: (x86_64) D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe unable to locate separate debug file (dwo, dwp). Debugging will be degraded. Launching: D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe Launched process 10364 from 'D:\repro\target\x86_64-pc-windows-gnu\debug\repro.exe' error: repro.exe [0x0000000000002074]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x000000000000001a) attribute, but range extraction failed (invalid range list offset 0x1a), please file a bug and attach the file at the start of this error message error: repro.exe [0x000000000000208c]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000025) attribute, but range extraction failed (invalid range list offset 0x25), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020af]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000030) attribute, but range extraction failed (invalid range list offset 0x30), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020c4]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x000000000000003b) attribute, but range extraction failed (invalid range list offset 0x3b), please file a bug and attach the file at the start of this error message error: repro.exe [0x00000000000020fc]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000046) attribute, but range extraction failed (invalid range list offset 0x46), please file a bug and attach the file at the start of this error message error: repro.exe [0x0000000000002130]: DIE has DW_AT_ranges(DW_FORM_sec_offset 0x0000000000000046) attribute, but range extraction failed (invalid range list offset 0x46), please file a bug and attach the file at the start of this error message > ? w < "world" ```
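For illustration, here is a minimal, self-contained sketch of the pattern the fix introduces: the error is still ignored on purpose, but `inspect_err` surfaces it first, so an incomplete extraction at least shows up in the logs (`eprintln!` stands in for the `log::warn!` call used in the real code, and the error value is fabricated for the example):

```rust
use std::io::{Error, ErrorKind};

fn main() {
    // Stand-in for a failed extraction result.
    let extraction: Result<(), Error> =
        Err(Error::new(ErrorKind::InvalidData, "incomplete archive"));

    // Log the error before deliberately ignoring it, mirroring the fix.
    extraction
        .inspect_err(|e| eprintln!("ZIP extraction error: {}. Ignoring...", e))
        .ok();
}
```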
This fixes #33753 Release Notes: - util: Fixed archive::extract_zip failing to extract some archives --- Cargo.lock | 6 +++--- Cargo.toml | 2 +- crates/dap/src/adapters.rs | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f076630a2e36c2fcca70db8cbdbf20c606b7e2c1..c2062b5faefef4d5a5ec52c0d397a2b01a525d54 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1240,15 +1240,15 @@ dependencies = [ [[package]] name = "async_zip" -version = "0.0.17" +version = "0.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b9f7252833d5ed4b00aa9604b563529dd5e11de9c23615de2dcdf91eb87b52" +checksum = "0d8c50d65ce1b0e0cb65a785ff615f78860d7754290647d3b983208daa4f85e6" dependencies = [ "async-compression", "crc32fast", "futures-lite 2.6.1", "pin-project", - "thiserror 1.0.69", + "thiserror 2.0.17", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index e74647c6320f149d8eadad08ff3624859fe76624..75ad1e34e07894fd0892ff836da758e68efdc824 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -463,7 +463,7 @@ async-tar = "0.5.1" async-task = "4.7" async-trait = "0.1" async-tungstenite = "0.31.0" -async_zip = { version = "0.0.17", features = ["deflate", "deflate64"] } +async_zip = { version = "0.0.18", features = ["deflate", "deflate64"] } aws-config = { version = "1.6.1", features = ["behavior-version-latest"] } aws-credential-types = { version = "1.2.2", features = [ "hardcoded-credentials", diff --git a/crates/dap/src/adapters.rs b/crates/dap/src/adapters.rs index b303a0c0268c7e7812e49d1ff3fbe827f6eac2aa..96a35bc8ab66c4f3d71e4eca46488af90eb14e7c 100644 --- a/crates/dap/src/adapters.rs +++ b/crates/dap/src/adapters.rs @@ -324,6 +324,7 @@ pub async fn download_adapter_from_github( extract_zip(&version_path, file) .await // we cannot check the status as some adapter include files with names that trigger `Illegal byte sequence` + .inspect_err(|e| log::warn!("ZIP extraction error: {}. Ignoring...", e)) .ok(); util::fs::remove_matching(&adapter_path, |entry| { From 3125e789041b145dd13eb6f662a1bbc9aa8bcf93 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 19 Nov 2025 14:12:57 +0100 Subject: [PATCH 0212/1030] windows: Bundle new conpty.dll/OpenConsole.exe and use it for local builds on x86_64 (#43059) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/zed/build.rs | 21 +++++++++++++++++- .../resources/windows/bin/x64/OpenConsole.exe | Bin 0 -> 1145344 bytes .../zed/resources/windows/bin/x64/conpty.dll | Bin 0 -> 98304 bytes 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 crates/zed/resources/windows/bin/x64/OpenConsole.exe create mode 100644 crates/zed/resources/windows/bin/x64/conpty.dll diff --git a/crates/zed/build.rs b/crates/zed/build.rs index be420defa3aba17a739ffe18b24512078fce2b3a..f37996b644c2966a74998be9e6d40a3fc70557df 100644 --- a/crates/zed/build.rs +++ b/crates/zed/build.rs @@ -37,7 +37,7 @@ fn main() { { // This is currently the best way to make `cargo build ...`'s build script // to print something to stdout without extra verbosity. 
- println!("cargo:warning=Info: using '{git_sha}' hash for ZED_COMMIT_SHA env var"); + println!("cargo::warning=Info: using '{git_sha}' hash for ZED_COMMIT_SHA env var"); } } @@ -49,6 +49,25 @@ fn main() { println!("cargo:rustc-link-arg=/stack:{}", 8 * 1024 * 1024); } + if cfg!(target_arch = "x86_64") { + println!("cargo::rerun-if-changed=\\..\\..\\..\\conpty.dll"); + println!("cargo::rerun-if-changed=\\..\\..\\..\\OpenConsole.exe"); + let conpty_target = std::env::var("OUT_DIR").unwrap() + "\\..\\..\\..\\conpty.dll"; + match std::fs::copy("resources/windows/bin/x64/conpty.dll", &conpty_target) { + Ok(_) => println!("Copied conpty.dll to {conpty_target}"), + Err(e) => println!("cargo::warning=Failed to copy conpty.dll: {}", e), + } + let open_console_target = + std::env::var("OUT_DIR").unwrap() + "\\..\\..\\..\\OpenConsole.exe"; + match std::fs::copy( + "resources/windows/bin/x64/OpenConsole.exe", + &open_console_target, + ) { + Ok(_) => println!("Copied OpenConsole.exe to {open_console_target}"), + Err(e) => println!("cargo::warning=Failed to copy OpenConsole.exe: {}", e), + } + } + let release_channel = option_env!("RELEASE_CHANNEL").unwrap_or("dev"); let icon = match release_channel { "stable" => "resources/windows/app-icon.ico", diff --git a/crates/zed/resources/windows/bin/x64/OpenConsole.exe b/crates/zed/resources/windows/bin/x64/OpenConsole.exe new file mode 100644 index 0000000000000000000000000000000000000000..8bb6ab2188fd7a56adc941c3f8449265e762cf06 GIT binary patch literal 1145344 zcmdqKd3;k<`ahm7p+y?fDv_cBQKMGt(y=bBh$gg=8%U)pNR`z=5gn8cBv=Kdm{z%7 zBAro3X2cP?>+1D zJm)#jd7kGy=cMe~I$N5}W=qGPVc2X-@#H_R`2Ugrmz`p>_3Yo!)Anljbtf)O39dVF z_@tYrxhtnmxpC@sx40)ZcVXJ88%9|!kVqb^DtlV$2U4Ku8?LRN} zy~!%G+jf$zM{3VAZQo|uY|oNXHrx19@t!SSy&!{pPqo>)iKN8SwlPN#_q{Pk+fr@g zms^$0x@N`1gVklTe6!hRre@o!?06~2v2C1#XLYvi!M`)d{MVRc+uak{5B9WGT4~A8 zzx1@_;XAw1VUvJbnGV|=)LU#gyBzDYLpM}~Q1r~5CZ5DyqHZ-{bAt?LPrd%S&~-Lj z<^Xb$!)Ch}&+kz->7N(ubha#JD=0vQ7Z?)Bv-!WoKd;T!cy^^MC~yR>wuQ*Qq*FPx zcM}Q%0A$qnmf_9*x?LK^L?^Mov_S73DPeDP_(gvCe_-@8C z>7UnT8_9{+hh+LG_Bfe`8~MVgtJP~YCMaP9Ms-Xv`iB91f``B;20AlGkvKxCF)HH{}@g8v)5Rb z5_No>*3-6>UqG|cU%1NodaA#$aryRKd>1*F3`)WCVnrL2rbJfsAT7KBZBQm(6jF8) znn;<~)7Dbz;jFbem$0F7=aN)B#w$_#Yj5xr%QcoUI-I&WZ!!`a8vF8|q6fp-rX0IgDBmwYWgjeE!mWh9jr%t5QcsAo1)K&*I zn~8lHsDHAewN9Mq>|cXSn{A>mGE*Tz0x97iqFH5^N;@y9T`ZuEf*Qo!)Z>_%|QGXlik5g2C z3!Wp<&WC8{bEU<Ti}N4g83KL*`~4VaKgftOoiT-fPv6qlVBHeivKY(ivLCMNkKjNr0f~6$#6}uiT_EHOBSG^7XK>=Chj&zA~>AOqqOgiKBeGHt13@(Muzm-|d67xDv4ehR@qtQaAVhO3W2fU*i0Jk=c- zL{?U{hkotap!RV>q&{|f9E-R}1rC9h5}hkJ@PKp$dR>UgfI4(Lu@0yuO9RoX7K0`? 
zfhPC_?xt)^0mK3U6y6A|8r7c6A+q-@F%cAk5eSx|GV%%n(UL|qvJh2G={RUF$HkP6 z>oUc_P+&>SpI^sp-Em1W4Jkp5_-{S8tn7qaBG@pj~{Ekn-jvE9!ZUd5!zb@GEL!kaP zbU{ch|GOqTjzEho{?7$F`oXVg8`z6Aff}yi3jRc_%TD81Zq5)$6TuGLXpJWY8Q|P- znu&Y+CBhu1&4LCsLdt^G_7ks}V=P27#@LO<%rRbsMyol-B%9$H`y;VU$mg%R35*XbpLb)tX&nK= zt*2V!jj!5jG}lQ)_YuOe23e?KH2SJ9w%NjajmB6Oy$ zyc<-4sQDSgu2V*svMn3Up_$U6^wjV^wgPcBE78hk)Sd)EwGmZ6Hme+qDp-ac z>94~RO3sLdsQf%|ozF2b8*wPca=?2O5M3@1{R`^XlU%6eCC^rN65vpzqy&c&g#o&f z1B8O*YS|o9hF60V(b-%f!xgYW6}wFE6iSnUu6rf1?AO$q%w#S5$Dmo%Jj}A+1ML~E z3>Zq7L{J;o>JOv~mLk_APa-J~&G|FW(_#Kdl?XCvJ7)_gjmiH&z^|5kp=c?{*bMz9 zlatJ+mTdpkDK+R6Ii(Wlc9=qct%*;eaW8k6LVZ!W>nU{np7vAd$+Rw~ki?f>2n@|* zC&%$!C{k)XyZMxI-I5nNl1zC)y?Z-8f%MYDB3g~yIcWVFP!q57DLXj6q@DR}Y(L&f z1Xu(aGCvY0z}S~M5a0wfY7$_6Tx8Nb!TPsbQeYvzIO30|3JMU{#44CrFR1JS%12JJ z8!(%3(o45?2wfCST*p)#O8KHGyIRyTbpZ zxtctNyk`{6?jzN?u(bHwtseN^X3@p}mRLnLU>1y8ELM>g5N6ihf*SrNf(3!POk zU-KQ$598!mBFF(6*n=+$a=Zm{RL+* z@*Jl@Hm89WosXib9}9M^;2zbF)@m0b5k+>(3A0_6tP>@5F#AsYK}TRK8)aU#$b$}1 zJ4aS+l-Y|#c3BnI`WAmHdq-WY^{l&4W>%{HMM!)ee+y(r9W%rXosYqUR^qS5b3CU- zW!LgsF3HIEDw_Ra(gN@0=yHynlZRWYUwbaGR&T!3WK?Udz7g#leyxU66QXL=H)6ot zoSR?&EC!4t(s97rtzX+tFvqIKvyjw+2DuwgjFdI6)sf*{+WU@WRr|IH_!v|KQVyrC!sNc!CA+k z&34Nc(f22ep6CA=pR^liw4b!&(aI4g?MOW_X+J;M<)oGPC+q*mS@?r!Wffw*s;!n> z0FW{B5;{G;H=6UHQO*PZ;-ncQCQ1jbSWFo2Z=Em`IT+@I*)IqW)4^QeHk(f|cR@s< zx^ueXeLgsy3*N6xvhqcVNjB|{4wEbueII_3NriatSAxJoJ-YjVAaDnTIKf}0J}{$? zMPKsQZ`6;#h|rIKTpExPrm-aRzBmU$h4>#SO<5dfsWf$^Bwg@~;5sTv*WNC<&Qg-T z0Rl%JOCvH~L}@N5IqV|3{n@ z!>ZboVjEyTA}K!jB0-AZ@9RK{Bz<9d!rwBgw?&V0O#V0xz&be(P32DN&-s!A`X}^f z>KCMk#R}h9N+Xm#>dnhcr4dT4|1CwccPHf`YS6Aw7V+E{R}eoG3gXvL5c8iF_`?C? z7kccLnEiGW*>E|g};~| zQcG|WE=huRvQpBZw^)M`21JHA7mnLQi{@k4+{w@oDqA|IhO>??X_{I7VR$}_=U_bV{SC^_vqXfl z8y&t2-g-+aJpLEWIro_PJT-)>b4ZXqu|;tsnr`t&1mD5O$vvRN z4#iPm6~i@phoF20IZyvVP@V+tsQ#>#{6~Aur2p1qEc!#f2>IL_2n(8Go{;h7{5xLK z%qixdkk7qgA?-uiZ9w*GIYX!=d}o%JLbERF$2j=k0!a?Z`dH{KmGVOnm-g<9c#`;! 
z{RSfYdznN=pDpYYEpNs95|g;kSj5GU(W?)m)_(2df}vs1IqsS!8Co+ZbWb$b;(z2w zgiYHn2rF#9dEa*;Y+^o}>z#JAMbFbf&yM<36gIFhcZEKcpoOJSk$ZoO4`3iiP0Dns zph2lJ$w39{Nb-H?LO~8LbyKEFa#%~90oacy-}io+nBRBr5yVI=bwa+k)30acn6$9; z>;F^#P%)sj{6o~0ES(+CBm0M{{utM*FL}(IYL<`rXMlVdz52i>L_VQc&)zKva5(?a zc7UX4_8Hm9{O~{0e=vIt4D#0;(8T=Om*5&}<+El|i?ssIlPll@<_b6utw5C7GdLcQ zZl*{SiVzy#5T7(Jib(_Ci2anm3ocm#E(ughE_ubAG+TgCi@!-s8f>yfAuX*{ki%fO zHh!GolF{D@B9RzE7Im60qnyd~`F}NE92^C6zL=uox6YU2AC1qKOQ*D-FB<^+5$8+u z)&wQ4{I;w4l90!y{21EPq6g%0+^r#&5w*V{C}{7Q++IEF&$a4DcII)cqOPfc=vF1Ff>I#%)f%*6KrexH!|?!E%wS&8pH z{Qeo-X5x1`zjr3Si~0R=;yb|awTbU*`29xW`zC%rZ+!zleI%B909uI*;?OqF%#`#D zG@<;=r1JISG5ht8kb+7>SE7=N|Gb$P*8f}xva*R_a`{S@f0pIr1urf_!4|s@5B_tG zY@FzvWxqtGmf%4TEed$?N-5N0{{>CkY}JNn@6*y`{xh=8!`cfgJ$ zmH3B@n~>4=hM9={`aF!gogHyqsLw@Hde3&S?Isc}_Mb$DaoE0>sr&FSVfzuU61JB` z(RQ#cLz2F}H(_hBABFDtuf?7je@n$1WiB8bX@P!hGUyV10k7lz9%!xnBg9ul9V$vU z+n>C^LFh@MJ^Np=1ruj`?^U3%TVaIT3bZ{2UObGg61@uC0qaD`bmpY$U#Fr4 zgnLC+x<|!pn&YKi0S~49tcx6ojoYL+{VUoRV7yu;L)F|UWeCsM0Yy#GK2jpfGxeU> z)X7Ofd<6keKY9lil|6;K^igO%h%i8)uYEvkOk>1FWEB+FQcoUhx((O; z?`7F)8|xXyn?DcsGfNjN$FLz$%ukOWL`OdEND;B z1b$a>l!_GXAd#4)cn|dnf}t5+xW;g80J$dm ziqCe|dRR9)u&Nkdv_JFVEZraL zZ>EQ3`dBMH1BHC}DBR>*Y7>0~Jzjmo5U@8I3Y}W2&By|jx0+B6Mk&Y=I_teYZG+)D z3lPSBhf|Gj^YYl;R{cg41zNf6{b*!uOJoIQmSSanoXGl}0BmR;YC?K^z^Kw= z2^!IsaJ1?Ul!={$2us%Qi44})_x+4i29tcsuQq&f*L1GnJsR}Azo1ZB;F-^noxPGw z>Ko@9J8tY4$i$uKaFv>)t=7viLO#`(9?+KiT6|n-gxpOqdt&~FJlRqxY9DC_4A-~N zxfE@8P;1g)OSBrU_eBD@-adf)d9{t<%ZltPu?-U*0x5kdz%*P}tVSiUd><$~enAFQ z3^Gx*+fgf)f$Be_PP1Lb|5I2NO!%&%tv6h6yu%)wOi>#`fLlZeTQk0RvWzcwE7~^< zC$8NTzFg6|lceV@rPSH&T%N+%=iZ9e-*BA+XaRfSCINd6VQ=A@i^pI=hQ%rD2y8kT z+#)&(B80iXaQ)PZno)ncS@lzXYP$)k(!vG+qm#X!)kUxMqOjHLBlxnq3#u0?>Xgjg zKBL>>lpq8j_@RCGarvVI#ws=FYlcHa&rDdo%4^g@im5nT4ASSHZ?CTRrKu?sO2Ry-Mo6N(e`aMhXng{#;5zG$N&N7sR5rE`UKI zt4m2)R=YQp7latc_XqVdAgu)S0UKp0$h%lQyRA_Wm+Kl596qf%e>ti-{knR#QoGez z!!s1VrRk=W!ceVl5e;OV+t^max|sO|rnpCtQuW0twlMrnOiXL#5>lWHXq|pc_3g&! 
z5TcNvk;|mpb}6++s6?4fHkviv-kfHOo!`JERmuNQ$zOv-;v!P!80YKheGpjRV@{f4 zV48iG68-`;m1v2fIQ>n{IGO>b!W<=pn4{CSV~%2O;#15_bB&Sujq`Q;JZ!JjG=|Ss zqJ0|U=u>h2G1v3N zeBR^3Vik3~cP<6yLBsW2Gbb7-;@W{E^Bed%OF8>6+c6o>S;oxK zPiFNC^_qRa?zL4rYqNl31O6Jd6-|uG=X3VKV^cG#o%8MnR-vls>|c!ivj*z;aNhd~ z5sliUstzZy{q|L3)#50iAYV2r$StnZSZ9XqDpXZrRMCeqrx0B89S?06Q ze4d{o>pvvFXCJ&P9WW0Gd1oKI1aO54W*@waPxtJDgZa#!eQ+wC;nId_sj}t-vxV!- z=P72(*H|rP*-`J7*$0R5IcE03dOpjbwUs&>K09GrhO^=O69A&N0yT~P195Z#D~)r> zCT#_f%V>n=fIa#ZO~~kO3-_(vbl0iKoV^;K#JmvYni?VtB`{x_o#&^EZ)bnoFJ7B% zFaEy8-&goM)`zoQ`0IziQ}Ndge^24hmeM``FE#%5U%sRru6}wv!``iX_l)@0|NECr za|X2Me=p<@6i984;rauNQ^*^tG6f4T)YM}UoJk!PNp4Vzj%{o>0LD&zOPyE-g=3K-{DrIZ>5yFj z51M^qz2u=B_ekGx-w2`0!s2merv`Zz=b+NkOljx)H}JDLb5fkyL$aMYfox~?Ah$Ee zhk`>2@Lhm!FTTC_F2{E{zQ^NxJiaUOUFpoeyIQm`n~(3E+4oU*`retN@~LP;av$NDK+^Tc-hHxRAjKn<|~iHGD-h+r976}FdZ-^7kVg}UmQDJhWcIDYd}Fgghm zc36QZ5Y#@4AtJT8UH*9H7m0jD`>12S60KmqV%G1J9~AlXsx26T+NO?81f%6_qS9<) zb?1DA`BfsnOlyWl3#5~d5CtuiwP6(~6zYJ2BHJI&EJPW1pjR+@o}gDCdgif#G`%dk zWIkXGXgvdIZGp6J0%zQ#N4bw4^j0jYEw*DpF~e1k_m~Aq?1IBXBHAG&QwYh;gyaT5 z@|$2N0t`j%VQ7Wp#e!ii-s3Rbcmx%89-vo;SSd{j#T$}`pHyM_# z=O3OPqMgnob66N)U|CG?a5YE|4(B6wMe@kJ40obMmDlker^*$Fha|bR|K<2pf+m&i zX_A)&$(hNJw48T%nuvD(%kilKELE{#$y8Z@<;|kXLcGVRGT;cXh;{&rvt}m9On$jd zOxQmPh9T*B{;Sh`?RpYb0H})g1jfkZnb&~yUyM>6o_1U!f3on!$r z9-xeG56Vm|#}<96@gAqo@kanfv_nuJkT-=!3EGSxMW*6})^waeBPF8eg}IDEtSAGT zr(Qtw#S$VJNuBl(od&gLinD*f6KaVwCGFEBah4w|+!REsD0E`6)WR!hrjDlyGKqRW zEq&h@-)QMK6Z(!Y-3wuh*v~rMA^fG<7Od0o-O%ZntElx{DDYT_hrVT>uqzaGS))i@ z*2;%a_tAWNcbuh9@OvnW|&uhd?)&ad##9;Ae=O?FMPsfh_}4LP1q!%@6B*+c>_I6*bVP1ye9gg?+gQW zQg5R12xvJ^_p%Hn71^IG?h4O-*ZtdY3CC-#f0lI z7BE~3mL@N-ci~G=9DW`3UY5W^3`2BSuD+DtIIi+83Pp!u6f^WNT;<-q%~-$CE8No? zSp@Fsf$!53l;oLCz$ZcbRlw5P0rusRBLM~-m4LNwiUUT59M|?CoaClPoZP=)y2BHO z*@i2E9uvNr8`||Rx%S?L?AByW@ZE`7(pQlI+qV{)N|JYk{_Np&Wd}W(+(;-88wvJt z*!?hE*Ptvl&DzG>;hoYb+VgM+Le^9@EaYM*=x zowkaa=FA?P>&z+1b!HFCbLI@q!*>zBi}0=BTfuh)zANxO3Ez|OU4`!|XZE}~0MMDE ziAVOmB%(7X!Ur_;d5gp|N8?jbA6P6OOZm|56<^vs@m1Rx%2CvaETA!Ix4{~XvPP?{ zA-=SE;>)Zd3usJY4bC&V6664itqbAgPZ}xH-5%LFzD(OwrtODw9bP_25=h;&iRy8_ zX4n9ly|ud>_+y}12TWH*&6_l{st}E2KVW8`m6Ux0vegW&RWF4YQ3t21<3M&!m+{hJ z3Xe);;H9W&3y+S$29XCdKqs82g&kwVIni=e$xN#}_+Ogf?A;8l^ONxoKcZ;kKFHK!9J$uZ8Nr$Z*{tw1nji*{9O9JqF zA-*9!!Z|+Sjg;KIE@l{Ol?f%8+_%DHQsBwwY(oqhU3Z*b?2z{m=#PuCk)2q-YCL2= zWqAuD~c8_3SpxJ$8o{Y-QRC zAAQMdFnBi2n~COe@|pA7>S&SG5e63N`gJgYt^F_4UskDTuX}d#!k}u=4&W>SociAhLc4#a-#0E(5x*ri z!YYL;#QtV1vF0;{VuAk}%d~f}!3Fn1u(s7%I|q3IbyPM7pdg_BA(ynm%e39g=)n!B zGj#3UGIhfCV18qnI&E{}z5^wC_rYJ_d&L|$%}pX6tWLmhb8xB%;3ApJ5zkm8#ccqwL?D zeLiiIUu*U|2d^kx1^?=+TxGef5I$Q!1M)-LjYa7^MVsrX0_89|xAb+`O10JoP7ZxD zHi@+6wl=&Qt|wn)-46m-{1;-r0DXbFz`Hx~4&<*7@1fL9V0>8;x|>6%W4&63_OYdk z^Sp3==K|ZKCIVR}H&BfY#5IurY4~%jlGZ*oQLWu&VJff`=#@JCKWXm+H{vz&6~YFe zKw~&xzj7C##`P_?pPPi&LnlbQqQuKayqaRLw%7lQdxubUw0HH@5XQI}MtjOrA$aZ$ zkJw=K>G8EQoZuNef+$>Col&w+h!gS=KkMNk1Lk#Ce?SvAC)Rvd8|N zZgMjQkZv(;3w6cEU(T{c_8VcWDcDEEU~E6$VQcs+A-)x>eOoC8l$gw(zeEg~h};5;_?1RLzj27kbt0oe_iDm`G7^l7qA4(`Eu zAqTLMQj*Ei%*Nv4t`kUdDUu z`gYwtC~FTfT!XCc!UMSrhL-D)M}p*91le#Ln&#Bk$Drxh1)xSoLp>XHLX^4HjKap) zi4CtI!K(2nYEU`1RqIEg3N&R%B9J$a`)^1fsJo8FQ?~`Uuc~#8{CUpw##jyRitZ z_=P42B1k5xd8T;5eKo~-N#X_btmWSc@q#V+w?jehqEHGXiLD{@tDR!TaP?mtpJ#c7>rc;dcv3@HXXlDJ0+v0i4@PJmM`+mrOfo$-fcU#f zHw-Br9{qc;Ua1f~*Cti`F;`B|cUgBF>iV>A>Eo-t5Ddu4MnGtvCZ?PEwrM&c2c{D) zjrv0oOef#CabuzSf$$iJ&SaC~NlEoACqXk z{wa7<+YgzA(&r_8Nn3T!{%mQWkj{ZuI z)_{swZcL&XvOs>@67Y-Fdc(TzXtM}yu5F)mO6R1bnI!a;+!0qxw>}nkcPUg)f8iIn zTnZbnst^DFCa7MQZMygo_v@Xg`f?EnJjZt&GBEsiON|S+erb18ja5`1SnN7zW)teH~qSdSevrhQuld|c&4rpb$R2nnHX*B8nE-k*^2OEbsXeB)U 
zGcntvXJ*5V9Yml;MId^YQdT_C8?r0K=S;&&_OBzxt|wf&jvQWdW}w`LC{nrzogla25QooC1D41iV{% zuMnydm*?c?tI5lXc1^Coa4!Pd@!AH@TD7wLt8^vp2W7ygp<)G$q9~Cjuc8ewTtlB> z>j*y*F&<~}ODznl(-0*!3anMq2ya~YeRd7~dQ7h2I!;uu!ZUvU2c-knoKiLH4yst& z1ZWmm?eqo9e($^Qpwk97TkY1+T}ttc%s=uHj@JU3d1_a*vE}(=xEuTAv=}gs?6|LN_1$t%$1gFJC~mDM{xbbB5GdR z=RUX5hwz1f)+VAgpa#jfHm8_JS7z(2P-YcvmK%fYtT})g5xLYB&WXpUoVDGF7!@@m za#uN$o!J@AoXg5#{S3>7p>sJl3p30c&agX$J7kbMDV|#;LN(GDs*$!?P6h>Qne%=M zrslNK47Jc^WhU%{>X`sbT>teGcnz2%;t#9QPl|I8^y$5!t3lpyPs24Ai=vz_v4Lbx zmonhnso8>k5#z8%_w>aaIz6bpEp#0HwUZ@7H>23JqYYP?sD2WFKwJ~vn@jr{BEUlg zMNFm{8?r)&;=IBa(l;JI2$l{obPeNmpgGm%J$J7k?4$MFwjat0MHSUx!_^921RsDK zj>rmGO>s_`7UzV3CKzgPuy{adkZ;ZfL2Vt}ABqOyyg_$jsX@p}dQiO$li+yXx2I@B z!I1DHf(q!`|9qCsdlalB*lHl$4k*P}3gB=bq_<{oID7A4+)8v*;VQ_u=r!4~2VP=T z!{tD|I>XfykZX>$s4<|`Sz`d3H^DSEZ5?(O9rZ{ymPZbxJENZ>QMEsX1pPUv9nwZ{ z)-`@Zh|Qd+8aEC*pm^!?J+G0xHEqAOU9vH1~4-rH|o~!i{F|Tog9K zcY${=Sd#{N5&k-OFDwU2o}wijC~loMw?cG{w?bIk4#D@5#F24+wBi7w&wIRZ#4l~^ zh2^!U6KtQq9XfF-nRHK3dpZy4g~mkn%|iNz4-u&%;(Avz7$ z$|r?$)+@AuQi!JUr4UlrOPLSpS{_tySfr@o#Y~w5zbWp-%kj@qRR4VE$W_jqATE3q zw^7)SMu+5?EW>ppv;(9rjHN5;oeRO+V}Fu)klHZP4A*5WpxI}c$u5M!B4!eYU>nT> zr^o_Vcc|A>CQmVwkKPL-EENHyven%X?Zq=ac{7ity-ZW5URXYjh`%TGyhQ_F`_X9& zpN6g%;DylkFkJPh=6rpB41oXTF)Ei{=R++JT^XYF5KG@hil;}eoRkL^i#V1C@INQR<3TxHet%HXPgqff}#)T=ea#E6s*kOGmx44CJF<#N$5MIat94|=C zgPyGI(J#g%)LPL=xwcvFZ^D3OqOehWU(7^AKXd?rm30rg4o0EUXiczrplJTV+%lR0 z`{#4tSPP^M@R++t#lpi4Pp=W`i}P`hk_$J$ljs`pcVShywRN!jMKaeKwNS3-i8V*q z&eD1~m>@Rj>>Y6x4CAE9z8~33GhG$+)V-iIOgf8O0!J>er5|Uz0sd{Ifdgp`G!C)I8(krOmQZ4+fit67CeQ&#> zj)CNmVq_^2TZI@YTZq@iAko@AaVc`*(?W`Dq!bym5qY_0o|y0DhRenRn!VUePDqj6 zoDL|k?I}?pyFn>|5dv_j_B5!96M$rxGk zBHcm7uL<`Zc{aY+J$4jA+eu7)%~ys|r;mW^zm8(X;&&d`z|vO0rEMcvL@&&=xV`)P z7PmJFZr?5Rv7q)QS0lmL6nAsRGzPU=FOqy(Cip7Pr+MXCr$P>vYTKo64^9-5Q+>td zndV}Lb+x^SRC`9hg+CbJ(jkTpus6);UBJ%4P0)E_0({rr>{A=I+EYcPExP z@U+FBT`hC@=+9i{s-F-HDwer13&A0`eQRU z1Z^&L{(bi~8=^Win_hUQ7o4^6@x>u9)ARK|PW|Uoi`VK{wzvnLz>h z)2J^Pbgo$p47x}ZOE75Ck~SU1kF=!qTbs0`6$7C7l2*t@#gcZn*%KJ_G&DyB^`wMP z#Hf;e$e{CI77R)@6AU_foW-7Q{md`Op0ATh#j3|~%;o+jw)vzV+kX2dlOJn@x#etK98Ld|7pi8kb1={yHgO)t?2yFfbEXRjozj9YJ_ml$gu zbaptg{th7{-QL;%oP|VZ?T3I%(SC@f7Vf16Q54^glW>-OV>;9>WnbpzMqRpes9le` z`XLZ3DK8}QHZ=;{u1iF6 zXsF>Dj<-a($OZUPw75AgoIUX4U{@cS)o$oihHI-3JE4Nu4vSRu^q>THAEXAg6uWg8 zT7HY6ZcTiPah{*}7K1#e!*^BUTa=%a_!i|WI(#eqHve&|C;1Bsg4~8WP`I~&6J8j? 
z0kxn^+pS-IMaaL(Y9+2ex{CMpY~wKR9PWaO`bL zbJjYLjYFKA1n$7{wp1FgCjkmE4dBI+&F#0zc{voN^$BQ+P7c_clRfU(tu1LtWwQY- zL&2T^99tSTNYF!or<+Q$`3|kGB$t+1$w~M-7k`8BcP0LQua-=msFqBh2ur@g)>7gT zt>u&;wgoXQIVFf`K`cv7$s`2$D%z)HLx*szmpmY5^Khkh7gNBpzmq9vQmSF^a1cHd z9O1ziXLuE)TV!Q{jPii)KosR@uig&o>%D-U7;`iARi@&f+*%UO$mmVwbcGFAhgfEe z64Y8a`54O%al2R1`mui;^~6Osqu?AFmaP{+)izu&|C#Vuj>!k{MbFBILG=P9(wwOb z_$l1PBVb;-CgDah9ZGO)u5`m{kowHk)MYBUQ80UFotQZGlC|clQCkhtakmn>Mg=2j z0A=lVrr{lKo?6{}M-Q*<=6mhlFh(&-hn=sUXN$o)=Ye$^@-U+M9owKXF}1B0rkbg8 zSb-r5Y--xs_?r)`p*SzX-(pLAhO_s6=o0=(NaW>?71d}N1;?W~ zXHKn7&dI6WFCV<}5?{IUktZKT@Cu2S-=^RADW(p$wgYA^ymu(D-N>>v;5?5_KNSV)l-YL*I)vZ~ zE-Tm9zd)_w&cqyBZ_%Rs?WZ$lV z{0j?QN+3Umeclnsn&&%F@{dRYC3y)ADd{y_`4*5T9TrG$5|HoR-x0{8<3Lu?N6>k= z_xuMydI{u(T>!c5xlTZ?MG}DQM4Q5fa{e~-Bynx9v@IvM<5YvEaCVxjMsI3>L{ofTk zE6bvSFr|rkaNuV^5hN7j00ow$_I&=;vz-us7fA^8$m>w@jm{ug z_`vyTXgzg`CeysxhU*h1Xx-G3UPujc|D3xJ!UewqS=ff|wkRcZD%>dj7Nz5uxaRm_ z2jo2+_1;YA-6#{wEZiDPk4)%w&{XFWtYwD_m{|kOckUS{iEZ7qTA!7{~mkBvM=0vEOWv;m$Cp3PI9Ovyg1Qb zxHX{WYN-&(gEaen_=Th5LFvxg$1w!(--^#nXYHT)=?L+2NS3oUg3mG)QJIjyK0II| z4F^E6X#+>L-1AlVRL!0u8w!us><16B&CKu>n*9gyniakbm1E}&Z2ZHmq3}hTeXdAO z51*siYt6zQ&3>0z7{}hxhl(XBGd^x#L5j!h%Tm2Kt(r^)$-Df&GW+(-YJeF+rS5Id zg4^3OIQSB4x8uugg0kV>2yft_Tf}?x}!r&NO-VwsZ^kiyTS`NNpzXo#V!-?rHw-_SBReG>%yBG)_ENsKeFYlQCU=vVkZV7~X(Q#@ zWlvjjEqs(PBHA8C5r{QPH;A>V`=nTl+RMcoH?kdLK9OXEEHy<_8sLW8->$`rpJ)~r zI|}G>3%`YMyb*7P>m6_|W4sgc>n)_zLH2>}Fi_C6*r&zjJxV`mmSzS-3$CEHkg;UU zY}SUlkGdk9?7%`I245P6wSc0K<7EAl0LW-xnTg|4o0ml~4Y+?wdiRGsgIu#`qXxtK<#OMve!4mT!n3 zXj58NfS=&l*wYLh0xGT!G~)9`F;+~BTs#uLM}wBOeT0@If8hQPF|P19FHrF%ofsXCZ-(tgN&QuzDW5Z%O0-^`&p24vi_arsv~=iwMJk zEw6ZoL}C{0K(>?xiL+G={^lwTL;;=2nm}s^-d;fz&g9mq;yTPqSAN6pODQpA~ao0aH>h z!^qSDZ-#D*e-rDhH~B5)xwyFmevOGWRIU*8BxN-iSlXzk!sVP?-cj~3yvwey@9k8) ziH@j!f>fe{kQWT?z8PZcRT~3 zK@cU~l>r-_wJz+rIEL{@9~56qC9t|1cTUuq4cbr)*0tker?9T)3a}?4 z5R@5;`i93Xo(nvAcoHq50W*BAxPXKOnIP$2@Fxd|IagHGADNM2o7ht0xdPb`_cflY z#S2CObw=a88E$DNv5n#K`+9!Ifp5d59C4U;)=EpoIp+$f=+$p{Xu8{+HFw~_LobGF zYBh;B*Ru#I@I21RNn>?K{x=w~`!k@gG2ZeDtAwP1sIpN z2jfP9aSnSKA;5TXRx%hna5_9r*7wYh?dV!>!H3oo8;;*46*V-+`OBO(^?WD(uc+te z;eq?IFag>wLgR6~>XI9bBsVyn4a#3yBZPIV1-az>n~c0&h;O+IJq^3iIJAgyxCUvq z(B)(@!7>)>EgLVcJCuy5MBebsLBR~gIoM1=d4a>p2>6B7V^#YRyK%8UqHnSYmtYz{ zKH;DYMoq{}el}b!LQdU2tvxmQdrH!%Z|MVcU{F`N%?2SN%o#&?MgL8uWGtv02kS|? 
z_wav^ZWkVg>)v}H2e(!sKAjA9`fTVE*ql)DTnmIY0I2l`o6E>YfT{oClc2SuUItrzw!LvVNX+Wnl1)# zlrD{YMiCXGAH-Hpon-qLW3;Ss`bWP3JzHHiyoW2j^+If2LBdOY1S>v+mh`bK79Hx= zM~L!ku#UjG=u z8NcL)D1P@lPU9^+yQqmM#8Ch0+cRynIV?*MBlA-&N}0XU(5T0dC05!Hc#JPVONA z{G?{o{&_dd>HJTj6_*9n^nRw+hdVf=hIa)tys~KeKEri0*xqOkM4B_9R7dU4q27R> zrXp~ts#lq|u2g$FpuLCFLa<*rPxiyHehPvHE&B=)tk^e14#H+LmB_a#;axGiFb4wK zTK&`cJs?;%=pW!IRBEc(lNr$~e^%KY-=Mb!up|Azf9}FvKAI|qQBy+rwr59Goa6}Ef1t^5Dq!RrQe>QC`h>$-~rGT zNEM63@gSj#WNSrAsd_Rfmo6w5BXwwZBuE5bwo-hqv$PGTJTe0V-k-Xwc9*lpfl}Hh zOF3Z5zR$%}in)nSvH*r(K~P z!im+Y-zl1FWpj!Mc^(IuC?+daB{5mK`>K5!(0-}&BPM!Iiy&k;XhDyH{j>Dt zMpMi#zLZD{X!4(_D!Q#2&fk464``{_>CGe0dBx^cnT8*oTVA*+psfb=%MI6ciM|MP zPt!ve$kj?8wc*5w(ZpdX1E+t+ZqN=kwoVLaKg)PwK*XVi10_ z-w%@V08@VzZb!t{)u`D<|LF9#Pt0EGjN(BjtE&-SqO>4@?QPs zc$?$RHY@OiI<+{cU608n1#g1C3ZS1u%fcO@I29QklS!xkfX&Xx42=K8I&HPFB2Zl5 zEPW>sY0L}`cpLnLJ6p#hQ(J8^%MO#Dw!^_gewvB?gJe_tb3gyrxx zHQQI?|By8HN7i5lNA2lf16S4lG*ZQqj|KTryz`$W%0(}xz7e$-vVvIulUyeM{xiov z*<}(hpVpx~r<{I&r}C6HIR7E<+co_kFUDoec(EOhu;lZea@Q&OZ(HkUDSjcrxujQ0Y5s~Z#%p;ht|pD~;BbFAc-yAFiPO6E zIVp6@&BB6Fx0LU(n;<97qnx0#!yT08NY3Lp-+{!sSiqTW&-)VgB@{~B`QSKHykn+m z_TS+ZvI##HV?P706Y)M;#NISObF_&KUc+^Q=t`=<6gsdLc0@T?3#1AfNw{W`xJj;0 zN#o%~f2&@Ij|V329$dl7W8OFlSV!u@E#HrW2=e*Bxk4ukAw~dd187!cxIUX9DN3d5 z>f88!Im!tS$R&tBsp4`Wge_JyxENDJ#ExtD7dR12hy9bN&fOP-d#2E04o$~*Q`@`f zJCXPm5;?sJunT(}YJ%htPl|?X6egfP&CVxKVXxnYJp-PEO$N}H1Yj5qB>)U#w+4-3 z|HXn^O^Nj}Tn~%S%>rI*Mbr5Bnd}t@@?UHQQWW8us2>-g9||5!PI((C6uP9J+7J3D z$`^64&YLR-tD1O5R(dTznpoeH^i#}#tjo`fOAB~>hid^tNcQO8|Euc@3qE=V5)9l| zgqA^ial`L=;q~iuX#yLQ9x?qqSn6aH^j9Qeu|R3>=P*}pK7nf;eP7o>G~zyV`+=LC z2^n$(eFAjZgL~@y+Lv+V;dO8={YhFk%vrDxbcZKJlJ3*~BPJAu`zO;*i2FOi5-q`E zx3lyB4uL>eQoiGq(g(l|hZ3Z|u%rDte?Vg=t@=avQ_Szj^bQ~6dqZxQwdygE_=IP(NZY{U}t!9eOfC-y-!V_=QA3qqbSf(KK0`SlGY6oI5?Fj1P+Q;7_P6t z-KE-gQ@H2&h}4y6B5NL`yN_#x6!4$Ma5lVi^=D*tYT&~GmcRCks>yNMfGzJuK7|!l zJ(^gMS!F>?ARvUhtd|3_4O731T~r3BQ40^;?knl4;QX9y8*2}|?n1_)4R3Bxon$UM z+opaU?+bd}@$3u2Qjx+^$kYs^aiQdL`%h4u=XE0k;Qg704_OdBL(+bV2}7sLLn0lH ze+B8{0_aKcJ|HTVh>!f zB+Pl7Ll7?;<>bs(pa8hWv0aclFyK??-Pdvjw-_uJ@k7Y0^YNNNMBGyS!F&qgYEVmS zk6*PV8z&mXQKq99->nTwhuAt(zwaq=l!^UV zXAthN&LAX?F};WOx;n-r&LAYhyZ>{m72b`r5sa!BZysYx#CKcAn0_U`dnh`j(1GQ- zFa%PX$CyNX_ru&FLY{TtXch}-_N8WW=lJfivcS9!^#;r2MP_nuZ;cV|5ajJa5e9kB z?56L)DWVoWopBn@h)zcUK&w6$P~i#g4fg^N!tWzE&~TLy1n3f{DUt2l6;%OR z{2>+muPqO?blw7y3{TXqZpGNQ_&^6D2z@YWzXX>(DD;$exV;4Mqdd3Bkckz#?a>7t zP%!N$7+Ui8Z%`J_=8nIA^9aC?+BfqQBB+^cfOV+fx6As^$ojbgkoNUKKSi?dNtD@9 zSC286<$|A=KMGiR|Gzj-m#4f5OO*d+LGKi2ocF-IcHX;)WQZKfxa~AWy(U{p!*yx+ z85W-yfBglxxcECQ>4l(Zo^R~fF^t3pd0R7}yG;pHZ%u=H`SKp75=x89Z+dqjVM zQ%=u7mN}-9e%hAWRaKdSg%Be)`w+I3pi{|^D!SMKv;Adm=0NI6wbP`a7_(-7R;w)B5;KDP5=ck*Js zJS>94ktO!UlVAmZpF$*dXcb2Kt&fAV{ z8R(feVMfRgHh^O9F(o>$0NHvqMhEZsNoJewK}ZJPH}gGLzAxo_mVAf58~0Pm_bR^s zco5|yZ)M{>mGOo1GMRdCHYegdof|ZpWQjVw%Mx>#x>=T(#P`K00W2QN#0QS!vpKxk zLT4xgL|W)XMY{@G?xql!`*{4=1v2&6DY);zEE)a)ow2t5^s{0EdERzz|5t&A$QX}z zGE7mzY2G#vT_%_fZ=ik!%C&jSjJ5%&oKzx-A0zct9pQ#NAOb2mul4Q{ojH8paeyS5 zRgg-{d{~~otD3~%C0>l?Ah)*t8%2^D$7_;&Qs<);G0ui-%mkq|)G#au8(Q#wAv{E% zhf%{$bKJ9#)=U_A9EWZDdzJuw)| zkAPQ2!!70X z<}OinDA&aVh&PEuSZYYag`}%MWeN-ISPf&25FGRem@y$oFD5Nb&k657%>jy19tin? 
znH6=M)qw5nNp~WMgVJ%c{^smd94!jLkFiF72k(6TQ>2A=>IK*;LS_#sb6v%lQIu;- zq+A1GPlKXFpUaEr$s9&&{Or?OaTf}1=!8|rOU6*~8l-SVy`er23Hrgi$PSJ}6C5_R zqR&LNqVJrF{tChYRTz6Q@cVWD;HLUK@}52iJsH6nfGt@H`n|-5 zE(-nIOhI`k8|#2ithpmT^+-t?pKI?FV;%4*L|XgtX?wdJKKIQO_^`cS!Dlfms`h`1`W^8p z1cYGbxw7$&_`HB*-~;&~@v(QpXW4j(504h=mqY1AW4Ey}6CbYCsBWJ&A8BSJXW?FN z(fQ}T>a;m)8P0=T2Wwz8_Hw`~<%Ig@41wUY=s>N+gsSZ8ol_}iR3&CqWi~!Br7A&e z$2vG2fMPQrkSy(|RJok!{}h>xCW?E)km7yANW*<-oLj@8Ou+$&lN>&*C=%UcMa{E@ zp%gq3kn?UH%Q~6Ty4-MOjv?vR@rc2QQBrd38qFl+(#BU1MAi#co9%vVBc{~uy&Vn1 z--%gTJC0Oa-$2bn&xKq94ceLjJo@koAxNia|Jy(T*bcl`t||68%DgSgKt?j#7SB$s zIZ|GMafTAbG_Lp zaF$h|?GWTN&p!auUMu|d_C;L?Th26E+ACV(vHmdqSonR8o~^MJ^~?S-j5co-z|g}SYcQZ zyTTB|(Awa9XayjFUuQGM5I>10HXfA7GS2l(s+Jn#4fTJq)u=`RSbe+O7P9NrxN=#E zF5}95xF&0pn~Z9WD5?iUHEki^`P-;ybPnG|s7-X}B%NP43k2DbqX{2xd{dQBYZ&oQish%%9;?U*#1%<=Ae{`~yw+;vI#w{q9y zvF7}d)6^lfPHK^#kF-RAfJ=;LVt?s~_T|Riv@fM^i%t}Xa%pDLr9v~4=A>z1@}O!S z%}tO0vCa`LobL;=XJBwI|JCw8l&DcfM&oV*hR?7A*5X z6TYV7i+sQ}8#J25&m?^W<7N57oYT*w_%T)-aj;<6wd#L@yDEOv9u6`19dX5-SkOk{ znuS&IlMMnYMm8?N>X|@Yymj)2p>rA#_#MSnA&%!US9dj@rv2^ZIRf5MQivZIL~y(p z6CvuDbS-}fumKgRZ4A)18LojNc&F^ha6eHjQxx-wVh_k-3V!ggHNV^b+&u|Ziy2bq5qLXsi&qmGmDhPXbqD%JqhjrA0s z985xpc35GEb%AcuQ{l;Q+?j+^-(q9PTCgx)cR}-D-y--*hNgMtPj{I7oWNh4n=UBa znvDN70)L<^@&5t%5AOnhQS1R(4EXOR{@C17k%Fr@cpn2rhcL~HnBhU zfU41p)=vw9Y48uoe^5a(VCBQ)0M^u};7bA42wy;eE3kIr*FWG!1Yo@W5y34M)>0v> z1&5%=E)X~QrW5{MjX$8k_ygs{_+Qlp{-W3evRLx?BjpI=Uu=$lW%BsH4_adUlep`6 zLEZD&`fug~Snu2*%uY<)F!jzZ8R2mB=z z9DyJD$m^w9+3k&jei&TP67-Vf1nu0ci>Zxvv`@5chgIKE}&o$_5svAi;i$JUE2@f)s1YH$mKAk39+;KB@6kLk!{^j5vr=hj-*vk_D?2 zb%BUcfgC$Z$i=pQAlvjj-8%$OMD5vEI(W2Td^vIm{z5*n^)McJPvISTAvgk&SIYa| zt{IA2T3I%JiX18!N>_#3Ta(J9NMfSoJDreJ0MZ4w390&%Vghrt@rxpf!}GKM`v zp0~SS7e9u~=;!Y-1d)R&;htp>;=C#Z@qa*Eyef=b@CLhF~Hgez;XYCVAGQKvWDI!&F5UJ7~J{|XOXVQcDSFA-iQWN)pwUyJhZf!V(bUlb8 zT|upip#e#!V7n&;m&C#k->T=!v>g84I)aNBhXs8~3>DqNa09^T=k+Jhr?5l|deMLB zg*89!UXyj9yE$L?P_IJ$vf-}hdI`^Uh|nYS_mnb#dZX@;lzin1G{biJ@qpPBTzKGC7atT=x!CO)l5i5|% zABo>*23Ks+yL}IJpe2nh!KqmRr;=0}p16sv>?e=9;s^RcME>-cpr3ge0Vc?(VH7mb z6&&@COH0qRhcglXf>jmgUF`Q-Wytl;T44~;!5qI8=I>yY22L^k`oAJTcBEx>y5sRy z()@J3G%PFBBc2~_iw?q%e#zBQbDU~|1mn}((OHWOaqEnfmf`uz8%?RTouwo*KO!WhI7pD(ZlE*P-73Lx`@D5Iwk15-DL0UxW;Z4~$tdfvr>A zq4xI`WQA~Ffdukb?<>Ih`j0?V9+JO7v%f1|GsD;7-ekVweEqNv=j#_^L5*EH@SRDY ze83a-Z8(d7AD_=|@Y{FfHxV<;ki>x4wdrZNDk$#FLqa%opMu}Bqlv(&c8l4x7zHUIOvNo( zf0O&AQM++@m+kKl9JYPOueC4f)A4+`uwdS~fs?TVnYyq?lkB)v`p-`%_Ckq57v+Od z`(3XgoFV5S*r>>_fFvAocU%%GIOgp|$M&2@$+C_7xgsoV(saceLc?ZZIo2ri;pRh# zFy+sW*8$kb$_gx9I9t&0A~sHa;taOfUrx{z{pDfDkwG~S1OVlcAb+Wafx`{B?hxlI z0hOGn5rmevq^2TKc#CA5IqNgvFkXLDqWv@sL?b7& zkq;hB00ABa?$Nu~-G})8QZ+5IzvpdNLu!8Jg$$jq&Aq$oE$~v>YGcR5mb5g$3QntR zLXu!_s5Ymgam0fU0+Z0vpk;-UnG!3sj*lMr9Vz5lUNOYkory+o!JpTB@_mB&$M>%O zto#JE#rU_UcmB{gy@NzG_CNwv-s=(I`kNr{&qG)|B{jXep^U}fU~zr|Jru|P-~;Fl zWyb7O&=&&Do>m>_s60ARIWgf0a6_q*cd^ynhxTqzwDeOd@X{bcXSGZ%{itI5!~b&H z?kWc9?#GM%5av*1;N5sN!iY!5u4dHL7g%7OxePRgKLJ^Ancu|1KZ{9YAHyV2`gGq? ziV(58AarP)t13e!G9TG?;5fmyhz_E9*%(hhC7zy4OuP&Hyuk0|S#kWZ`7H6HtHWI? 
z6oDgPSC9C1f!$)f=t}@_WZ+YHO<;E^h>rV({Q|rDkOk~C7Vampo5duFoe*T)JE@1F zg-Tygrvr9EDHu0aVCPN3PNWZxrzh~a>aI9GP?)wL8c1_EpJUgh5+_{R6!e@&=E$H^ zEswv}!o?NxUy``NA}ISljz8EJ=T5DmB-&?+ z5q8#S=tppBoNwVH5iI)!NF@e!D_-=^CJ?a=cqL*{*R2W{j-gPEo-ZM(>~E5?QOCbn zIDo=H>{%uOu`_(~xR@AJ>`VbMClITPkE%Ml6J}ym{}@kC;@Nu2PfXEXSOb|XFGF&({SEv-MfCU_-beG1zJfK9_Q)Pw*hB36C+hPfqW+hKT8NL8Sk&^E4N zS~%a1_!#j!%O?9@f33wN?dj9r9`K^qYww5?3T{fDw)_9%>`efps;)o&$uba@@r7lS zDwI)!MuR0aE?|t#zzodjMB;*C-KZ$VYJZKA5iE;|lPJ^2SZcLuYn5N?PieJowJbp; z0fYc5n+r;c zlEN=Xcze{YDtJ_lL%;iRGkO4rNNyJYO;On#ykg7>H_Pyw<{ozzeT2WdpFC;bWIg0t zX97vP5pt=j(MG%$#O(}i_|#VJL=}_ct*|+;k*cocS_fV(stDczZXDd z>Z&A}i(2zqqGR$C<>j!9_h-I%=!`!8+TAiW^?p!}-UCIugzBUIv*u+nPE1Wqy1Vj5`IsN*`z} z+t26PP5~n>V&MBHr!$;w9erN_raW>^M`x*mIX@?v$xF<}-{4);18t=>qWuQ1To8Bx zvqs9DKXZQ1qCbr>C_kZq-&0P&VHn1XCyXoNapno*`geTC_Wtvi7K?qYyI*ZnerJYG z{u#2M=Iz4^-1S~7(b+6UtLCcJ#wUvNfbAxeJMCI#L@47(_B%)WJj;+liW>i3J@lKW zH+Aa-Ft`KgW7mQeJ-0kRpe58ND2!y?)6$KtUwUb|DQ^FHI&WNBRjUWLEl<~hVv#UuI=vGP)_I$| zM0&)vkjq(5T%=-0IQ2UnDsXT7olpg-+r!cgU5CzlCSxwC4(+}DIq|CeVbJJ>(GX@7 zl38@BfEr*6HWO1{u&QZQqJP7_UU_--R@1elq`|dFZR+xXX&G=wUDsd;Vj>tuZ*k=P z70FH1c>=dm2xc#m+txWHQ{;w;u)F~Sk0TcU)?#tJE|qpZF5Zv>Jji z%B6wUSQINF^X?{!ueO8)o@G;rH;TvZVH9;j$0E(1PR1# zZ;VHEL5u4y#wmEBAELyHt=tO)a$U#@cLJO88RfZ?D_T-bn;8rZY@s{YukvteRXX1m zGb+4EL$&Q*5lvT7yKSr^9PEyOy8pvAlb>zoOey29vY@%t9U+E zgXov^J(^EY&=Bq;hStkwcU6z8l{}tKYd+n?Erg&<>v?4SQ@YbPn0S+(BGpietmV8v zmt@w}G(*Mld6D~hO2+@)uFvB%7)s*g?=ZJLo*a6PF3^W;bicP%W_mOb2{|$te8Sqi z81YE+gQRm_;^2qu^IaOP7(e5NFm$UlUN)W~BGj509%|S{hL$;J*k(ZSFY58kEMo3% zUn7uQ+N9?3CV>jz%mv`T(-7795MBN5LzYelJKqir<21H_@T2_d)9WOX=a5+;dth|S zN4l?gQ)n63@>ZNdGKglf$~IZU>&f6bCtYZ4pUq|LeRK&gHzmt~?%PQ7*6QTDucu^R3*6UoUR&D>lkbx8&Y-?UIw?X{3U=h* zgw7-sv$t8p!veJpQC;Xzv4Qk>HA-vJq7|*tidEKrt=BfP4(19c?*>4`T45b zEpnmxy1@N96o|8%7P>D1^U~qgT}`dBxfcm;C!qYNKB$wOWKodsze+x*=H z=^X-TuItP?6U-Q=q+(J6uyznxF*!j^Ya8O8!INCTUv4Fb*(rxuj8QX@iM1}|D+!Ij~Gh< zCmgtBDd6oM{bR|F?i$BN-7$}!qq-^I*tW96Df|A+TKvn_;(g56lHKDDuC}K4aENBM zGHqGrk{h#Qt$2}x#HNQCCjhY|(EcJJXZ#7mL7nzr6;EwYduB&p-khdO4&G37*I%_`Z2t#2bECT0*Tmi^KicD@VHJZ^)<7o7$cf0ZmwFTCD{>^rrL zGX8f*X`T~4+xz70-IN6PnSp~vd|nq7@%cbW5THh3z{8`dU2$@Z zXKkp^Ge-)2XV9K*hEVh+BAf(GapsRc$aqxrr09E*_T{=F1i2|!o?d6v6_fv8Ym7vb{+ZX2KA+V_f8au2=XM-E}&`w!Mc7p<&7 zj^8#Qhkqij^N_GNG{m_J!1*Reo#ltS^eeKS-=;tx8U>4t|F)4(XWLo1a#6FG+0wz1 z1P!{LPz+9~ak(QZ+g@k%6P!ypN?P#%+ho%YG(a{=17u^vdNe@lq2y|uq93_eQ-)Cz z;Kmg!(_AA;;F*il4^`mJ6e!imf0ogf)y9a@Wf!k6&9RJE!89_hF8vQfW8@#sFsM&y zu8+>mxcXb1wHbK}7ZLb@_@1aAZ-H2zN>1@bgGzlNC}oHud-hQH(vBZM&z=7Zh(!2E z9@rAaD_7N>G7ZwDsQwmi+r{im?ow7I{)zh;&JcY1J~)QqhgN}o%TW8$z}nD|ybeed z2MCl=M}V5KDcgs`N?6vMgbNLiB9!m~_#4CjBl9cm#b8bpBhJLBaQUjswR9?9)wH@7 z6&OPfmwQ=58dp?!1_`=B#LV|W{K6Cl34URb(7x?G@!BjaV|EyL0uS*qR3=*;UG+c6lLKo0+{Cbe)8tymbZpWCt2DIzRp9UcRfar@#u8cJ9spH_P zrLLEGzg!!}nvdO?fMIaQes3$*w;B2QWWrSPl^y6f1MkJOBW!|^8wwvMALI~uT>-|b`F){C!Udrv6&VSo4JiL8UHrUp&; z68~kC+_*I{cTa0x-7Hl%!&vts6S76G?Pb37fgmNa@MzsI=R3+jVBV+C!Q zccH+Y_dNxk_EXg4_2c=xE<&+15{g*n?=G>ae_p}$QX=xac^LrN{3$vOs00s?s z>*owd^6Q@^#;W!z)4S7|d#gz_p}Ltki3XlPw}Gm8_k#;umQOo@z#XER!jpHrq8d?MYVCPNMdB2a=??uJ4`+ zMTJ)5%S1W%Bl2kffm^E8D0JnPY&AVWSfxM0lhT*b12xNPl%F6|`mtnNwt}~5mD}rv zQP1^tM=X0j``(N9pCoQrj&L{A;We}aa$v$(h*6{MC?@NMGHB ztbqQ6#?{66J*jS7ebvAe(!si6>2aH`(uhNETt&U|a+3b9D0>i(9uZxQk71ojoy+%*)qCuZ6Io`+3F10Pt zgEPm;*M@17`8GKZ?#*XP&!$y@x+6_T?;@RnzHNnW#s(q)uKtOWgY_Q{3fAv0wwgrH z8|Z&0+1cB=qls5U6QSW+0VrrJc#n4}XykGvrsdX(cjjfJJR+C!@mxxv<2UoOKsyGY z{VU_IoeB)!uwBKEY1qsoG0Zgb6H}o(bk`TT?R=BCz1|xsq}+pb(TOBXj8Kn%zRUEO zb1UDIz^==fPs#XCWY=r|gG#c!ujnwE>GCC)3BP-+U+75vVXjdL_(Q$cBoa~YR~ 
z8Gm13g`_tKam_;NQO^cb3rD%oY)B$RObJUj6D(yujW=ybMW*cyrj4xrj!_MBO2YY! ztU=KHE`M_czS`(L`H8nSGP^X@0=I2I+y$8GO#p}kobz4nYNDysX=;-b3m zH>}Oz-6Ofb*UTZyV|38^!)G%N3$3IEuU zd^d|+h@0#p&Wg+)@9&y~j+*YLYcN848}|1FuoAti?Jr}k|17Rp?_B$jeD-M-%I) zYZN>uno-F3M+I0i|3dP`J;%cd?jmU9{ko|%QZQ|n>0HDfKPOy3pTcwkSCQk(t)&aK zh@lN2Bikq=0*HANL%*&bKbbst&bhu6gvlb|8)oYDj3tEvI-O?v) z?;>Yy$6=Amcjxqpq<@%ktA{l0#fQ3%e$dZ|6x!4bcgU=!Ra!55UM#X2we({60CXW@ zS!(|L`Fq>L=~D)TTKD$~*=xBNC0wwZw1ssEww)PwKz6WkKxD0*R?sqyZ_`4pALfUw zm9Mkwn~kJk^K$J=m0RW&+FNoP(Pk^Y*;=yd%xYW}hvu7frkodq8zS~Axm`$ZW6yya zKW7*td$hYw?!1L*Nc-;41NVOTKPc5y80f1pELCI4!xUrTdH&h#b4~UsMpp43@Z=hZ zMLaIzkICIV%S>)Oy}-mG>_C)yA2nwDe##Hq4nv47)`AjAUR()hVJY|1n*^2}9t^|kbo`)`l`mO`IAo1QAmo8o9uT&cBbR{ke>THrjk|9%gk4W z&JWpdH@r{XnUy9(sQaCj?CHRDmpf--TG7va=s47%fc`+20(ccM&1XS$Ilnp#QsW&^ z$BFL>b!@h`B{$>&?Ri#sTNv&jxxSFM;;X$!5$+$t9% z)8HaDhAZFG^QCE8zELnyZM5~HfVFaiy~^A%G%aFxR-K&D#f+;C#GS40l!w0q3=-5 z(%{eP)+d@JBBj=8#^*B>C8toPH)d9}ec$y*jj8raWFO=D>ob$)cJ>iEw02x?4xl6| znBFx~hvgcSHfxx{m0fwgsB^BhVd?&xZTpY>p%-;35e@r$n->0#>2^XKGKgNVed`*u zdIz`%)?>*n8w*!eQf@np8d;Z883~O(+3%Spqfd4}0cT;Q^vTi$U5&z#ft*I+500X| zly|l!HSN1WiT9o%n#?<0QNUW`CYl3TkNpn>UwbQW4f{PbH1T6mFIMA|#8}1S2Ad(P z#`gxBnzS}nv4-ZH*rmkLnhxcKD|gNvqJ_Duw$#t=?MW2E z`(MRSX9qV15Ohq;Zg`De2%LufhfzB$;5yu_pRP6}Mgid8$-?wze>XSrz%cMgKOxc? zUS}mQNnZfsSgO+C7SS~#LT|31!wqXh&2m#s#A+{kwG2!Hp1Ob-?Q&o1Km`y+pa zj>q$CHr$`^o7wPS!r$fZtR8>gBP_ZSdD>)jVT$<+>Ide10H=sLh2^dm)B%ung}GR1 zl{<>oyw-FB_!4~IXXeOsgQPxxG~%A&&7DMR)VE~(j~vdNiY*dI2b^_BWmVWwukRtB z^S%RtXWY2@-&Uw{mDTuLjfg#@el{{UJX9H(o7LDvY=?uu&b=rJDcu@nNc&#Y;kVG2 zv+WF4B}b&UobHl2_?CJ&22Yjd-qc8KoVDyA~Cw)iJ{ybf3O5AAR1#thY)^zD3Jq&TO9hxvY#`_s8QzC|c z7Ze@AwS8}@qA{naF~HnttJ&$B^1du3NYk+CoVSyY!9Uy#1U)a+Y3eV5Sm%Y0w4{A| z#h%%D_GQR3jQ%qjvggL=&h-jvH$K)hiXjrDenb2i8BUS48q5_ddx{@jdAiY;LI3Ka zX!`MzDU7A0kJL{mAFrWmaVkvYKnMR0O7uKS#g#L!-%}CkNIjJDtRgjtc&xby zA-Helfx!D389RQ$w!{#Yfn-sC*|{G`+#jt{7Au*g+Y$$`>0aZlH#N?q#?0#E*ZJ0M z*9)1VzQ%1}ElPgLdKEcY3n56g!O_%Zly#z_*QZ?LzQ#+Y^A^63w~T)`l}D4EnZzmR z!vil>D^7zCIv%D~G&bFM6pIX_;LYStCER=}`G8mFl`EBhF|DzE0|5zpdu1Ddm-hV! 
zkaWtc@;#|xUlGiBJNad}M-Th!Peg}*5r;5nqrGbEEGbl@L*h(CS6ckYEPH$V8mbsQ zECATzX=EC%TAd&A!=V)LYwG(wB_bti`mstu-foCke<6>oH2qZfet@FUmk#L`r+@lw z;`AH4#Oe35{8Y^rJ$E~QdPdbEPDf=IM~f^Ww%$;{h|?c|_5%grNW}~l!}Gw<9Jwk~4?F`&mjS}(LtJ|fjfj|9hWF>E`F#`vHY{gc_tyrzK z8YOBm1St64+4!{Cb>m~Eu(63jJ#b?^VvO%n@PXM_YX~@}pQ4R5Ot4!~d)E-D%H;t% zH2Fk6vA>on@q^lDZ0~SIAvmdW^%Vu0+>69MOxy{GQlh#GJ~2o-HZ^^itGzJKv?P$| zZ<_Dy<J7ofD0hoEPc&OWia5%#kdFN}B1EuHf?l z8G0#$`De~Y4zxGUc>=}USe`&R7juq#%$#81|C4RPwF~FuODHO8zpR+W)T8$vCtHfv z`ZYL(U9Q_PsPw@>BsIdL$U5M+qIAAqX_ITF^e}d%g>n0D7qTbUgzPtC>2c*!JmIFB zaZa^;ZfSMu+?^C$UY)vdvs1y?a(b9tQ`1xZ)!^UaOO^$T8HXwIz*n}D2|w-JEY@u- zDd!6Y)|?v3f(*C69F}*S3DLx2+4~)aQ0J{Eq_7-X#==A%t%)sno+WH@hf>(k%ynLW!Cn3(Psv%N}gpNyUI>ytFQQiI6;lgZ<^LdEQVDr78 zq()=&0k2}%^&*%ExLu;JHO>%qifyAt>s6B+w_g;GRBpd+jxQla%{JkzgmqW%)m-)d z$rI>H!y5I=ka4(X$6v1tc-dd(c6yw#N4Dfj3&QqR7`z&1Tg*K)t>z>y3vxOn6U@bA zHS+%sb8H}5rO-Fpag%GNtLFbRJWV|`Jh}ELK%*EB4OF>-?*nW$qX-)eC}OA;oYYf5 ziTd@s?EP)D3MhOknw{VXlD2oz_f1`KZ`juDDA82)Ly?L-)v52Zu|CN?seSuPaIL5IP z)#!F*liG>uw3sY}R&VotAuQ8cQ|&LdMP=*nc)Gqn6@ zPd;L`wL1B)m;4I93)#lsQbDF%!?P_cIdsE)if(6Jv0h9)jLbXeSw7TkhRU#|?d(^E zf`gZ}0M3d^<#!S1Y=GjysZ@ zs443}AZu0h@c&Zc{6m*`qbc#k?hx^J?#J{|mIQcK_<$!tAddYcRVISq}ysBL+iH9H4J;L&;`PUOA%$%6ooX4&{|f zAwDXkcX^4#a>2O${6dO*K?}f$o+F*`*}mDo_K82JE~-FNU~VS{BHQ)q@UlH*4M2UU zn)4Iy-v+u&5{yLziyk-$?O2mRIxUjq!7o*JI+j=3Jl6bGqO;NwWHxh)r87+^g%l(e zLGV3Sr7bx`Y5K@X5&1}IL{SHozukEfq^t6;rhIFuF0uZ}Afd+AczW0jLeR5e+Y_qX zJg+|tRAH=gbpkV!RbhylD7qIPkuq!-MJn5Sd`CvjUcw;)2QxcKU>76A>HfYuWWQ=H z-P-yd!iuFW{Whm2wV^?gO~0G_>yYs5R`OP=+n6BvT5G9v;Yi-vuTv6)o0yu2F15Rk zK;^bXpHSsjR-^9eMg3PAwD+vepcGfVo#EQ8;i}#aRve8B6S?Op*Vb3c#K9`z10`Z_ z2(vS)OVGmqEC674KEiDlh#bFKMs;$x|q zMTf4G`pOw#$P^=mcBdnNWCLu{CLxCXM#f)fYQ~fPwbk|}(7oD(q6+Xk#F>s{2Ck^L z0kw`WcSt2WF_Da?eljGvKW|<|HBQLE@Qf_U1I`#X`5@YjeUF#hT2rIktbKC%0xCDV zvtCUK<;%Wa$SZVi*i3SVuPdmA#*$|vLp`vUD8Mtg-F;+my9bubwlePoOj;ipRM*9PQ3R`t0Fx-cL zv-3EyRsZ=fu~jkl^|Wv6-hP_Vl=ojm%4bu``!S*UUdR0I;FpE!%|3na^X@D;h6bg5 zOBFtb5?N}0Aq;VryLC{kNC1l1sH{~$c7}i#b2%=@ww0T83ri?qE|>48P62?d_)Cj4 zL{sAFTZ-eApV#%NuH4IkjlR1VZxzK-c}DB1`|A<_C^&kI)wF;>vxj2_S!PVjWA1P( z2q(ZbZL@^=5TVUOpRcn4p=xIF_PgXgj(^4?ASheb@HN(`ACGYFbXu>~(R#WQtg%>5Nf1Dt~fa4Blk({=E<8 zM5hr>^v{OBbE9oVe@LCq_j^XyDY{+JM=HAOU(`nSG`Z`>zVzZD``JQp1|}4CLT#IM z!mMTBZSPrGyzPpq%qhBA&R|4g>DLw-ERB!^#Ys31uB|m&8l?cC3!QszgO$}B2`f(# z{^Smc$b2L3m1+YKlessqmPKPV{#m(XkO3b6c{^VSoUanqzt)c_+0NoUUToNO=XdvO z!!!AEo@s~RZ(gZZw~sRAsJ<#yG;?Y^9R#88_`TA*gMclMI|w!IAZ&&ka0@LniPS^X!r>L7ndlhiz`D(>csAHs2Up45 zoC$5`X0h(5)@EB=S2EoifTK*ScuvVI7A>fWN%eYuVI~!kpF5)}tDH-B>n6wSi-%wj z(EDbbdbl_F2yDCDHR~cDUFcKR)^(ZBg)j)trtjhyg@PZw6DK6rfYbP!S4@5IbfyMG zlH#q~?#+$)3kr2lZ+6T#L$KpE>_QhWX7BLO@?F=vzmb5r6DbP!ihfL_2E*BQUarvV z4q51I3Kf0RC4KqVk*@SbD*@?CFl3d^*v-%Eosz*w{TAhGr~gD&lhe!m7L&4TVPykg zxKLk_CK%On%|L2w(SiK$s)MrR?6G6)K+ONZn2lmlpN#(^A^nytZbF_HXIsGU4@EcM zAs|TNd#T-`kKks7bG1-^dmmH3RU9g^28=D@s>aw5Ze=`Yh|(B(O*^SEvMvW<#%!c4 zxrS}(vZ3-WQ$n%ZR$+m)<`4AhsFF2iB`U0 z-SJnd)k>U+6MZHudry+?fa!-I$bJ#~o@t*y#_B4$xC50k1Z27?Y2D3CWUETVh6ad#|=v93Ao>GojEa@?$3=Nbnl(JwH(`wf`~C|=31pFb)@DEku`^;XdAP& z{Xo+*C92pP%;puyNq|;6+vTXrxrxHE@F#nNDKWsv-UoQiqADbATkDR`?l6Iz`J=%P z0cRQcg^L0!fE7=N-zb=D7VZc-kJG6YYOETG=Htjsy1zUh1?l(ynhX)oDi!mjyr|C7I*W>kij9$z2I!LdsF83;<%l*tZb-7P6D~5G_ zbZ@zGl2pj7MbN=0Vwz8)TF3thLzKh}Ko>9Mq2rB!V*?-+D#n^Eya6qvC7~lt5VN{& zYXNiqS&Ei#(P)3>N{%-2pUHDB?M;EtuFaRI03^QQW%()0QEb8=#e!d$A{b<8)){v( zU=rV=t05Y4r-Mfsu5x$qUahHBfhs5lb!lS`m&$ zsWfW;9U&$LUR(mBwg2yzUG_*kS**8Ew@bJH<-eWCkc)n z>BBf>RLKVxn*bs1NECN~M>jjClXaoYEBYESm4_?~br&FVxWT48ohPzRYJ?jnGq&Ky z-2Oou*)|y898ruJGhzsHWMYmH2JQjlIs!Q9l%^-;XRUhQ>v?M}^L7@Suk;jzw{iHH 
zZ@(SpA~V*l6vG&S4PW;2RKA!9O1Qd~TRF-MJ%j0GKo<+;OfNg|oxPX4Q1h1^Lv5Jv zpjv;`eqE_%zwQh-{gl4(;i}8HX1M_@kRx5S--ywa5?j~CgSAn^$4@cHkeLrwWXVkQ z$%x?^GPyt)y(R~H=WUeMif_Uc%aa`8TyA;Z5tmhLfULfrQZxR)h=k`{m5=u1fQV2D zYTTu7&4OiVe!tHQUwu_G;O(h1Y7P}G^BI#kcbGu{Kx+n_!!_rkl7bqpa)oLX;&?+E zkhZUy;+=(y%}U^Ws94xm=;s7FPDVu8suV%V;{#nPLLG#P;1;?W0qBPV=2!QVf=4*t z>Ev>3`?DB_&O%`s{|l@DqjCam575?K>dv0vZX=)6UP;fmi`aO_X8Am9a%|ItZuvr) zvpG>V()F|gEyxzu&(s3e4?K;>NwI8@2zt*~%zDMlBqj%cZ21ha49QNQXrr&dQin;? zfh#MY^$uiC%Gyx{@gY*JZu|>G9-u7Kg6ReyzRnPI&4>Mp({MBGOOL0tUeR^qi?reL z)T2w1$Y&H`1ULpG~XI*!X0@j3MT z-~hkPgY>(CAJC>y1*fu2KY(}T_`J(MDL8WIt!b-yoB__S1P)~j26`Epdk0OQFmja%y`H?B`ygw{>XG?2#Sp}oNXLT5~% zO#y`z`zD&__-*``F~{d)6YGb+|?#q9sN zL47vwe;HKaFOY)l(1Id`WA^pYED;>K0+zH({qHbJY5s)N&Ih*3Wpch{c==%5J{eJd zQPloUEIo33yz)z{@iP_63{TM++H@Tu)w;&;BJ9OT%3jnSCsEaDP%ziXZ{0N|fWWLg z1xgLiFmf6td@owrY29YjMJs0)-OOS{cE8bihn-8>egT!@rc2Mv%M~@J1i>T4K;G0x z?E<$nYe3<{sH*AcOeQY8uDj z%Feh}dNtF9*r$k@pT!akhI9<(d`4Lu%ZmR7!`D*dtRa)S{aP#@O$nezVUix4`0Vdx zQDmeRW0J4tOXpc$A|mA#doXK?rV_!>3zQ7Iorq3LCD2-m6vK3#I<~=ZbBRE3egfOe z$ZjI{!+a$?MC^99q4El<*%K_vw;HcUkO`LiX|=n7r+wEF%J?t&w=l}AoHBXmr~RZZ z-iDrXyA>QgqcAaC(>Yl@0td{H@UKKu)tiNbnBwZ~9p^H1DC)Ju(6_cP9E|l~fqmgn z@W^EQ+#%u0R$kAQcJ}3jZyk^*tXy?#KO>a||4#1Uj0X=Gro>cayS3z8ueIdDUS~ko z+}#psL~gK>HFCxVt8pn$Gz!)m$EV@>Ac^^~l{`!J65#Ly9vyM6yl7O{IDcfcc&|Y+ zK-?zhguR7elY(N+o~j)(Erldn+G+f7IcxQ1 zw2u&|>*8!tr|d*0#u>!4?^r$LG;N>=hV*@#&6*mA{<#xK7q+zSEVzGf)ilnfQIBR4 zH?NCqCxvI)y2#(ahY@39VOq@&G$ooob%&0y!J>~n(Hj%9LPl*e@KCje=upe21)=&~ znDcuh`z6paq6RRjD-1|Q78yoh_K@B;qd$pW-wa?+Tv$}|2S>=W>J22bA8O~6k3Q0~09`(ai^&umOM?$HoFKL z%;>tYF0GVog>-_W7{}@zTWp^8pm@5lKGwQ3-$_ox{|+jT1xP^n#tx%fE|nG&`SiFU za-gjh5JlX*Zk)Roz^?%f_1@m}j+P=tf*9#*ylb})?8oqd;z9>Vuvp3MBeQq8x$Spp zSzQ_-B?+8%gRmfZJDt2g^=2H-r4?^D|9A|n6)!1ComZ$zrA4|nbzYH6pLPkaD3J0S z_6bvT$#L8yLh3;j+{J@p!ObA--$(q4~LJeba&0A$5Ml;?LN>gNg+E7{jH_;TZBTHw#<6%#$kF?#J1PJvTaW9M88O7>)c+U ztX6@99V(0SQX~j)Uo=E6gI85(M6jHZG&QnBP5N&B?2jK2Wi82D zn7zNkw~vQdCSEy%D%6yHL5s!5(1GX7os4{>V5{V^=j8c0I%;aEFwt*fFC z#HG>n-KNNRSqrBKWAR9+6Q5cwizyj@$7lUKh^jqkej}`g{}%dWOeQz&2wC^Fa=X>g z6=&g*!``UkorjNg)x}f~)lfJ+;Te~BQp3rtu~M0WZlyK0+Pd#cr;SVj(t4cz<6XHk zeWjN|g&*hB(S|h|E+g@WZolr-9&=*WGxHU7=j(TfA0-@%=1`P@S2N_HOXPiXJrw}AZay3Syu1w-tDG2S)jbNYG`D}Dt5v-c&do+K!b25H~gxB7XU>$HN> zzTYUpYefiw$o9)zw)u)JW6Mld&FRYaQ)N5Z&GsWDc#v!jpB6JQcv1FZjFHK+n${7p zmcCy0hHGeol)33yq9i7QrrG#`Irc>^I)KZD2SncBZx?@i`P)>n2h#N&PWEKEiF7Ej zDTV%7DzSHd1QzwyNN=4JqrGtiamPn(d}gM+9~}$mr6+BoE4v6fKf}rZ_nRs2K7Ftw zdr7m0Abz7V^SxiVse6<9Go}7F{6C3cJaXxe&!&f{=IO}8HF=sZ zg^&s%_7$Advp3(DLb>GN&t?=O>GKfMQ9xCYyrz%<0n>QX>=S8rSfWbb5YDV>Xe`wyxDE=C&bBYxvRn6ZN$XYQPW`Gv%J#a*f$#-Iz)r-WHy?Az_6N@ z-S-|q>A#B-u%TR^mwazOQp)g_l>b(uny<(uyU`@OKAS8*`QCp>79rVWCA&PA>>QHu z;cjn)=!}2cE(-6oeU)g*W|jI&2|AbgQ0lK5K``Szmfy9J`=Ecmj@!QWqt!G2S3F{z zn8d6J`{LR6+s6~uOZiE@j zeL4FJYiZuJF@=`Z^q9)Xxw&h%y*9bVct@DLztA_^TT9i5UfqM_uOWn^_WVwL8sqbl zE^)C@9mtAt>#O3#adlCI!!NI&Ff+ajA+5SXo2@beZN1t z7fA$nc=_Mpwc{$rK>5hD%RGFWd1OZfnfMh?##51{+c!H z+-rKhJ!YSU=G8;u$Xh=Oh`_E}Ug)Wx^)uYMwQk#$n)|37`9mu5Sj?UW-81*eu7n$% za}P}TkRAC;D)I+fc9&a^lq#e4q({-%6W-1E@9Kc#$oPNt58%i5p9!*h_?q8ttB3pl z)MfSX9v>1%rA~dFCq@q&dC>ngJn-)T04{7l4ctj(BXXUp7jK7uXDVTazjvm|aJ(MI zgvqb?G2ZVn^9gt_-`p5qTuoPv$P~^$*u`ny7ob_&cO^C1{>%C8Zr)eeJ!XwE#_>VT zJDn%Z8_9#_jo^{-+wA=dD_YurLwl<2wT6Z_tMabDgTR+~GvCF0d3)rj^`VAVt+D3I z_z5lRT5f+KS=jczO0v-2%^_S1b}#IQ+;7YDZHr^Rb*<(k-{5WM@Qt;zfQP|ZHxTH? 
zS<^`50yj$QMH<3}HJY8=v3fVblH-SlUC5B32Mn@9{y$#B65+RdSl&k%-*JMmRT;^L z#CO+uhw{`vhGqakhNdqM?An-moyLU39f9f$dq2BfGv+&Gj3C_?>4cc^j?#^Xq^JIJj`6wd)Kmu0hf%UiKdFpD`hVrOuq-pT4BWm#7{B}3^WHtCi)8N^B zNDN=+y`E>b!BYs*;7fUA{3`(lQ5AUgYF|Nu?%_Fr|M^PvH~!jg6p<*NhP@8h!dTk9 zayt3m?rt3o>1Rk!qlb>h1TDO>|MZ0ksEDR!u+k6=;o(~>#RT4D(>nnMQcUFOQR(NU zW|BZGkjvg?nI2;%VD{J+*Zuk^U@_a~+HR^#ZKlP_%LGNg&wZ*nDET5G#T!iw)h;#zqTB{ipZMH$`pyBW!bCxVRsiOs4KI zYiVQ2LJDIAbHILXy6|}8hIcUjDGPrBbb3&6D)xA8f*CoNd2L}{-;5?ZV5q+yt~p}3 zH;baE{<^QH;|w!FaOFBgL~Ujv;DsRib!XzYRo6dT-FflUX*vjC&dbH+26bvVTJ$a} z7weAI&I5H2$Ic$Q=T5jrGph=rSZ*OP#}x&E2G4t3Du7eEciGkCG|= zpqtd)^042SbTKsaeI~v4Cr7YMBqeDqsV8Pyvg(XewM&X?X}6lTF{iQAW$Mz z_tOq*{uk6D)(1&r%XZYHQr;U+fnGyh(2pK16(6Wo(}L#i{1%hPKb9Qkm7|c1y%pm@PcwD3K z^zi(dKeh%;;2>dQnCC@xaii?Px#K41Z3ftMZ8XHwQ)5A6V4kW+33ME%W6;I4irrw( zE*w=Xhv#c)zDDirh0X;BW2#Q9=0f6xK^`bsMktC&Q;T`JET`D8Yrmvp4Qn*(9AEY* z`2k99{u$BE*haxz7rX&L-iiJ!bcmI{;AfI0b~+!;)^oFKCRqvr&N&)L8XLLHt@g&s z)zkM=OVjymrvs+WpPM>6>4k(>MqfDdxx4s+ zLaXi(wZ;=MBiP)W_jdbT?>e*pXi(P@G>6`Xr!^jyWX6mq!QLxAWu7|@!@PFcZbCW? z4sUlZ5UX6jvJuBgp1Pkd9|G&(^xCTi>^93uN5zU1-oJRVHs@EICmt2ixmnItAv$YK z+p&rh<`%TS3cnOHtBzYoaR%M^77xk;;SRNQmb@%Q$usTJmx?Pfs zP@Q$GB$lcU{U&eT!PRl{(p6oK{x@|zo~<_Fi>8W0YwPFJz`Twl)rOA0YgB-6=_rEd zFn&WTR*U&*(^v1;-)A_}zAyAJB;$KK#g;l6lj&;Qk!R(Do^CC&@0EjUIX{udDQo2_ zaNu^vGPR(D6C>!A#*&>p_0Rr=@oT+OK2WB#wv%3=YvRaAltX?%rm(X(nE~=~maY^P zk_R8OOL^~*Gv!vN-(vjgy{^=VeUwWUyNAEdKr!-7&-eIXRzt@B)oZSxV7-N_l_~UA zPpM)?X<vrU`2!1O|7%2YPpHmm6=#wlz!;vL`P8JjN5 zquV=G1UXEq(Po_RdI8z_x zH=Z&d+K(hYX5R`@!MT>vJX*ZPg>V|wyLmP}@8CiZl&}_V)-sf?RpN~1dBhuwzG%AY z-ccwkWLvz#8x%Z9cgOkk7<`eKrdbOnQDyrd$&^LTs7uV+)GvpSc^hj=|HXYI5tV#K z6qquYM2jTjzn;WYe5|RH@~JjJwN%rbQWkLL6z8l3>|_1;yo z+0;}j?!HZ5pQToc9ptvS`B$c>jn0>-OUU!o^lB(=*8xLMzg-iH;aeG{tg2RqAOq9< z1qH*i)N=rn@&C(2b~4wKIhQy7-kjfm<{~&D$Fs%6q!{*DMUoIyg_Z}XTmUwOVw(4$ zlqLShl=aH1YGvJbWD%^d>NUF%2^hFvQqe(~r-0YVythmZ`K8HhHEJfcXgYbaj-Zqp zje{3g2DqR^Vn=-sWS`|c!3h-Qso{^HXrdPWB+OMaMCO(1$+yuwK}>XDfyn|b+w3-j zCiePf6Fc4hU1Xa0&us_xPkW7d2e0XaA~yDdx$5foLM0@_B9mc(p?sa` zZk{b`-F_>Lb2r&?mE=s_8(s34p;@dX-*>b9CY#N~U7^}~{_CdSa)+e9IYFD2?^k+= zO$rue{PW;A_IU4wVr0HMt)}0odGNnM=egfxbuUq%b)Nj4QNP@NnhPG#pLt<>!W{dO zS>dwrco_>0DLXvl|2~CT#l3(}bMyQ<+1Kjn;k zGcbmVl}8ES-1`f)0Nyz5Yce4X#f&$!lG@bt(ax3RBHfSlHSL>X649!Oq_KV1K@Z~D z`G}iFSk3K6u%J@Di-|I7!{-(^=Sznwi75sLaeFpUsKTp}e>8l&Q^Sk>e-XvH%&XRc zJ?sdwC*Kb^6Yj^EwXcupO{iJi57w3J@t|GxOrFsWJWgJlKO_faW0Mqvn)?^CX_lS5 zl{Nb|DeJc5z3X*`(^87(l%k(f^kKAv%|3!G@^g-pq>EFr(DEQDIR5|(V7M{U9F@hg z=Xo*C^TsC640;k@YDj82`kHz54}BXdA?^DIZFJ_Z6B_m|H|_OrH4iunCxGikaaw8L z-<9mY9|9Mq^#Sw=wSYAYLL@iuV&7_5vy<1$42E$CQN~~E_aB&%QQp1QF%h};Fx6=CU_49&|$RB7|wa;UJ{%XC3&uZFA%(Tq->B|Jc zwq4IJZduFDoyC&ezsWH??DL)McorXt5W)wr546Ef)t=ZAMOs#7CJ#HS{P$IEud zrmn?S%WAq$Eu0!TDmr!Uv0N1row`oSl1)e-vHJa_?!362bO&J@j9CntGx@Vwp2QI- zYvNaR{&scZHgzJ?f^9;tf~prdDkde1mc(sNWkhaY6TQO)r!LYVGGoDMp2Q51g8iHc z@?lrT|8g4(U}~gybm}Cl2W5Va+qQnusnvsekoYe!5~0f1=ky6xzA?8D-CE2LDmUr% zC0<)P@}J7XM&ZX)J1sh9*t^V$)g&xe42%;0 zg;aks4M~i}w_V?xkh%K)TPz&lU2Zjh)Hmfnqi3}~Q*FYJa+9k3HEMcpl_RNAV+cBt zObSO=9_ANPxs=w*X zC-pYYZ>u{(>Hfv3YO1N;)zkZJwfSBEgca~9wR~V z7tpZDk4B?C7_(>h+~}{+M$h@{E^|t}3*g=BBnL4cY5&J7waHcQa{iL(Yxc#1l3ng5 zdoh=6!J(6#>LzGT-WkI_jtFl z%-57(;*!|N4H1}WHgFY+B-D=1irO{hVLLbn=F8}hlvqo%5J&_J#YhMxR7(f}8B#~! zCED5%DBCVcNx)kAb1gK22&52J>2tw$B*E4lhNn&Y!>jh=L-tP(p9M-SSe4m{wo1n( zp-fB11(AZ5aM>L~D;?v*wi~Ew@U4SZ^>_l{p0%{Gq+=AEV8G*y{6gMX-Ke1$xvLi? 
z9Rsr^P-?hfb(m(B*q6-FMo?0AuZlK!JF~%f!YJ4lX=;JFERR|-+~R}Z0aP|=tD5_c z*0bH)9uqYZOz<7;BhkZ1ngZ-ppt89jD>SsRi5W% z{cmBTOZx??)I)4OzJtxr2Uyig{$?+rewu}e!)@dz$%O!6ff|t`JnRb*(isD6D zvt%v3-ypZBCaom-HDk>_ru^GmG}iZjakj>~HM2944rMy3_|5A$eL}jhPo&Hs#Eugq zE@tdF8qaGi(Ll#AR$|AX5b8W`l1y8upp}|hG^Bw@*>CEpA(TEp6Druo@-F2KB&@+4 zsZk7=VeT=kBfEwbcW!1_1MaZCcyjl!${OmPt?saf;d-6L=sLL4c_YSKcnTdkgTEku z)%;zY{kw|iHT)eso@T!^N_A6M3_mtUoBJ4-DTItk4#Nt)5EIXltZL_E&tvqg!g#0H z!gXx+LTzQDv_{Jp1P~uAuiTvzoH z(PYlzPnWy|j8#jwV=kyg4MbiB)a7bqPL{*6HdD%*ar@zt7J{_adUCb>k(ArfN3#zL zy}+jFu`ezRt(4$}f`KMlDUCU>j6n$O<1GcNh`2}>XB}g1MddmZ&VN@StN0k1G;d*h zcgFud5l|4$W!KZ`7Ub9e7!uiYRq&%hU%o|N}oqSt!k{JQtC zR}zw-W=O=|1lrRXP{@AW)H&-Vn7o0+_lPT@+RAt4e#aR^m0{c_0bq<>oAZ7{CX+|I z?Zy%jm}6W=;d766KDydX$O1PBn@Pyf*A%od0yQ1d_pFId)uc|{(nSg+uETzu$auj9 zsew+7@1|eP z{mu5run+ZSWW6{3JKLg2r?upkptS^M?oz<~v-#%-4Imk5&0{S9(ry5WH~9V$nM`>m z)UcDstqhN+KGXmlZ}Nmh&W5c>3+tMG7|=8MuUCEnqMIKQswl)oIYrqFVKNLML-35f zR-?R8#2{W67=#l700=-V88(-?!JW}m1W+T{m94iUf2-IEUM&w$U#P4F zfGbt8Ktlju1LH5s`1@0L`=z2U>*s|$btML#GyWGTxS~Z_k0+~mIj}R!p#5`|WEGF+ zW)kb?> z?oVah;0sC50=W7+W^=tv4=S_n`SnE+nkGty3ciGRoY3fdJ&KoNSOD;7b{|66lbho`9O2G3pEFolS z5??Z=AQK8^?>@T`4sDSp%GVbPUh?>*)P)RRdT01gzJZ4<(4_z%PBHrb?Y^^1huXJv zIcV)<$;c^8x$5h5`dlH|E)H68P{ex2j;x4PA21GDz3Vi#RA=^Uy2i0TGJLxB`C7MY zm(xsoQhsH72uYO*QHYyn6KPx*pAo)kxERk3u)ue&$ccfn{Bct*g1t=R37S}i&@*dh z`cuAEtcQ8Mr`2vX!)D5I%Zn9LVAFHi0u4S4gzeDKP(vqJ;nup+1MMf%HF)lC*=;?2 z{?IyKk93KDgwm5U*hH6$CmIL?mQ|?^j^_MRdlmPH=qQn|7^^}>jAhFH!3PE{+2zrK zimObuM`E%W=0K+=&#kLC_a*T$OiSk0)vlR<+g{A_D|W8SC+%w!P49b!o3_2ipg2u|CLzZq6X`MTgxWk%RKh*qIi zK&W-SLMRHQy?467V&?Sv0q&fl$}d?mjU1pxk|@=c^K=t3ISbt&GK7f%=PI@k61KFN zf~OFC%?(cH!pK_c&uW(kqBfQ;o1Cl8Loe_uAkMrX?#eYv?z3mx(ib5PeXG%0h??j1h|uX%`AxOmf@qL5vaYwLI1 z28OJsqeJWZcYjFiO8-Es>w7U9uTYFk3>l_r;_sp0n1}bRgV$Dy4>?#dcoXqtxBdiU zy3c6o)H=xd5!_82?M%GV1WKK0RIqM>K%hmQG_{*m1`op^Vwv@zn#4YKxWuk=$9?Ss-P$q}-cz>7xzTDY#*7C9aqf zPoE-HTp;8X$A{yhD5STR{Kdu632*%h_h(z{zK2PMN%g;K4034N=Ind5FKQ5lXz=p? 
zc!r1rt;){20R}%uQ^TXlb*Nr+CcZFNIcW#G>dMOwb-8O1#1#g6tvF$Zdn<@lc5)2g zrg`4LID8&`ESJ~$wfrrae>j*bZvWA~+$-7Ji`w5f=zZ@&@24N|UfllZLGSCj-_88w zJYrYdZMY|vnb8jUH*$}`a~3Gae72vVTu|~9Lo27nz*0~ii-}sS+29=*Twr`+gI2i< zKx`l(xD`XEZiEJ#tch`U{f{|WgT*;exs)D%oB!gNJ+2g`wQhgd<5;u?cnf2FM>3IQakzD;FM(!l3NUh9U7XY|a7pW{)+lZk)Ezk;ycJ8wdf zJ)tmf0xZ@#7yoFL(+-t~e+~9MSSOc{AMTxPsaZfQ{ndL z)S6lGsWo$GMR|0pU9Wj8+YqItW=Xb~-*74pQVCSCDbWR2u!)n%(VaKT1xY734$rj!gR?OE}=oG$L#|QbT z$j344yv?Z2hp2P;Fo$!mry0j;2klOnlGI@Z8{xxK7Z;jq(%d_e3=57qHMITfFAFt2 z;QDtLCw?Vbc?M3ZHyfGnW;nf_&M*F>{lee*M+RJ`@hgEFm;XK>*>pGAmRz#?lq~zN z3m$?11LuK%rIp)lUj)nBtkuWK1f6Sl;EZ(GD1sU9N&GUM&-CY^m4$s;IiTeIZ$+5R z`4`Z;27cO{1yec9O?o@gBh5%>+##TU=#0ND-O{x0b$#-mxl~znrpSif<-JApwzs`s z5BpcsUL#L7hfW4pBMpkVhIDTxxmV$^YG~gMmM3}&Pr_U)Xh6CCDe91DCT_oMs5NuD z-ZtUbQ)yDifH;$o?8Qb%%sF+L+~?>`5VVvP449#-S=hwE0L z@1na|Sh1sHkm>Yl`*mq5{}vP_mB_?!r`;C1m zM|W2Y<+J9tKTrK*Zu1DzvahHlyCG{ z=+h&X|7m|l_wW`Dz9A2(--5v3iRz zP2Z-68Pryi2}be`=hT=zB^6o710loR7~o=*yoj^p@hP`F>pCwOO?I_+n?r6HB+|L z_y|G7ItW;@O)Mi-`$A&zsgs}J2BP~1S{}HQ*}{WB*8A|Hw*Osz(Hd=PAtBI()PR69 z^JcC=dz}#C`Lyo@GCG5A7SK*2SV+|2z?C!Ll|0W%K*j@I>`wrb$*LDQX7*T3i?se2 z1;fIm<}O+;0>SC?l8`El*Q_BBK&f%H(NwaY4?J|%#)ft{ndnf7+w?5YK1XMt!R#}deb!{3hUy^wlj=)lUI^t@D7s*u$RJ7nBpsP0dkzc7U9J zYzG~s&J6(L4=QMY`va{_)YYK>d&6M3_OK{KE^r%D~hMv;HbgZ9gN3?ux!$84E z)J_TU)GwD7oJSXH8}pdK(S}y(!x;7`H6Y`E39O%kV`!;A1914!17^8t@u`~t%jbbG zvkcDoqx2E#`XqfTVf_&PCr2zHu)o^A8Bc_J68%^7f~9Z14|b(vC@b_SqYAcwTw?go zkX~hd^B?$%9;mvEOzKmXo%8d3qI0zpr5^e>t;9=cp>r|^1np;2d(=Ml^+HYWp1L`9 zC4uomwm5O3ajz%c!fHi}%IS{A#S8NbKWH`|jDYWAP$oU?*6$oBc> zi}y#O06E9!VKpwID@otIgbUlJ^WAy+Tq+b-tr-MNV%meYS9-sXgrn}_9+NVJ2=p~+ zLTcDx!u3_1K!GQ5^0I*lG?HIuP=ALszpicfb}cjDkIa${3ga=U+ew}AUqJqa#(!md zJCgc66S&yC>GH41F?Axx_lw9^6QBCi!C_8uEdtuC zyRoTjAkE^HJFO<}IRlfw6)Sr?I(2mvzwAxph|&2)t&46EmU9pBqY(Y2Gz*nYe(`uS z&6>BNY3r(fAYNkL?6220PWtA3zoMmb(jo)C zR)qTp*$eCPGcBRYm-Pr&0tX6Y+Bm=11Lr#aU)eJ-46=8t9nCw3ShvZIhVw&igwbM- z%5_YCZIq_bs3aLRCes0@|4-!57_S)Gg1v|5V|2?73K>la-Ho;8wM3_GrW0&tu+CjO zU0>!o&N0;Gah*WnX#dt>@Yb=i^|?LoSIk7^o7SC|P#mARWck(9C3q|+IEFzezwv|)?k?35OnGl3daXCf zuX}KJ6CqKDKC`-o2e^#hYv_kc-${9ZY!mWk*1mkQdY>f%eDh~L48HlTAs2T8MgbJA zUPH+gKXmh^lglPo@>cE_?3=uGD1bkkcSB)zKJ2cW9}{5%bB3xJ>{2yDc!R1L>QXh% zi*zZs*Ry(^*O&%DY+Y|N0(Aqb)8ost3;PxB%gQ%?*=lqsQ{LW}@}ctBo14W9nf^8Y z4}*~G1mv&#f0%ps_^7I@|39~Z00|S2C@M;<5z1LoQ?X}lld#$xmGt;1}wfjo*S-Z^3)p-HpGpR4{M{$+6sc0NTgWk2LsRE79p94U7B*Cp0!{=`lm@vIFo zFvDoc;r>-SG`E<^)r6jg1G4dt%`WjmL6s~w0`M1O!S+-;lljt(lC;e>B{7N6dd%9; z%$azt_YwdJJ*Wr%BA~Df7xK3c{K7u)FXx58pUhSN0Dl6vfPW5`MBxMfrM)tS%LNYt z!_qAUdnCIX#e$&KR1Q}ta>eL2O*SU?UF7azL~(ehVhj40>Fi>E%isY>ytgO5(217E z`j@@8S7Yvk&YyZB?`xjOyN0U~dE>W#V)AA#7=WF_M~ZjqV)FOh-qzY%Khu(7&syeM zpxvk^PO=CteJ7)tAsLK*adhL!oH7x)$U80c0dnB>)-=&Oviy7A^I1K!0Y)FpZ#>z{ zyTE<7=qL!J4QtB10Z5MEo@a6oWq*wiEPva7A9LeCeV5-39?e{=9Ri76%vU%YPQ-f_4b@U9{+mYEzR9)KPZ+Z4# zE`)>pEl+SO6_2H!@UX9a_7|LQEdsf0`GD8BrJ zdeu1G`TF%*&U8EL7c;vnq9L#L=v7|%PN((}3fSx>exSs`&bO7c+D-aMNmG!w3Td6~ zTaEbH;rQ_6X}y;W&KppfS!+1|LOzY=-^=?VBbF`H_nvojGyO!*UHWSXX0}`IKi#Tk zucs=XR_{wc{g;x4J1Zzs&|bI%Xisx<%Q(K#Z{%)gD^hdMk#4#y?i+`D+Uge^N$Wjd zK!220^nv>CI+g)uZ`j{5hg;+Q;JnM;bI>Ay=W|?+x-dv$wJ;o-;gYbk}FE=Oy}`>Xp@{cL-&{#HL{O4%j8juvI(*Y3(Z1 zGAmI7N&RD}L-tcBez5H!9iKnDdgT*rA#8Tu9x4kTf9&rJOIO_LM%;Ro8KSL9a?6HG zK*M%khV73+9q)_hw|{D|u~rv@jbFeTQ0>}*CTMV;OtmE-M8v3AT$>ZmVh-@RhgwHp3^OuQx28!XXkg7H9RZ}o(D zPKIZSHrBI5J8fzHIG-3-kxa5WdyjL?(0=|0jPdn63{>XZo$gmMmy)0uDP66JKmZ$y zG_2GZNGU8%2aDn2h{56x-C9+ZV6=9#ovDuv8fl#D#$RxLl<(s4=Q-(kyy{W#Xl&3H z&j63hdAU|Wf1MTI6#eFZzJF+jvmI%m=URP7&$ForyOPLsnKF-7=GkQK(IIRchs|IA zvMWgSw@l(MJd|-U?roakU-s|^hKn};(Bq;Pf<6)7IR=d=4&>`S{9(qC6g#Kg&p_Kt 
zA9I8d%UUhW?}bJ5WdDQF&^htjnF0qojQP&~7$^rnukU;txMwIlql@YM|3d23NPO2P zi@zy3RrWx8N}jAF@GvShwhm_H9%khZhw_%i@BJ8a3@XX!i2y-T_kuf?^F7Ykf*@OF zk{-W(FRl9}!f3+Z+KqL|-VM3_)^*%yh78btP%>E4%H19A;n1+vejn+GHR9ZC3AXoS z1)JYAZ3@dCNh8cTT-@U$zE|VN?xfiJM6ZFKH%Q|Q_JFYZQnOmi zT^n?Roj|ERfYzouW6uU2vqpi7H|T359y6y-qUYQ^zB8dJZ1gZ0KNlG^0j5HB?Nmv8 zY;J3vSw?(w#>+s^vVYeA;)8Z|6TGB$&Q(i-_K%wZKCvNaKii@k4%)kal`AvlWJ~DC;%}eO zvFfUn%+v$W5;|Vys(%O_+qf04RdWhYd4Neeh?P64yvE^@+~`jlqvgYn9}baTXx=DN zJs!$E3G+OP_HLtZJXA_YMW!zO(az#x~TXV{Mm*3 z_}hnyd*-H4@fThQ72AYn{(*`O+-j_GNfdqs;h=oy+G!TJM^kzvNk9D&BlzzK0P)+u zSSLw}N&SJp8y>LaUU-1r(o%7qIOxX2Zr3b?b9*I5J_m@2!u>yS0jr~YyYO=U0+?UQ z&B4xR=&O6aN6|*JIi7d^)+mo%zOQlb!FN2jz&DCZqVP6)yr(YtMFDAQO-5hte`T25 z?{ndt5pws9dZfxvf57Ko<#R@dYce-gL$S`K^GE-xu~2yU_r!cOI9}V~s`%Zk%re!@ zdNI$5x;fwbuA6h7^Te0syu{79K9zGhIaj)hzehL1w^z_iCwdz0wkSO+$Flc#NZ5wC z<=o_zGt~+7t1-P9e|yI?V@fJ(;g@E8+Ra*=%KGkhT6{2O?sD^ewlayC^@q-PwVUsc zseC_FzC$)B;AZ`0D(m&iTEBAE^vsM8F3QXpjvi(*zdG#O{>tyD%Q7<_AinI^xkh=v zR@YgX8TVbDneiC+&9gHzMoV{->G%7yvWZ%yf4%|y8E9^E&EJ3fXHfF(;33Jo|HU=W z@6Z3w*@qGb{92~gsdbqd-{<-Wzb#i{A3ldRa<0;u{7gG&3IB4F|FW`kb9@7mPXkvT zN&eCNPT+Ssze?{{&GmeKQ|4x7T+DA4zi;td%I^k#-{p4?zpvH1_1sH+>-hb;uU<3q zGBSKVpME}HruqA&f3m*x&HoQyWCK&qccG(0{k;9vzPJ#?INZ$^QxRl^RPu)%kiTrE zs|#xFJyqIY(&068;S}r7V$6A3kU;aeAF{r8&6julSXz*2_c52od`n2~^xe!~R||5D zrv?~mES5WA;l#`C9T0rBjxM0WXZC2k<^N6>8L2-+j#RYm#bw%7o1x%7@!EE0&q8LO z2}SHw`hP#6czN|Z?4if19UjIkuYM0tnhN0FBN4aWe_gx(vi6TK0^Q$L{h4~ZvsyD4 zQipDH6{kBdBhw9bX`v|p?B`OHo6UV}PuPUUnpC?F5*$#3h$D7fL=0RGRG77j-y_Rs zJwA5U%EloTv+iyjV2z#C9Qg_YFIdbNJL{pw!?{NWq{fHPAkj30f>`w}C`LVB~wuE?Q8aS3TTisxv-udi`#kU-5(ix_*}L-0?{ zPK+n%ydaS>=YiMF96IC<4L~LA*pxnzrDt8}X5Af2W_|cevxeQQze{CZ{H0lsbF`_XwFY z+oUcYDA1DWSh8JAh>1#L&;g{N4>tc@mReJI1|Xt@_PAU`ik>|z+?2yb-e?n!Xk4>S5cA1@52V(X&nTa{X-ekPR6y@53*H?`#;h3$|c~wkRwf6H0 z*w?x#?5{?Dal6Wp1mHq&empN2D%~Eb2$ym!Qhpf7L-A&G+PC6o8D{5)J%>>o&YUxd zvyj^(!|1|H3nQ##Xje|4qBX%UELOL?D~X(By7|||Gr)UrjVA46Rrs$!rG5AawEW$& z-JbNf+-5QsMt+LiAZ`9MWi`QXBAiex(ZN%c;yUsI<5^4s^>U!i$A<_*G!*-9_k}W} zVofFgRO4TT9!sHY?&6KPqWBTfMcnuFPni6*E8zV03ayG4(`9!4Uq6Hlb}3Gk2+Omy z>L$_o^mnqfdY)U>!Ti@fT+MM1xAlIq1~e6vTsoST3Y*Q}3V;q($V3S@tk zJTGl3gL0Chak!p)SONI%`~3#z&N;{B}#bNh;E#@$$_!p}Ft|vzUVUqNj*sZxn%x zHT|Ia*kSdtnyg+95^&vPTakU+{-B1LSP4!+4`sk5N~3V%=%q(@eh7=}mU4 zQSPiStzMFlD`^Gb;d3c-Q)``q+)Nhrh z;xSwC+y0`l%+NiYK|Ge7rAWRGa7>Qhzj)&}xb~)ufz2Alc)@Mse4oa{lbv$kj{xax z)jFA4k$lVSrNunPCd{;bw>m>JV$pw6tG%>DiBWM@=b;v^Og>Y5 z|9xmi$GbzzcSPUHDc=#BR%iP@L5$c(K;QQ0Tl)yfn_D$xeH#-VMtKwJ{HuLe^SVAZ zVQ&5L>;0?qXQ!SvgZ_H#`ACROXs~_j%*{1?VN*x2ygje4EEB17| zioq%;n<1R9{___J zbNpw|R35!oK)q0f9U9z!YO~c&fg5}H5a6yFm<0D&6%>2AI|Xhr2?DoJF97#*a4m5E zkdY4V=yY)PaT{8S3@0}Q?$1`A?{eQzR~#iAO2UC}@Cm#i5Cdd@V-Vq<}dGxxna zG=r(QGxp?b?%Q`rh(&k14}UTb@9+SZd`qCQKKK8A=>>x_GL}ajH`9kE(|#Uo-;ICO z*IEzb*JSQhE0u0DnR0kQhz9wVnEI-p2}#E7`q#zHe1t^hdi)#KozB0%G+xVYeP;U_ zI{?n;6;4#37U*|33Gw`Aburr?zhbH*s;>PVA!Ka+MgWQ&6U%>wEA;a^mqg)g*cQ{_ zfaSgyc}`}%$IBXbv;M?nJtLL%Zl0sh37-Tf_dSTETJ$*~Q&0Dqr`_Jua`Pl~>Rl60 z&b1e4dubQm+B~Tzx&jqVzDR*f-`CH`%1C4fmM`XHvwMYZ8g?0!p>9~CvE2Mgt$+OX z<=<6@K%eLFPW~SU28?r=@)c37ukTg+@$Ed^HGZ1r4sR|iPcWMu>CR@u>tlt#CP#fN{}){BuHIxkm2>Y+P9kFv zLEzZ4nMeqGrY+Q+Yk5~Isk8YUSS|V=-}QWL&O9)Oj)gHrBVzWNB0;F_*-6{w5pyA) zFcLf$Fl&zs@@3|p6`oQzes;P#_vfsoF4=IME2k2 zWH_?TtbMG??x}p4zzn_itS2i!^@rcU#o@)~ajOxQG9uHe?Z5Kj3ssd{Zkc2(!{vP( z9rxC2dONV%zFjPU4Ot3kw|y%}{y}ix{&Io`bhYjb%2~U#yOg*ge;~t~f(TfA1FO^Y^+dw7gmVvK)lY*G$b9;0efIai3MJL6 zgO$$%t|P3Co%OUoJnQM$tS8aFJAK(4e73MTgM5wXHNo;^K)2_%MEAW`_(Ez0@CGD9B(yDFISsz;^yrQtFgaR~!GH3gN{(z1U;LV|orn|B>1F@PQ>8 zAMn`tB0E?TjEyhxm-qsL^dw)5@$WCmA8(%WZ9T2d|BbnHv}G{WAi~u{Z}qX8*V-M? 
z-F~|zx-wXh0qKI;oamb**xOCQI##4N<0f?T^8^P;iG=^12-(dZt{F-_^32_Ao^Yj`=YH$9h&Qp$LbAw zo~rYo*6L5K-0NR)e!BJxJndZDxKxp7KM5#aenj$ zvY*O87{T2R{TfR|&upv2iNo<%@+Nhjlc^tF<^{5be@qRe$PyJjS~H< zlz>-+QYN8|Q zVRwcLHv-Sk?4@hPd9G~ zTeP@Hjb+cnta@K?ZenIfL)hFjBC`?e9@j;Q7R|=K`M|uB-MpQF5t%5^K|FfLC+KV3 zTKkpxRg*8bGdLncTMu%2G9{Xh>U$K=?ZNAWHCo8n^cOfY8w5@2gEiS$wWmGfFZy0d z+X<7EDKyXd8o5m$NZD5+PpYK8_7ymrj;w-YkRiVM9jzl2OSJ7`_27cd5Ro4F427|F znaa%hzKZd>h)DSEXa9)13%wcoW<1}Z+b8&g(JU$y47UU!VV(yZQxN`Tz=tb%W2%f_ zQHEFb92N8FroO66sF^K;P!}(FH4hz-A-fZj09@w*aFt$n@TH4QQ5@fbrXo7bYq>iH zJ$(xDxN*eB;aEPI9|)w=lBJI91L-;y+>io^5}~As=`P$crI#slUPdI-f%Wca5ISF4 z;o{|beSNaPdcHSO-d-QeUYaOxWLt1(eOGpNM%RSgc^l}Ui^~?Es?L=qKUluAUIcMdupzX6RWovaAnEt*@PCYBNywNQa`>D)y z2|dNqmoN9mTp<6X&q4xnL(i)M$et`d)mOzI$QWEJGH82Ad5=*QdhGeN$&Scm*lhD7 zJSGY|n<0JnDT_E!k=3F0EsKL|p?sV~U^!N4w|1`iZT`srxAyaimd|w)=P3^4Hzha3 zVR2{iwnMqcV0x9z#oI{*dOJ*S%h|g9Tf5Rm_Lj80MNPfFwl@qsZu5>XP#?}D>Rj_Y z0P9Yps7~!S0k3}kT`m11oL~^`=RW%-{Izp=iXPsj9_MhCn#NyK^bpTFkREJ46+QIo zyT5Gw-_t|N|2)2EKe{F7WL2TbJH>)ayebqM%2})`4DU2-eD(%!Hc8tv>Ny)129M^v z@f>j{^rAj0bGd1XmuzHYwtcJ$j-mp$?O+tLj4+^>9N?y4!C3+28R$w4H+DsfW%~FN zef;`{KIqcEj1yxm=5eE3&Oc#Y!C7JIvh)fB@9VQ|jP5mq%T`^RGfk@$#6Wu`x0?-v1mSAb`P)r?u|z4biC;>cd&!J z7hCBwt?ZqXfW(aX-XXM7voT^$QXp~pXZWCaMr+;~l8k5cTXSpT8U2@T|6@F(hZd&d z8U5JYns`R{n%gglXY{kBsdz>|G`A+6(Ou@YFP_nuO|df-&*(1Nrszk+Gg@J8O$Dvy z)~n#q@r?e4FVeyBj7DlcEHVdsU1W~-!ur-RF^xXvaFuZEIC|_tHfh)1ry=l@m0~@+ zB&Qd-<_gFOUOc0pfNxPa7CB9$x@6&zfmq)RV2W!I_kGRWAzH+JueKgKF!{G%6W}nAYiZr#R_osz^PV#TDP9yT{55fT2{>41^cMRI ze2>Go{3k(660Xa%@Y^<%F+sqjMMt+4s_n4rCTMY)85rA~pRW5N(Mwp7-$>PD4;;t5 za%*I`8&Sz08RVrjzF!+VJ81vYKEjKqRG18+vv# zM_<}KDAOv^*Gc=W)H*5bf9zwnH3FI8^8LyCthD=V?;aan>-gzqRyG=wT7<}|9l7E7 zRcvFPQEXLqMsQsnu_I{bR$2v6M&x+RL(0$&YlkB}%a9HwF^$h?_g}mde`+_+OLEtGK?mB|T{0^e(yeSe$hOm8(*2SD z^3DEJxAv(R&2F(AyQR?8NX+&w19^3p;WfeGIUU+)(UT^<8Gqc_@3)6-Arz zfj)Ur(z3|Rk7j9(@5qZ_9DS^u$oJ7oM&2b=9Jwpak>kRkB@`pz%J|&cSV4%372|F^$K9XLEg`q~zPB=5xjiyG%?#af&C z{{tAkYaq#jTOi5rFEAwO#=pY9)ayYLAFHeH*_9ty#%l$i(r4TYQF}^-W59{?Fp9J^ zdD~McFr_fmV$0P%@xmW2qShb(2I!-e8GS|7n4+}B+E%8bE>lrv#}%PfVZS82Cbe6h zeuYwf5B@xT?X}cYLi{;*xuXEmEH2U877%23;4aq%kV#%hAPN`cijndngHv#3F4S5~bJQ0dv+o z_Gm7L@r55sp>-aTJ4V=+{VuEuqlCtjWK<)z&6IyPG~`i0z2JCgwb!Da%V~?O&rG#+ zM??9mVGPdHL7^B50OGfrzS%`WqVNnrL1->f$JMzncAFPRCtoPDLUs%$0d5hnw`k`k z_}ff$s8z1@pSKefuSs}2LB`8m!rM$@D__W1aZ5+Bt8ZN=M8kTrW#1c?syE;-!8*zu z&lzwg*3t$+Wgu-g0Nrr|kj4v3TM!hxNr9>CcW$^HB9MHzPJPhxDCtaR&VXE9Idlm@ zl4!sK6iXca2^C=3QWD0%4A{!5mSeN)s)Fq;60`zN9Arvg;g7YdD1@0=_Ov`tPC5#| zhNP%elNTA1dXtPd$(k_R2umnNQ`El`Gf(8D!=7{HCC{A<<@;LmBGseomuMyRLfZ{g z?EL9>S_t>t<^j=UPsnqnIWImY)elalHNKYuByz<69=_9E_~t)*AbdyaMeJCu++1ps zHJHdqn4KpBfm@e?efN}49Rx6onaHE(=zE+q(Vu$Ff>%*VUS6CcT`_d0=c&HISmBmK zKTklzCK4$#ogH!mYdT9?`t|pB4K&@(;b_X#&!%@FrXqE*-K{>2WZySp^MeX92#i^2j<}D#$+=UQD+galU*7458XR* z<^R5azv04q*V6~WI#w^-{*^$Jg39Ae?#1m-ch#mK`m2DJbC^?niKsD+=o#I0NP;QG zV@eu6j{nKNo}DTwS~No6ep6~N`qnai>*+b2pWoK6Z+}J!8p~ftg-G8HRNrb{sI^e7 z&JTRvtc^6(7|cb^>8U=xOJ6c{uk7pN(aG~Z|M&PCey!>2X>AAM?_-R%lP&kra`m-_ zhcogCx33jiK^8N~h6B>Ash&>8LpI@Vzic@FU;6t>16KZnm`!QX+sAK0dXemJMK(=A zme%9ymLfveCs$-hI8T{)m_(e@$qB1ypjo7$ZS7H5BBN!~A~cH$)9ZY7fHVBJ$r-67 zlm3Vo-hkH1!r)mGlQ*c1g4_9Zv)A#EC`|kj-mBw@O|Fj5!4h*iJ-^5BZ+smM@2<7G zh!a>s#%%^R7|_;>PE8vk||yDBv)y$rwF z?SE$Z{`mZuI4i0CNBM&PYTLb3>x_I{s;M4#2H6`~Y)|x<_u=&`-NiVy4^F1SzLRi7 z`uh8S4&RLijQl589|+$iya>h4690CQo)!VpbUh(X#IpPSGx2-tkncA9i1J+BdO&u? 
zMx2nfDyz}Pl!0FUf>fukQqzxlEIHy~2?5O!mo27q*{Yq5C}|GssKq7(B10y)fXR2D zbQ=|>OC<%AIY&b*M(t{(Xqu8dKNhxVce_ky_=HN zxy?$-gD-al^C%fQ^bvCgIz+0;zxDSn&iWKXCw$A*tV(etTh-WvB3JzbsJNGPO$ZqE zPmHi-u?ZIP=v1zCq&Il!Q%xMW2~EqR&8jWt9{Ut?Bf=#Eu|-$8?+m5KqK~^zlI)g8 z+x37x$pFbR`=EHj>H3m7|4EVJss59e6bG1Y(Wut1R1uw+UaR2(ti!t<`%?N#Dk&o2 zyF2-=PCFaQXgih0ID;NVD>dF<(pm2x(Pad@B5#@Sf^*8{)Q!dYvG%0?0?rhT5HHOO z0Iqy>_QlEJJj!q4$tUA~Ydk04CCBsW{o;RntC8{FG~+q?l-Ktu{nbglI6!~34dS&s z{Ut(j2}l+pr)h&AF_*qrkmd#dQ+)kA6JI}>FBMjZ8?juW;rCvpZP2H)*re|RsUJVl zP*27OR($fM9cGIB`OVN;e9Ba3#YfDQ@hK%vl$)+e8lzr_@@@?TR3hhVM1QHuMam9( zkkxUdzAyTEl17g5Id>-B{X0x z4cul@e+7fXL@g{b1H@T`i2<49y=CkHEb*#&er%*9x)@=_NG?wE43jZstK#K{nY9>p z>wOY1Ba#57M)+RNtfvgkLXQL=cjo)!C8n$y2l8{_A2x=T!c$5mcS#S}zq*j_b&saF z^p`0XE;9@i9Fi5Zf90-{naO{_R=mqiK4_wI`dG=Wrq8;a+p)Y9lTql~Zl}c4TKi*n z=sjZ4|C{vPJje7;$K$D?p(QH)^G)+2TwJ2ln%Nh?BcW{^RXVpn!mQCLD(crDVx4<2 zlSIrX@wFNM|8LN}b)|vs_=BL^#0z)2N(aIvqtxHzd=)IR7%ld4Gwpij2tccbmT~^8 zsiRxg2y~4c4(DF%7XBi$tCw^ENhy8Ezc24=2bEVfL}paQrw zjT?~Tg+23_U*&SR>k4h0`Io<7CKyc{WMbkZroF=IuEK!_!LM@S2wuE)YJ!UD=)@ziB%rW)iw)HSMtm-;&^0!09TR2-%dL56>s=oYk|Wve<=FRSm3JT$08~@Qo7v^v*j<));{?Ll4@)}U z%JN)5LLBfojkM-wfWVQ1I{_4%a1D22ksZ5p%Gcw|QNCV<`3ss?kchy6rDDPI`^~-G z%tfzcYv3PdWDLE1#(lerOQNuxUXyfWkYCouVro?i*JszMgPL&nLGr?YEM~kB5`_sU z$9&`FWMt)&Q>>tlB@Jdz4SHK4@ovctM&3$k;R4eyb|bgL#eBCa=!R$nuY8e6H5B^^ z{A2ZX9)YD0z9Obc8PN)m;4kXM3X(^2)&n|Ea>lJ)nc z!@^>b5c79`!@PafzkI*+UwviOJTJ&aT$C~K5M{iUDr0J@jHmjSF*?1BtV5KsJ5@$r zs*HL4%eXqdjOQ*txGnFb%ILlz*_JW=%b2n;tt~et%cykmaR%0*+g)Vb>K4%6RODY> zz1_9Ot?XDZ_-So8_PoTnTE5Joy53wA5X+@nNg<4}IF%mwd;`ND|2hwELz8ult{>aD zD{^FCRmI7we0Jk@x4awE%8RYuW13ZXcRJ|nW_iv3stfw)zViM!)q6F>uHGJ7CJTGF zR2@T-f-+wC5CEpYyFOW78folLp>vU2BrP@M(nlIA)5|DLmT@plf&MUkmV!y+G!3SC z>E*pT^Gn;fXG*^|%J#E2hogaT0!$5o*Vd=C@6KddY3-BafZN+YFvV$fPE`S^vzRW? zS7@}8sv$Pt=D0U#TZ#ju$QK&11qri0cCt3@_0(o+ zw?BrH;;=_VN#n3LtbW4SF`1FUp(N*gn_cav<)-Jae%;^l2-q^Khw`0F&JTWDQ9}wZ za~Xi#HB(%QqjOVTd$-BZbcr;$CBZeSmlK{OSd?zldLG~3WcBhF99Hl83ual-^;sbz ziZ%WP(U6_4FAN;|l(4pdQy+sJyae&~=W{2hx>pxX4z5*AnA%yTUxeaIlU0lyPaOkQ z$I$v<3`6mDD(PX0Ks_Id2fpBhWvn1eB11g{5hCia3suJogG({YnrkG^(kH_9(v3p8 zt2uQ=0O{M1Dd|aHa7Ai{4R|{`>WT_a7s@Ez_n-ojePF~uDP`jMQlQ{1a^d~T!6mE2 zc`%mY!XttgdJY7EP;6*#k-5{tazLW6n>owP;Xbd?90s1%8&K>IzGYC%u~!@eB&sn> zi2xM((^v|@^b(7lpbmYado;|;}vvB0X`*y zui*R&-hj{XKp3>RqGJk94Oi}soEGYuSU}Bya1rL77v(NsBYT^(D@UwqVgYtVA$zDZ zwo|y7SP>R*JyQyI{$i}GopAj zheJgdSKuSFY&mU5$~2dPJ7xCbA7-0N!Lmo(cji)X=Nk8&xfCqxcHfyx!JWI@cji*C z%yHkDOTnG{-FN0vuq+qWNPW6Q24aRpKVeC^IKY#Rz>3wNj#)gCrf$I50oQFlCUK<5 zVyb4t8CnP#<$Kzq*wAm9#SuAuyKYaBaiG@TjSfxG2J9fJP6A-;195h6jQ!h(sGzcQ zfvfNiuJxhxU>^I`H(6wUi#*P8gbQ7x)Sj1lEZxIaYn`?9;|$o!g2OP{VKS-gUN8u= zu^G-iG^)zp8?Kz+u;66=)-7PU@@MmEFtRYmA3!KI4WaDraA`+vb|=v=v$o)f6Pw?l zqvTCf8B_LzF$^u_2C{QghV%yEK-#cO!`#f0YARC@5uIHf9XmCyg6_k50Ou48?EinD z7>Av`xE-h*VR=JYth^U*t_@|O*g!iw7|SV6dAl$Mjb?Z0Rid-hp+ZTOa@yf|-grni z{bTFKMeqk8IA9R`JA+`>?7w}4b_J6to{>VaB}nfUq|e=#MDaa5R)ORA4-9?VHM!}i zrZvJfl)c3{S%unPSe1(l7M#f6+y#TpQB=;)coEk6zox*&b2z&TJpXVL&5A89sAl&d zpZQI~$PlMRsbr2#gz7rasEVB>^mLvf^bF{a9!*ctKC7`b;`8ufe1ifI-t3kX;Z>9~ zBfV^mT~q-v#}=_DUQ`iVR0a)!q)icP?Bd)dXC}Lzt}QY=Q~}W7leVBZ!l7rE@X0R?WE4v~V zj?CEJv~v^$RA*W78%BmJyA}**Ve9;(US)GqgpMdeKsZbs=7A0+4sm|3ml%5)#ZqiB zeZ-ukdFC_^qN&1OlBcKfQxB+<(}0pQHGDO+!vU&rHikNt+lff9UwXrDQxD?^Ai50% zdz>Ocuyk>*S`p#9A3zEW9vTi3=-ffbK8b9L@N);M}xVcGEtDtCwaq&WK6iLPT5dcG3H9>gaq$&K;|ycWv0v(a$a3TFdi_Kg z{dMo-pD5E#B zR%tqT#G%?!N?Q_zM?%s@3H_`9NE8m@8FZIH(7Ki|JTf$x=-s%(y)plK33fqTrYbF* z8rgz0qX?qQ5u}sI4k6L_M~qL1l~NMCO(xEx5D1j*Kw&<$AJ;(SHaerT8yGt*)6 z>ISBl#;3#N!3r~6M>^J5?`sz$UcJhSwTjtxw%*6zOtEL20c1J`6%~PUaBjvJ2XwC) 
ztVN_eTbwII=7(;;x&0d8jaZ@}ATvNs zvDQ5R!7WtRy5GHpqFP0D2S7X0g;x0mZB`1}F)p+PZL;0yA*^U38DFH6vog+EkkR-r zDCj!1p>;264rJB%8nQZ^rI)eN8w#zfNTK4#b&21j_g2(Im{Qbr_c`9WO3BO#$PCrs z7PC76$!T)nRpT-PG8SevK3&Ik(Qzqs0ufx>M+7&_gb02~Ca3E=E)m2{(TGch#K8`T z*v;@oif?h<@#2rDZa2d#{WCP{jn}4q1nlX#TyuKd6zSZqMke20@)E?)gJ{ATfa@l=FC_Z7g z=?hoI6q|q^p2)Y(3fL?Gl?)0=XpSK(+ne-((*to@wp~G&34NFuBux>RG)HLqSo-ux zXuBB@B6;L2;KBKazFPXU63q*|0cx6@mq~34s3h94l81o*_bdQh`$G1R)8yyA5Y(82v78@1S1mUzDIRbqwqf?DEqmE=*a{*xv)<=#>?cdqCV{aJcZ~2@o?AYmLW3Tj0 zMQbgRoVliPyF}V@!#BeJvnlv7ZJx%76_z zY|w7`Fs6eCFJYJ*Ef9PBn)l|*MUu5Qr?<%i!}a+>sMB;(Q^t9}rVI+;`>*o-VfwyA z-~W>Dd+{hEOj|a5TY8X;9xVz2zt0RTpC8~CRMlmKGBUEB}^23z%{GXW2S2r!nXZ^ zOAwvUC8i*N-B1T+e6nRe$jEV%auq{V$E(LjbU44Crf!|Y@~}NJo0xv$E8)`h&d?|6 zQyRGc(D1MRdOG|aK-vfYljdoQQ<)C`PcEiB@DVU$JqaJ9P5#3QJnQKM=fS(LjHKwj zuGW52f%>rR2vo`3A5*SHjL#!=)FO7%=BULKR*w@@(G5C>vGMJYol_iwS_vA$$HtS# zK9`-l$~XKiX8yMq@2_tCJ3*($USHyGl^1wRdn6~An8Z1?=*SXn5%6xe1g%idNM8=qShJ3lWJr8Nb;!$olxfl{4s8A?%1 zx+MJ~x{U&al_C@D2>YmdfB=j3y2$9+#|-Xit!5}|oo22r-*71k?~2RHLiT*@)Ln-} zm!eQIAXbZCfqi_MDx~YTj89b9zJY?@4H2~cYBefg;)h9rG>@S*(H&anIhzq4!a8^! z3-mwRpIbJ>7K8&c$#)@gu-G(~jqD5fuD{T=Y%~!&hqI3D&Ef-s!&67&&!r&=C?!!i z*%0C`=elIs@KlzteI;K@$*wOFDu2t1;y^3XYs*Zl4f-tUS;g8>H>t^$@syljkJN~$ zIcP-x{A~4nazsC7hHY|0|I8c2Cvi=VXpMu*dX`?aNGrk7N>l zdf;G>~b;ahB3z>Q}IqmBvCv4D81| z8Br-U;(kGHysd!~_Cq{Mn9nty?5(=Q@EwTWsZ-4;_I6Y~33>AP0x z`dZwkJ3h}ATMKc%mH7vT!+o1xa6lF|x zn+ViYEagTZ954O@_h@;xiq0#XM}UUYk0^}=MV=#GiPIfQv~c_kCseto@mP3g@9DUN zB7wXRs>E|}LU*XL+rLagXzv4X3NhE;J}Ra=A(s=PbHk-5UHH?njho=UPy>2-zjms7CyJtITb-=tMhRR`G)>o`&y|$ zD4%r*Qe~vsPlZan< zln{o;tX28Ei?FT!Wk2RCB94bT-XTIO=qI`~U>X6yIc|%(!EZ(XhCGrtfOgczSg-F1 z+EZ8~k3s8POWUVo;KVUuvD!RMa2ULXDt9%WYbfG-6cvhJlqWuwCNCi_{?m$o1K;*s zJRxJ}zlRYOqEkWa{5^cu_KR2yJJvsm?dIhV#;oKA-0mT3(m!a=c*^PM8&GsSW|G7@2%399Bp&xU z#;{3+XL9RH*>`%fMZh@Fn}s0K*iL^-k0?{p;Z~PKEko9Ktu#-80QSFTcF|O-mGX;* zB%%EpFXMfwj05{+e1`P$y@c)O2v6ofu-U0YQ+kiae|CohW6iVTXTEc{iHpb~0t8Bs zuQ}CJw{BV_5qN(s{BItZbslCFY+j*U@V7Kl@LDY*oQEfiU%-VJztUH@D~oPtuQ|np zqI61Gi;hGJ$Ii223k%@S(9?^~zusg%2**~NMU4WIvN{{V9wK9823w}CJwb&LW2Oc4 zl7-!!Zf5*{C)K$bj*yTMvPXxWOS=5mj$N4RZ}|e4#bb-40g?=!;Vu#O)dFs;sUVcl zNMv4&jkNG0My%(@>PxA|hQ}_Th%P=7nH*TeQ(h5PV&b8T;H>%H(TG>bpJf}eFMR}GxE7tM*ZA8zyyC81x(=?m>n~g}H)~d8)8wjGD`RuakB*Vy zu0*k6KZ5wW%L^i`lxfgoHG?xwZ|}jb1PZTS8H)VBWW*Fg;r-s@>lju}tihkw1e6guo23?eSrh6Q%aN0tD~@VBp(^&I zam4}`mn-7@dEYB$m0PocbN1U3-SeBY9}Z69(~8`7`_CGZ@nmK%Y}jlM1@?C4 zG@f(%tLe?zeg0r{7N}a{yD3wG_LY+)Xy0F~j!u`gFURr>g8EcyqEE@|j76y3x;B%I zbLWd&zaZ{xw^t9j{-j*LSFSn#B$u~>*Gfl=35#_}zqoPJi{S7@dxN>m{2%yR6kYA!o5C2#ML6x zV+K+VYY9m|WTCrNC)kQ0;kr>_(C4q-WfLq!c{IgcPmaW9!iU(INtD#&?F|}~)Qs52 zTZ59C0@8$%nvs0fI@*-$8>?OTc>d|SFm)vgizI;9sY7za1~^#a6$*jqirJ_aK}f{r zWGOekak%&l0vT($EG!{<_XRrEt}>6=I!cr1o4!x)B%*u8xbLr|Hh)!@=$HwJbJ7#W z`^4z~TGbU^I)do9-|$U_P73qIwfP;-zg-stYXs537|C1c4h{9+f&HzZV2A)_BK8Py zN7w3a#;rc*GtR_Im3>@iv#1GDCwu>#1Du`YG(pC+@@71LZ-VAxHZviQsk1k@oeDkN zVbSqdP>f`+9}|H{6HDPF5{DT4b~`h6_Vu~?n~{S)MBt5NNPA%D!#Lo%Fo)ka;PzYqI$=j0GNDXeDH zyV9OZkxuY&HqLZBIHH7aH`%a%EW;N{@nD&GQ`Q3vs5&zTUoM_EWg=Md^S$%@ILRk* z-#DdcE_0qFg>iv?KNPzR+pYN{L$Sq0;3su9-)UFU1XGF?_lkRT@2 zX-4IbNK)^=KL5j~@`wU-D~dGU;9()y`7EP0Hh&e`|3{$3Ur@UUa0c4{M|4xi%_`kA zXd7TgC^q3f&0>bpmN*$k<+Z}>oPyfre?&y{7f_vlSWEGKiuM;!n}65}bCwY>_^xwd zz-ZNlK@?iz-1n?}zE+q|tmrev1%RauA}_^mG2dJ9bAlGzk@ic@e~ZUrb26@;6L)j# zqaV+V4B+pnjf0GnMrBu{qu0lZzs9G1;a@%2z_P)f{F zSAUR~jW)%AmJj_R+!zsHMe#0m%uRGSNdo+hX9W2DCS8k7y_bLV9K*mb z%|-Rm{YTdOC+!HvC#)mGmip)>H!bL&w57gsN8>{wV||Q+%F~Z$E*}t%w-|T?N7z59 zD-`{BDD}VES3g0$Cm&S5sartQSFwQmpVz-Rz5XZh?Kr6RT+{lC)buIg_%TmU$jhiL 
z-8DgNrM?2{+i~dn#;d)X)7!hbo&(CBaNCQuHSKNThyrH5ELuJhLB;E;^PV=n=hBgL znx1-nuAz~PK32Hn242>At}B~pKp>lpL=RXLvKN;~7kK#wS39%wYYfc@B<^&#rSUHo z_OTfwr6f?uviKviGhvLjQA&>`XA_1& zjEyCPX%Qcz6pQU|r|?0oVZ}Tchjki}oN`^gycxnm3SMajtrv{c-|{FxBCknHi{x3o zT=YAG>7gi5IDHH&O9jI`Q54+Z979vQ@3d$`wKN{N_EhEC&?9w-=7u_JoGMy4heL3n zcg}e>-)4T5;}?u`{U-v$HbxUJt{y%GTwMk5&ZrwSL{!xi1QJSC^?+G&N`&56YO2~O zbjg1gI|S8&k)8SI$0tz7elZu7rp7bf+}x~@rCWoCBt~XnEU-L<6{ zlKNY1VU7*iBNbW9Mml7p+s>_JF@Iv%UQaXzqxsyNC|q;ABwqOs-rbI*#ZZ`#Koi2Y5r-^z3{mbrI5J~C!&*12w+WU6!*|FFF?Y;O*i?g;Jp zBozH5%fF%rYO#me_yTgS$NE!(8DGx`awtf2|55XO?0w{HsE>X!Gs2Ab$*KOn)t3yw@@oJl9G}jN4AkY+#tZZ^GQz()$W*$2W~7F{r$(xkkF+e4#`L=X z)W#zyAeh5f`_GFUq084I1*SL>8VA>p4dyhW;#|LhY^pJPNkEz0iun9=^R*I6Och@N z+yO)>j*Lpq;)j_TI$yIm>5UGHj*f%-J9#u*5xci3n+9pQVnO=;Wh;8q$bpu9W5I&# zuzkA43~v72YE|y@FZ+>L6;t9uw!3bCKJa38Z6R=!93b-uBdgI@3Qj}y1bplsh%m7r zXqmXo6ve@Uo1D7|$i(WrQDfJNu?PQWwrnY{8&lztPUpRDKlUqZExQWzuHq%8!tvc| z`Nw~Zk}4DWTbSGXb7vIgZgm#?J~_jf^^mJSOxrIk3t~ZP@2)bcMD2(-Q#8j^t}Wu~ zG&AzVM`I0apcTdD<#5MhwE^W5GM}Amcesn9Tzk@pv6FHap3DM1vvEL_P-Y8{?dUmn zZ%1Z?cO&&~sNE^uUE|2;tpynihHE*p>2GW*7s+#QJ+-0Qva^$mtVwx$CzwUnq!CIy z7NuWyVp4%UA;%~Hxu=6r(?u8OX82o8U+59jYJbZ^6vk#bi-E-jK>&(OUR$QtJn?my zp44NeuLJC_(~z!!k-On~4N72{#fFUBdD)aX9DL?F6NojG+LSq*GEkkfL#?gn0nPxL zz*@#Tu7O@^kW?^_FUUdVA)0_pOgwLsN{2$yqaWh4a@SSf?3Dz zVC(1xJCxJ9sp;F*sMR^oX2})7u~y!a%f_M(XBDWv(2T&@%C(!d1zKEHIhgwny)EV- zl`LF{a|pZ3=Xv4y_#8ec4*6@lt1EN$9hL=JOY(uuvA+;C5e9@@oVt9ID^siNb^&;J z3P7{o(q=}HHRqBXLa@w1ZTLwQW)yZPue$Wb(k)h3PBGtb3T!a3zFJ5AOj#eEv%S0> zmGKmS%PY3#Y-e}6BRppcChvPP!=+m|ePSo)Q*5?Mw_9^K5{g~i+R`oI?C#KMj2*o1mZN_wk@ju#R_&lsl$({_BbVIDc1eeaL@HJJtAC$YWU4z{%~E>-{Tq zXkKXSpvJ+qV-W#b=9)5=6wf94sMc#b@7IaVJ0bMH1LdstQlzLlHzl!;|-ZQ*td>{xysp~z54EdKS@T*Z{Dtb!i4T z8DCZT%93#?RSUxLd6@`w{@eb}8{Bqs7x;}$#1~(mYiEvnOHK`EsLGr%KbN9VLV(Y2Ses9j*j;@pqx5W&f+cUr=H{7Y=meInE__GTkwwm}-YO^I`^u11JYFs@O4qIA@%@bR%OBB9%?2rt+ z{4&ti;pYcMVE3Y9jnG>L{r7c1UdNgvktofakL27j^@{}WFI4fAPTQnLhJDgHttZ&d z1VJStlozU7G`lUoQZmGC`nwfQ-(cA-CS;o+FdspcEyb%$S(M=HM?y+=py97ZA9wMZ zIuub(ucGjk?oJ*YV^*c4o7OnD+|5fssY3xKXn;#qQ2Kr3;lw!6ub^}yAWn4vkEP|4 zrDXSfrexLTYV1v*m6zFsYdqe}XQ>Hy`x#8QCjhq7N!!fMR`1CiytGJH@J%c3sMp2HeQf1IB!SKk1Cy1|k9qo7X} zTx94DWr@NapDuDx9bKoV>L9if@>gB54u&_x&H9^S5#eD!u>118p1b_g#IYxGfDpJu zM^qS$N-vD2Hh_-TxI+%~&Y7{zX_g(pCk#|bS41)C<)*NZHeMuDVIykqQYZj(`pnW# z%;`tk6+%}U>z5|ZY(#;d>z#u%5>@qUr^y@q;e0@fu{%s_PwAZ$zb$nX zQsflx^rK_Qd<5-J6fWw^92sQ}Ku%2&_kFh}#qGhJG+rq}K_lbK z>Z?erROdqs?9G_&>E=;*owpVRGk4b%H_0`D8vkzgFs#_gw6=8WETEHB~y#gr?7*BmC~oR);NC&4sm92~#SnRUu`ADNUW?-?HKw?YR{Lx zx6*W=@1jW_@wFH`Ekl1j;vQH}xdF0M^!M}_=+B%0^xI354CGxyf7s0U z@vgcXf~%pwB^s@@{phchjD7T{4Yq^mFC+Ir`g4O4xeyZm7y26oc+L?()-#?WB&oq1 zyqb*udxUVzDkg+WddEMCk}v5K`b)dd50FE#(5ISEnf$#4d5g}wu(S(Gn`@o}5~+x# z?i7`FCFJZ=WeBRNPTkPt5w5A)BivdFg`lb-sGD8F<(BWLv(^yq42gA3Mo8105gU=& z*%ub!P_9d4U7Y_yQHIFQ5|PomEzXI{L}X7DH`ANS-sHABez!?6w6%3WKT><1$1pzh zklGht2J;UN6U^zPwu}^!+6tFSptjG8CF9>;yq%iyM-n+1b0-svTzpS`j(oxXsFWZy z*Y|E!iqS!vdn9OlRUi;=K(GA>eQmw?nuF9NB@_zSAp&!gK6-;^jp z2X1pd$DF`Lv6&tWiruy(QT%p63dJ+T4=?+QOQD)XL2z9%P?3jb!w>t>=lSDPC}xuU zuuq0c;djl6;P+DD_czm%_?1E;8K5)O?f+-`JeEdy^m(~ZdA>_jIED>SJ+@*{e&SE% znVmv6o#xSS%_tAuJCpg2ZmaXqJ;O!!p09%L-V6KD@Jv|Azo+5bC@-CcKPhr)c%G!( zgJ}35d22D1x|nSHrcih_^}9xs?s|nZ`l(}&v8;3+6*%o_GMDjRM1`8^x-eQL+=34a zH^!c!xG|dgux6AP^_iW9~>KKwj+R;Cqa_>;z$XS@rM-N3Oogw6OwzHgPC3r{AT zmo_jKATXR%6CVZJyrICjIQ_wyatr4>5pXQ0w5Lz5qHxhY2#1Kj{UOn52fvPj#ku=D>PDKr-~mj zt0;87QO;|?$O;^mP?Xxaj1Q4*_8UKv;$!TOQ`&g>uNYp%u=i!2*Ef(%bZpoa`)@GI z6whi%jj7@j7*mJSUgy$pB*)aV!fA4?+n4p@VzL^Wxl>bFWonN(t_}&ZWm5gR)+TWM zfB{>P>M=20T;vWT6Qqb0so^P_@*g9&ZRMagD6%s+)ys<6kE_6#Es3==+Rk5!tHB^} 
zVlGjhoK6B9XyX0NWwK2%4BsWgMByCD@-!d}Mvu?``4~_u?oI@+gZX=Mf2|IrZ5@|F z+KgKHp(T+9M!nM?k`rbh!~gH&!N_UA=p1uF5_wTKzd*y=7vo-%|?E|hHw0h%!#GmIip*ugT!>md>mJTy(5G0L4LRtgtP6J|S-t}^nAaZS(^TqX)e z-7zmk!4)YAzVa(zcA7AI-;^X~ks%I!=1jlLio0=d=YJFD$idItrc7N6t^zqabuS1o^al4z^Yq7Q5w!v5FI1A>N$ zvQH4^eb}XD`6)ImHze&+^du^xV4ca5N7g1+ zh+6I1#}##!6%#|xJMjQHiggZyb^3Njc1fZzn=dG|dV4rNtKEw8Nqlu^`Qt134ADdjaEY^tiPZm%MQhr0<_`&8A~=^LwJ zwa>5@SWVH@8+p-PRav`{`)5ohdz;>EL}=o83eh@#!CI8q9MopW3)XX`NnqQ5IxHFU z#OB>5UrjP!d^#8sDZ+Tp@)GR|dj^YHk3fq^wW~M&VBgJKtKvD?Hozca3oRsdqk}Js z=M7Ar2@xK`Hs9?U3|urAK;s@!^p!`7qMs_hPifJO^Emm%uFXYsU5j*!*-+aKWC`b* zd(xP9lHT3@N-qrCct|o>WZAd518%9g*teOgVoTi`$FmuXEkN}DqwdY)tE#R(?t~-| zhIpeAMXM5P)Yv#gK@A4md*NO=SMDvTN~Lw;DK@2bXw*beL4r4t+#asd)~PR zwXJjlq*@a|!k`Rl6|B{uR!@urP7Ds*_xs!DoI3>YY2WAb{_*npgnQ0Ddt7_%wbovH zt+kg|!@8@j`n7D_36^gx1H?EFaBjz6FtB)j7oG8t5Ayl_uV;|0)@c6dr;Ijo{$WjW zW#VJJ3GSz4%xzCcRrU|fkfHaGjwn<^%cH8LMCgAwXQy4n$k=ElO)g9%tok3Sr<`oA zFv^V2`A#FM+HFLIbWE7j$Z)q2&T@4DIp5p6SYFG$zFD>lpn3xtwU;v(9zFiZa7(tH zo+0&s7Yp2NEF16$$wP<$Djm!&O&9!_s9_dUw;LJS6P}C~?8M134N}ZKPZ{v963+#t z{~YLUSjw7oGn;sqTGkr}!4`xLLpH7Y`5Ir#U{}GT-0rc}lBq%?RqJ8otyC*DrgyS4 z;i5Cty}SZ@~REJRPK^&Dv^70$?NF-p*G0m#1~9lKhz{;098%7XA0lySod zo4;oQmJ_xV)h%+?zbx0z0}gr0!n~)yhwzq4J|Dt&6Szp z!VS*3R}Ta8n`>|Y>$sIqq59nclxtAcZ`=r1u}#879&>{r#4 zVQ=b{(V+x0EfZ2N({e+$Mk`FN5);ogk>VURcB{nEFn;md2nRn38iyK7by8Er=^<&uSfp)z7W z#mz_XXHa;(GZ!A7og*t<(MVMu_rxs3y|v9d&duoI{KyF|UC7^m28{z2dJX6)Mghk2 z(nIgzRP8C!s#T~OxvZ9Ns2}*>!}r$<4ZcT=;F*cvxcU&rp4QlRpCtobT^uuvvoQ;C zIbHEb${2hX1)i==944O%>gpbaBjGZ}XBh1!sP#g%%DdZYkRe|~afV#kia$nyBCzOG zQl9Z1bkF5GX_W(O_sF7=uOw8M5m!#l5RKI3Rcqv?W|}Yg%PGE?aT4bmB9Z>eXJu^T z=p|$}uNtyJV*R%--{$YlJ#!|LF# z*jON^JQTCzzY=2$7p)E_FS-jszvN9Jl?6 z%3Dh7KMCCSYo4@1W#JG?1q9ZLfkQ_&J`J!n2g_-M#!O`@D1`6A!O32`2B#v^vZ?b=0tHp>zp?RwHzr> zY1@?n))MC*FGx^lOgi!T1oWzNvj~r9Fd4ZGA9rC%EmGKgL^-C#Z0a~R;y^z#-&ylk z5#zy4`c_fUz6L;^NDm6!Ci)yr+!P!VP2E&YApTYj8u!J(;>gV%k(*7Oraeh00n=XU zJBkQQ3mS0@DNmg>T`0!dg$bYwRx*B<=@Oetw*$^AMc^OO3Gt5Rx{hdHVbbSojuFv$ zylu1kVsDJwBdWkZsM`ve}rHp-r6YzW`=3X(5p=nmaLM-|}Z#2`3K<)vs*%BA|{YeBzE^QldqnV&= z?1gJX^SU0Nk;bEb7Y-qqsU9Af|0NhK7Hez|?s(@j#KX2hGe-_$skPl4<2P2cJe2Om zqIFzRp#4O$nnllyRC+3L(LF85bj3TL;v-}g&%^6DT~I%z6uGN|NnNpOVj-D0^>TMH zUR^pa3t<{?=0YAc(&@?IW1nMKtBIQtDoyMx4a|Rt7kk0Y9l+WuTASW#>RBfH@y7P@K1|tJm;lG5XbeZ; z6tXMWci+GdOIxW(U+Y)*-Mq)guI$He@9|L%7hXwgkB`CZ#~MBY17>SQ&cqm%0n*$X z2V}(Jdnn`F)xoC5|F|~$ahg8fko)m>*^knuD4NOabKW6WEU>YvNy+eih?l? 
zxb(XU<0^hw*KSr%^y%L@EFEqjvR+HzW6p_U718R>^&-fn3LWn58z!BCdMx_Nz( zd)Xwm-GZ!}*flKB_Vf@4gcvTOX&qz3FdB`PF@jLkR9JiY5S>yqz0cDWcn7|Bt)xqP z=5i@xiiV$qh(-glh0~9srFB+eLCd**;o3}L4vwf_y@$GrH0bkg|(U$nXEq&6G`jJiTrrqetDi8;JbY8Fi_Lu`UOdX}Pa zf#%ftX*dy2vqS^TQKvY3dW~_UogJ2V)YKRb%w?ca+f_W!m@3Qd#8f(w-Ly>4Zd%Ni zW18D8GPb{GX--XXRf4mOXoNh@t|g-qob9%%Mw`Undb*&Ucz?Lqsog=7V?Rx>;Zt5C zj2rMhU7(#?F7=6errp?xXv%0y5H>ivOSxT+dYdlWKujnu$O-e!4&Q5t;^9EJ3v0C1 zYEz%pX03BDZL+r1tGdNqBnR3S@_)p>)3oX`X3TzTmZ!VWyo7dO^=2Vmv5M^5tHibK zwV|5T>2AAw$7W*NFiRJP>R-S{_!+U}>1Y6XUar}@)R1HkVEWiZEzO}+l<8W_V+L+a znVBxBQoZS7G0Dcct;%$f-;L@jzrEh?Ht$zdS?Mys6(B14_2ydmT&n{;I^IckE49E> z=X`K|F*AR!fr{e27$D*)q2C;T%hpGY6CmCQkE?RKt;#*a-za&g-t{~UEV!R_Il#3?a_Yj)%9qv?I`VKuJMCYh6L} zxJcV;EngJ87tl!5{tWXTLO!kvwrziegn~f0JFs|LKjA|kH91Ic2@sdCe;5UuOZ0k% zL2=xGg|Zo&+d!n-UjkWN_on)8rn+y8*!4cZ?HU9*wFGwn;_dIKBme(|_?aL15dUS7 z3-KyJoO=l}5bsB2kOl6+0`5@q1lo>Z0swHJt-?$V$~E^UXZDAXax{Dp=~LfFYLp?u zsnS@8^1BY56~ka!lX-O}pAOTC8by%DwF*^cMc)>_63)h-xMl*9{#E3re^@N}kRD?8 zyF6(*WEOrcJrPD49Ot*edp9`Edz1Gr9M<#;bMqIzt2oSA{HoH~-csyRN)<9IANuJ2 zE-(wsx%4`bhc<(RR?^hckG$ehUYR;-J!8kerX zlETJuMJ-2SsL54cxcDUAEo^ynz<6g-!CuEJA%?d16k?W1zS5YdMh1-ec2cYK;V{CvNm@+oV(DXw~`Z=|CjPf1Y;9)wkzJ!>y+7lE%u6 z>V1}pbqio#(1AY)*-I8(tiGo*x^*x>zf_BIG1Sk+r^S#390B9W6SFEQH1`+11TiJ}2#lZ+*#LnGM#kqhg#`-w>VYwTLbO=@a6oI8857jOw!^V>qAg~C;9-1)PbkBOjex}ys{DFuk} z)ZM`%*5aK;3^}&WQ!Hob6~#zmX$Y2W&L0#afG(=HNEY)us48aPrbQpeS9NxrXnqRi zmw5a^k_zUW8b#Q_zG~JvMqGWRI9jwZj7D~>>3?|KIa17>Hg3@DQ>?_B96|LJUjJtQ zF;Nt^TxNBI$r%~fP|P9e{-JRNb4TD_5+(Xxw0_O(H_lEK50y{cE-QIgCyo6L@p3Nv zk~;pz<6Z&_kPwDDs6=D5B)2ISy;Y|fjyfH!QQTY1n^y-C67BF_ji!p%K=CDg7*`{^ z*0PDi7c4bn&R~TYLe*Qj0&_!FU_LK~UIzcE%VoOwL{?%Bhf;A@WVU>f8Rwmh$Q+Z% z95kosK}2R(7bJlKbsF0Z=fWQC^4F_`V5n2)T2>+K2}67arp1USp{lN5JvR_&S0MLL z{d>r08k>EafP&Ys$-p^vpxq`xUS+r4Y`$>ZfrS`b3ytoqeh^g`Az|q4TWzjp1Aj~r z_weqrrEd_O>CR`oZ?V?X&5mpIT#%RLM9unc@M0mTq@D>XW>ZG*FIf*~@V^|5%w zsy7XlQIr^_K+-bwW!$GjWxLo)cQekY89NP~*X7WUjiO0*$uWmzHqc$9aCC=ju!M0% z8l(@kY?!qqv|%nFgUkgCJt<|3nLTmsAV&R;M%{RlPfzBRssZjT{NzPZy}39Zax|kb zl>WAO5M%JR#p1o%!s(Z7aUDMd7YvfE;}ac^7vr$QS4NR9WMXe11Zc@(CT zGA@Prfcn zbj3tI#ccL7*&S&xyCV&wBlj+zwbx^2`6d#p>F8s)VEp>~O zmqu&Zf$VMF4c7K<`3$UE+*UCDgh7C5L2D(yTxFHs+SoI=2&@RiQjKt56P-o13NwQl zrR6vW#P*ezq2!3FzEZWUqAG`>ao2-|OBa&%$fL{?61ncV>U&iK^XlJc-T!<;*wa{Q zm%2JpQ@suYD9K@oSAF9qr!mo6i2n@rYTf2F-t83gQQ!KnUIwcHCNkIdSYcmMX9_y>F#wAF8EYxBt_k%8SWA&dBG7_Sm`*^{8=U$0 zO`7ws2A%(&mtGwzlI=P~QFM>BVs-b+%xVRR*cjbw`rrC^7Y!8z+VEjv7jl;oe zXeWHtMOSFYW>gnl>UPk+z3Nl)NOiF$S9KB2QC-xA>NlafF#h)5p@OUHk?NuW&@$e$ zS<%u`cmswj<)|($%(_A>E3UX_uK0IYN6A{MS9PIN>qR&ZdWj(>zZzn z0#)mrMvU_?C!b9V(N9Rbh<@T5%E0n>qu=@ZiMvS+EI5<@nHe?Z>L-rp3H`*q=Idzp z>jUmr^b`B>GY=Dmi`t zp{k@>_CnWFVQH3Bv11#;hs<`3?yOE%y!Nc9*T31Kc3*1o;DIgveyeFwyDyiNEvV|! 
zTp}HP;R7QbZ4|KmP#xN8y?PwGtj2|>9W$ulD*o=|ZzF&2@HgbxK?V2K0B3W8vF|gD z6UcW3f2Z>o;_qwxUBX{x^QQ9*HZ|sGpfXnsYgOmC|Mq~47}i8eOjkD;M`j~yX&q-| zt(7~67c83s7jr&7+LyIHMT*h=2neITH33_o+476(D=e z?$zp^p5-bM_k13;plZAdQFs);5dEOBJ=`T1CwN&ny}sx@>F#jR8vFbaX=25!vWGm=8=KJ^Cv3osm9hHXK>KSFa%OCZ&6ryptJxKu(G#Q2 z_Pa@q&v-9hvn@JfmEvf$H;`h@c;Bjd&zixNt)z@p3f_^B?gn+p#f}NhHpS7fIAWd`3<0@}50uMij^e(jUDpnb{;*F-_lT5{?O~ESn z(?~v{lu@#Wwl(av{%mRs*SP4^7`U?QgTU)uXb=s7zTHH~wBW~DNzI3nj%R?Enpn=R z+kKY!sRBp=m1J}ZekSa2zfotK8AndV|8a#6|1__{RsWZ|X5`it3S7Cj$^vGji8eF- zQuhj%?C_3M4T+v|AMJpmzl>|mE=rd;Oz*o%z71F)Oc5xA(thB7Hu;_p$w}sp2s=u| zylyMulyZUP=xYy^kX*@pN*2#Jid?#d0ME|gZ7>n$ui8}SiZQxHBW!nZg>9hya+cF0 zARf-f+#9)`;8EPiD1Ax$JQCGmO^|x^HT6o<>R(-v?bRWB=~ZC<^W=4Id3UImD^%8N z8zDuN=$}W}X(h+t^`|dDpygiPr}FB6>AL-1j8UjBQ7)wD-d$u?&8k5AR#vOx@&p)& zu}yVh%RR_QPY{gMhqPqPST3gL z?O@X4$(B4oT(WOK)a(Ti*B&G*(aPMZS@XQ6=CXJ4L8D`Kfo88@xrN!}V#XEp3^=IM zSZ=xx&}9!wK8X5?*_ojEXddTxHCnS#*Nw=u+QXy@b(Jt8JEJvSJ| z4mXicV$Rq%hawf;YfkSJJ&^!BxmHb=tnjG4(dWujH>DVxFCD^KNmWc*Rc08>Os8zH z5gy!Q)>gV8zOYcg5E;EY54lA~nj&7>^CdZ@?pAI}J*Zzw>D_5@OTF}j8uHS9o?A+s zN~OBIQhIm3;+A^QD`nCyqm+MNWShA*P{lTT#a63WsaxzSub4?2o7-5ga#L!XS4!{B z(?ntg7WG~!leW7!XJAIMq*SRu&LH&e-0PMq@k*Jr)s%8R;38#D{`2EaW@FS;XXLuK z-a*Rx6)Va4;$fa#*H5G_mLrYeg(3|J=KraR{!KCuXMrsTLv#?b&S@G88uu7qmpbV? zYpujU_XFQve>&3U$)%Y?0}QRV8TSY&_KvY^G>MrWeN#Qs?PSivFZhz?G=`i=r{@gF zdXlpKSy{iG$*Q}kd`I0}v2-}yHex9PhJ<3@{?}U5?ak_Pk9ye^oo5 z9co0;b6E^o3*bc*a-~sWh@uFkjDhny=WhmzIZLmdtU~=dD}h>4VPSbj-?v)35EB`K za}d+Ch$lL-n3~qjZO+X!ZHhXhX3z;kUHd0kDXISb%JhtoTP=2EnH{q@bhYc1@U=0* z8`^W8Cmc~uI6^qrY=dw{~EK8!2Vd{2-|B3xXNfowvKozuH7K3B!^ud0xMR+{J&|n%gD6-l)@C~rBXQDwHJkfUO-plg%kih=jjg_23qhZCKGQD*y%D;ImNJihno2FQBG;{UlmR^3*ux&?$2 z%H2M%94Tg{w9GYA0rq^g8ME~)w&ZD&O`*4djIMVihN zTVLngF+yKmFJ2d`ZBDo9H;S;-&K}Orv3qt&1mf@cK1Iw;4N@dLPCCW~j%NWT49G;G~fqOD0DI>!}4|)SEIF%y;CpOYYwy(g`BV%3* zCy$M#zRR)Lt=Br=4J61KeC6k4N7BI!%sO zlUoi1@!WDc&_ZCmjY2Ng$)Q%Cun!oSktdt@j6U*b1|!uQu<=6mW=V%=8)FXoG@nQ@ zy7SL6Q7_rsyZRXI`9Ufmdj*i}NYYxnnk&^!z~fkLh(c?@Wt@hS6=Ho#YqHm61pY2SI3 z8n0H3RRe4M$2uQpw;H_6@g(fxlP``yFSEEg);pG}Hj6csKk>6#3Ke;&c<=Z5%7yR0$x{-L_FszW3?bID)9l|!|4QRopXT< zOyEM|nV2->IXUJ!yC8CAn;;0JB+V<_CZ-VZb!0tOTyyr*(OJ0uRk#Q7Ur!fCxpVBz z$c3*kBR6P9eiZQMSR?`TjIQ-&1mD~&MB=XTbpH?@1T0o8D7D@xS|P%{p2dX$1)#2U zww{=S8LDvSnhfQp2>MOo^=1FiMix)Lz55+z@OxA)emBViZ#q;w{z(?eO{{|)ww|Op zkH7|1jTafhSDSdaI)hw3fxX*czXzEKTY=0E1esa;8P2Y4g@^tpQphE+FBxR@Xp)UDn~b_W<2^PX`$Ff}L~UU^msVAV?3FG}1XrsfspCh=a7HLC zWRbqQ*66E?wuQJ$81y6l1XKVM0{*u=g|#rqvUgZgY*Qx~+MMx~+7mp&v@K zWiYK1SJ_B3W~-D%Ub8;xoJFUZ)=wh%@qwZGkA%DLg1b9}yE|qB{hW>6Z+m>?yhIJx zqA2p6^t|;}>D1oO4&N;#4=l*38 zlS}{yZF8@KR{Yv(=PoKVP7ZAEa)XyjKcMeJYWMveL>+j3ncxi-^RL(i_KQ955KwQPfgAx2=ir0!xq(LCoB5HDvZglo_3`ts+pgetg)y3HAL zrU{%WLgVXyWTB~OD#r_j6qokh7l(V?b=hu#dh{?)Sj^Gpm>8Bu9i6Ute2LT)MJSeA z%Dv#B8Lf}bMFZfGDQZGanMf4=ecYmhv@cR#nXb5L<*))1TSeE@S;^trc&^obyT!y$ zk1bD5EEW?&o0KCtySp%r2YkHT#Q!vxuR(R6|0sisERQYb0$Caao@heUB%$Mm)1-zy z=53bV@n-s5xA`jg?^CL=ni;K?X(0GQED5L@&bjdCBL06z0e7Gk{)&feE?c41%J>PV zjn~&x9P@NC-&T#WVzhQF>EbQXwHsCR6l&xaxoVf)j;z|VlRB>k(G4=cdv9|v@8|B!Qb8#yQC%Q-1=b>ERrb6P0{#D|=+pv?&F zcnNzNI04&vW4~$_$pfe;q%(t`P|!FKEaZX3JW+8ZUNT#Msl|d=P9SCL_&{w1J3s*( zlR*#pCG(wSYdJNjB5?uX8l4By!}~i`8t-OEuqh3va7(ki>k3ijtfY7ywYX<#)G*CC z?hjrKfwqffYgE*CKL1P0uVxvH?fPFi4V+pA>0LV@G1;lT2^8<)Cs_O~t;(>j)7lkiECQpfse zMolM#GKsv0JvZ+LE!sN%g3g(Ql|qof>J(XbrqGuLf%enIT)lBtF z9L%>FOkmPh5DSUvjM|fsl_NXRfGt8>w+)@ygAy=|xBDXJFoO-twCP0*)HW{bPx>b?YL^xWVjBKWnjoiLo86R$T zR%&C;##r(S2yEJ;XX$hm$pa=Y1?B)J!j$g!P{v_Rick=AzJ5el1oBP90S{~~tNuV1gqlv?gFEevfLFlP0^hh;4{6aE7 zeUM?Dt++7R@dw6qsqs+JN`GuMYXc9jW=1<}))%dwb)1lBWSYI(OQxq`FmG(%w+*@C 
z>6A4>*uJT}E6?VfJR#DkI!1q3IL?H{n0ZzI*_^P+uVOW5BDbk7egoByy50;#YCn>We`rU2^DRiHuE-F zrddT3MR9aWhF%30!*LABhPG)iQz$uIA<$w1pd6m(lG>Kn#0);%ym!BPC9 z!8z+s+;^o%<6y9Por~3c&g$@Cgbk0F={K%38lNeu3!zph2(mM#@!`4$sygXg%NT(WR>ObU zy=$E&JLr6Ptl7Jk16iXyU+=vC2LR0(m6i_T#9_4Ob*k40dF=Om&_H!0e~o_}E$?DJQc)Ky(KEJ$fBls(wJu>*)D7i9~1e z@)ZNaLR}|*JqNt*t%Qxqaq*(A=%$?v?U4=40rw6mIzpwz#wxM8&+k(tpXi*y|;MP8ZXjvC(`nCmOSIW;|*`zk!dD?Ai4}MUL!Id&m!ek zGD0vlrVSWRZPkAF!`0n&R7%k@v{)QAcD4C3hwxAM>`mBSHX9eCH!29TARgOP0DN zODS2->6$4yDj7M19EW#AMw8>uZjMTFRPXQQAg0sFa(cFoI&uU#b1`+$nCI@?1Vy|}&jRM-#q-tRz;sxwPHVxDD6hsC*PY)2ai225T zi|}$vg2QNPc&gNP3|?*w_GclPE^-=AP@VBs=k1V`$U1K~CJwhg;UdRE&HB8Rn*0Iy zky-DFj|X2j7mG7+weFXnrS042U4DV${A^0U5?ZQJGPCI?xeMbvp``*juXRAGf0{eZ zns>cMGp`f$dPb(~7xY?_`5M#fVLlnJI9jh|p{1Gz91F;r^Wk*Q^>NOUS&799OJ><0hi@*<*VN4e2Z=Z%vD8bdnG?xAV9w>s#T(IrDa zoASfSV$TKc2v3}Ic1Z(Ma@4D31tFoyEz2lo*=1iOA^M2M2ILE(+HH68VA&Pre1VNd zkLu<=`#`(=q<~&|LCcpc&cF3hsGaU2>%U%6*0SgLp^eA&mMq!EkD9vnTxxCpYNo1| ziMrkcB&&4}Cpu7$hX>SQCOCQNWMcUna*-;##?%GXaDM(raZ8ozIg_b`SDnGDykouD0y?j*I3)uO4hLH3GQ7aJ z@^A_H!O8e&Y<-TB7kh;YOrSte1&Sknfr|g8fVtEv$sQ(E_92&C2bPpFgsYw)D6^mU{7QNO%9JGTb>Urf%Uu#zxXAE$hU*Xo30#DP}AKL`9SS3 z;UD#u74$^XoLBHuq_HKrPBx`FA^AN_o2;DE6({^o#wx&N#CWDokSpRr+hl}M2}j@obRXP_Meb!w|A18LDh;doxetiI8EZ=G>oic zRWq5xqc{vkAL{df->h;u1Zq0b*~|0_hY&o3;=DQm@OYt>B7Uqa0-I!wrDt}Hsd%ST zXm-9kGbmJ-ZMjY@*O`_r?MaE~0X zz#9k4uFoqnC9lYtFZ?l6WKMZ9K8hlTS%CRSie&fSe&NY^g%AH}w(vy-=-?=LM|ZGi%fSO1$!cIX+5xLqL+!Bm0fkF)($9y%%fP{-U1OHW=XODyI4>UPUim1Z3A<_Bc?|gE%efg~<$YxvDBNYHED*R= z4?!L}^dNzMp&n|LsX+DYkW<9uUUmVJ6)$u6^&Wk`0i^F>o2m&AJ4|3c%AK+ECY-`bd%J&|q9 zK=yw4qrc0rcP`23LMkAWOQN^w;bf-1ml@d1Z5 zSzgS@6KC%!fK*JTK_h@b4^s#ek|M$!EZ8MmHXC}lFd1J8654g5g53Fl%6JyFK8g@9 zi*^wyzTIj_CU{gbl^W<-CyzCau1ImO>5X0X3##jhJgCF?o`{^$#dXK}_+?}Yt&?=c zmIp*6MH`oJhTx*n?|4$6PE90~v0PF`bcrifOjp{xmBK66%u}c&{DE|cYUIch8zCOG zrizl9=Y1?fIj&0!jB)N7Vlyz;j^Vo}4_EV1xp0v9Zc#u^rnDlfz0dEOU2Xmu72 zL}8ixa6E)aAbs>zj5uE=@DVmG{@-onSHoWu<0lp5@z*oU9?I}nn0aIrC<|`*{JcAt zpS%2dVqW>fpZZOvJp4T3^79P7gW5*(<}!F!069BPAgEYC1)lsowKo}eX#K68NGFIS z27m8(G57~S(FZ~3(&r39a|TD^>Ym7x1~XG7t>gk}H}*s}yA1wCPy`e#nK?t8tZd<_ z9)q6-wz4EW1}|-c%iud<@GJf<27mZ3T-=@Rd_n*qBXYfO9)!-08xtsADeSfX?DBP? z>K2<|SUDNA(~gJ*(vQqfMzl8yB#^l8L2E^nEdV zOk+_O`^3PK%|tqP4%OI@f_)K*Op%k=e;upck*q;<{d?qKQi+Tnkt4-u;bZcmlaf>M z{E7rz)L+y51y3Gt9Cu#G-4N!AxMe^1mA8bFku>NC*3TR#GTxPppi(F~kPcHn!drX? zxV-WdR>V|Xl&3!46VZ5~-*bA*ig+1}Kb@lFeuKx$rn~g}RkKR$vR9rK{eD+>Zm~}k zl#-GEruWpGvmnwHhu&vo-20qeCx~Xdp!Iixcl^Pn*`oy4c`JLV#oY;J==CV*_17in zY1gmGU<`9N<; z@KFOcj%5vO{9{=_Uex4_BV0`4%wcpL^cV{k=W+5P(gSUWlMJwSQ;W;VKjbOUKG-B9 z0kUO2=ZyhqA)GJvSDy}#*!8|eX<9JvI${Ke4;&1cVbtNbZ+TK_jw`ZVSF{Zv5+!d3|;IHVJd-M(B zc0tpEW+9Y3^rVltpuT7tS9OZ0C0-TQp7)f&+J^>WZJhK#+wCsaTD-Ve`(-7!%M!#T zW_LUHzBT}Bvr480?LHffl4&}snxk-N$Wbw^L!%?9)I-Sydd;F_JFiadmkdfavmXQ{ zXS(XG22Y-xO`mV#XLo@ve?xjDX zXQR%Ce07P|+9hxGP)>riALsbvz~U{=s+ZK)yQtF>trf}1m=k3VVyyO4JNTXvtrO~i zIf%Jl$wss`nsV)!o@4BJGIR%a-m=p_+Lwua{)Frm+KWD=?DBhk*~^bap`fb7_UY6( z+?}SYsx+eW6E(gD8pkW%IZ;ThZa9(iG=G5i*UR;gVLSi&+^> za7i0g2|TTQgE<{G)reH6Rty`n%YH=7)Y8G{Q~Bd@0Y9pc3ebzlXn`lA7!ivm-p7x9 zZuPS-f-!6(3}|gaO!Hj}@3jAM#p_Br!KE11; z9tNNO#ZwMGoyGj_iBB7$CO&@OtD`5A;u_ zE)8_+9sWRvWCv>azwu6YRn))zhoC-(LadBiG(EX0>T5}Fz6Nw_QLP5@m+_=1*JWp* zpi6wh#;>IIx9EY9IRoKtvoSY#=`5$_{I1SIG5Z^?pwHrvo{WaK(ixMPmTy!FiSV!! 
zEgW#X?8+}W{eSBP_5ZPzrvJ%-{a;La^R@KvY`MaIJhxxmnznl59`THOKmhx9Hv~cLq zK}E$SL-{v&i2nco7pkb~&kug+_h%6if%Yx0oMZZP6y*l?r;;?&pV6w+^rwQSoc`QG**)RY$F<&m zf1d2}`}6Tazdx0q+n?pn2|hoj+`#@UB+c~aHdSi+)6P>)f3{P0&;C4cWVS!!Rn)-e zdzt&zy*kG6UG>iD+kLB&)pGLJiKNddfQy&8S|@!yeN@3Ln#zVUzC z`~00sdS?D=d2#3O7@l(GZ+`v&299qQP3(1in~(V1@%7&7k8f~he4mc`-^VvFp3^qO zYw4lkL8(U>g3(*Ef!b`z@8dmZ<}(7pxW^0#&V?phGW9Ydqt#w<3h7AIO14W*;Ke$M z>%vQpQgZvzCfQYJ&C@W69Omx}>U5L*O{LfuKYBdX$*y?3v;}O{xp3k8W$$t2z1gxi za6t$`iHUBY66R9Tpk}kq`8@~T5+&;J%q%ytZtB-wbErksC1Xk^Lu>*M6J&RZ$8%=&zw>Dq(-tlKE$ym#5&*XJFiXV&LJUflJ0 z3r{)g^Nme;3vl52+)NW0tOY6=`ZEO}d=C8y+Wz`%tnlg2g?jpT=ud+yzrVFX!+Q)b zY}iMerXW{-Urc)QRZQbwcyNdI5Kk=^Gf_97GSWG)M7dj0m*$Id#RJA<_Kp=Nanto) zMY)zA_(S<`0t)3w@A#kMMQSW2{0MRL-ZI-lyCjI|nzvVfP(8QXOcYQEi;ki+wcv?P z*UhPn-6!w1O3iz!?1}4W&)dYJpp44(zs-$X3Yma2wwb`Wu58LnP{sIyt^RWP8AOs3 zJ6&G`-^n0kM6;)GPv}nObVuUV5sy0DlMP+(Si2AWm+wg+G(F*p6#|?E2YoJwm)PmsBBv?@6l0 zJPolMVKpYtvj+7aXvo#{47r+ZnkmNR!*Ayq#|3T#A~>~nV7DGUMBR!2Q$N4guWQVV z7-2MyXRNCbkS-M{Hdy%X7%hOMC$obK3g)dUM20%23O#L9WOIQVXcHyC$#=R-SLB7I zD zMjptth^-&&WFGrh^nZ3wBlM+)p^7XmYf{dC%1- z_+uC0rI6D;z_$W&hA@sNo&O@Fl!0%?p3DuLa@?OX@NIx133Wx1^_%d+Pfg%T@*o)zYjz>lL~Y&GFruL`d3l3RDYAmiIrQBSB+WgMBX0gw zqRS+tl=G>A#F$69exe9?fyG1QrU4e~l#i#nF7?5Jy=v+4=t7Ade-M+0X)OE-%WTZh0YCj=IcD2zVy zZnOqU&H+`N&`E7l@E%>zT2yeRrZdn!n|F=1*h>D^a4BA{C~55eS83yLhZ4Zf;1U6I?g9rGUscS?t3q#? zDt=EDpc9dSYT?kvQ@do?J;QVT!8cf`b9N`T2D;xZ4*YEdo_oWH_}CY6PwFUwOZ__M zIUMt{tH7|W`)_qgJwUu%4Ba>dz`#WGbN-3`m*>FCbCwPQvw6SIz^pE0&ngF5PrOn$ zb}gA%ZUFdRYSNCx&T|9vH6Bo?ZE(Sz<|40K-nv50P@*&9?)kqQa8(|}rU8%ZFmd<_DedOo# zyO}cm-`i`&J>^tvzMs9f9v`;vTK9o^*HWQn6R`Lo;jSk|=0Ve2mb0xvedCZ?-EL`p zW^nHm>gYS1Z}ZZU*sduL+Zz-(Zy9`|Pj|MF&WAuk4m_sY|L>Q|s(`Am0DRWlZS~B z+xH~~tD)`B&|dz(53Qd%|DB;t%^RBDGDCZiDsqQb=MRnFS`CrkvHC@C{`sRV)p!gY zVYH}R3;MqjwXqZ7rp%fiOJ3Q>PqOMiYd$U$Zz8aS{b8)C8U$G_DdAD$K-a^4ACSe* zT6zs*MHZ3}0dI}}z@;iPMOB(aRc1kfgNDVfSoWhq1%vpzn7{HL4=T8yzk-otiw0lhbZ7(%-%uRb)fdv0?@V)qyI--XS?SwnXc zhlMaOB_y^?PV7Fm`K(+N*#{pTD?ZAC?LW(Hkkz_AgClJ;)_;P9m>Xmih}{{Xm!t{4 z_DadsPPOKc`LoX@{DT~YHB5Hgf(D|xc-`4hUoFRV(Zo)pARFFO{51`@{~)jNoPlz! zGs5Vqs4HWc=n;_)@Ib7WnA#WR25AePn+dpKg}^(71%w;ViWA<8&AudWZS!4`c_%T& zr_C~Gg133L>k1I{jF!-j@A4Nc|OqgBf6L?$=u94oM#e} zjDkf47GH-L^EzV8Hi`e=GW!TmmXY!#f2n@nJcOBtQJMk3k(Fn5vmudgEAe(=%U{gq zK}#TrS}{H-)H+AV#sEzSwdPTzn)6af&oj<>1ox4UT-_haMFo$c;P#rs2_@7E7t{xL z|6iHGgW2>TPV}tmIMiATH*T*~{bSA8a1ddd_G4x&%}nUR3sx{|B<7~%E+5^`VB)$3 zLv|q{c@1n`=I+UgzoKH`8Un(XSIy+Xu`Hi<92UY(mz|7Sj}&Zsz=jW@7`=`5K=0ZihIz(f{~8FuBiRC8LhqA3{}RZ z28~_WJA z-!oM|2A6|p1Q91Q^j^>)(n)3?KEB$lw2pgd_77!7eucwAuYmDN&jq4w*X(n!jmU zw6{H0OPd4Q^A+75QRM$vYOY#oo*AB5aF@7VriIdmVG|-WSwKZtF;8~dnjV? zgJme3u(Zf##hHSGxZXNWgm8C}JmDN*R$z98G$@{4=)wW=F#Lp=K{7a6XjW9ojrV6* z$}Fa^5RFdtjL*_lNLw!i7H^E(gkO6TeF55@U3S}DwVgVV?@9MY?4fM-j=ojetW9V% z$M7`ol>!NFDRE5#;A?77by?~4NG~C-iw!I;F6|ghq=XhHWWN~-EZbTZNTiTg+Mixp z%f{i6+G$K^gz!O6Hy3sHmJWC3JwY!A^?jE-A;_j`4lM4b#;PHpWpQoC;Py2wZ-;gi z7E~(7n}N13lQlXcgb%o)E7h#Gh?IAmLeSA{psmt8JpCBLM!`}|WgzT?hQDc=DeN1> zSJOrop8$ylP*qr58`|V_3~qS`LZXJ2_0soE)KJv5W8={UG+$ObL9Gy3DGZY4cKcCE|Glr9_N5zqgxU&Rbs#w!j9%>&*IqqDxQM;! 
z)7mDr9={)oKBH3`F-|1!&vxiBOVYO=^VTEL zLs^fJqn`X<*JCqebN{@>!kwT6-EWt+zuodjDbrn<2Q7@zlbdrZ>?P%ZW0yvgf&S}4 zc9H#A_q*6+Zzr^KYGfc{FCSmoa!90pEdz}NCUu8umg^;O)~c|*t$)AzO*ak0OD&u# zJ|kT7p5|v3l+)D`X^LRQ?~IMUhABhZxm>%PpYL@RRmK?{a6w`-Zzt4|%M zU+wboVQcrBu6}j(L=>o#$*+r>ON9!kW_Jy{`ap&sB4R7zV(~=tt$%ok%YR$DYa<;a zN;S%&ZV{eRiNX`q_r%Ik`2@t+kHF1KCg${zcni(8w7?ss5>_wIHJrqHyyEwhw%#!E z_meX8IS#53vV6x@DTZS;%cGl|SkZE>(HI=Ptn1QP&1(7>$2{Ka;V|U4ORrgLcw|&# zKt;?LmPP+SnCa8Sjb+UJA}D6nIV+t2yj{bm=&}CtK9l)MvY5cP`fW$gS4?r z;X7__5HUc1q&Yo1b^I4mxk{}?$nC2}S&6l%c%QkBtp}WDg|(@Vo9?(U@w#K9sp9=4 zUvQO3V8Z4cg>Gtcp>B1XuLaUSEJrUa!4BoH;knLJA9F55)VAdJ%crDZ(aB4|hV{|Z z@$A7|JBL_mJgSG4u~gMr(Zuc{*M7&cM@v9@mALM4Ad6xcHQnEN8?)x@)W{aLCX0w$b6mQ~_(btukFi-28Ijutr`cIbkor1=}1$E(G*LS%Hj^j0F zZeAN#nKsEeg(tCZe! z)>l(iXGHC1&2hp15joZmM2;dRf9MmuCd@XDDYn%3iNI< zPzWWDKRsvW!>13*nfl4z)Nje1dg7q1b8cp_^w*fFXYXaEe$1M5#ftBElYeuzcII5Q zW+uH!0exq==M0IJb)icxfzml!cWAEbHRIWTl0Q5h9BrgIb1}yN@1wK~>|Yoo)Ea-I zbl`$}o*Q#b9$;av5K649l+Z6K8m)QBdMvYYH+^E&|1&Uu5x@hPhg?@jDqN#~D;&elxG-kjQ*(|nDsp$C;3s~zR?pEb=;(?T~S<#Ax* zLga~Q`N;=($qo6*pU_X`w(^r-^OA%4$xnI7b@|DQy<}q>pY6$gUa|>opH1#C$qXrn z-x&R~<}V1(4BVaJ>UAO`_958F1ll$m$V{%}IB*yqmN^TS{H;Dhe~ zvU@x9V0F5p)BThSLTdVf+CXWWnXl{J1W&W}!L{zRPh-yAqgjKGKnDFmwA{I$)snda zB9RH=XeIi}`+AT7JrcruY2S62;bh;-`@WO+UOM2tyzl%0->=K5&pjV#)_Dq>d{^0K zzeTrzQOU)3dJ`7e21-J&!pS*Kj~fsIAJ1NFgU}$d{$i=J8*UlS67)mFmeu@z7=}Tg zeN#}3k7Do9aWWODXbxNAx(>) z4{ZA7_PnM$2_BWMxH+fkb(B)mMszhz*U>b{KA`DUGxpZ>B2v{fP}`&F=KQ8BOw&Vh zn*R14O$TWj+!@gH4w&01Zt(%Nf5Wf6Hm4tMT%Fv8rqj^$JsSG- zzxFnhuahcpP6q@t8N0Yys$%0q%B#ByR`Fe^dWadV$#!`xh4#)s3xNY@Rs1lM{Vp%N z0+|`$id19r7la(#Xui!l2JGLa?ZgLH%Xba0h^%Asjad&*t&uQ_r&P%oZsuZ?)0t@J zy>~uKFLVlZ6A+t=?aw=|PZB>%(L_2E=~I9?xe{)yh{QrKZji?BU_7FHv# z4#J*v@^6L}v>=90DCOlWUYft_emFoMh!b3;?vg~|^6S5%4oH*wjZuVWX*pPw3!VX|nP3^dk*>I$IzhI4=IED~r>?9; zYvBZyDhxdY(~PLDg?ti)`^$X$9Bt4m1LXzWn5px0aKEd0a1Y(eFc9$=1b>0u-(S3- z6|&d*!PN~H@1uT?A$c1K)dyUtXlQa!&xB%*4YGlh^^t$R{KEOWI}}7kftAJeU0NZ0 zvVkljf_DkA|LPWhGALQ0GO)PhC`_!0#>c%JlihiH;ilmQEno5G?eBUyMZ!g`mHNru zbi}fO1{Nlo!1P6G=@LwjjiA-oGXypU=r38pCMm>Bn`hHvdY<$t*eLhtkhFjqdaK$t zY3*HehnQQ4`YRJoVbY|5psBAlS&8nQ&QTE5%(alY{962c6Ir$;9$83@y6-Kqe5Rww z7W>G*_x12AFf+3rc#%WC@OOefU=?82!wj{_cadK5#&;Cm`JC}FHXj!BeftHuW78nB-!-=9_WTY?nRs`V zkBHqHBl(YsM|lnywYO`LdJ3w^hN%PQR3@IKE81rejPY9tQWdGJ=5e|@%JXrSJ)g7|WY09jr3I@Q9!V|R;g4$HH|KoaM zgT^@bNR@27QChQg%Z7uh&QYBNIbOlNz-CPZGtFL*ZPqPyk14f!*)Wy5!pSG!S$j9% z8aVaaszuTz5Kz;|cO_q1;0?7sNDX286@q)2TchKxW42pF57YP<05SxR!Mb!sgAtcf z9b^~t9c=7>;(X)~6C0mcD%HyPO=#}9A^TYhS?P)|$#h2V<%BMf7YL->F_DKb$s< zIdHhShJigOSsm{*lS$9+9kBK)9>aG0LB;Y7GK)FqJDh?&p(+vl7Di5aTk0q)mFgfR zT`_C2k*3@(yNPIJ+)l%_uBYjoF|G#ojIoQPg{FJLfG*^zdDnYVq?KRbW(veoN59F& zNO~slpzIxFV0`D87i?ZPThsT8z?AwnVCKVg?G-+>&k)ZN~&GKl45qr zuYax!$SdyQ$FeJI^Hw&SA8DFn;~Jcn!(!ut+gc84Ow8I@)G{nlSI}G%`b%N|F#x3h z$k;f`L0L1dVOvXum&2|VoW^YLFE;Q>bX9YcfD=poG)Rll`gJY8=HO~CQXFu2zd7zcCMs>g*e?Z!kYUx9i#=22^?vBi!x=5QJ!-#voo(GL> z><%MsZ?~KoY1`gX7q(j-h}m->qVaOcMa1PtuY2(1RPmQt7Nu6I1r6FR`OUQK>_1Hh z6*RoF|2(gpL%D6)a;JLb9LjC>%WXz)dD_{j5jR<>JJdtx*i}Od$awk~;^B`ttN$=+ zMJ|tXy_mcF4cP4GoUafMhZ}lgMO)|X(g?|CG_G?N1`;2j4%6KypyzqI<#$f2F4=jE zvC$)hgsV_Fy%)IoGpM>%-%@=ogHzfSI2LZ~#b1o^6w z@aKjK4ExAX0UB{H1JRPH?+)AFUSy&W1a97dX;s+1VkxtCv^PJTSKABB3KC&3``iDT z%Jy_seanMazrsx~Eknj^?;A(Ko=7J`QrQV$?BodbyRt@+grG1XBj4~@S|CiXLt&Tx$h$k%C4b*L{)8;h6; z{v!BvL|03`4Tp4q5bw4LO43OOTJx(qLRE1geJRdAo$fF}Zv!!gxhONt?dRqVv(0cQ zT7p`-s1;vIhdBB1n98dB{NK&yH^%SK7WT((ZfNV>`MG6>M^_i+cl8=FEu?RiB>T}~ zg$PW-G*h0{G7I{R-Y67AKZyQA$*D_NYVa*C5xc;guglTxWXmQB{m7|?u+96ppuC^} zn@b&K*rAUAxE3K!H|6n&q zepQn>1Q-E6B!EMZ$>>2T;&b!y2EqnOAy0G4MW3ebBr;hm_i=Q9x~-I{O;`NuYeF0` 
z%skXw?b6smOYX%vBmGC9pJvpMH!r}I7Y1jo)Vd!t)rNShc9I||3j5{pb}pu(c8U$com7#`Z#N| zIbT+yOg^2zWE0mau~LbhN_5sm=YH%@%?{F^u&Hi#u^44 zu>YHv5u;~CbOgaR^F2(Gl4m4uC~ZvMFd})weloA9SUQ(;#>OKDEfwo%96G4)P^v`m ztku$ozRZ--h8>)hu6SmOuxf!Z_2CMOLtMz*(L^^MRW}~m{aLzCfjgxho6gPSbyQ)5 z3~jyPH8ZIPX~)j5=TNtvxawK1dS+11!oC|-5#l)&nGHUbsqk0x3I{rL*MRo+bx`!S z5nh=fmK^OmK8@L392i&9LP(*bhjX}H*nC9T?jg_v>GtY$MJg<@uS-n@O-1+*aYJ}u z$;QU+H%mLtt2!BblyW7Wg44(UL)yE*M_F8d+#8aGaEVV)qj*79jT#NwD4@YW$&xHQ zs~f=!ig#KxV!frf;UWUTO(fgLRoZ%MwJmLJ>%X-vZPjp5NdS`|DnYH{9j(iI4vOu{sUM1WGA=7H z1;18>e5A+O+}2%aJ-TJdQ$FTL+uJO_#N-d&LJ(%RV{lg--He&6^0A)0xreTruv?=H zO)MBfM_VmJmejVr)xUP&U~GFMzG&gfwI)OEO5soKKnRe!evhgDe2t+9?7Gsok)3fl z6LrZ3eQ`O8Xx=zBUC%gnY8)?qb&nlU?|Bab@`m|^zG!KCu+S$pxty`(bFf+wxf{}> z@iwdfOzKM^NzAP%pZ)e;n?oMmasLEQ{JAr7m3=k zR8FX_pv^x34F{#1>uH;AKA?aNO7Eeo^I&oeWBryU!A&HZRW*PptIrO@Xd zY^ny)K?vsob`|k_J3bExZZldl*pFc>bEcS+xPKHMlI?-}cyM!Xqs(q~#+SM7@s28#NYE78h1Sr5%L%gQ ziu-F7C>p6jTT7WfKBLZpQAEUcVeBm3F}rE}VZ2+78%$!GeRPNP)tVmBYr;Q(BD^gt zdzv~8G0$=y@>uuPyMyJ?3JoN)2lhX8vzOWSTxCrq>v>FAmGdfDRnBzcW|VowTw&lz zjCpM!CeaSY!DDh>Lzk~qdtvZL8wo;YGX*JvyY`IUWj*H<%%QGSQM0Y3qu_hII7Lg}&n;OKzW_mee=;Ip7XX<9#verZBlL5DCFx`Ar-ugHb$ zYeSGBPh&XP?Y{Fe;;*gb2=vCHL+5Y&0XTmi-?;q}MnHzpIGM-N3#X7?5XN5Uc^zyZ z7l${6cpVJjqO-(!mR%oQ)!#Rufs&rXzv61CJh|G*6pBu(s@CQ+;Izx;hB}=Qk z^GaVOGT~Q|ze52fT|6~abl!Id(d`%1AE7gjg3HJlT@5_aSlz) zDXqzVE9i8ne6xQ$-O=uUpI@62&J;;{|Y({>^6?p({JO0`8$T@0* ztny*m(WT-Z43|@SwrI1Y3(b|#KKDensH1ium07mDz}#J`scm=of5@wk{m2?KN&V;W zI?s3f(H;H=2w{Qk>+nxBaYqmuNoauYa}Co!#a9MM4?9n%ibjMWVq;Dwxx+t~Uro{Y zn5)hEt>!(I)9zoSP=~2Q89Myu5g}{`jv9i{cTKAO@1Z?RVHP2+IZ&*r?)8TbKqNS( zf?OT`6IG7#;)nY#@}^oNi4$MYiTD3VwZ>|C(M#OuC1zqC@PA8wkF#@ zewgEPmOjSN$NgXavcTuw0zVvax}@Xc1lcw*pQSTblHSQiG)hmR{uXwg;@llR?F;`} zz7z7)-)MW+EmpdhP;Byw4*wOXFM1S^yY<}K=dpuM*Vb4(5&^7x!2hK>HM`DKcpDY6 zAG)o$Qy-uI9CPUo0l%XyKRU!&+G*M`0eE(_^@|?hEPdKUd_+XM|6P7_cC_V1_ji_d znfN1!iI2J2WG{^FXSFPCC7#vr^Ixch922#{i+YzRtEFJdoqG7s;8EMOanaGWO|6TL zsErqFBAG|0dHC0QkH>fdEcG8136ooSOS38 zoQtlcIcI@0?$)KEl-+f#uEABgz7Z^r=t*j`|Lk}nwEE0y)Vh}{Y8IGdp-*AY(39V%4uc7f6)z4j+WrdQuU2o3++uh8mXnkFSLw?8fFo}spl z#4(AQZpEiT1JZePE`Kqfte&DQFW+jK&qOj+d0pU5^iPz%-~tbVk7>5MV2%gy{NMKh z_AknMp=pnoRs2<^dlN;INk*+URdkT)B>F*qEQn-%#7wl4E?Hf#umAF~048INX$ToX z{Q@P=QS#=&8V)XUEqGE`yW(o@J=;zh2d`dCauyys_;*cTr#O4sp;PX$_L4O{z|+8A zbcpRZ+X=W6rZLn@7R)fQ_)Iu+x;v7n)xrZNnfosgTqv&%E%^Sx*&ehrfZrpD;kR%h}S~s<2mZp z`2EEtw5Q#F{W1uZ$a9^0MTK}@ZU-*7j7Y1n65iK8lc3>!e?XwtTGYu!o6!;&s9O4# z^id{j!7TGuFh;rk*YFbek0opyDKZst@$YBA2=6HBfn(b*c(WQ`){Eo+RNx!OwyvU^ z))CEuoV=rerRag;Wy+QIB5M}o#^OR+!9Ow0KY=TyT?d->JzP9YPse|UiUc9+(q3kn zQ)Q-I0HkpOZ;G8wH5^L7|-CU61)B|kM< zg^&4%DPTuEJNVm*r_)c+fD2|Yt=7>v_j5dTb_b}FVmUNgL}mlwlaRYr0eZ!C&Jltl zf=qWueEKkY>dM9H&H<754TyLBn@`u9te*-VBA6=r2vCVYraS&O?0tOE8JXIH+RDM8 z_>-ps!$YcJ=w?k$54ddd!! 
zo2}OU#hQyY2(Jm6+4J1##j?07HdWLGl+c0mGL-|>cvZVe)xNrfYN60xX4EAZ$X#F0 z9gr#-NXITyf~liV!uC-lI33+xv^X|?=k=ieoD$I9cLUz9Mu49wqCAxUoKQX1IYoDG z#b^7JDzQWlnQ0nwnwC+&8Q4{Y{R@G!eJjk<1lqn} z9+!ssa07EF3v+~!f%(iV%)t!ISG@$x_f2Hf=wQtqP~d^Hy!n+6fXpuuoCAFaX;rH$ z(@?Mzfq}&$Cq4Np6`AldX67X_BK+ui<`H~Y5+F~6(b6=Ou(NJ8ta+H)9MqH^066~{Te7}cGiGm+J z#j4SkA(Q`Q)iA^D6zuweR*fO2aXv47kH)m4xciH320|;J43{|IhwH!Jr16+elqVwm zECN}kG7onOu3RQS^JTv=H4>izJMYocaO^91)N9$xAj`{dhPuZ?wr0?Fkk&n|-MEc{ zF7l=g!<-?8DUX}oaJmR@N)=xOiWmi)?{73yiZbjX${6*eFow5s;mvq z2E{ze(ThKSJ84h+`L%i92mUl?686TQ89Yq-Loc?eOy9k8u=)!X1iiS!yced`ZN*(QS^oE4;3?;Os=@3V&x1w-sw!ct zNOfDO#|kwTuk@k@T1sbuXRX-dKDO53_45BB|9&+D2}MyPBjOZH1V87IzU- ztMXQmqn36kf*qvNUdW_9oK91O=50Wj1e6}Mx0!c$|9}Ra)M4pV4V)c&%)>X%8}IZm zk=81Q`}PRs2R2h0HLn0C^=`iBeI@U_PA4;Ot-Pg${f)(9*M-_I@FeXWy=DMTzY7w% zSkyCLPv=f=P01jU4!XQbr8$Pc08>>XFz^oX$tE;V9`m^d#S|k|%_ktrX{vXeiS%ZL zhtg+X07{P$ifo>rrg~lpy(oFa$TY5+A``s{{!D`D1n49S#n|K0nN3G|amScAx^fmn z=?++zo-1bN>}=~hbCTvQ)98ItUc+DXu4TQ@%q`t>=3HgoDNB3~D#qEh?5s?}-ZN*6 z5=40q^o#P2ug&l-)r1TmP({F_xMK+!-sOX5S)+b>x~9^@W)S+ayXT(3gxGGF$*l^) z_LlS1Fq_K37y}*e-ZMkX{D+(`S|+X=CSV>DP4u>bN8WN;L%5M}fWI^uW`8UDv)T;n zz%A8;y&EiqeA_khRO4f)^|_AzGjvsHM3wLuN{O1{qNu_q^H~BN96O(bAL^mAGIxjg+`k$$$OpItmfwzug?uNj0(V}8cjXm&<`%G;F>pl0xNrw>Y{ z1cEAVFa_OTtP0IGiD#x!N|Uwsa+Bxb;s<%^8v&!jTlJ*Xojfz&C{25SMA1d%TF!1W zHMmb6l}go;RTssYo_f&)*gaB?PLm;EU5e5o^@8{Qj^49CAAO>?nYQAW5T99>ycxLp zTxMXkW?&7wf3u1R4)@I`;4L#CJePi%p_C_iG5NOZt;M{7=cXI`;CXGyx6Ip`OhTV| zyX5F^&fDuu93Yr^J0(-AnYY2rTe)Bum4V^gSG@8JzvJVBzSD87p$@$~=wCAwWbb)< zcP3%)dAm>vOk0Ul@I4oCI2;niS7*WZx&OqT9{!hxn2x>m@je|pKmB4l_F7(gJN7d3 zwvUc2$N*BfS5rPaHrK-w|DHD8Af35 zj;%G&^mgp|8R+)z*zuW!y*sv82~K-*0CcKEZ3U2k_(fzk+b4JjDU!!x%E~MoW1B*esv&^GvOXY@uwgV> zbWbHZbZEsqx2vDSX*TIJM#5?px!VnuAjll|?|Eq3m6^0Lt!(&vb8XM~+6AHRiWIM3)i zyhUcd>mF{hM3KCecH6lhN{Bztc0Tt!&m?B|v$uR?Z+XAs#hc4;UMmR-`N7mDQCgJ5 zS@SX0)31Ji`86*6Jh*n4p&f@;C37QA>deGw?9rcf@3Uccw|o?FwneAeOLVBQc#gj| zUY?x1nwQ1w+n<~Q(JFlrl&>k>AbnRmk#`Cm6Fo{!67mtEM=kkRjkVEWoV9FA`nl7r zK9AP-tHmanL;hLeEgzqm7;>{Qe6Qicvr4I9)}{3y>5u5p+E~RUIFRkgxg3V6tzQz( zEH_;$NxtZQF&vV`*7tK>3Rk>1`)L_c(;24!o^ju=90;~-K*(2~wxZ7pvk6Fk!6Coo zeV{@aM!cz_d~nd&>J-$$w^wvxiIXU}l%TupOt`7x7ZFVPr}OI!-+gO=?};@CR-A&R zoPD~BAkzRmK~j_QCCLX6TSZG_$-L-@Sh65`0PeP<1F$9;n3p`1M_yj?03Q42C5PxO zKk4T&G;h=nYxPHmr;3&w0~iI#rBqbZ&2L3#{Sc|W3~-me&q?M9v303iDlRMCMRq0#|grKeI6+r!DD+CR*wN^pz)L&bG?+LUh_xDj?}zq*8y zB;9XnU_Lb*Qi z_OpRh(a+&Ztd>@%bIC@Q!&dM<)F$M=lTNc*nmFb1twQ_8SIdF^(wj_Vr1<+>TF)%3 z4ayb7l_v5oog)y@Wi36A2z!fL8+$BNCw#Y>CKBVcnYf#)uOLE3q|Ul@rrCT++;GLA zGrB6pYkcWSb}_u7M5qHOs|8=SwMVzr#?2X$Ue6+|!y__Hwbpi_2xYD9WZP0K?N-~7 zx(Yf;*4j4ImI`a*_vrAdHh#xbJd(_-#JzMTbLaR|AX3jzrF-yibmTYxu->%Cr^)o9 zbM@gs=+4^8&JS&b|4Usu7o)Cc_p0mhlQVVw>DPPLb#}V0V^r5yFPXZg|6l6TSsQgd zw^vk>PW&~U|#Ej;pcn3fUrn>lh%}a5f zV20xR@uk~L&>%>Bsdms{i*O*m^fR{6c4FygiW?zrRj&;VJgmYTs}QTDMBKa$t!%Ig zgUhX<_A;VZSVJq9^A3Y&ui(9l_g3Dw@!rMzcHR}Moo}9n@WT2KA}#F!i}P^?#Px zR-6qd23v`HdT1gF>hfc5YLx6~Kh1ELANN-kq9R)*IMcgM>ORBK8{*BTxlo{Z zE}`e8x*5k?VMo<;79wPz_q8f-eE7~eq$K=Lg}v=drJ+v}Bj8cP|3H%xhhNDUz z?t(YGvsJ5kyr`WsW>-CcH|?+*2br!rYnY$_C0WY^^C${8N>-CF3VT9bR+E^E4hs9i z=>d)|5>xUs3ev9LcGpBzBn{R*6b%8Ulbf)KAekLaI1D81+iNy)n? 
zXqfv(H%`#|mJrR$D42K}vW=})({I6B)~9~hiKU7@I9y+M!eqU2Q}&(uPbTJGV$jP* zo^XA!a%Gil)B0v*q_2(up*!h$+`GU!3^MKV(T-&eYeh1UTz*WG-A7(arEs`8q}+cc zC)QTeE9$G*_@-M5$kHTROqRP@ywUdh8&{R6(1BwI`l{nI4#7dmjN$E-BLwuS{d!JH-7vTy$j+ZrkuKw`*wdJST(Rw}Bts!<$# z2vCxKFU~=dh@NJm=tE&U?rEIXRM8Lln6*G_V}rnU#&1@t9Cydfds?NP!JcI5F^ATT z2a$$nytseWjba2w4Hq}@i^bw29RJUF!cCx`HNYMFHUJzOYfYgysOZD0@4R-1s9oOFClWg&c~zq0NAuYA8kInMLmB1K=RFVDLF(A_w$puM%Ftd%itmDOA!>Vu2aoVc6n!M_mY`}>y9DyJH2&5qwSGi3K%y`pB^!6x9w@K<_toueEJf&reCjqheN7CnQN zZl2mF1!51yemRlxd{5&U$={vUGbu~{me$aZdk7 ziZuhx*U&a@o{)psc+Co{B(y^4yUmVY+6u~c()U7@F6>CRezC5V*ZOm$2;1gPz{G#h z$`cXh+h4q#9B#M0KPdJ#H!>_--Vu7Rl<1DoLo_7EslIEuijE|v6k^UQJ1^A&5~!jT z-FH!gRXDj2y^Hl2^R93v<JO0&Yr3XqverNcfu48>H zJcaFaK9(K2c3?rFUGZl0^7MQ{Z4GQ;ju<-_CL4Pmz>I3=(;$+8{|m^!x1EU;Plp;d zW0S)?T73po&Ao4vGJdIeUAK#cXh1(x=_ zH9>1I{(jbh`@Q~HTj>OvldMTMr?GgsxmB={A*$^P+SG8UL*EaV-a}ba#F7ivhj>=-K$N z^3I%$27@PO&p2q6ZZq{-jXx#O^x;bRXI;e~{#o17USM;QH)X2m1HbjC;Fo(=#6`Gg z{n!~G*Q__5wWnLw?#V3r!G@k3Zk;MVT}&+g{jfsmb-`Nh9{B(+Sf|n8jH4lRRNWze z7PGLu+7ZlmnjWlnrsS8tC_(uO01PH<{Gx8On(pN#?H9GKuV2)a%9Qns3b~Sh;%EMU z_(Q$$uXva3^@pnR@dT%%hvfu~P9ccs_Pa-r8<(gHoB`F&>%q39=My!QI1pE$Pxbml z4O){o?&%ZNs;*eKXi&)6UX>U!th)45=?SQGqwCA6je&6o7$(I=mPQ$eR-1!QG>IgP zV#TiJPNB;Gd{JWKaWSZ7V#B0n*QMoPOFZC`C@Y+k?FL+7R?pj^__P2TA2 za0;&>>kA_luh0Iy0bU33YB*6-GKu`lr@Vawf;uT(qrCdXk(oH$;C4F$YPIO~It_!^y#VW8F=)0h5@vlxf?C-ZV$u!BmC0*@ zg7mnFSy11Py=^gzE29VKL+P_*U^O2Vf{^J?%dEnFQUD5sosEXUv5x1(U5URAnYfqH z5Xv#Y@C8A-yRzxk`k|0c=X4G!_p6f$G2f&%#lGBc?p$^@gY)5(XRhaR>YICT*PHuP z>Md5>Hm9x49YKo*+8yh)!APED&wM$_Jrs-9Cy%bR$&pUTuCEYc-1WSS(3I5m_7WtQ zh^M*hq+t12osZstzNdW3OvP1DqTn^fEhcW%tC`qkc>pkizhn5T;_u4;7k_DXOXml+ zjc`u>Qd(~sB*m+;{oo>3PvhKDv$(_^_}e}lVA|Gh0CsCj@(1BB2n5VO*AqRzEIGaK zg!4dT=F-Ezl03*ilmKCc#H)u)1Mtse7EJdh+MR*sX=LB8QP}j@_)jNul#I>focY`c z$+zC;1s@O{t<^e7z`GIhEkgC@2A!#Sx{x90~tH4Gt99K)AwRe1)m(4h7;HP8eVa`+T{38 zo0Q=dkE?bH#sW{ofmi$_B$M}*K#m}16tQ8JrXg1B_r$=;AFmcTZO53^Syo@SoA`;4 z>;=Qt(km&uqQjchgLx~euve2<6JM}3yjJ;nK_1j$HJ)L>eOPhT4}iDfVK}(2p=v;V}mxM0DF9O z1{+SRcKoFR-Px&gvBsNC6$K?kfeLbd{{vG=!4W1Cs1Tq8p~5-DL-!bW?Nr0@8;7AC zO(HM8NYk@ccX8XFiR8Z#KrB8)Y8KV~e8_=>z=5Uk$f4J+%uT3V1xF&&}Q zIlOg*jHf9x{FEyqrC~*w%dq~((y%^U+-g4aG%BSr;Z4<=(=?n{9c6}7<8^u8!GsKn z{w(?6K~iGlZ>TEp^|Njjm|<~Q>E8FxBA$6o1kIbtOZezk9FtwB=%!zr=pd{`(er;s z{ui$b*q%c*(G`LXWFXkZuF~eYm8VG1HlAfXUEE*<@h!n?Ij`GU;&>gFCQvJRb`rgU zXBW>0d2Z#ooaZ*4_wp10TgFoau9@d{o(()XCZm_!pC1ZkTe3k-1dp@(-;6z+@qJoH z!@eKPpfSp+Pnm5vJ|5yVt?*Nr0L$$}eZXjyAY8q$kiTIWuS7*Bj2Z0fK7vhd=#=Ne z4wp!vuX^1|GzHH{j0;gQyOpo)_`=pm#m83T+l;W)l2aS&85lj7-+&HGdyb0s=l2p^ zS*6w`bAx~P&R{Dx*S}ERJZ%McMfMAEcPf4j!xhXdJm-57q0rVa%S$}uuGTEa0mgR3 zpO!YGS ztPNGT(Np;V*YGzUgEu1-S!!GxPZ(%-%h^WFR_YlUABq%he4jSR=zpPf164% zNa5HAEW*>avw3&xSDO}k4O4+!3AK+lq@hun0;RpT%y)ic%7h@grp(@J*+O&!j#|8K z2$^os2 zDm^JHw4%2&Ui!7RR+n}m%iPnyjF1aDmv*y8uc*mkte%pGn3;m~365CL>4-2+RxR1{N8M-sDep!V-b z>09BpulzL?A6T(XG{Kgi8n#38j9Ac4jLXOOO#sK~(If5BFAAm;$OkbZwoGTgOa z={RMApt9TEBja~7bq2Ea>1=AV`_C6ta7Q3hB}kU8a&V?fx%G#X(t=SNjn;{pHc`&a zOEUe$%DiX)OoxvBd|aB2$)w%sr)!d&ff24bSu53*p!0FqnL}SprPJQ^R71U<3s<~g z#g5f7YkCgTLUj*&q^Z4~_Xg)ArC(HoC7$(-Rh*NOueAZrTGB2Y(R7!cSIe;3Cl_BeXX=m*+)KU)2sjVoi z=Mt>6GR+6hlgR=@+dPNw6Qzc5C%dkNSSYJ;Trd5x`ahiM|2Lw0_y05wpnCiNje0i= zzH?7s@ZFnvwH>1;Ku^#|#^@60s;!A%2PIO$GlFnA_b_l72D;DKh|8Bhn?49Ug8|xk zdUk-ON>!JR*cEZQh1c-yWf~I%dxIj0Fy!o@2o2JOU!`m$d;F1#&Y<;^4OXno3=|&B z664stT^^U}3EOLch#A|G7JbeDk-D+Hjv7xg|AUXHbk}wx)wEc_Tx)gTeh5q7( z@Ts)G^LKF=+*NhFEVxqIS=v*AQ|Kez2N0!*0tpo=jCWD*@2ZnG=0H`bCS8CQ{I7ej@kkBd{?r7GN~3Julk6j zQ9CrD%wR5r&sk(kg&~eRON&y{BVP+zQ8s7<8A%wHC z{$uFuL(I_4$PAs_z`zZD??kqsmkcB=QE;8Y_Y&^ixp|=sKOrf=s1Mh$Ec@)S7h+Vo 
zUIg%hjS*`i2ZZd7V)KV_?zhuyzByvSfQPXbGKyi;2qG<}4$q`@&_41tbv+k55mr-^mt0PG|u+b@4!Xld-YTw z+u1KVz^p~didE`gWK6yHRZT)|t#H(Hwbd2AOq0>m>vq;92O1I)v?i{HJm~cPHschg z@j0vM=X8cg1sBk_yXo1|H#73C#1K>nUUEmv*iHnxVs{_fcKuJAfJVgBt;>Q+Nf z9r(;4Fu(FIxO#^c0Zm%aoO#QchvsdM75lqs{OjgIqwIQ+$iuE2Pj2QiZP}ee0|KZ5 z6dFF?i0d_6W$wh!(Q1w;A(NM(VFOQ{$<>#Tn!V|%A48GM{L^v( zJ!O$`-c;`n)p6cv;PMu`H|jqQ#}^r2&FM7;RO+32;Bmr#_wk(HUBoE44?WOF7{ipa z5jMuB(>Zmp_w%^FAj2GRrALEKv-rq5zOEBG`{OYghJJ?eM@uInA_zI38MTPW*zXnM zi?Ox)7Dk1M%7mM$V z1zL%i2WKBnLVFCL-&z9n&4T_38||zf9UWpk2iZLJOV6KNrCa=CqtRQ_>Yjn>h}Mys z8KOmx9q6g*s2#mJvkUB?RdVBYc_7g>kSNbyy8a1raA>G>Q+uMI{s;|or zcXUXaV(C1UD}@aw#sPrWNR}&{wr2Ss`6HMZ8#fVu1VW&4_dW&+t`m>871O$+h(Gn^ zP^7DTh|WS`cnw}zbqSg3MmYF9s$!nYs!b~tEz{eY=vhfv$BHfaC>EUXwW`2#W zccHOjXQ#oGFWhcC0U8Q7&)rNX2V>8ojwC&y*urH>RIsAHx&9>QMaHV^SZ;4GZOitD zHf9sY=H=8Qx|R+sqU#STTXK?Bf+ABso=P46kA5iPw9w73FE;*nDFylXw|$>mc?zJs zyW9AcH1N1ba`Q}OBU&NTX|QQpD3EShneEUlxlnarsBc$hq$OfWk9Me15e+0~+sYtB>AxtxasB{&{16~q^oa|~Rs0n!aY=zU#+#Ms-)XwCI@0O`F^C!KU< zojPfEzST4Ubmg$X^irh``#t_U(_dE8yA+!3bC*qI#HsJBp#e^)HB#CWZcD-3zh}jM zM?P-IDzOub;BIzD(JsMP#Ix=#*xT8$_JGI`@$?_xhZARQb64W&$GpTo0`uxYx=<^K zMF};~_ot#G_VnpBC5-v(qON}-dD!t{+Zl9L$R z)^P6U0x8o7j$6|E5w9_ibbec7lDDOc4<|0T*?>C#3pC7nI3n?*`86MI1@NCFSN8`T za%-;|aV`ak7J@{n0by(MdKmB5ub0$lnj4PoVr+64P^|M{xR*_Y-_GBg-)>6|_12D# zHsKz-kWm|%txX#lQx9={kOk?^)=;YaVskDujfwq~-L{j(qq-PpylqzOR`N4+CuCZE zg!Z>)@Sg1Ww)TGMeCy!(_C0vmYks`%!RxJW+PBu{P@fq-uVMG5XXJy1DtQE_Pz0wC zf&;!Tgsa2#sCxZg_i~0l#`2veJDANbCJ zzM$t`)!-w;7TTTko3Ord@1rbp;Sg^SID;XZn7IqCMY#~{sISvCa-wafrp$b;zc4#j zKLG9Uw){<`9-;F>Z+!oaz3aXw=V8=u`kRYrdb%g(31#;Hq@M?fRex0)kPL-JYbHaX zfuL?*5Dd@Y*OlK||9$bk^{->&xmW$4{=js^p#P=*dH=Qke{cBK_U}=@^xxB~-1N+P zJneP5i zGqBLdc%A2O8d}7=L>4&Z^`A;xp%##ZF9%pn&x1i>=XouU`=Hm4keVF$={TfF-C{8v-h}{Q< zWyiM(rgSyw60`3txJ5f zqz7l2k_Ki=8d2HH4-D@ut+%A_end%C-z@0}52iu&wR^#Ies3w@{Oo<P*p^*5^7_$0gv zbcFOUk*)ip&-6AegBDv@A+MosVClzY0Hse^E&aq54iZX#K4o%488g z^%!D+W$q68CoNw7bKz(SCqDVcuPYnG z{~+H&Y+&}PrJsh4pn@rnc=Z!=-XZn_g~&p&6ezOH5?^5>_|;BLYM&@Wp?fVM8NBPQ z+6Zx%5F!lY#&#Bs)h0dm?lx|GhPk~BN`>&)P2&MkZ3s1Otxxp&#EK9VRt>l9xO@y#8-y=Z!Xk*k1@-S^}h0UwwN zUdmcN0JzeP_i(f8lU})=nLk3YK<3)kGA2@3vMlew!OW=Xf_C~IGwR$WdqA6=QPck~ zGir=x)F%V9mC}uIZSVSPa=(y(DV+OpB#y=x!j$;+bbxx8DJChvw=^cVPh%u6p?|&m zjSRHVgiQDTreP0&zo{y02vE8|PBHQqzR91(7 z1P?PKjUhZcqpfyybhw0=HZ9)8y}vA`Cwk)@^C6QoDg8Et>S;kel)@6;m0UGQE)5|_hglh z9nQgnDXySuib)x#N4(&1)gdX8ynDs^TsW%nLt2)$mt}(Olz_34g*4xtfpng}_m>=^ z61kVej<*=IrFe_0-G97gyZ7kv9zKLKM55xPMxNwO?5n|VxITbtVhSvWjxYVD1r2Qz4QmY^cB77N+5lum)_~6clD+#f%L6j zy6dHH?@d<%={;V0K2kR|fk(Rjbiyzt#9IP9Dw|tMyliDW432dyE$DcV)#t1V!v`Jko8d-#~ol%KCh z;*LV|@Sm-nrZ^HdCBOUq?v#<*c`6+4)g0|CQ&*7{Y}E(eY+^ypUJ7<=- zr<$}g^WC8&RPJb(A&2zxMGxP|QG26059H8%UnJU2ESXB?tcKQ#oNUn0Z}0v-{0+a0 zdS$^`VT{warcJ!Y^5R}ghsB4VN)UCsrup${|=kp;veQ`NrL2hM~@h`F0iWAP!bbPgO%>DYm0}Ob5U8Lcy~qrYd9Z` zLbxwkvQCe%Jh_OIz%hTM&fsGrtvmcl0zJO7m0YHMj%(E95ZPH0O|>kT-YOF54HYbO+SWH6C86xhpn=JOma zZc+OaBMX!NxrdGX@%E+(W|_p?uoI>_8>AFc@2$pNbA_SaYC0swW^bKVN;Y zVU23?Ma$DMoSHYsl!jAQbwI-^9V`1}3~6qdD72Tf=|-OF_>rlipMXKS1p*W|sX_(RYDd z;{WH!#R7pak@I1j8$Qhr&p~fk7XS@Q=b{p7>6-{I@?|#&&g8D(%bO44w`^e{@5O{h z-rS$x(KpXce-r>KC~-RX|MF2VgA6k`mj@Ya4;j+emjo-G4uT9S`Db73tai?tSDo9L zMhzAJv)~s!)y|}wt8-hC$+T??1f90X;8s)%^Q7l-Db!$@(fifm&YQc$^@Ty0>EVCm(Clo%-TccS6>^&`^(R+r zPbF>&Wy;jb{Xs8`R<#=oI<5BAo!hsHw}9^oJA+b1W&hTQM(Al^s}gLn4dr{1kL>-f zFQO{)5~>W+~1EV zcg!D!GFYmLLG?+HT3J5NA7u)P^F9sV)v4Ud<^}92i=e0je+^$;P(~Ok)_D()??^D<@VLSeWqP$CPXKFqqZogM~=6ZP)CXXIoo(U$; zS+m^pO+{ywyO$!kVZL7m@jA?oEh_VIR$6gcNffWb=N8IBricVjp6mYjmr{o!PyK6% zAxhk{kETOcdl>Vq`>&n(kfwHH`5e$JHOIrh*uh=02`C6>!zk7Dy_muy5ovy(^;JxX 
zyQ)aLKroQ^^Z=bxf=I|Vy8)WO1vzXl&P4SI;XD(=UTetXM^?KtR*>)YRTCN7zBlwwRjo!pH99`S;v_=K!OZCG!DH>yh(6pWI zG|f46cGnL54WFP!l*YGNg?k+;5q2x}TDLUtzAd6!{XUj;emLcId>V-DhkkF%w3 zOoF8Zqpn{6>y)~B|NErXDbrP6rVo{1zMAMIyru;6)!pf@UQB;A#LM(l`l}y!3BON& z_0#lMx23+43VmPS&RT(#{!nUxDNwSPp7tpj%tB0OhCXvf41Jr8R|}wOx7C|-*IEar z?MSZ!l|8ug{P1GhzambP3QLv1FM?nFC ze<9dg27&u>N z6r%=zwLG1udkweZa{s034V=oa-%G|_EJ}zCm=+8;c{HN=;17}?RdI$dbIQvFh3ghxhj>6W|?eUb1K)R6pC zKHfxr#HgSofCe`4U_g6U zjq(kM=Do}VUHhayn7z+~@UNr+UKC|ov2N)8e+3VRV*>9jz&piinC;bY4mCvaem7?{ z&(Y$w9MxjZ)!NuwO)Qc0>_`Kgc-=H`S^9ML*B(^wfg85-CV)kXG4X1N7QemVtVF-> z3BdlNtR?-jwfQQ~>u-FF>eJuOr#TW`&otw=E!VAo`xofvw31f}rQDu(}u?RXAj z==%u>2MpdPJn+ImM1CO$Q_pp)7n;&-;))V$4^_nPe}sPB+)z7+4QKNen08UcPq!OsT{oQv0NL+ zwz^bO>LV|`{TItW=XLvKfbLZjj(1=FnUc;)*U^KQprD^VZ23;TKJl4rCGz}O@ne^_ zD>!0#7UgOPS zd=qd(!@55XGJ5>DdJz92MxMCOTrPC#72Ck$_;C?TLJ^o4L1Ov*BK%)p?-A{LuqXA2 zb|X=D>3t&FbBJ;dmfx;K8)Cx^N0<=#Vyn|fvb^~g`p_XV|BoWTg@XE9Wr~$u86vFp3LmYPkF|mpwetWW~;lvH+4^1X13sO^w5vhL!+BI_t3T+ z(?eKW1Nkq2L6HB9Fa>#!4#+E%EXYR@0rFA;AU~M^{SSTYi^zHayub;HGO#yfZzjyi zM1UpXB+tl?8w&3v?u(Ec3h(A2B}HCH{M%6orm90MZG>RKPI$ydl?}P4GM5(=$-VfM zP^d{oc0r|$sB5$_x8e!Wmgp)-cii)yc*%VHnUNzQeO;F5+^luoO|!n3 zs%BjZfNIu-gsEojJ1yO;QA$>`eoTaB6%){`LV{%eud0O%g2C*8qlu>qRj&s19zywp z_EIE%W0}8H(}~mkw}V728HbUjJuE++hvHmnYA$$=&jI&D7hUO4bFSKL{#SRF?_K)l z(vEM~I(|Ow&}9Btlk@|MN#>6u9*GYy4lL?Oad#QX6>Wd+md;a`agi`PSBE1Fqwq3_T?eBbk8XpVXlf%|67DpPvrEDWIK`(1@8`ss?&LBL+Sq<`4 z7kW;jr3Hu>3M7#UHhhd}1L=|aT2B{=fTyV@fRS+fwYrL}){`H|){Ucj&P(Oi=+=-m zX1z6U6<>+fE=7Dnz+$-3oa)y@0rv>eL5BQuUPiJQNhu;uvqT3?D$BeJHSZl19=zVV zK+*9f9>)^*for<@+R3|w|_2o*6`(|S8@Pvt+x~3(2cz^iD#4OZPuUl zQ-&+WY|i)LF6*H2&{hztv@et&)~)&_!P{!n@!%#x{D zP$p#^H_Ll4oBeACyeInb-pR-Qw3$QHefl?ocb19_%;48In1s{gxc@>K8=IX|^+2@c z1+lEwSXt}Ti7{@GkCin~JxM2?IF9~aG~Q##_GEtRHEP%?6yxNV@Jqv@I=nQ--EayG z`?-dx)H{=4)Xq7N(${7rpI^E!+5Y^&@7+J~-i+n%^!|7C>X=vQOij5KLce34B}&IU zO~Co5YrnN)f)ma&))b~oti@O5nbZ5ndj~?8B-MvThh3F39mzDOqT@xjb8>qYhu`Ui z-!eoo9c$;Ho|Yxg&OEiF{5vqnektk%nQv_I5>x1#oz|Q`u!`ZH>JUx9hP%iW;TZay zk)%eP^RX7#XU((pz_xMDdwN~GIpTS?V8g@C#cFtnRCZoW4fcj&5PYivYu;=gbYDU7 zTQUQlq3lPIzRD_+4pH+#G|2TOp^G^pX7&Z`y-c8;mbM_AYr%GxiFM*5 zhOuA1T{CgVVk`?*wm*@2na4}dyV3I3^uCGrp8KKOQdvHJSXs>0;uU4NUs=o=dX-OF zR7;D=C3?49r|I46Qw6n+>Xhrjj$AvG>zs71ft_;Q)RF5Yrr1vilKD%2 zVWwDHrFdKL0>l69HRG87SDE=w9u@o}!c@TpKK|sNK>8E7=Whxks zUre*l@xK>ESm`Y&QETK*9mxLrSMUz3gU)J?<8OYSXGx^k^m!JSYJ8g8f9Stye;?ES zr@ZzXFdb?%?I%ou`9p!}t?W~j44J7U)IUI-Kon5VCP?N#MJM_!{so|m>Ms$FlzI%u z|1?IaAd^*sd1`~x&yEcPFvJRZoBiFZ*RgJm5yGzci%EvJgP*6e4co*Spw6v%1L z`bMfgg9$`_Fsp&_j`Tc8V~*t34QqA zCJOx92$K0{2!4+ayvO@!qMCi4tnS4{6F-QB9tS=h#6s^+v(O*D;jz#NqurU@bmFtn zM|OMz3%y?~bdE{-0Tm|m`w)-BF99Sqa&fNT$QiRuBUe|cktM*YMxH>JYUE4B=|<)& znL!E=`iIYV|N1ojVAmAH?|Oag9Z>y>Kq z2)=Rh%lPf!W?nnqG+B%G4|t&_A4;fy%%&bh8P^N~{NI5b;H56H2Goo4Xc<=i?{#8o zzF`l!6qUA&oNOns^FE3a`3;vyLyMUSirJ|1j_2i9I$P9Tug4rznAj{#$r+015whLp>x|4et*@99{UX54(_72{^)h^>K|HutEGA4Kp#pb1U@w)1Z zBJuP0MB)~1NTq!!jnuawliky6+DO$e)6}xn(~f1MFx5^B7-^TiHtldvUSCyT#sMuC zC_KmLuTA~fZdpl}CIQl#GmZ$n5T5c8N*RqaF2ZLlz^~Cg@HyjTx{VWdOt#+u(=^(r zbRO+f+~t0Nomb5Be)>PXuLt*hLpm&PGa1t zP=aM0r3z~l>Wvp$>M%0=sO}XtHZX8;#Yn!2` z;@7#MtIgw4hI7qsO1tpT~#q>u{}P8N}~C*ebO z$QBUHY8Y<%j$NbJJt3Of+$*l0C;%K8>sIvkKf&z!-pwV1UI`v>nA zISuhE%ldf+{?f>{^bRtzEj*i`@aHk_>Z<@D6p!2 z@9{Omv>KFhMf_?*8X#CO<~cDrS)Z^_&d9SeBHTVEz>h4wX{9ql~i4GZ2B_G;rDR1(KLVgK6;NEM}*dN z;k%}gj`|9|yY+=+Z66R?rjUA885dMupJQXh4^-3^*Yk^b%@utSY!0*mprNyt4?95nUcQ63K2}V{?e?uOq(PlOLfqKmDm75cCjKd2~6X{S!RJ)Ti zwK)ljf3?LHR#d^coo0?|tol?89Y-~uA(B3-aciwua4gVw)x4tas?Z>TNZ}ki&I<)W zO0LO+2=@o>0vqLHeRB^^^Jdv)GYUANgHuTDEzw=E=MLw5Za?PjGwWBCs42m7fxkfd 
zXXiEd+U`Q|-pi+`5%sGIE^5mKfR4rz6YTUo_HJ7BbHwx%7zdEf^K{!8nTH!y z-&|_ASqwJ*G_8LwN$a0W+(oM)h@JJ%G>&z$8t#Y6FGJ{}s3&|W^^!XHtpd;9j{EOW zO>s6u2N^xlTf`|HvOVIk0C%z3Itw7QA{BIAkqX>m0nv&7K<^zx?aF?y$i0?jMf!vi zM0wS@k$Cse>d{;WQUONTOCz@z>OY5d0v|L<34X4U@wdQ(;q=X^W_mJXnflW`;(Cqc zPQ41+iCQ|*oO|LHg^licr<+qV;|knA_ra-L)XBYrQn&>{w@@;6wLLz!t>c?@0;rkO zp!M%$+P#~V#C_5en-HEP1i|^Z-So4r`DWHuvab2uy0qCkDR3;=ryt{vW4xn@(Rs94 z4K8(OvbT_f4meN@7IG~$#gRFaGcqTVG)K#FTZnQ@DL%Bh{ZKT|ezZ??Sb1{}EL^<0nL>W7PUTbr=49IZaAIM;hknLK z3s{iIO#sb&lmE>}L%=o_X1~qCx?- zZ9Xcb&TjdF>mu7(?_Q_zO<_YHJKnu~epOD}iKN2D<=9Jyc>l5@!^UvLr9|lrF-Bom z6b6@72nD_`ih?z|P(U7y6Ly|^QR!Dn2AK=mV5)x>v|Gm5KJ)Uk5@s}9WV4Xqf5L!R z>3=@_;f(2pYS02k+ng90!&H$e9%(w)kivlU(A&u{==%L=d<^4-#U4TyCtQ*m@mPNi zcdX3P_zv$FYN8!OeRDIk>I{vvt&zUZJDpYUJK{mobs3^AhPvNmhWeSxG}X~~Luw#l zMxq#X&Y@Qr>vQ_Y2E#pt;5pzP_nhTiyai$3wwj|D7uC?hL=_roQ4<}RD480_gy*;5 zSi(yieE~{0f6ZO!DOH_fTf7+LG5Ke-7c$DH93DF zG1|(6`=w&EKMM}v;?4hY!)@mT*uDRQ2LE#6|5by{dHMntjU2m014dI%kx{`Ibp-Uq zXmoLZbGGd^@6i%@cBz}g1R;{=-u5ADrCF>b|F}+*Yd0%w&3Y|yLG^BlmW|ZiI_hH; zYqsjtc>$xbSE@j?TVqSy5Ty~xb4wMGvu_s7US=(J1Ez2-PiAFGjn;RRhZp3rw%gah zJGRuFsAW|Jzm3hJ}^XD2k-St-~di0WWeQfZD%6;P30CvENh;|Y{1>aRwhTfUoUGr!26EAP%Onyb#h2=5ERz_SW|ziCF(wd=fUB{f=~Z-^1PZ@k>hW#3;gW zNyK5jftdu`%f7Z6E;36^wU!!oiMrM$aekir6OLv^28TIg32WHABRiV4Co=4$nm&>E znUVOcoHnBskyIs35IlSG`dLL>hdd6Z02+t{W*k^J#*?c2TOxSjctaCv*zsp!<5zvO zE&WUU6x%r)7M0cpdE(w*4B+D&<1}jtU}&+A3>#E^S;}7UqD~O`H2HrA(3*2SB6T^% z*F4+_8avMZpKHRjl#T5K4IRqOQeRq)%g{vVnu-$f&Mb>CYFa~BNUj#q_Ee|Z!-!F> z3|)e~8Pnr`Xl?t0EN>Hj%JTNc1?lBY_d(emsPhfHvA`RVgbH)26{OJuR>@VnS1<0B zkwzY^kCP4dcm#9ndsKBHISlqJyNgrc&WO{^^C-?I2hLYCK5(Lf4m8rP-=m|3jSW{*(U|PBr*Ab&>X0|7ZAg!#N&4P5ghx zrzHpBlkA5~r?_S^zCaZ&kUJ9sG#3V}d% zs!@MvF)eY&zh@THJa;S+f4{lGJ?iILjm#R#e$!t=5$7J_=U~IG1u%4J+ZR`PIGkQj zOSC?)B`m<*_b@wt1M}*JJdf=yP^s)!*{j-d=3Qbe99=q(-7=~~94}$17NZup(z-Ks zLVkH3kM6$T7}-C&L2Z4zFiVhWZ@sA-G4}~%vB+3)o1hCY(5A!qC-d)wD9Q7cv(>%* zD=zC3>F-M4bzeaGi=1TsB=bdZGFfr2@0Cy6R3@TM6-%Zw$P{H=bZl6Z-sN~Fa^_D4 z_sg9H!o5)O+H%LfIh3`A4k-^MDl*H>u8++_5NPy(Z#i=fIJbm5b>fCTcQw^fwOwnb#D960!+^^<%Wzp_$$z4tEt$J-2 zp=PJBWd15I_t)mpyCbtFUXfFPUJ~~JHM~_tXF&ZWJq0g}TpHepwK#C!)5ra5YGTz+mp=yvG;zn>%a5>s>Q)^3ZR$fdjLu9?7r}Jr=Ifw0v?w2{M-Ia z^}an%vZ<<>6tTNWqbkU~6oRC2WbansKDpPRcc-fZO$DaC z>5o9&dVMr3aNl3qnp0eEyBZFod#H!pm1?A&Jmz9$Q+xe>O9ckr`Wx}Na}wt5CsGQ` zLf{ZWILJVEg$BEwKrcUDB#Xj=_AK4zEl7=lWC2tx6K%eG!ZlsNHuR)pzvs0VD=dHF zy;z|p?-Wn!%wIsC0=jL5p?VwK*&DfF0TD7&OgC}6p_hc))_fUE+0LzEG=$(mJN~5k zGX6W0MB4-Iot(bWHtT}w&h9(+d~NQN>`K3WZ%S#^*tKn>M^Xdlhfd(AM|*W*anttm;9`!X-rF1LJqZX(_@f4|QBo!tFx!sk0pJ?A`5lGs**JEqHvUoyAp-xuYM` zyk_!7oGos=ygQ|hG9dj$J+sZ%aH}8PbyrKneJ9am;5YmVqeWDL67YZ@#^26)cH-*7 zHE5R2Icv`U&LpGJEdP)b9nLww|&i&z+BM8Kh-MboI34( z)sb5AA#e{xwKhW{>=G+)_8W8rj@%!&mc&0IS{0AIfaYKx+S|toZ(AL{ThcXD41L4v z_}k83JAeE6<3^zHk@4{0mT*fYpV~NbON6OuUWlzPd#%bEw+_(PTQ8t?EN*UWnt9X^@fIa2KV5XzrgHd)tk&|Z=JI&>f%t^q zm&YeOTpo`-7D}!NC9z3+p(%uGoyHG<&U#@5XI>k72wvh`J!CZ=<2^L%hMY!e*|^+e zjotiC9^neS5-QpeBe6+|@z~1;_l+bC$^8Cfg>bRUM1-YzH@RXDgch+G^gD5<$@s}x zsf@L!N1U@6HwLV)H+D9I35Q9|@vT{(^|Bt5&f1O`|J$?v+RM7{ zv{b20-zQ+wzMdi+@Y%YPR9aQd&%_%QiJM%DSR3Fe~<{?~~l3YXY z6}Y!@_KLaOiRVnMACa6+?C~HKKP*uCM+g#&N$%Yb($*HPX_( z+Ke#hywLn#_D4TC^KkwKB=bj%U{nzbom~)~KeL3tAb%AG*x0a{s23ij zLOm9wcsEZ0>oDyegfer03bTP_G5~NN)J$=<>3%6avzWWPo&!>H6fqaYlz~#~+@p5} zczG>*v&;J1jUQJZW;^TM==*FBz0cOW$%}M)0T)~Kdz00@Rde@b{+2T|d0W!wLVTIN z$i27N!nfSj0#5%_ee|px0fo3I!5kHNYbNPOD*X?n3v-3vuC{|%MvMJa7t+VZzlQIn zZD}6&Ex7fM(W>GdFj5!PG7?%maDK%biQgFd3Z}F4SQ8~f<$0p9vqC8pcD^vy_Euld z5v{^jDnwbN%7WojlI8Ou(B*OO{>vdv6ugZLRfOJh;yJ_CJ<6hAy3~3Pp`b=7{)fUH zN_2XL^K)Lz%PAwDv08>Tcw!x!twhl 
zVD}UqlTzVA_1e^h&<%wPl?0dy0kglm{Cgmg3Z(u6;Et0!+0^a?t{_a=@nvVj*La8I zVoK)Z%P~PBFg2$Vz?1xl0RArsvbPUpTqUoV=nL!^M$8h70PkvKXPx`eGZGIsFyZAt zE;ZVb*=JyZQm^WQrR#`;L1Ep<}aiu+qtE%6l|3()up^>`;^S)#K*rI z;iC)?)erfVv+usUe#GSIy)pvR$J>pkUQ4WcEK}l_L~I;%lm)IJR>Ka{a||amtD~YS z05+Q+()Fw{gHYvHy^CP!stQ0}gDZ9qy50Ka0jg3QFk;!nHU4I~1eUwx_;O(wq-~(Gbt{1-PbfR?Dc9J+P zp&?<=zkUGhY@LQLZk3(7c;&S5kh8>ng(3`?H6pN@7R0PEZMCs-`TCwzn=$`e)>EcD zQYrisYxyXsyCEl|dgSU5WFE6TWM%b916@+kP4uC|eDF<0fo_C>uDcK2WrFT_54wvb ziPt2qmmnp*Cf3Xx^XvC#R_BD4^`;Y= z@SH=~J+!PZ&v4JDQucDxQ`}j!Pv}&k~Z% ze^gJrAqQvYSZ^f=C)Ky_3y^s@H;p8b;CC6-VOF*HeWdh=w`3lLG>`3oh5F{&-xTC%sE#GRBEJzdzpfR=~)nVfC8<`y&+VF zky-L<%^NfGfe6eNaZ!MVZ-e27bEE5Ia*ESpMIK~)tp z5HNr9I{3dJCIO(tr*eELf@SWa@eY8BGt_xLo2qaRA!w8NGfK4peehc?e^%pM(pc`Z zLh&9jX~A7BnKf7P+pXq&ezU7{TC&DwWHhyp<0+?R6s~#U4##VrpPeT|5yp=AXv6H4 z2IpT5B7J?)f#e9?03Xk??rhRt7-rKuawi6AJla%+BfM|JqN6o%pi!O30nLb*@2FcO*-+yY7@u65YRi{z_+Y4hIAAy}P3K@r z*~fLbm`pPDJ~d0ZM$N-CADeWGqvmBT+j2`fx(ab?h8>v_$7tq|T`RJzMje;lwIaJZ zx21;=X;Eg|Jd=`B-K(XChY9VqU=Zu|FN<*eT@Wp6ntIWE_W9A`CSjTFyj;#Bo-fwG zK^0`Wm%I+6*27>}HTQaqMo5@l7ir$nEygATW0WJ%?&joG=U~<6&ZtoTu~%Q`&Mn`c zWINrbr3dHTfWzBZ3u>0EiJr)Yn3N!RxlwnCR4l4ab=T_c8hStZUL~JNL)^!e-1VP_ ze=VULdJK9P=vQNq1m@h1pOYuK3-c9hHK-XX!L1$Qnoxg2JJ0GHDZ-fu8>S#eoIzv5 zSsss+a)IS=zF~=Pb#G~ve9xy5RNV=G5`l+-%&`BNY)V)bqm}cpGPxjVKtm!kab*9SVxq*^Wcmr9ijWrtp>T+ zwBz>~fYYV_Skr2TVS??{EmxcI9^##N#`PIDKzQt4 z=|el3CHCcI)j4UK&M%vD1;+Imyoc9t>z37>U}^_1o;8F#)x{$0kC3CB^m~*OZM?d> zZr1RO>MT2n4cXXa-K-I)Bg)rB;(ZFk@iU;W2X#K375vVZ$e0ew4(<=ovNN5E+_i|6 z7Lan|y`d7=!Y ziF+d)L4o9eesqNIgB=Q+z;cPcE-)7r42j04VPG*WZ&|4-c`^z@cU87V<0nVs*OBMC zy#GnrnwTzSYvNMdlwTe{sPOate=590g&R3VRs1e(Kcex)TBHdylmrXCzzlGFvA7bL zw8c*m5@@?do+dA7^@2?VyxT^=<*Lw*xn_&LB#fA7uB3*%+)3aWKY{!iVmATb$7h5YhRL|`rUc2D!?%GeWXI2rFs z(Y35ISC_qRMPGMkT|-`L$xbM4wHMuUR$_Qfo@%ZCU}rQ@IAbC4NaC{0i1qjzaOXU+ zvo8={ZymBGng8XFGkP~^iK7sd}*ad75Uj$Q3bk6fN0dzF2mYG>TU z`&bSCG=Hu zxBExq!2&zkOcYi)h>$T@Vz&fKk?N7THpp1nQ#hc$rn(r*pBJpWdJVQtLM)(2jW)|^ zbU_}1#^?fgv|x~))Ck+PRq@&zgY#=|4$fbChbe8TX)-J9>o2HHrrmayX@ea%ftoSL zoNfXig6ho6gag021{`oG!z~DQ_UzzMKJ{;&oQ)LN&b?*?p>JjPE+qf73uU7M2krGX zCnXLuCn9>{0(yC=fMUJAc?{$Y8;EXV3NhMzM9&DUHTnVfQaxq;u1T58ez%Zd>LSU7 zDAYHx>}(4vh&=qF$IVvEnZ1*5qE1|pWv#e3w40lCM<%knS&&1#6vsQ%8m=wimzB5Vqf9z^k@V|pxhCtcx#W?=pRmp5dH@i%=G=DI z88tv#QO;8)TY;{y?s?HT?Ny6-Ji+MnJx#tW2M$s9_cH2z_w~=ZF(Ma>jl8HYRiqb-hH+|`6su+HC5lGuPSs_qJr|x7*E`aJnSagtnvA_`#D^bF;IEA zFFXlz;Rb6ZvV?cV?s*;P9&N0ycG}Jo56@zk*=0-Aeyd*o33yn@!_?k7Y71s{mPE?mr`tWZR47B_cxWbs{5PM-M`5i z$E;o#k-gO_+7x!q@23x~#iMVwmYiK%H}l4fn(X4%_U>6t#jTt7ra#oUm6vUq5rB_s z?}fkF^0Lk4*0?on0SdS%8EnlCg=X)N9yBtk8Lt$f+3)H>9N}4sK0#D+^CHEY?D&M+ zLWoG>;k!`;{uigE9-wht>;13-?&z4OCf&FSbc#Jt9J`xs@vY*{d_`Dz2};lw{$Azp z1C*h=Ls{^ck5*~@*hVkFZxWX|84#`GuZc33xNTE7!omvkfD?W|yJ`o-feOz5E1|Vy z!re~zZZWoN{baecsE`tyP?*Y$k%q?AwCT2`}-t)!`OJ*xyJ+svdpldf-OO z36_>SvAfxTt8dk!i8}y*+e}_CL?X7ttsm7Lqz~V%3^C_c;ee2T5a&TU;eR59|DGVA zo#P{8GzriITv0gG2|vtC00KP?@8l(`?gcY5G!5`8m@RD!nuF4_yUd#--qXB6YAm9_v?-xf zzCaG4wh!1u+uKI+&LcGtK>6Q^{?NVb-b7LxpdZgW!3!mb$iZ7zHfD8Yd^a*HSw{T z{*<}z^zshggTeE=F}IzO*}^z^XB>7`j&&U2poDaGy(G4`;+_;3yW z4-j$xd}(Pxq>@eB2iFx8u(eG8(guH-HCODbO-fM{7VZX}l_sw-d1ua)r0Thv|)O^4yDO0;1$inve(@VajPTl!8uOU z2{z)+<3gmbI?2otlefyMZS4qwTTr<=|C-rFAw%s-*!m#1+FZhFv&sGQMy)nxrq!vB z$9-|Wy_M*iZ*yr}>TM_VS|YT_3GHB_UH5Pz^l0L`$6#6)vCf1RhiZPKc^2!dEq;LX zQ8aw9d+VNV1?d&);DRBvOFr3CwD_P!iMpD4gGQRE*GL7LdM3)qiNZaVymix|%khw9 zEXQ><4Tns&-$8{m*}kpGMuiLx48=zGkLy{YBHRpIHbL7*nZ*AzPx=eR=pnJ$Mu)*N zZacfJzL=j8khvA`S$)T{c9hAcUF*}G0C)7ruIElbcP4SCUU#N&XA-Fl${N)J?oJ^+ z#NFn6X`|S2{93kiU?z{OZb9#8s;e7RE=@zL2caWzPQ2b*8MbD 
zri2!b*3u`-H*<_&dYHzsj^%r}cA=YME@Fx~4;0v~DPM^61@q70Zpi;vD|s3xK~k11 zIEfRE$pIYk-~bL%8-cZDQ9|%o-@~mteI`X*)@a0AD7#mi%?%>G+Ig`Gwbn?!q_ui; z3U9EQ!E|Kz?BSjI2C3(igHh9#H`tWK%N@!?cKGqKHIhtYuHXc`JkzRPYT<^f>}^QS z!8A&T4Hz0k;OVN-JRw0E#Z$D*8>x`b8>lEgDH^9xz#FD$y*EnKZ3ZdIxOihkeWqUb zHc$lqhS)C#zGJ0fMm)&c4mEo~&u-Af>d*+-K+$V&xK7IMRfpXXud07WAJyeU^)f_p zFa62zuTVpPg-6h*mS<*wEW#XOY(Sd;jRCBl`>`!k0;YvTv?=W146H>JOy#CAEMR@u z%`ihvCFf#Tu#$s0F(ay%;4Bv$%?;M+q%=wysv;hh4@D~ZlsmcRWZ$W7T0buJ3q|h{n?KYy-Kc32q=DpNcsvHMkl7 zCNusNb?HJuKTJ*OVjf@c~AKOHo0eh45>20f_pJW4r}Q=5s2!o>3J&4kyoFO z&eroFV|}{Z5d_NBRr>GKD0`+R=$O$g;!UBiidsjQGG=s7JfO0K*|r$`L+qOqOzW!O z^V+AxY2Be;1t}^tGFApNX~MN;BD7P-MAInRkc_5_Xdw+TDlaKMsF-mSYL=*~-I7?^}& zyxh60*#F|J5$Uy=g8umT6y(bmG_6IZ;YrT8Vy2kAVQx!eT(M=Z!o|pFva3V_%xJG) zuSwV}4S{mZD^`dW)P$Vvpm8QK~3d^hwCsWx~>e zt)CALHoe^~n5MvVofY0&SM*WP|4Hl`;S&@V5jhiom7Xh93MTts2z7K{-%Iu(gdid+ zJKfD(7+Ww7q}$zpT`UYR+|ZQ>H^HL6QegaroVXp(Vq9Tt;Ibm+P z{^YxKS+zPay}&&Rw#H-!TuaAP%sRI_#h?)p{u|4(we&tslBl!ZOoU{gzBX>X)T4zw zfYhoLq8vrQnc}!@QHO1vM%)FB0slJERWK0cTn*!e3K)agZjfzwj0Cw(Xy^XqgIt}N z>Qt91VF@(lOFAz4^R`dzq*Fi@>9s-si&kX0gL5tw7Eow9GIVda*Ng74ztqHT@)!Wq zbm_?-l{R!xgc6iCSi8`8YW_D9Mp(#;#Cg4`ImP-}|*GKcE;$&zuR3N>k zsJ-zSBjfN+M3PUyNd5tdJjM&mNd5tL$6JUzO)DpQ8o}!g63dgfn;9yj=a3(lls`fC z`2C0h1!rgDkNKVu%P91WE;Arq=10d%g=W-y8HJPR4NOc7gl&?t2?%J^l4FT!qm$L| z&0T{B0)lcY8HHMRXj~$_THK%2GwkH+AyF!}}N1?&N(^vVnN+=HJrpHvfTH5N6tQcyn{~ z+p{l!$ zmyGOBm{S3;Mnxb)*1LVk+Kn1e?}#K;0kS=uYxNA6pmdN`mP8mLlDG1L?fg{LcwgVJ zD=iIWljfvw0}=n%!nvUT^ExZCOEjlC?QffLRGqW8u5w>6y*f1a?OCHp7hng0kZHj$ z_6WRtmEoOq~|H&JZ5O-1-WpYa@P|Bh-@p3xl<4=+Fg!9ilg zfULed?4TiEcbw29x`QuVUV2cCcS3tJCxSpGBvQ{%m&oYx#WsA$rIy~h?4H+yyq>(?% zG}87OX*D;Q8(M}AZ|E4sD;f$zkBp-#$G{})9D@#R?>#^A+AHId`?Gaj?QOAfg5jf$ z?zW;Lzhw*;EG}?P6qOV2M<#M@D}E8G+1RN#HjQDzz~Q-N!@8rzTtN=I$~LmX*-f2k zBdW(syriColwqiknT*Rc~S4_J@mydA7OSoGPn>Ts920Kc z<11Qg6qh`zF>8Ky5YmKn$a#zho*vWeBd8^-MvR)bgF$_Ey>m_`U!vp*!zhZ8S@W(z z`8w_!lz)nAgYqq3j`Cxcls5c0I$Ek8RzapF*^aDgkVfF=Qc$i)5 zH0c6ZJa^+Wk#Cg${T4ioI$_;;v~#S}uuehoi02N`~00mxzIS7w4@ zGD3hNn22Y?!@6>@=ooPeJ}{KL)9B26!k44WdACKb))C7`sV@`n(Y{#MV3?aZzj>;6%tbBjZEF zFfto1dfv#Km4RlJ=QA&8PWgx-@Mf*4h&qy{j`-qiBg`sdWY&%DkN#Wr_wwqG9ijfc zAAX_!KAHN#O|2yLd3_aj#*Zc9T9}#Fi~pdM3{Kd>iMviWgy9gbIYX>7>kqsNe6v}? 
zC*+c1X9DO9COIV&#$>Q0bULI0zUdKd)swjO7g<}*K06WWK?dI5e}SbVRTRqM=)Oqe z)*fx4Uvv{TIuk*i0BdmXMlw=5;uB6Y`LafJYhV=nviJN0Xh8Wz_qo404O>OiivD5= zUMKe)9lDNOBqX`~HHA?pik3sHM{OY#=F%0+1!yrz0cgY~cjsmDRiT~b4PE}mBE!iE7B4Jycig5~wxXL_xeF81 zBVyuEi2o}1ye4SK|E_h9_-W}iv-zO1@WP2NG#I88is5eGDK?)%;qix|#;3EK$B$D+ z)aD5k00efC)swVQYvLm#cnYoE3Bn1;lJ2kOWv+Uk~COO}VL zSK|B87GX;<9fr0=i(1Ax5i+Cs*T#m7bBbFV{Fj!+PR!C{!Zdm--cfhsA1ZnNhA?jV zCJeeYOcY=9f2x<19IaldBCRE%2M;NIymKw3ljhFTa-5AMTI0ROC;GRJPh8$wnmUe@T&3P=Z#^P#v~XOaxK$|y(^0~L*qxP}xaLq> z`t(2==Pnh>=fTVyq&KX&Zx}7<%tz{+n}5X+S{U)#3S!5&zj^%Av=I&dYAv{$cTy$p zecYmq7rZ?=a`3U1UWN^Dll!M{YFM=EMSPFJmZtv{vmJfG{dv7{aRQ6$O%Yx=;a`V( zEO!g->k1*5&U%##BQPZTP=A%PO`%zTmH;JZ*K-89%@hRze|l7AYBrlNBbGhm0ABr` zweI>E&w`3p{1Iiy2tQDf9D?79oYw+H=eX3kPOGK`Svh)Sj>C=_|wc}Y!1&k)Qh z_G(ywvdoC5)DxFD(uhKNhE;dcPx0$}w!D#g3gZI}C55zS)&nDaUB3s4EDC$r{n6Z4 z6)Z-*QlwrpiZy3}8357T?n8ut4|&@>&snLF^1_o1zwgQCGU>GNfFax$%nhLyUBq5% z$!7Tn(Mv>XV}U{EPA6b|(n_u5R;>ihwFhTpO4)0hWaXP)b?kIWe61TIgv_&3iS z${xm(y8WS$3@t|}ubA@iLVnd|l*G|lO9=gg%6idm8nbWt4w<3MiQ6lfoq4)vt(uyD zKC~)|vB^xp5rN$3wWwrI4UO_5;$WQVNHmu}t8O{3iNpGsO%mp*K#OM%5Ty7*&KwCW#r+ zt(BXDMO)@>oyCY2;Tql(U<qen9HmvfIINryPhq~tWoYAV!7ufvxxkGTil8`M_E zHPOr)!tZpuOonQUCt#??`YuRRxz3N7)H5(D=4P}n+couSfvcGdssV%mS(;@iQ}=}l zEtD!uuut}8uTe{RvllVhV=$3~I< zNvRDLaDO?yJEYjyzt0UjSCrY6M#yl-3E^cx@twT>gJm~+Eb?;6(=%usA7|_fG7&W6@y}TSc ztm#w`AHkck;rKrNZabIh4Z$LO8TPWxPTj|-1%p=I2dPu5(cFP(aPU-uI%77ZE*i#dpeD?m@PZ}wu5?O+X1~Rs=;!+ z!PQb_`T9)b~F^{)4y1%|k%2=ADPU0WSKkFaQMj$k3jjygS z>EMYTCbjVNaCywVbQA82U%;n)Ka@B98LSF*dO2wGk;)KjI}yhq!91PWVo<7kBymwB zF$XsA`jhR9DounIdsk1|z|0oRjz!bC*-j8w&ol7bHlV{) z8(oaKmW6Gjy9(&Rl}TL~f6P>)2-XVWr!!%%UAfJm9ax>gyh;!At^xD#dUPqWbMPHx z2}bhEVu9S5kL>{-NDutSle!mlyiD)?n`c;>Ppz{`wGGnE?Wx|23o-W~*uqh_hsnqX zxow6yuG<+VLL2vkOr-lpcae>X2hW@h?gcNV(~Z;aDv$+b+vgr04avWEur3kboR}$h zn8k_fp5$to0fK?kvp{*VikVG=fJq{>-3dLJ2)*WnmH|vB^sIqp7xZw*aYEY#!Nk}+ z=xqjisD(DV^Ck*T(5`5wu9w5ZJOX>(M)i-t zGpYsi+#7HsZk)NR8%u?!*zx4grS55e`7Dh;U!usltkfCT8+W7x&Wz%gdg0RGFgo*? zMh~b9;$BS`eqv5GSqrqaj(?+-oWl_)-YbC0DIsGI5B)amtfckxGHe&+Wje8j{?Vs< z)jhfaAYsY%B7je~v))DT%XGm&aGv|C|Cee1@mZ$T$j?jNhPS--k1AEzvPSmH)cnE+ z1StH24N`ciFH*Algwo19^MwG~SJu)ZyV;idTDOdvFU;bW5+EChiw!SNz5{ z<2NQZ*|0Mf)_FOJ&w>Rr2fl!lG)vBIHR}S47M^^4W|1up%?v7Iobt&!+OW{U z>wANH#TFUSI;E%f;X*@fl~%(V_YGDCYsq-LiN{Q9oNc^`Tk$4dQ`x$u6XXbZKz=C$ zWN~aG>-@{*SG+vc191;qX$!GXaw_ifA`aik{Voq)@Z^IaF3yDoLo{Szr}^~OmDF`c)b;` zXuD4@BrDf(Kt&%YHA=W`v1*GshpZ2_cAg7hw~r2$H3u9Be$>i?!&_y3qo?hTi+B^ zNp=9PxEn4xfC_W~b>S+!H~+~W%#+^UlS?kj`{rl)-_+$lnQflXAeHt}erY%J1Vw`E zNPT#mMo8*3uQiu{s-qB5i7NJUp;Qpw!v#h{bhp1yRIo6(tgrcCK&?NoEqy3g-hVzi zqdX$wsV(w|cy}UHDN|;yyKh7fq@VETE-m&BS*R<0?*7Zac;3twVykX2ah^1v143OF zz_U-DWEzpF5VrcY=@gGFF`u4E%P|i3sp^*bB(>lRdigUBO7!y29J%`|4#8yt0T5dF zjl72p0zFdH2pUl)zfR#Z}ZFgPs(z(weBA5 ztk}1`d8<{o!s%2-nn`qnZPSjT#nE;)3HUg%N-HmEwYY)SEc#@Sy=^|v_2fUWzNls z&RJebbK32W0s2C3y7O+-?aXJ+C^+e26NJs;>ZWcLl^Rv1#3?F5SpO)+bR+2xpDg0k z{YyW+lX;F>%2ZDoh-yvhy3Ni8R`U(r)d7ZOS3}YCevllEYSU{Ep-FKjSfo3u{zWz);`q`p_VAIBK6!7HNEnq$$xEr1^1r$?&n)J0=5KsXN zyaLGA^`s#()>40+^FeUVJHmmGb0FCIL3c)TABMzt%a#=sGkb@2=4R|KN8};gk4Z@r zTm}$@ip>}1J(rqIRM>LZbqX z?p`KcUb&}G?k7csu_`3w7GYt$x3y$ckCBD3lkqD)Ay;LhCa=oJDC3i&ZhG9$>*?M3 zkCQwmK=70JQ4Pv2nKG}OAH1IZQ04-HlAr2j3-_7iH;3sauX6WZ?=Eo7tQGH@l|Iw# z5tIoxSy$5|)@vb|V`>}8J{A8&zJb}E9jl)Ca+hQ3GGBC$_4GdIZa#Q1_r^QWnY<5< z<%6#=K#%C_u4QBUD87b=n;2|55U}o6lJej(=)Uk^K|UV*P}Hp{U1H6<*L(VWYJa-c zS@86uh^uzd+VoDo=ttj!%T6&bWFT-M->G@RSdH^Uj@~%yj>frOPqNS`J+yfvXkO~h z;IhG9owbyodYFp7t&^W};tuCzL2%g#-V25Kz<+l1*Qz?n>v@oQzE-GfC|q*Dt@)^X z6E9|{>t@<+*47=Iu(s-26m)?hKnfL6K-=NLY<2#faF;tn{`E6Upxi0gO*YCKPYl}? 
zzVMAJ#bMS4J=d46ur)7-l!`y7e#*1^!1t$%{aImH4d3ONiV>G8<~@$C(G3*?OOOfYsqoh0c!m8v{-H9rz2yhHGX-L$M+y5S1R^q)qO}CH9-2RD>@HVC=ZB

)HnAx4=Sk`U=V(}4`#0n$^C!XE1#at;L2EPh5xXvlek%j zPSr4M$OHJTcsy*!gZmZN!&)qd&`F{>yjl=DMN9L{6LWq11<05MlzWkB6_D=a-EOfy z*+tOp+q*mNd@DN!vqFkyaE-~G^GrsmWXb#Idd2VH zk&1UYY8pxnPpXx!w0UZiz^!p}Ul^f_l4Y5X7iB(H+e-QUFSTv*SF~-~7uzP8?KH1p zFI9aJXd1`=rG_Ol#qXfV!+|#G$PG)@7>HeQgocS1|6giY&#!3M#3MKCuNRtzh4LB} zd`@eU!ml)-|4U8#^qen8v@e0#RbJE9X1-iRW3n?q{M-Lh!)AX)!@dM&Ef>%d$dx9s z0;kF}H%we2lWn#H?#i~Xf3kvmShX%2_7uAR z*(jrpaScRsfXZ%4StK!eV8kJ9w272>{o61S$=-gVmBY@T+>KnWF0M8++)$qlKgkgG z0$S$2V6G4d8mWgY2jcW&_%y7ue!4v3XiFVmdcYGOQY3q4@CxtH{?vFwhPibA5nW0) zaZD;_)cOu-U}^^n*;aSU3J|g5!t6o&Q>;vK38vU4d{IVK4t>3Y`SQS~S2+yh@w+^hpi($%53Y2h@poY-_t>bf^$ zMsc&iFnla4y3ewrzpU&xpUu>RbnC9$|C?IWPWPUS&wuhMzTXPUHfju-rGoFt7A&w! zoxwrsjDX0zVT8FMJ4&Bhbc8+PTI!6cH1Zi?rBNU3;Dj}7CkN>?-redCUqu76Zg&HY&3|&Sd6MaFasHE2c|vAKpzy)4yrPcgcCPub4K*}7 zqyFK{sDIS4EYRNcXSr1a!?GGRBvaku{BoD@V(!U<`A;6=iKjB6VD|_2ewyWK7g3Ht zzBqGi$sEOBLE`jS|73oDlye>R>Qg>$x|b9&=Mwzajy5m{nHSWy+@i006r}PDtH=3v z&NBiBJt>0@)3_mYn2I|->D!t9>aP3PHw0m? z{o;To>q1X-F(r#;T&vu{*>bD6AvEJaxf#~$_)NLF>t0T|Iq~l$Cb0&|JoyGsl7lWb z2q$y}9_MlpYMsv!gu9SKPHFV`nR(wH9AVHT*y;{ubC_7SeXj!M0QEU5jJm4bzHv(= zdDew*>`yy*!9V+FPh-_7o1i|(sH{voK004zZ8BJw(f7ejk^g5nI!o70V=jq6eZ4qpxk{Y>{gXSLLIJ|l>{+m#ws-Qnsj%Mt~ppBdnH@f3k z@G?op(M#9K9!%zO?dc$DJuRbrdd zCA{OcU4+|Ioh)w5s>CMc{2|ed5o6&FbAu#R!^6%>Mh-N4Iocij13_nkndgvDMKl@=8-8$LN19TI= zp&80Bx$bnK+v-TA8C>1Uq1#NV`Fkfq;purDtK&ms-dcG`yOOVTwJYefMf?wcv0;D8 zHEjHsH|%X@vr&H4)gZmo+*dhw)B0|=nP{`>!W*a+sJbjDp@I(*MV+d8hKnSZlcy}V}q^Qn2wYUJzxezSgZ z=a)9?bTz95)cu>y^7xqn$L2Syqxyf+tk0%>X|onmaK{;cPqQ-6+Ektgt#AF0nsw8c zH*5cSG)vwIe-m06thMu-^$cJC_Xlyq)GvkBcPKbV)K1E;(&h=I&szo+GBgc~|L7Um z*~uDHGwZJWoK|)I4zzw9A^$3=0av9^1GaLbmGt_k0axW&ot4crIlLb0v$eE$&{O`| zm226&`x4f|tUp@Vzt3v^7+_g1T-4B%o;`JJa?HaGO`WGqJ=2yif1{cB)~{P{XO@e2 zG8d-}%^ODNT#^o2OFZhx3+~fr0l=o!-BJ(d!~ZO)@sV~zAp_C*5m#}~TQKQjK3V#l?@Nv_kHs}uo}GCYA>&RJKJ)w*_d z@dY#aa}nsl0O-BHEsb1yPQEBHv`O`O24}i$XS+Q>m7{ymK$}7{-S+N$Nw*`(J8<>m z6+R9(9klK^FYLTwH?6b6{@2yx*VW_Cay^DA8&~KCr>F&;T|K5T>IwOL)nlAQjM8C~ zJDCL7tBWsGhcEm(9iDV{euvMyn-sUsV@9;D#-+m(cX84P5n=9$z+8}56wi*;HyeSa z*$Bpg`EZwlMpj~Q+0M(zWI_%1mF$(KtNYpvI>n2R>LPuz61j1u&)wgf7xA{O&#FR* zz&S(}^2iQ}Wz*UJVO1)sIaH?a5~vt7D<-B!81Un@cUzf3GCOhgz^_n&Or*GQVycQ$ zVG{*hR3K|Ugf*SBauSzcRDC=)AglV=daG0)kL^VD;ok9sPt)#?__O{ZA^;z&=w48N zTK9ro{ALjfGl1)?$BO4~s_oJKy(|7;YTvA%=@_C85)<0kT?Qw#m+&`)rJL|3F27}V zx&@cFm)z;c8K1KPtNAHSBO-9vUGgO81smadN_=ZcD=%70TM0l)gf_KykdTmYopp&& z8x%0qBcV9?Q1AI6BHT-^)~AV(r6+?EAskbc}$0x!IfuIe@|dehY^q#N+((E@4kL9;amx@8Yj@NPI1j-3(-_zzh^gCzWL( z>mz_8qAaWU9z>iur93x|Krp#d6cH3{!gkVYQ6=H^H>12<0s?;^x5UfXR=8c~K5`e@ zR+La&vP#92h=W8gIK7$FH^H=Vr~ke_pA0{ElZrq>uPeW4?$&Gx-^W8sB{)jzRe@nO z@@#) z3xr+?z>*Vnl6Bws^?zuAaE(`P^JW@Ym%P1j?k9EliD%LknUoNl+`D)YIkOHyw`K-` zdk;tyl+XH{$&!0azWNLP0sX|N=>>KEo7e^Q+t$)iMAHMna+5N1O{#~pR&InimtN)r zvAuN24Rv#iYh$OV|9~gs&)yI$+Qb{?S^T3q*2!U9CPIz41UG|pjRM2O;2iwd(z9uH%^;g5F4PRL4;og8)wcyrBc^wvIfbQxOz6K5TNEY zxerdbgNmn#VqcN)V*I z3qi)le8Fcu1QEPRkX*Eq7la>Ug&!2nB{M{XgbBg?Fc=~ZtER5#Gc&JJI8=g2S*$ra zF~}ed2jNVe6MxK!FCvz4yO)r3v-{EeCEzj&XKTUPgfoIY_@)+X^e~#rASn|yhl233>W*Bv~WV|8YtIXJUPdAl2;|<-(@8q z;(iDiC;y2PGf_9XpZ~_pzC)+vF5OH{se-Mnvi%>&M%Znm%lL+Yxr~s2D@utXfsErUn`kqXUcu6M3Wi`tj79UTI=E37Mrka?F@!Ew~9RqiwTJ zCK-~_E_)fqc;Q-ht|@9Ux7g0cT4M6E&!4B2or}I4=Fih#F8Y}Z{pI2qbD_Un^fwp! 
z%Z2h2&Y!2hT*w1={ygJ{yW}46Z1d;oFHhvAJAa=3I`>rYH!)9tBgxT3(A+@q$@W;C zaDq%X1&N|4+k%H31ekH9?ufH9fI>h2`f#0Zma6^L~B}pNqR#AHxEf_}VIaSvGW9yRJm4 zA?&Z%PiF(GL8g|7^|t<(5v}@mmiF2>sUIJI9A!aL>Tqx+C9y!aC zEdFNaDdpiaNoC`>1pZ7=$4f0arm!JV)M3y}#vN4go}dGNA#AaQEv)|TD>sW9CXm4{ z+N#yw>E%8!LX#3p+Jf>YRLRpDGVEc)!f;bZH^qDJ+ez?XkzC{1?4>SQOiuTrmWCjy z8k$LSi}bZJg%q zGQyZ{_L{iwdwk-(Ht0sfyQ`qSoPt>DFA^A*w{B&KoNBM-n$+;xhmLW^}2#Q4L_%N2%kw|li4bR zoCm}PgrCh~NWfaM!G1oAL)zM`dSe6F5+(sOKKa(;>sf?a!qx3M4G+bIvUO*1xM)0# zPLP+jNBqk*4GLM3CYQxXbFf`O_ty|~e~Ssax1BfH2Pk%>)XM%9@quLFi63Pr*m3OA z(iDE345rR{={~^(L?B~T?G;Xs2i@2`G{NZWKxlk&+(5Ve+qney>)c;3nWD~A;ySv^ zmfhS66dFcMt|H%mnH+4$Q@lF(doSW_)TsDaGHv(MfttA!nc}a;z6(I6*+XBnTi>GlS5DG4sUxl~&2%V|I4_d8 zjSfW|?J&YwUWd9x=vH!E8J(J_-b65mabT~WQg`2^ybi71rQtl{juBwB#uG@y4vlxp z6YPb%juBe#$bu4E9MuP%WP0P;|2{cJL>Ly%FlC-7twzO%HhHEwy61 zNW!G26`cw}OAq)RcQdyN0^DK?#eda{U_n(A@!rzfT z63P%^Dojp*YVyVjxgymYbL%*6VI902zXC5Gh#_9}uC)NttPQ42o&-gA_%AP?7VD+- z%uSd_%TkugHkk+}yWhqkQ`2UWDRbRUl^K6rE8sMP7Ltyp7~Bwfn`{t-V0=3<9)!Nq zAV^0wO~N6fp)Xp2d5na+0HsY<*4NQitQlf1?j|NHm$rbYx0XI7kw%o9b~e?S9k9)#w;>Qv~e0wHxp(2s8J+VWwl(K*ro`s^R(td@$&1Iz=FdXDCmwo_;SbNq#*UYqk8hq`%^txaG7-KEw$+e< zQZxj_C4hlIH20%lsIiM1fr!(n8Hw~ZjgVX;?$njf;M?ZVd7sqHkKb=Ljh(q@ejU-R z=e=Dw|EBU&I3L1x9c_Fx%>NldUPy?<%{Hg-PG7=vx&Kg+yMS5{izyXxXfZz>jfe!2 zx0ASMF|=VBAHyq#S~yTjeZa&U^f4UNiPgFlT^x2ofoz;X*qKn}-u+FqRotYVEkGFuc@eWF9u>w3gx%P9VWS>a>=ABz-|c)BeKv zh*0y}*4zW!t*g1Ie26voqs*x<-g`a>qj>oliJ6VyrSYf{t&em($xD3REq)bTfDdid zBisl+S;*B8Hu&b{k<<_nP}s{Y`k*%Co!i7aFnpia&A)k3>Q3H~bY<%uSncS7Q1d78 z8+ra$p5LFkisFmnmzm;Y5t3LADbx!VY48^Wm$2b6<76#)mr}|pr9!13#F?6i2Gowo zGZ)ddcwgSA;tl8@M2N4O;SH9DLAh}9(!KDF*4&pMI%bq54-P|+Th=WGm0RnciFS{S zpNN9iKlbM(8gq(5ozR2m00LD`bWy^t0R#4m_5SAT9dOcXLMKUBl`+*h5Pm)}-}^WBJ!}4pRAMQfQ}bY>d(VTw%4{^0!HmXbjl99d&-q(N zT6We`NN>GfJ|6MccJB9F!2S1^FXENhxrUB+Bl~)p^68U}-KTW=z{#!xPGhT_Kuw~y z#%?0Ug7-(J%E%pMCo_)Y4blw4a_W&J7fpA&a-E_J_jTAsTL`izRgR)IFV-ecO`jIt zKG^vn1>5Gm+w?)9l{gpL-gT?1g|EaK?55o#ZPF`Eu*y!f%5J22x2S2CRpG_Z`QRS{gn|Psgg^B|Zg8voa9D{RN> zoL0{oEx=4Cf=tvGGjHHpPi>q|oaPB|W>vO-RC%ndar&@=7n|o}W(MV1PDz``&Peg2h#W?vvC>`vVw=}F#Aw}fsl+XrIF;#(4*mu>FQX}cKUA#I;DP_ z>=+AeB$;6>u)2(Y#ICl%)uT%R7I1vXbB#pk%_kM)9LZ%R>0yrul`$v54RJRCW?g$xYq5)3pDN`BxylFx zX}A6RZy#jONf)r>-d+X=qJ@sOck=vp{H!;tuwAnKtEbA59DBOJ1{upj!1{f>_oMlkZgE@oggQ#6QZ~_yQoFu7$Lt})L&d4 zXV<<#t*PS}hZO!ZtC1gd-P;~PwzYT(_(k%z#K{`Q!j9prIp{0i+}NYQy{An`X!1u+ zFgu9_a>3}>&oi5CE@TU{hPn~Mu2y{3B?D1JlrbJ*+|!6KocFN04(XTz(=gERxDn@F zSyCI#0}S*{``Jx9;$Z(NzB=q*dB@4)lErlq|K6zsqW*WMmPE159vyW)k5(QGZ%L3bmJC{J&ik5I2cwviUW7D|LAB@3)@(o?mo_w zY|S`Cm#bV8_z3eCIBIh-wsA4(UFz9W+-OLmPC=KBp?Zyk?^DCM z*_`oD!^x+((38bK)XshWwYTj-95-T#l-Quw_i!09s?aET7W+gHz*dJ!3pzG1>!QqS zd)ZIQ!}?zw9#L0bYBg{A;&iM5!0uBhtTLW~1~^H(%sudn&(O43D1skA48j>?Rz=jg-lzSElC{A1t_rhhJN;+R zzAc`6=^-uB^DZruz*I(P7dOhrHO|gqqzZ45U0~2v-OT$draN^i# zVsvRVQ6&Y);hpKtlOX;8UydNcVtAJW-Hj)iku$Th@t^nC8BG14&hy1 z?pNmUiR+ld%U9$U5^p(KA21m|luS{ih+I8wv|3Q>saSwYp=(Wk{z}_9{%IdCxR<>s z;=GQ9Ii3<{TVp$xc>>P+Zs-`!V}tMMQ}=J}nO(gkj~3c(f($aa*}SIRPM`WFZ)B2= zANsssHqklMmG>@wx_kD$O!Jvfq%Q61!-t3T;TzEW7$nj3p;?vE)``EhW%WBCX2yToNsmf$eSqRme_?I#1d=$~6k(YQ!xix-u$|>aEIiboTREwKnk! 
zupj`R2C@F5mgO@UQ9FwzTHM1%4(v@UEaac1Tk{^%Ei{~yM2cBErkC24`|Z|u$wRWy zjQ|(I%4rTAV+nCd&*9D^UwkG}`nkqLb9h>?BlPGT_2}$B0a!LsNfw{24QcJ71Il?q z#ncI@MJ6$wthxT074$KWL2L2)Ir=&J=~gs%?enR@Ec3=Yc~!t#Dw}!(dguWFh+XN~ zb=L2Vps;WSl2#Eq*3R2!gI9IQE9qKTiOH}B;^WdkL8uHIOl$1yTs|@*uNKw!*?+$D)WOP@hQiu|hkoK-dO;>X7`Twj&y&kLH)l_yLFbaEKgbAH zD41w{2fw}OFr|FJy1x~BL^#l3jrktCxB6Gzc0OyE_)=tqT9zyc2ek}CZNN^BW!>vZ zfJI>SxrZg((v_=oVZi`IZ%awZ)^KzSfY z6MioV@E0=BFtZYV7&M(!0hiePgPGGaCiQEmd|#g;K%tKq26+LHmfbeS0Mh$3F(xl* z1LWTwPSINPC@!nhGj;G{L*j=4qwhuiCn@Bq4W1OD@%9wHw*>N)zS{6Tw`GHwAK5(( z(N(QMS7oG6r{u@)We_EFI^qHrFV9Q_$y`S^G}}4O-BfyjghI#SF>@TdZ(`rY<>nq$d&tSJsdoRx;O18!eExX(kHuAXqS8683^-HKCoIl zN+}W(Tk=Y*6g-;7qeSQ!EEpK>sb|72ngbUd>cQCdq#&wqYYT1i>fYwnJ-00pdQ*L& z2-6=1KHBD9dz!}mIy{EP*jmniV3O0;rbP zFJuE^eYg@cA*(w!J1jj|eIi9+*x*Y`(UHXg4@vQsYmQMu!3{VmLCwO>L>@51!_H(d zCx#uxVMBGYdMrLtJ~>0L^yE(&w2b^Y z2Hpr2!|K@iM*I}dX#`NoLW3m>=`s!$SBbBHt?~rXj;jszLgdUcx%LS5hBH~UbMW$L-;skP5Ljv`Wh))1Ok172+0;1%6cof5GvPHXg7EJc}eG8oC#HT#|&+tpM5B4N%to}zK zua~*!hs0)#Gj}YH)ndWErsKdm20&+7f|mWPr{OI-UgvrCkzoNPk7g4YW|zcZnRbUe zp4DbMBUM*^8p}?DmcyvCGlCreYwp-&@qM_Aus*Gl@wGCy{##4eHWZEP*Wlkc^=NrL zTT8d_LfkcL+3JS6N+MW4j_ZS^RkORtdES z`>`yxGOZfw3P<%zouCJ?qv-6Ies)t^e=garyrJ%las4C+%5Q3uI|RDj5Ip0DfsVgI zvK0m{YSsYZ8KOVsYSGw3nj$&MG^ZN`-ZZDSG=93*oZ7PZNv2jD9su!>)KR8YI%SV6 zwaoMlF}%2ZT(b8Pu{t&uU4QMK_?a-N^sV-)H2wN0+?JgpfR_`W~W_EMO zko;bv*r0}mcJiA3g87*!rT4(`d?PXHa~pMc{9Q`4o!7OZ1nkN;4Bo!zVa`Y}hyBwj zL|@kP)fB65u|HAgN4W$vI;5XfKl8@T`Ll+q@!pG*m*(8j8?|qH!4$i1^=$4LQ`_hOMox zykEF!B?5}~MjU;RH`n1@8tGLYwRWF-7IU$?RDZpHo@Nhw&5eJ_i4{J04uajo2#-zo zQGmFXiJk=cTo+>DUTZWEkP0LPAir-nXp!)Q%+^nVFZPbVbQ6rjh)_kF?IJVE$>N_V zTX}NiJsRafr+_LXFfc_)Lab!N9#gdBMtF{inoZf_jQJ#-xB@(gKquhT7A!Xl2wFF9 znP7eJ_D?oDni8b>sqNfZN;JP+*%_%M&^KKi?h=m)?sr}elPbG;py35>{Dpd06r;**c3l5rhd`=;6uJ z$AtZE{A7gc)Ok#po)L#_P1+*XsJ&4+r#=dFQ;$cSRg${sf4Ef**!QI z=s0G_GQfbXEFbZ>ulK6id&3DU?DQ@V%RM5iz4&Tqi%z9}lY#cHN&0>mv=^E@@Lvzwn4#L; z3sJM>K^x6z7qq?h=K;J1fJ=mfc{(D27^iJCcN^znjz0GADb8-6aU zi~`K?-z+QS@(>>tt!xkP>j?8VIlOQ9sXFgpvE6#GMBbKe4k~G=3r!jPw{WQ<7U((ij~sfiQ%q^+_H0I*jews>NgVx+TUs4 z@(JL>{;`+LtV|;IHzt!SCu7i3jP}V_eY|X#m$2FNn3}}snj=jq_HW;a3!80?Y7hI@ zSo79ukmqIw`T0TKAd|FlWM)eJ*4!$RZhTx<-;UHJJ70;1W0z6lMF@o^A9=W`bMVw! 
zOu%7=zou2g4+&cxzGdS`%{PRZO?F}&Q@JhbXrT!feW0XKN>=AsWmiwu+>Du6L8Qz3DE` zF%zb76fFA&H~o;b`=hFezTx@;_PgQpI0GxD44#n$#BYb3JsQsdViIoqb?`hYT&~c1 zqe9QBJmKoj7TFIvrN(6Skm5r%%ZjkHDN_$i777@?FrFUp8itDsK1XR_&G+F9%$7T` z@8tOPyz-AXNmVp)Aqjs=%{Ni!ps2bXPOr)olIRt zhplFjWs=D8VO1DqJ-yD=Z#yT0Ol9s(H939qM)$%OKmh0CaC&9f|3>^ZX?c8+GbWX$ zzN6CN2MlXCf!#*miybeGbvQAG!fT9&=h)In;jOGtd-U{NQsnUDlw5sg6NroD80mg^ zVXn$`?#kzJ>ztd8zZprg9|UTm8jRI}FRF>Y0$=Jqe8C9Sg(^4%Fq+yb#Bl0t>%gGl zj{0%zxGr=707#FcD%?#!mk~3ICpeycU%TekIR^}~WHBUQ%9I^cFBnA1h~HPziL8am z)ECTDNH&Rq?vLkCx`$JYTU3@q_^99u9&*gkq|$y&vFi@8SLg!||MowpQ8D;|uQ0+g z!oBHD(jMpzr|@uMI4(vFM%`M0Wl}48Ae?R!GG+>oKc2euNGPEI;zoKz1;U4c?&TNc z24|ytx~ZACq*VqV!e`uCn)(OP1z16*j@B{8PHq(Fj6g7hXT535w@agiGfN}R0X@b| z<_A0;R-WHK;y6LpC_ry&9q7Jsey-Ma?(d%!N_-N3O(?*goAP-p0&lrfIHk4!7f0~Oi_)5E2Kw_FFQg5{0 zF;JWMu|Po%fLXmX4mWfR?c>eKoWbe>?MQop@Y_b^oAp1-&d-&@bCB8SKJbj0pE-LE zavJjw|3i?G5gY>s;I`W~ri>PO*Upu|M0#D`aa}ZHT`f7Jd+UP}=-;F+iwm>Kww`M@ zMn!wu4r72h0r}fb)`H9rl{ZpD*tw2f9!#2-cH>!t@RM#8l?oW-#!5TT-nNU?5-Iv( zH~bE)PXgx$HUkPLu<<)lI36rNXe!oO&EG;8)HInG&ILIbXll092$iey@yB+4R1ryD zRc1R-{Yx=w9>f?Lajq(Z=f{lR4`WH#L3%nU`R&Xu5BMy6M`3?1BFAc^x}DA7)<^tp zx9w4TqnKqS5J!i$A_5jii*|%Z3@VQw9ZrZvi9r zlXtQzswFV%m}R%b2rbn#PYsV^>nWj8sWZcso!Dd9*0y&KbPc8POT$eq?9#HkQ1;00 zvE1ER5t_R-h+&pYKJjS8DT`3hDG?Zk^*RJM^SbsaJ&lfa;cu>g*4%#+s+c~e3fXV4`c>R!Gu^YA|5@!tgqS-Wjj^`O z3nv)Ru^iyYi5*JNKbzmT_^FS*%MDhcmgvF~|2Avx9y0!GkwS%Sq%JcJK_=s~lrzw| z=i@VHXqOSdr5bNk1DaM&3SPG&_>9IX2e0iNmzg%9E&dyz@0iSsXb|sx3=QJ3|0WG$ zR)2UE`qLHyPCR#kFw?gEAGxW3(S+$rmGKFL=wbsg%jqo8H| zYeWwtf1P#|G0T|G0IwZ?9eCC4w=tCztAi5Kp7WV;+#zhV_v`x1yfvz|i{u&m`Tqtz zyR0Xr!(;qi^lYH}*K_jdS=*BT5j`7-X7J1CS-yT1!VQby?LYA~{LKD_KiM}j3Vox& z$8`{eJ{q^I@;a-?KZ2%Nj%5Gg{p;yDlKm3A6WPpjBtvOKoc|vMpY5XFto*1BN0osf z|MxCrzo&BJLbmMatP9y95}h1|lFNix`~xNbM=oR^;^*@fE@Y1#kzu z!2N&eOg6%ECi}^wOzsi?;+Y4eKgj9t4P#KUd-GAAqcDRNbVevz3SmhmH=K4O&cnvR z1tbiT)D79j%j@>0(}wxiCoq~6DeXwyH=Hbgze|D!Hk5HTt&i; zVBbhJJ1_UcQTWYB^>%X@dVN*6^)FbouZO+s74ff&_YNm|h0$wt%%I3Hx=O4rm*O|# zuP=+A^EEiGaiytmMu^PmztAu%P754eeOc^!?!|H4_?z4emWj2t*q4jMKp?qKK8#

NEIXl`zB5% zXuOtXB&y!n{<()jc?(vZ{kFez>J7|~7d+vnb!RuXL6Z)Jn#58z%1Ra|SObj7q<>E| zy-m|g2?Rmm>z<+)G2R;aCn4`CsFtSfL%&rCQj1F}TH)?__%QvDvl4IJpjupy8rA5( zJEX~P{x4yHJRu{v764<$6nQv-F9OaBi7CiB(|j1ccaUIsJE#wD3_&*TbrOQ)WK(09 z_3AbD(^=#RKa+JfCr~&=+pD&>(1ZN)pWFsEKM=O+24q7i;RF#jTeU#gWwp{D5PmyslEKE1?uy`Z(@FDV2 z({w>>RF|*<33@8qfA#vb@N-!pWcrU0VYzK$fj58(>c8qVK-eiDM2gS|Mj;8M*vAg- z|7rRCe=OVoG}~IJ{~g^xceOgoY*t&Xxz}>1;{#UO3=Y~|<3H)p@&BVp3d0^=mTj&E zAN^AP;71*)pFn_|ln`;gSIVFwvlBHB5(Kyy)0eSrH)=Jzx|Q=5dnjJH$bELvaw9V< z(u*BJz!FXibLu_(o#f%K=l_Xo4dv*cMPQW{1|xzY#$?uyn{w+1 z+D+S;#rmVi!-2I@&{!vczob~Le z94=~2?+CFuZ?fd^#XZ=Yhk`XWgsWN5!7TWbP-+w5`Qc z{0hivl&PsOb!WDYuJ&6?)c*3q;wp@;^LxZU2s+)Zr+4(ZJzCVVZ}X|Dv`yi3Ui1Pxn?FC;|F81vtL_49P=`=zK?-%|z*i6{8kL@JRZ*17ZE zguPA-?$DH_z13?XiHFT5#AMfT-VL_y85l|q>K}5}jr7Hj3;DMN9o%WhtPNJK;KCZ+ z>a?ehCQH|>qk>NRm}GBXEvN6ZnRkehp=vvvQGIXgnFOS+Vd*c7M+Q{qQbw$nm>``$>LUQj9*PKAN;k z6}vk*a$`3s2>U9QgBAshk>9=8>I-2;)H+$aL6QU_Sc)e{z$)wAP8?!gJ9a&ur!EgR zby~OEY&35?UF>bUo8Gdtq{N2nldywwW{o~d4JN43=x1d`ByO|HI)fG3y%$xdg1)Ov zc`CX*fPIj^tn&d_`@T$(?<~>X-gN)oVLSNH;Ey!+6IXqvkB(UiGWlyBlV9FL3$2&#yOaIs0q;y7n)Sq#|vz z82g|$h)k9|gC0#ghD&0pe73xi!G_dGoEB!nxcC>Co47*|&?imxXk1}Bikf!zuu1&3 zq<5jc?R~ov-R~!{h~4_06-`blZhEH&4yc9klb~E@>&bb{i9ZYNH*aSyHN9ULe?N6H zh6hYr{OOPBl~^o2pYmGf`JSMIR|4(K%&n8U=6iO2HB(k*eqrn_#yUHI_^ZJMPrxCr za~rZX?IpnooCOZdI9OLMcl%I<)R#VqWLE~A;ix%Dc8=(8kwY7ua$)=w+KsIMdy4qF zY<>MriTEMCpYo~gC+dx38<0oHa%o!s1T)@vD(wj;u46i_(!!wNE^PO#4;g+dFu_;= zSYyx7RuR_I@30^DjPd}4{I;HEk9U*WhS@HJnexPXQQk-lx^7f&^LBuhtwNBMe_^}e4nC@ ze6S>ti-=Xb!u`YNCG6-BM^1kuc9*eqLaj@@@*>NA;j6 z?bLOarV^qD=j6!2drj|HU~^S$kbPsjQpboI6t|o2Q>Ey8w0=OxDGpdrxDETd%47`1 zSAcLlMrdO2I+MpSGdxCqL(0K{S?h8}j+^>?PL4z6G!0k%^UG(ErQ2FExlGcK^I>w# zzNXI0re4IY=I%3^xtuemBfCgAynSbyKiZllFmtAK-Gb%z*9)zG)#_HTPNoUR%s=!KKMP(y`q z%9qo}d}UoMsDM9H*H5S`bxzp9@H`o_-GSIpBR4(Y!-(SWGz{T3cY&-^V4H&x`9OT9 z@v&!}j(AF_jmX%ZGz)c*m|kUKt(o`M-Q5dH`1R!1kKbAR3JQyhyA>bdUy<&aKSy}@ zmA8sA^wg7>nZ+g7Je$J}Qn3Z#&vzp6CLYVg*>x7mh+QuiyJbKFe;YeFjkVzwXem0c zlB!2w&?NlqgS>jA^-#L{t;6Z6=#!zVS^8vfD7Gy}TeI}(6OTS+scXNNa@5s&J`PX0 zjDTD&nSLjleK@`tw$}N=0+ceUKGpTUz$3UbH2I`w)732P% zGHl=^d=zyhE6Rn@p-b}C$Gt2}o z>vFeaQNd3748p$t%@z(C*rF4J=@UyDu4Ja6cv^1VeF2n5XxpgU$zjLgPG~fT5$8t( z?WVOoP@gFiF@7&AJg+T9U7wB@>d^v%lFrtm1@Nj$&&B}-H}!UF-a8bR>Y2Ao7QyR4 zv&JntRx;!d%PvV4-^x4GeFM0#_mbqunUDb1Pr|i_G1=S6N8RI&8;Q$wefyM}j23UR zoege3u2^E?dOI(6a+Gnqvhg5oN?|(wZ17Z)mTlLVwEm)`Aa&LeJ*W;g%Y&-rlT7(9 zsr)CnvJ>zjRA@|{9&9`Q1-4wX!W}pOotk?a9a_Jh{3eV*hCF!i&%s2xSQa&a zZsMstxGTYhmHjQ{*&<%@txw1^0-@3KDa6=;E5G>K3K&)jsty5oDmQ{;BTf0Y!-2N{n! 
z9HQ5|*Rkr87PT-)g7Uu)s<$wCQWqW(s*ke%5zzaR``yE8+4osVfmefpZ$>R=Ovzq; zZ%-$enUVtWACJkXEm@Hqxj=2` zV4Vpwa7Je!R?*aFENw2gJ{l!~fIJe0kenVz@m8zeR;6$I6m3gTD<*(R0EI+b#ab)c zhbN}CSiKrhng92<&rC?v-g`gyem?(vKAC;aKKs4bUVE*z*IFBkvvb(PSC(3`e45yk zKUK@P3g|uuSqh@R7|bmq*NW~K2CPU|!98eyC|4|b#LpB z2hAqK(xMDgVx)t$8+Z4!OM%?fC3)L9_B~5IjKLQfC?Gi`^(!)#Tg7Z}0mmx8!Q(<5 zRj5~BZg2_o_*zJ|`P5Gd@KR=X% zSwX?S-9I7c0kQU;wfk^-n_`FZfss1xyi2th8WF!IzRI2U=DhpJVXj zVf=lrH2ywU8h@W*-`{8Mf9LP>92(xTjzXNv3MpcpDPo-|Vu4@8g#WIHmKCT+hctvf71J;yeuR_UV$>%nBLiq!%4Zh}UQ#huS&bM^GjX zJIl*$w|$rbP&%vxt-z{_oNtyP#?GInpF`BCl1MAhLY5;oa(Tegum zUGfsE&qPh7?J&^QjLwt9A^l6>3sp3F>j6@#J+;W5(k0u>*HWRp8WAPkF9kF*r^WGG z)x0Xlx~2PxH~CgcH_xMkxLHFoHH534>dJJenokj}J8u*L_^mQe@1q5EW7t{fk(eEKpT@7O8CsO(x~&a5 zjZL8DkHHGtXI;*kxiNgm)0*ZlL}=qQQ3=K&ZQrKIoOANmjc*FinY#82yiu^-dbulK zLe^~G9(&_(%sIDq=d7+o+wQd|o1B}Xb87NePiUGNo>RQ0h&v%-$u9Ql?$s}j$uerc zWIwmImP7$q&W5|Ktp!0IwcXakz<(H5Uy-s?@s4?<*h==(Y{oKx?ECvy>z=PEo1U4N z6LvP-v?+{N&8-FNd3|@*!(_cBla<6yKZ#)@y2`9@mj}_^A8t8lp|8}%n&=)JXV2O* zqAK!yZb!+Fw@jq$?np<$DHPvzaNNvp#I7?jRvH}FQIO&*etdB!MH>I(%&v}t;onU( zP0f~+ZKRq%&)LwbHh|Zwc6#n>V-94YFv6T4mh8F;>7zR!)x=4}O(~$)K(2MHHlsA#$ zE}(rwbC348#D(0(D{t^?)u5y^^%=4&h?S!oQrOeBEf>1`m z$9vLFr?J_ElMd(yv}CiFvwZj0J69Ll@~Grojk6_27n~!faG{Q?WfTI}b@fc=>e=4S zln}Pc#~U{jxG=yvgwN1|YeOWhrAC3d%1|b?I*J5~G5crq6+>o+ zI1EBScjb)(!g!23gX<;Wwt(BvWjo_bF1R@2E~;~5HSPs=#9eTn8I7a!#KcYV5>kHi-d-?q2u@8l_tpZXXl^^}FV zdQo+}0+tmMySzQI0fgm?O50v<#_EWQxav?f;m@`UMPYogDoC z1e_HY4<4aw^=%i9%lJ^(+RZUq=F@Y0LUmjclK`NEqJjn+K=Ynwr-ER`h^7mhWTRYR zYcKgY&mXrsBW0hINa2&(HUl*2)7A}$b0`Z-|M2nBQ=N{_mT8%+@WVRm?>$on0#I z(V@cnik&9PtUxA^K09Bn!>G|K(kw&D)`@SZG|V(>#s?w6oPZ^(FEdlJ2&VRf_8Wi5 zz#JdFe?#S1xogRV3QK0Y{ldk1K#=_U+D{3fnsA!4$c7K~^@>e?Z~N!pOwZ1>)B8FW z)RJ-?f_mjagu(g?E~rBbjG@mgIj*(&*adKB;MGynD`&yQ^v7PZ;9?B>k2A|~ zzg^Mq1x*JrM7r!^?_@Ln`#r8eGM$DhFv5dO%l-ZudD0m9@=7?`0XK|X_+PBf3!2k= zSa&!|k&c?TNdKfUPd8bN536;`(sgj96nq_!r}bPT={r^l6h> z_l@kZ7VXeoBipqCnAhns@bn+*(-hQ)?}Pjp0RKqS-{q$wacU2q>U88%m&oHghy;}( z3DS>`bkJZOqG1bDRLy=?M(0>i^EprHj!S(5h5dnH;qgM*wQ9spLAqXDY|h&PJf0=f zJh>6d%4@U8hA<98jMjFDpS7!TM-+E)6mSk2B!c0J9r0k->QZdzfMS%qQsHT~lZz?d zx&zR2JF#VEpXjdV)6Uo%2Z&!gN15@S59ON7{p&IKjpubi>sg&RZc6V_1{DG(RUsAq ziqNxZ4-4dRhX3)QiMqtWwifZTBwUKkgZw)c%}b>6=Y`$fnF|7g18YAzTK|WJ+v5_c z&^T7d?sV5ks&#+i%WfMK{y`2aDC2BUP{hoOeU$V^L1AF|Lt&@7bo2%}!cPNB_{d;) z{8?dRUD{HkT(R)hdFbps%H()1a#fAE4^39 zB(SampHw2OV-w+piAitfE=9{)FA5c> zbM=@`(&IpQ+i*H3=aHjdopL2dri3NmBLtj*Ra7DJtK+Gn@NJJAzPjs?!!Rw@Rh#wIk`MV|!C&s1kVCb0{U#WdLP&S?#n+8Pl8euxw|B&XUCUq0 zOrquw4k3f&Y9PT@LEvgfa$?Az?m;J5A@Q5`cAiM;cCe2)3t=QcBI z1@?DGxX%`1INH=zUu7E1DE8fS5#H+%D%ub}R_YoydTZ;=W!fol4b95_R++p3BmKm& z-OC#u593lZx)Z)7RZmi6rTrToZ^}!30eY@DbRo+s^c4m|r=y#xjrTzzuB8I}fOlmH zd|{WhVG)Fju-H=FHLX)N-*AUX?bfmUo_cH9fkHiXt9`ZF?`pKJ>w`m9`+cO3xXZ8P zTHai1!(M8!isxUb)Llrvn@%0c%xj-^=0MZ5`r!PpG#!U?utx^v*2TnvKNjHIbP?93 zL9aey&3^zT)^+{0EE?$14PpfpSved|fbTYcsJC%F5t>T3j zf{DN4_vKY3+TSB$7SfeT(&Eq^yP>jB7$5zR#P*zzqIGE!(=VHlA8& zgRuUX9L#Nup}5gFk+=Vhx;uvI>#;vN4vy;tqf@j4yod{#wGBB#pNdefkmawi7fRU*e`MV|-<%6`tJ^feV^s`Ua!OIC0qWa^vEE zHs()dC&i4p8GViQn8AO~f&X!*uPR(;c1$1WP`P6+SXMtZ7l{jmw@`le#D(2&JP@P3 z5I?ZR*r&^0otThfDv7C4Oo*5|#ncefpqM)L(2XsWq}T@SqUJ3tNRb@i!F8y6eAyux=#n&B0G&UkzO@#bOi z3M1dwANzifzOORhQzb8;JmQyF>sJvQsbYbu@c+#ICHz7k~-QE3`COh@LWrq5+v9<`Fc z6BH)bVu)jW-G)>7pM}+>&3x`zaY=o;mpuD{6mEf8T^7Q0`~#@8_boe}=$-Y^$v`>Y zcz4V35Y%*aYVIVH&g}Pia1B7r;FC8ML#%-{;>+ZEBaQ~X=_1COfJrBwq}ry18;gy1 z!IAf-SIlEYFD@MQ^nm(bf9f=zALn1l0`CZYG0xa&E_O3W`pV*a%$vww z5of6|oO^}m5N1Uo?xx#kr}8HX!*E*NrDbHJ->*}LyPqk@c_6@)iL|D|D2KD3QzlA$ z%TCR0W-wKC{i{E*I(5KB9}%?KX5=Fd&!KT3!d+YuHQe+!otlK?Oe&(M8awYzInZQf 
zzxoricI`ameI^k_W{WyDe^_v+xHQw3cc@9j5N&4k4>e7y$1lsjcZEBmt9AVi(*=mh zrg>RNKG}7)Ydj$iJ)Z$h6O7uQN4d_Ff#qrfbdH+%j6d(|D!9?|5||^Roz`c%5T0Yr{rRclbe#*e0BFpS+AF z11Sjs>g;ps(>*afTqb+Rk+$i%!7q?F7MxpVwO!6SsmGKC7hB>CxxN6{PR_M+Ru9r= zy@e6boLW>PCuz>jWvV~`nE|+>8bRMuaHnA~@!DiV`GiQ6la(bSx;$e~-$ZCFZGMO$ zLTJs+Ap#I=n&3TW<>j!0B-6RPd}kUjMcx%B@$$ca2EAkUQ?gZk~&}4ym zEWMBMKT5YA>P6hBsg+fP)+`RltPKxPypLkCPBup1XVe(vlGh)%A0Ee09iJJuubjY3 zfj8^KyqtG+^U0BJTJ0rJqVH(yY}21pP#H75iBIm@^o|s!$Jf4OFF_kMo_7djv49pg z7SWY|pyxbz35AXxJD!hPoyg4N6|ZJcVXu#R4LYnBKI{dOd+T{`VngJqqK8}uc{JD} zixX`;?-6%|+1Z`Qdo-T6KasaD9^BnnB0eZUv~ZTWF?4vm=raMV&|uA?RQ~D0M0FYT z_oF8Zv~-!@u}E-tW5mzmBS8ENzxor>;WtfFc~&YPxQAGO$Nu9K`uM9GY}nUyNcZz7 z3rO`(joiBZD6_syo0D)UyICynNvm_83KT}A|HtrOH1FUs{5LH8cPNAZggt3JJ~4wm z*M2ot(PQ`SV4%||da(BUZFd6h?Hg>z;+`mw#N)tY6_>~MKzMRM%CPKk7ECATbrv9O z_pclQmx4!^*<)}iC?(8>QL3Nm`Uwo}_aGXgMy9&CExmFIa)GdWL`c&fIu3Wu&;-NaM5H~$$EJab3&QT6p$Xw2p0p(P+Y#f; zo#YMrsjcUZg{#@+tl4;py{xy%u3 z(Gf1JT{$TV2SocqLr8%C!Ir^WqkId075vM+iPIC*gBr)Pz-k^84m1HH%tqkTIsUb= z^(g9}YAySC)?Nm50lxs?2>i{)0%XM#jJALF*7OHP_3vySFU?C8l^c;j%$?}dJ$Fdt z$}MO}XE)r$b3Az1+R$Ml-yCPHU&~W{vMR^AzEN?hvl&oaN(@+SYj~$v?grzgBzn6q zT(P@8I8bj@V^ECn)F1IKUv^SdfnsWI2hUl9o;pJ-xC#rF5l7$42Di@c{ zta#dbcyGOR_0#p%<X%WtgnX#U$z@c4*zUbl6Azb+p- zfv>hMn1Aoj=OP98s*P^sKl>ig_sW0%y)u^E8A6&6iXolQ4Fzpiv1;jLrX| zIc?uybWAm(eH3A*{5ws(=5On?yyW?WSinr&_fkHK7V^#O7aI%ES{m}b3(8Eza${ zq?U-w&Oe(hxKsP>D(SEtGO`il#eo9`4QOE2%k@ET87#~NXjFGmzhLW`eelb)6N8hr zX57n1w|4F1OiO>MWT^#g*3g6UNslEwqR3})y(F<~vA1lQFF{4Nx}bRILa`-9PG%u-@m_4)^OAlmJ>jNA!N-gfQ#%0Lu8YpSgI<4l3am3R+e$1S=4yQA>ZS3{lx<;dV zgP&q|uP+7gQOUawnmDWKe~wfO%bMjST*bl7!~qt137E| z!YVP$8=h&Jq9wrDb$E3J23HCdk=IqxH*h!sIx;w6$!@qhyY&Szc!|`s8t!pPaM$zu zGy2K($yG-KSmqyX0x0^C3^%>Zb{=k4`6Z_8c*WxpoCFRWiB=#u=-NqSY&DU2yNQcb zbcY9qGhft3H0ze4g#70eot=g8s%VZ#FdRafxPaequz$ruuu2#vwU`T_#moc2LNs0C zu5^WXPJP7=efhl6)K(DzK~%`=|DL472m~ZoT5=Two!qrl(xf;3!x!}gysI?Nsgg_2 z^5HGuUF^IA-sBHqNp5nuz-kv#MZk18j;VxubIUJ(A#;fS^b0L3%X6B(2z&+XIpGFy z<`8S6wMEm<PmNa+^*bm5P>^@AUo-Veh@9 zpFiv8AV2r#qR!}@|NX=G_UKb*@orVn`%N?+d#T@h=zY+x(`cyvKgNGO|L5`_;@^xv z9DA`g%~I?m{)*M6X?8k{p#14NmM>o^5EjVYkX=0Qno~mX74euAHk!_j$MM?h7jpAv)b#g{MrgfK@))jnbFXN;J7Ig5$&>RYzEL-3$das`OO1Ea{kh|sX8-fC&HbliYbWp56mo&oHz>tUhZ_b{nFsGvnZ~asg2v}r zZC}%#bj7(1m?WKB>YgL8eBOlH z$b4JbqZbmsB8+vy89gPK6XU@<#x95Qz z#`DxYt%uF$Y4xY>xL-3_4?!^DAwzJ`OW;pvRI3`dj+!qX2jjFUI1y}81DDLtS@-`Pp%|JDfa{ zEAK=sTx1cn=|UE6+?`|=E{m4)*qI3jUwF93liU8OAys3a678rhjyg4EW{=5kA+v88 z3smR508nYaP8AIBiRl5GLt}BQZ1fEDO@Dm9o$n(p%=)wAtbYeHIOSi*D=%(H$vYWm zxP@HJpHx9J8Epq#lkrAp%F}(L%D|@YQ;NU`g>+_omTDK?}G1R z_lDL1WNX?+=nPH;Vr&`LKuf=XUoCuYVuR5*EfTyOVs>Tu>Pe6xGprkq14bk6Ie_0v z2Kr11I4YH}G#Y#nad6m*Kk8U+bVfMHkjA@)uwj2R;(QFs$BEA1z~r82AwDY!isDNS zkhH7N+w3 zc-D%IVKZ#QwYw4PA?9PXTwE54RGb;H?pXtoM(Q|zke6VXHN68bmEnqBgiGPzj;Q;# z74M78xvFSwb$rQStozU7VoUzP#c1P3(eK<0m9YoDR-WdUL@VQYuOqXhuw*2-rf4;q zO-)>#^s49 zJLO2l#~9i#)3ouiB>;SWhZaqZh|Hs3Bo&ya4itkF;)>9_B#hqKC*s!n*qFwM z@NT;q#|bh(d`L{+X4RJTod{(aII0P|I&f)Ph)afwo?!2q-&Cijea;*H;JYjP6LDdN z36=b=s9RJPMk}9Q2UO)SfS#=~-7|DwX1$N)43jq*&hW2i`0KsWyKfEa{XLV``boP- zlC~?U|7XTqL+uZ92nX_55XjhwF}PQ3m~O7wAdOkyi}EkNB)~kRRaZ`J!a-btI=@&a z-oO*0n&Z&ijmhJfqchL7#rz{jSPQnkv@}+6X|dh?<~Xr#wa3;s+Kg8FKQyMP>30xN z1NMB+vcBa2+L>Q!=b;x!1@2mQsPp;1wA8M^>eMK*i6K) zKK=42el@6Xz6PbHxsr$q%OGv-w|pz_W-;m!1>kp+;E z*Z8x+@tBzc9F7|kkKu?E#0Jfb+($XElA%>q;Z;`K_jpUVoBa8Dyz#>$^21~N^W#6| zXvtyZ;h*WUkfNDU#uetn1V=e|5}X9rITg;GaRNEB8td0ncre$bLU z_E`I7n)ZpmZz>-07$V!(chDtk(W<&tqpDE(2c>DU+q;qpf-~wjjrToJ0TIjp&b0no zgKE3GG8VHk_OHRWRbP~QbDU!9Th#rX3sJdjMwDwh+(Xd{)G?1l@^(jp)#VY# z?y%z5mV=>ZM1tp{5gWg#JUr((9sLGkR=mfp3J2dbki}m2X0)O|GG`J?r_8qEJLacm 
zoX9QN`>goB`Khw0(coXLl>D4TUD$fCb_Ty(Gz=oBSW!Of<*;?n3Xte8jh62zfB&ecSQO9aS=FI^lr4RiO@j2(iQ6 z@rp+pEr>D!JdS3(Kbsp7PGClLXNNqs%;JJLbQCj`5f?%H4=AI!D5Kw9qKsEEo2mTm z=0ZDNk)tDBHH#l`7;3UfTs+F*PW~h=*jdK9>K&X1F^Pn3#YYAut&l84@=&#Kgj)LT zd>M*&PSI5xt@Oyy&xOVwGiLO2<^Q!L+?!iWE(y6Aed#@8z{zGb>+y8S2QHkHlMLpp ztCfE4R3s|8#e`b~%_SzG0PmH&qZ76u1jiBLy@q!*!WPu=UdMX_?+sRQdka-qQ(F{Z z4}*UD?#4Vj4TCXKC9^DGVuv*PSN^6Ekc`z`S%VfY{+ZmBHp1QpgYF{?P9z-s!{s=AMj6&C0TWzGDp=K$oqK?`tv88>s!{ji;P zB)xO!=h)cPFuX%}f>PV4Pr`7CIygDX_y#bxnyZ}81ESNV1S;e`KF-G3JO z55;w??mSOXFgT`e$XbSOO)4I|5^Yy=+XAg@`-UJYb$+g9z)rX;YUS{#B$oVK?uzlT zIp-HHJI9c3dApfX+-?0wi_!<5<~-(U*7hYQ^6LHcLRKN8P@}vj#FC%PZGvss=QS&y zT5IrQ&Mc4yA_8ULjLt;>L(gLGGrvNUJlq>uUOWkzIusrdV<(&ywP9ynGdG?D5;%?s zJ1b@+oCTGH(@lTG=CG%2@K0Rk_>Q1$E750It#~lw{lZFgN8Frf#V$;<)}9g#E|{@q zdTdEw%w52lxu7@)$r5*QBXnuX3#a>{&H_!->WPWqV{0UM1CEEr&FsqZEunYuEF;KO z>(7mul{v{yo`$G!jYuO_%(5 z@g#UB#e)Aic0G_Td5$RVvbV$RH*Am2C*`lhxtakU_P%Q*c#U;o_tIw4V`0@g6?C`c zeA)4Ynz@%Cuz>!DjrD{3827p&62|?%wWtq!m||8F$F*WW1lnfpNw)&G1Cg@1UeCUC z+8OU$nvV{KJHNr1pHCC;>#iC9xL@%_$0{z&RE((v^@gO2gCH6!5K!@euK%(YAzmt``5OP-zvBdjw?i2^Xz*&CpJi*xrIc>bTic$hmRmlt9KH8vmOswTi?KRtmNCLkC!&X!3e6^ZCjxqbrv<>m@mw)X`Z=?NazBG=5jz%|oGSB!Xi@cKD z2fWV1tyV*>BsuGs(vx^_S)uOVhIZZSZ9E$*Z+fheCt)nLHWzX6Z$=FOH*SMYta%hki5a~9cjQ&4nwsp?3Kg848^e6iu`MGL^hQRzM2*9# zv}xSnS+dN2&nXd#vuTyc4)$6fj6`(SvHvv~Uqdi2%otLkQ8gp=B%{tiRI@*4Z(O_B zR;_f&)VhpXGsE!?SYSzZ6c2S6RzJ}%20YqtJDlJ&v>K7X)1Qz?Akpt_z6v(q`yB-x z!&tumMzB!wnqyc9BnAt_%7Y%NETm<#o?-3J4sV+%Z~~iycBQlnE|`P8c2zD&tx}jR zkS=+K)db77AYF1hPwoMbE*4$~T2a!O(1p${V9tpnvh(9xCAHuCz{e=lTRgzo8O(cT zH9uLyYDS!L(N2Wbl@l8wZT(Eown*$MXC@EKP`(e652>K;O2}mW=15l4Nz=a&{rhgi zNdF+vR+iF_r?ue)gwe{a4YEi2vhC)7q?U&V^7%mlz|EgDr(n)q6LZ$iK{#8(20^L zK<>&5!IwTqCfZ&pB=+1Q)9uhjI|UD2Bk{c>@xH&!I?8UsT`RiP1pn`c8;2oE~)r*cD@E@|I_*6M6%$E>qgDj_xES#%gFUU>Nl{? zpRcL@d@0i>$TWJst|H&ae3`663r(NNI(EKZy?Ug7m1Cw#U7BHL(AwbA1v6J8GnbvT z4R>TF?K&R3^@}xWU*%DgmMS^!vlAHgv+?*4r~Ay-C46G;7Ao%ZP7%FpCG!Kh{P7fcIk+h z{{#FVWNOuRR%QP)+GXbE7$s0&4jZhc_(D!nwHZW zHPIxg>WzlXmng!UoHV!x`aDs~0c0K0Vo+MbCT=Nl&312F3sITqr5$Y@yxFNJn1dB) zWNvy22wdw7rDL{1?FZvyN|{*dO~FGE54M-@KNNd!K4oIIdB^(?o4twtLyLE?@335! 
zU?MMG2}H`S6Dm%Z9Qtf#`WnW@AIQcp9UI@}$Ag{_^fDZn-)Pp1+Ixgo>(0u%m)a{igC+tm|- zEq<_#K(Nx<#e;KupC8-r2M7G%pdWaCaEPFzb=ZFavQ(H3@a)#^Vgm1c@GJ1k8QTrb z5GFC_&JG2=8#fUc00}#HX1?6!C)n%KhU8fnY;X;&ky?j;D8?-^aIZi4*9{b zALMYH9PZs%NI*}wcsF)7R`S5n;GI>3vDEeufuuqK zDBpVLE>SZ*?O)-SA-=1Dkf7NQTKu5H4+KSgyG=prMgv-@g0td^H0xl>QO4o{;c4HG zr}x6S1t1>hu7iF?&kqjy!LT3X&^MA4`a!WDl=?xy56b6LOsPKAxSs9_5$f`w0>-jiQB8CBI!c0hF08NkMC9r7vy6{l+&L z2-ryN%#UQguPGn(UBeDX3($*f;fi93`$^nDV%r73@24~n2eB>|QzYVyA%F{^08C3` z$%s28P|5c*^>Q3A8pGV60!$U)oYbvm6x|lBm~_dtqnrAEGiQj-M-rb`7P8$IEx>fi zg3(E(R;ezA-r4LQHD{V#Iy!kPCHSP8-+_hYbb!N%-Hj+nW)R_v+Wd_8h{KGj7?p!- zeH8f^UkvIDWje;!YWn*t#mj<+h8VtU#)1O4QgjKjN&`|D7*P^HD+3~6+nwWc9;ic- z>V0x27}<~f?h16UK1w6K_YwK1A9;f!4PlpmEOMnHKc$p^-!YQ?ONyMS$lnopY=fP} z)kF^N$Gh#9x1kO!e{Pwo);x{L!4tx35J|5U> zTlXj=nV^Ee$D=nL^xr%J=cYsa_O?oY)@-o`p=$dD6;nix)A|K|>z&q12s&DIZcahM zv%Rgw{Q8+B)l~`B)YeizMY)3B)=C1uzc=Xv->dxZw*S56*mr&CXsuN|GEV2FI-Y!b zO~B~OLm^@~jcV-{HuM&=O3mTeT&jlBGJ6{^l$+Vo0q(ENLd#CaWK$j}g{;yxZ`mT3 z*=Myb+2arvs(dCev}kenPtB1>$M8+x4!Z z$}>e7=o?f0ahWfh-<9XJe^F_v>*`QSuA23_YC6dd?ze8);bVnBx@3SPwuAG`)OC#a zqN?{w^RsLkF+gLg{<^6^cNmR{UY&`42a)q-Kl&X+<hd;>?j$1{99*2Q&+P8ArR) z*ji5XGrG0!`tnw#KZ>^I@Q+agHD+#~@}J8Uzt?{@^Zc;?T&Vb8_|LoAX8J zyTNb&C$1MncY!5PCGz!%My-cYqVfo2G)E-*+vjG(-~5!ql4&l@&;fp%w$|fU_$iWiy~gV09y9jC;Ra72*C3 z?^kQX4@j9mxCfUt#>P%gcF;CkGhrK@!N$;dq><+FDat!%<7TM7#-j8_VF<^(wQRqe zxSYmMbNIrB{y+h5ri~vtb!;1(uAz-lwegB0htoY9O*Yq*G-so5opkW`dB?rq7$)7> z$gEkN8*{)bXiFbJ3$1!*y<^^y^le;jKHo6T|J<$5ef5rcw>rO(uV`F^6v-?v5|l!l z$*p)kk0;=47<;E{Cu4NQjTYJb`a*(2<bzl+qJw*uq4>9Fs1R2e*=+g{ zu^#?LkrFVjjpv7bH!@t~@y3Bjuz&R=T?i7yyfKcXZKFvCo#UKLzvrilD0QG|JW6~6 zLldcYkIKd9J<|?nqsr%NA7`r(=Ld(7JILT~<7+%boI8ijd$+R@Pry07hQ?*zSl7F; zj9K!Z%6a0irFY|u%#*uOo%9J|yLV%;AI+w+u{86@D09apNoErfw;tYZS{-q=i)KbZ z_$3_vNltN!YpCfU0AjUmq!uzZ_|tPO0VM`9B|00`4(A57!7m^)lOqt8A$%{USJlwvWQwKk*~VB6h(HwB^a^tJ|J zC8FdTM4a00H?}FLcp{d!UzQ8gAJg}Y3d)z|xIXXAu-p5FBw#1TmvrHI1^UX*1hX)Y z6!Fwn6*aUd^AN*PUH1!llxgRggt!ZNam{g=GN<}w#@&s|l&I)!JZT|UL8#VIM1mXIed5-H|fZ1xMU<7^9=r~(}u;PB;}D7L;{V7V@bBXE7K25CNYDc zi|vNpiv+jN1Fu zNd?aMx=~UevauKC zDt3_A$56s#b*X+1_v5U1J-VirQ5Axj)O3Tu@N&ClI$%d z756p3GdS}wNz|#0Obmka0*(1Z=?x>Xc~$0(4QU=$Rr6}hTMch@ye&0vOM8-==@XEd zG=xkrxrMjJ3ASS%dXioG>Kh>Lce|~7b~s55LMohVo&{F25(oA9Gqk>s1fB7rns5;jcUClKsMZ@>i1SWLvG$!#Jri^gz%Xy} z-)|x!!*0ee1wrf|5>srXc%D8(V);$zJ?<&^)QZd#*JD05m zXACTB=8Uafb_#L`2b_i7b^)q^$*^Odfvj8o1>CvEx@Qk545$cdNug&_?9F5r>NP;a zCq3iw#LqJVzI@O#goty+1aOFHrdv?^6$HReyFtKFr&GH?c51Q*s^zeZaM;&n*~ak2 zBeUV_Gr+_(`c4Bwn6g3pS)HxgT;4dq1`^<|9DTr0Jl+)-X|zs6*|ree znvc2S@pku-e9mNMz|F74z2uJAfR}Ll@Ble)&gwlyYff5}nzjdv+~SO_OsoRshJLBx z_mgAYz63ccq&S-|{i)e3kF~)O{Tv%DnSqzkFQ>Q#!HfIy*A#H7I#jYKmCwxvUD-0F zU7OcO)^+>h&eywWa-w2Kta~^=?j+HqWf(aH2a(NkYw}&YDEQRs0(?)bK0df#4|`Wn z3_i66ovgj9k7t?mD?@Mc00A>jZ_h8KyMfW&RacFn?Ej^^>gmk?RR<4_>7d7(yRaN{ zbtsJR>bTR}4@ynZI;=D819yl*Zr*(mQ2RB7qpX$T%tuNxZ=aDL%6z#_^ zxY$jq6ek%l&-&^l%Xo%@W}51ZI*``N`|qU?gE?t)s~^<1bywrhSCPu;bOA2#E<-Bztg-dvKf_ zh2$NulP~8sZDNM1jGm`&1v_$&R#w7u?0z$Ihhm872j%pfm*IUjnDYvdU%$@wYVm|E zvA1ozt6)6w{lK*g6--0IeStKC5#;8x*}Q*k#QIjJV@Ou)v-#)>sc2N#yWVeiznBj> zXLh>{#$+}^M^PfF=(C|>K&x#Rt3yk!FDs)(F+9j-qVYaM1G#AyBW)vh!N{TF5Vt*G z3ShN8D3Dn4%Q9ImB`RKtaS4Q?f5K}d`1~3*cQwyzFNp^K(lpuai3E?bv-b|=g;7nw zt)z?(9*=O&yhSuADM7!dUMe!e0#!LZR zkgL3O{x$4+tqp$xY)qG>zO&@KvWS$wzHTn0U zfHU)PbTgXp3)j2ODCErS+VFVe*`~kR8BoD1ODyS&FM)JK33#vfuK?`Wz7AG^LlUUL zz|oq0OawVs&XD-CN?h@#-JQ-?^|j_=*5<;@E;U<<_m%1rG{l)R*@;_s3FwZ&=tZ*B z)~Wl3vMvo66K!NWsq4+V_tNF&6#m@f0MTsRi5v}Pa&fjNX@7$hU+TPv{KOwMi52nJ z#Hf^rAvYMuR7%|JZSoTzo}!W!@$j+4zLN!|->gPavFHo$5~8?o0HQBhV44aAxJ-Q 
z*j+&k6r#QovQzoE&;elBkbZxEeSi$8LYTjGMt4tZX~UI_N8Dwfu9h0?d}XK=R@qR& z2Xf+2rl-E5yFNLbx1wPCWJdskDI))>k{TQ2{dr zXNpASG8R10^dZp15K0#LlM=3nQf}Ou=OyqG-u^x+8R~>tlalapDF|mjd%R*No;7oE z^MV%e6W(hO$qDx{HHUT0X_}hr9as5wIy1JS217CQJ6C;@aZC1~&#Zy*2mT#{Iru0X zgj_<&N#^RZ5I$8_af>|~Um*tYIn@6lu$YHI*v=P$I@BdLX$ zZtcUU1uGPY$z~iq^+$vF+>Q@0d3)&F^#*GvuujYK5p(Q`gWSxqtbkjahbF{=hvHVX z+&yh5Yhm)tIaL}LC_>rhxGIgaTg^CxXv|HDCKWvjvm;cDPIRFi3>P=P-*$>ouXUP> zCl>5Cq!e%ZeFgPvB zk>Qz-U2rCtsLd$ zQtT6B-ABiXI$N>}y1Nha#zs+68mon=8OAO&KF6@#jUlNk7aLAnt}7aPghV9v&mW|^@{-rzA6ndP=}1bA!%w^jgM&ysY}xzBY*w4w8^j-Lyo-0OO!vuP zj}`9^yF2m7NxB{q8jrlAY-o~xeT3p?pZwrY zLAH2a7MSHU)rH*&$%FD=cjD_iauI5y$T@}Cnm=J~Zet;2c82=q?pB;p6yfuJ>!H_o z5JX;yRQ+NbMft9fK4vlI51Kvi7uxS49p>ZE=he~~!3S+YSXmarLcBR9E(>5{=bZ;j z`3<+MosiS?{@^d-%5waeEGBMbP&JCOFFAJgvRNQnA2456mSql+Gy4J;hBJoHd+zO` zBO~tpwjkt3qVyx~y>)tN6}Sakzemk-j^MV2h;_`#x>ek&^qC`d+*dfYyyB?BDHo`; zD!*e6*zMF!OL+lRSMkML!w<>ZwW>(ouBGI*S7`*=3(d0`jdml@XxBJ7?HUKST?62> zPdCps=2`o&+inCMW+#@S2_K++%K0J(6eq|w#W*~WhmzyHGYj4nz0vHavj0h{_mRrl z+I}0UI@*_-N;aDc8_cuD!)e#pgz*w}xpl$oD3!T4ZUKOBh4Eyk9>nS&(^+dd!JNI` zxJ^l{Wq*+0^(Q;^Ha1M=hjaud5wp^Rl?*fo#MbK`sES(V9COrB!$m;>@-^<4B`dFGousS8Rm zlR>ur?isx*@69wcukK5$(z(kCF}FBDyTx>O59fKG_>3XZYxrLCUf-wiovn9i*qOhS z@JhlfS^f(eP;Z;xpr>o~bgiBm_0*`R<$7AKr)E7hlXDf}D#EpdYmp_d*UUK^P2jd~ z^xsV2woBETZzgctZ}8tt;I{v}|7HTWU6x6tG=baxAO4#OV5{!rmFhPt1WC1TR@kx8 z1iXIRe>DNG-|=4+#8P*O7Ko*OuE56QzfmNPyA`&3QDLm&S$hCc&l+GViWy?Bjn!N5 z!18G_v4KWd6%w?qfm!?Q(2ThIWybo&*uY!70++Gi@0Wk-?9}Ws?uqy6T0{%1p~T}UR#_}K*J!G^4oXsF+cIddX zRZnAA;yw%bhJM_7H=lYo{+QpK2Nv@qIYTI2ay3hiy`dCTRopbW+Q>xE+JuKMca+)z%_e;VFNWd^)?cS#SzsDqll&U+klOaD{p z9Xz=kJLnKcVVe{LcQ(N+-g(eqvg<-dT&3+8MVZN7ei>Y%GYj*OLXJ;fKb65o@FiDu z&kv@@F)Eo>1&qgGf!*$iz6NgPBtD0@`brm)1Rf|)V$X_(==q*XlLpN;<*UA zOZ{`dF}isfR|I|DC2Of%rKj?r^Knyovt7@$bJ%ZSvB_bE z#UI=MYG6t6>3`jHtS1fr*!G6Fh9cDcKODpdKKzbByk@l;eM1h5#cS0^XS<$hY9RrG zcqNge2hShIcUBG+pP9jQW(FI_OkTCA_mG)9W3YPoI#X}4>YZrn-KuBmE%obtEnxJJ zw8~!op_WqOkffQ{e2_M#zGcMOzCV^_h&RfYVcu1K9XyzE^n}e4J}I{LXKjWu{Dyy@ z)yUx}Je~*wJ9YaOO~>niEPg3;Y!@T^!n0yEE zC}vE)2aeczk7wQCOoz37r#l?0{is=uectmY({I+evH4c8tXTM>((~wKJoS*85_2sK zJt-OYqr*tSmd}Z~T;Nl27e}3hoK^q?Pa3;Y_?MD$Y#?U*=iK|Ga}DU%|#Pz zzxR+ypQtGe7{cV*O)Z>y_@{BOEYQfvuPDU+6JXPhe(8l-xsF!>ii7N znGB!KW>5qfT9hH+XZW+p@F_n-Mw`Zhd+gu# z_p8>Iv>e<+Sh3=Et9V3z#kuB?bnvbL>;DS@V0I0=?KPnNYXbb2^AE3oO%?wk{(*>V zYWT0^zmER~{+szLqc}QmkA>MJ>9`BTs* z&VGc#Z1?_c1o7Y-)`ky=rpwS)qB{Z@^OfhuDvpB6P>AhU@(P2ZB#@VAUMF%wn6T;( zJxy-Ro@b)Z)IH-!f-sd>;yjL>r?mhlUloT$~M21 zCqv-I(o}2CGwJjhcTvjn6H~1w3A^#}O;(Kr^A%XF)X=>(F|15WQpY1gDq7A~?W^xk^!Jk-fbTZvQ9V5ySI|R1d z8>#4pFUa&GHObaXcm`fOUmj~h%%S&SeN8^qzGffHg#_(u_-;0gEP)p!!2mWT~ zy;NAjgHX0NP+9=D@hd$Os0`?pUlYj%H)oFAMarM zY@Rzzo;$|o=`d;bd*AZY!v5StdZBvnDnGrRsY7U=I%%hiwNMsWZ&aoV2m^+#WJ+rLI@4%yAfnI3-uQjo z`SOdv3k3<1DSz4oepXKyS8;@}&FF63GmKEueΠ|LacluCr6q{xz0&OD)Zyk^8-S zPcS?~FFXUpfAoiV?)@Pjp7QR7jOF1u@1B$Ou&^QS)-R0*pKqM%M(+wc`FOINzchca zhX!)ASQm5O(!~JX>2V{+iD$-izV|2k5(kN}41Q==a2vAk1Zd!;V~2e37KevU0(Ek1 zH_STV*lt*7gYCP?%ZC0Cb>w&xHtF>5kYD@uxI2F-`bpHgFTt^XflmHDm~eNA^7;h6 zy7fI#!BsxybngaO6z@c$Ilbq)`X9UNj5~`o`%DEg ze^_&Tjzzy|t-sBj=B>F$@HO-SgJ%J`33#)zfjTX)Cz%3b^0<~{%{>A^fumq;{kKq{ zpqjr^%^P$SaxXwWv@VqGF0%Q6yD(r^AfXvJ91HGSUT2ie=A77gTzt;wy{1BrM4js4 zp{SGiX2fawp&dNGxbfsHhKM`deS+uJYCjp`y7?T~ybmn#%`BGqA^w?J9Q(XaecZ1P z?4XHA_Dq~4O22)o7Vfuv1(eoZ!ga^=7a;GS z+68v>PZNu(IiPdcbG`K`j@G>2uYmIRHr|*$X48H(27k(lT2{@8OHF(fnQ#vpPbP=$ zilb+_Gx|{Lb(g+{f&_`9ZTo44S7sw-MU zdK+Sn)K&VdEJiS4(t6jNNCSeeE&KGWbf{`|S!Kw&r57QeIhmKb^Rxz*B1P2U-}md@ zJPucEuF49&cI`9j5(az-dJuXpE(8ofD^=1@K{BzpxbXvaFP)>^92Sk5=qMA-+M$KW 
zF6Y6{-o?spyv<~v8Q~Oojb`85RKv>ls2$xgWX)rOi+`y$gSRnjJ*^QF<4I`Z)GptfkJV>S8HcHHd%5qMkX= zBHwiHKue)H&{7CbS!V)tRF3k5fIcvY)Z9wztl?WfJ>KM6^Ir!(9Wt0l=m^527;u(N z`~I&|AB`f6|Lc0-tyoKF47%f3+!!{T!xkKR*~I{O0-dr3z=8Bf47HRKSjbKn@fb+&`+`di&6<^Uhxt!HWAb-@H^)V~>A841X=EKyd zU)d&~7c;7KMpqU{;QYP4vIY~S8y-A5JfKH`^IV$paP8}A?Q~i*_KS`>8r++}c~tg| z5qjgG%o`Kcg{doa41Qc>0g*z}r_2qo2xY|2k3QAMjQClqSO2lu7e5PmRo8KWL^2RY zT0iQ|Ro3zHvgeI3nlC8l3Zuu7?=&d35^Kg79;`5Wgt8m|?r{)L7_}TE5nhfE`UL-; z;@=7@j4ovjF_I;hprWj1h0%d`N=g9`kJkNs#`hKQstb8JM|Dtr?WU3P6&3Z zBA&|`@e$G}r9YnWCDAmNn2T9W?#aeFjO9%0tF&Zs6v?L6^kq-*?ZOZYqZ2!myRtWq z*9bUmNqE!#knkqtwEMlHxfDtA{*L&3Bb zYC-DoFs)*yHqxJV4ZiNRdWKpZqE_2F4mFIyoZ27bEV)_M3)&dzhCj!H2i#u9Czu$a zd5cNb8FtuCy24Ed$D1ezPQ#TN{l-)Sv0%_K_&Up8K`pU z9!P$*$v@lX4{M%DzBl+QJ~o3-9Xn|!hg~}vT;Ojec)~(?i*+HYw%J?bNa1bwX9*d zKsXFt6u&@BN^pCpPaYX_x~W0DghwRdhsV&Rxd&K#jCvc%Z|RsNXj;|hlI=dXOPC$1 z<{xSDhMp?lJzKeq$_DPkfjc1$>W0?#1HNjblRb-TryD?jc&j3V|7NWZOd^#y3!mu@ z&l@Rs8jS0m8~8a77M}IWZnUm<#>CtsJogUF*?c@l>TBz=H&KvbO;N8}7MKQR0;a-m z<^t2eP`t*}C`q00a2A+_7MPu1_ds~`wr(J-7YJXfupkr-w-`*Z$S#994r?M|AvtXr zL)Y>584?!7bvaYfy1Uo2*{70C-k$y(%`1lH-jDx5@4%R($ZZYS<3!}`mpFV z(O@h|KziIypcWGud&Vkn`_*aqC!ecu!yAYevdKgXd9#fPBy7cI!cC-{g+?mRU1gr9 z+-a~ju z28tS2_1qDDutp=i(F`T9l?OY*H%Zl9m4jF`v6+!orZOFV|K@#;Y*VIUD;0A_wmC81 zj08EmI7XODe?S_UDJV&`{W&wT?RI4I7#Z}qS)$=)_#aJvd7j154gTcBgRRp0$=i&8 z>ee?I;vLJ|@$xNC8_D7k4LuhkuaTb#u=;l%EJW5R`zr)keeov1vX5ho)fu=vKurr5 zdyKJ5fYs~1Bgpvf5K5t3OBeg`V!?*+s^;i!aIfbTXS0fZvWlKd%GaB@`}|g5l>_f6 z+kv`qz7O%4g!Zsjm#3g7$(-dJHj!C?v#PRBUwsc`8(%$}u*%u^xH;C+n0?##{g*HF z=YJ4|u1kILvifa(mAAfc@|xMd$xFRPLCz-*$$Y{;q+0DC`eV^-(rlqWn58;tR!iTP zy1I)m^#vAh7MTe@)_~r(#()5_s^)h8Q{)*aPj)PNC8Wb-S65|mw zozBujP2DAn_C%;@>u6kk)RBvtk&>xJ!aR(i1a zG2-Ii`WRv5DYIXZ@$d6A&L6dUxEP+)11jEyMW0?~6sM!|;|MZI~v>aYK_E5+#*&3mwGSba3)5VI%jwCijJjC@(%DHd8T+h~f2#`Nl9m9W?yr zc(V*VA+(fwWVE6P38NJSNv!fPHme@BDMxFDX{97}^8*%7GiGe#O!(YyKu>HVB|ynj z8FHKnz4c<4f1YH;-#k2pXDV#LtMT$DRv3V85ae=KiO0!z8gvXCUi&Z)mYDBVcH{3% zwv~{RCUmtKnY+M(n3Kg2c9x($3E)@PTLA4n{y~3z#5_nZDN`nlwj&{KPtPfj8~BcB}4gT=hj%Ki%S$v7JS zYVt`xYFdKbi+r+`D1CdCPuAV0`R!Rg@y_GR9@)ZFTPub%?6*=?$9dAdzgxNDZOc0` zZLt6Kym%)>tkwB&IIS0<197Am+-Kf#xt{E4+~%K&kPS4%#At@TR7ng^~oi1juNhzSAI<48ORBhv*1Jhpkg~{&H1({a6Fhojydt5(`lSNI}$8ElJJz zy@4o(CK>fi>;^$pt~^@}0D-8Pd>^7)f#@SZwBrFCakEB^+gXY|tLisJDNXAcA6 z?l~yoh)Ivw9OL_peLp(jc}tCoIzOU)VzSS7$_P$&9gdbjd#%&>2PJ$mq$4t78#IA8 z=nRoegZQGw+)IX@nJoP#XRU^&lHa0}{+6taa@U){BJIr_6yrXsXT<3{ZUJ5ZcsM4W zw}i3+bOetAmv6ZHXDD1dgSx5>WTup}7}hg}xAym;375 zxUR&@Zw80~Md(F#+wLrsmqb_kr<~h0%@77%!$WjLd!%eoow6HSO?LP#FV2~Vmm($H zYRj}=SX(-_+vo_A1hs0Jj`tDtjMM~Kp~9?_U-_87zAUNw&~ zfW{fdaKbS?ps~T;9L8|#Z^1ZFfe7pu4mSZve=cBJ^!yCb*V|#ZRKj-ICPP4>R`SAFkP z`@N(1Uc7u6Jw~T8!5?x}fJe#C1Pgdj!!r8(@04wW1-RkuHKNYU@&U?j=KC8&sEImE zWYXn-Tfa{Y3J`geI**E0ATtXNhBRZ|ks{wh;5X>N*w3_bw}==-pr%GiDhwj{1iIZ2 zsLqcyKdBiVWx-_B;N(<{h4hckrHc#sUS`KbFdAb*fV4NS6pRkX{ht*ez}v)s`HO*( zz|%!dN|K)mx7@^oh0)c@wl}T;qyGk=dOR=pVMAS=0njumb_I}ug8&rh1|S0$0m#%S zNtOP_P|Eh6eL9<;Wz?`D0piF_K1)j-f*ZftFO_s7K~5HMk8ACc&Fr*AhHDA^yPCQU zfn@Z7>WCXSl49D&E?dzvt#uM4ZXq>jEY00LSRp4WnMfll#pJ)wU$%q)rv`uRN;7!DD_;KT!+Rh6hva7lpW?v|{vXQz3WI;|s(lYWtF%8!&F+H_ z`h&lRxN4apo57o+lGJbR^ah`UmgagdT#ZL2Zj@-voZtR;-RZ*odwlvM4L{zR!OIw% z&mdxJQc7vJICr`GpNkE21Iz#HVWE)+kvyzC_?IJdIJDXS;cObqRIghF zed$)jcIyX*`!@p(f5Y?z0VtPt9-`1`&@n=&_W{Ap(9Ye;Zv4H;4i_+YDz{RdHcaQE z8%;v0yKtzH7|f<=T5_C(H>6aLvJzM9lkEXxuVOV7bJ*U_#C)S~i0y5ZSAZ98Uh4u- zwd;bZQIaaZBfBmXn04V>z^d5+3>}jMOMnP41;p)oG+WVWQ$OnlMdw9}5km#V z-Yd-H9?EO+^4bTpP+Zp?inAzm8XmV${2>n(ir-Uq<6RbtLwklIVluKZ9ToVHxjd+x zM(_5qYGlFpbt?7%zJGnqfX|4QdjsE|Uzt^7s5YI8+?Iq5@o3Ko9hoq?Q1jOTh9?Pc 
z+n8B4-hIj=Pc0Y1X9>eXb1fqV?Gb(#nP)5f=5sg>;de1vxyy(zU)g{-dmivfcNdg| zm?6^Lp88Q~4i#$ss-Yty-7%V|@6i=VSEuW`3s*=@Y?q1>O&9sq_a$A$+FfW%+ITwGWh#SY}1C{iv9O5=dGv`Mmm+!wgCo zNlWsBDd7HqUt|f%3RpUG(|j@m?l^)Jge3SVBuTpX;mfC`@e^HQeb$93I-HdS~X0Jk_KBz(>P9Lh$_uX7DKy!Z`wb6(9TG`48N6AG3p@3ZZ_l{sT8qp79?@ z@L(tC8f7=un+f_a{sRrQ`<#_5%AQNb9F)!VALvVHE`KsKE1v(r_ixawUMq$jpFp@a?)} zW@Ot9+SZ#$P+9+hZ_;GX{sY5=KP7wfKY8e|ZuoQ9vlf4TCj2SFO*)G|m?FFogMM^r z*k==S?iz%58a^i7#T(_!d%IxMmz1d>%k zxXbnl_AHtAr(zD+H+T*yk}B$p4cNDPJkj#N9Ly@XB?JG>7XCVl)bURS9527<_j|=G zV_?Z;Zf8+1b8So|4|cQ@m2Iw#$x$0oSLL8$M=Bo`jWl}cR~%^x6>~8kS=bWP4lekntLDVvdG~ZX1!m3J~L78CCfCNBJ|SwbG5FWr}PknnyXVbAuG@w}y;cd?gz@zZWg z)(G!=_vm{E+V363oAL4{db~G#$pZ2-_L8eKEVEi&rtJTXz2s85vX6xd5P7S3go-)y znrkol{{C5(xHo%AF}!gv_L4s_Sl?bU)`BU=Ub2Z(#~H0cVo$ow&I#6zywtxQGjn3? zB^UhG!01aPK7;%WiBIRj!srZTe`R|~75&%;fU@?IkyPvopcWyz5dVpyt^n$8FZuL( zA-<8mbQ5gr+%KJhZDvCfyf^le9R%30%4w?eFF7l^FtNM6mi>_uNKKd9SFCwnhFrU+oVxe1Y*x`s$wF2)#Ug^)ntUdTmqo z|8ITuRZjEo1MRX3*CSNSLAzXib;W)L?byw*`(@qr)g)-=(`Amn`X*rLuCE3yeWI)S z`s#STo1srZ@%+^mith@FXW>8$6s^9x`mQV#zof4&qcEedKE{KE;seV5Ki5}({Xbt3 ze3w$O2k;$EpdJLg9`)5TTtl^4eRVH__%?vy>8q#U9_Nwmad-rKVJ&JA?M@QI)uL#y_))IZGEWy2$uH+-X>(x#25UE;xyNr$IM-p%fm^gN6QRWj8jM zY-1wpX0e==Sr)!U^ZW9EFkLbdL;*D|q^-qr78c8D4>gMqB|?ylc2&wi`0;!-OHv(8 z0&))?5I+Cf%#Mh0N^%3yU;F&4NUJ;cUIUYx&D^;4(+y0nnkATggVmj-^RT7Ds(Jq%ogV8j6$4s zk9d~3v0{7%pqqcRFU-vicD96?0w^v{zUxwubEL0)h5@rJEaQOz=~%R75>qfU=Y&90 zurp4RD%e@duX!AK1VB?ji%ou>f}L&9e2gctScxwMJA1s)1Uuu>;J`b-0{V=Td9713 z?S*s#(}oFrhF_98cE9&~yZ76qU;MWs{16WV2hD8Yvmes5+MCTm)D1^=3w(A1ALG4w z_t^KZ0_=MUd^Uhw?mN6taWU*PF+fsg;xG$o+M&=Q8FfFHYsgHfq}39;@SFVGl36ji z&nhEIZobV!G#xy2U!^0niujblIDl;_q`n$Bri%yGz$>Se*6J+h#T6KrXD5tfBJPO7 zOV1TCUVg{z1|v7oPBcTMvBu(C$aflAP4j^pd59WeuTj~J#y=SRl~GAjygle?EI1W} zshm;){HABnYXN}hd0p|tCQIUa+tawwC|R=LKTkkphe&V*TDO}XH!0_$jM_6V${`Tq-Gjk-<@ z&jQOeq3cAupp%JeU7O(;7rVwjO7CZ9aSvI&ewIPGU;U5{`H&vM=|h%}dSKM$LKm$c z91Zc}hvped2o(I%1H9sg_G1<-+(i^!;GU+Q)6mbpv|BG3xNlc>HiO8^I5yfdBImfu-blsl z)O2x`U97M&IVTBM2!~C}<9kFv4Y4#;}+0qq(;QR3G2%O!5DOQrsx@0)|HZ#J8(=Cf!xwSjz2=G9sE z&&sV9mz<)wnFLv?irhEosJB;?2yb(ZZ?t@~6X8$XxTXk)?9Xw9;jIsN^1Qd5{VNCX z@)x!or|rtoW>)HF8_XO2ePNyrKYGVyAYRWN!{&a#YSxXLZ^{rm>os=S3B3;B-~6!c16lII@dMw5Gt!GyzClp38Kg_Wh|=05|c|3lSd^d z6ecDfV^>gjUDSo4msRKIE$ZwCJ1>!NGPYbUAGL?%cQMm&BbPDf%8;EMoH?|EUwE`%tyxK_sw}Q`Dc#>$T~+~qEU@X-7>&a z)a=cM*l=n86dGwRp}|xmes%K8JrCN5uO55;Dvi4})!VLK>Eh$RCsn{5bk`Go^UB(} z?=3m#=Oj~4UHw@)QGOlCbfA&HF$Ok@5EB1f&TPAZ_xOpqEwRz9(bir0_U^c7*@nw1 z(12@?a)t+iOv|o_TU6mrI5eE-Rhf+Tp{TBdcmHUb7<5OMCQeKC(R4EOiACPhN*tK! zbZ$^@KJONb@XFfj2F7>ysw+tS=CIxA&ZFZi(~07(I6Br1ZwtJ_um0Pf4m?YefjrGm zx_Ic+_nNGq9Cab92U_Gwn?a8F&Yg&(p<&)Rj$caUE@z zZ7^{90m;-a=L4t1&exX$Gx*EpamV*^%_VWVe%kKS2OeG~a0MRWCtB80JKP!0=L%2! 
z?x@-Uz^RbDx^12PjPu_rhJYBrY78BvZv9^GJm0G!V(0dUPcpY>Q!91RHs*FyhdpEz zIGx5LZgEUMWh-lkN?-w33M)N!|r^Lc7!u$no@z3xdMn+OEx zj%)VkQ|BDAJ3a38bn(iwHLJ%k{ciEm{LRj4J2kvn-Jb%@YA=5EAG5kcBTEk2#!tF< zD)X^O^oDu?Vt?uQmpT7dGWBc%@`R=>t1h8q{ac~e(VM}*%}H94DY2onijo*t)VK2+ zbyJcvmyzTEKI?c3DWjbcgqdVz2K9J?pjVc4M-6WTTHa% z{8jN?y?Ab9{j<*zw|~?P77A#nLA3;z;ZLN}9UCosKAJrARB>BpA{WlCi#VrlikAIj zZej4)9UYr#-<>d!yQ>lxBi*{4CtgCe#?#d(bcID-j=WQZ>-U$`% z%I5#DaT;=(H}{{zRpuYn9Obw7?JE+`$p^X1T!(l_O| zd?&wL#$8aEoYp(!w&X&YE4)L?1ZJmsOaIG8#>!r)Jsj8^pt&$meuKFXlq-@81m=qT zm6iD~(MT8r1t12DVL_N2AH*gp7*kehK}?qaY(Pv5z$JGcB^Y-+Yx~x_GI`p6?b~|O zxBd6ow`biK`I@>Gf7|&#>s{oBYa*?i`$Uq*7exVT{sw2sgQm0YbME7}&ZgJ#El&R0 z7e9)>mmlAgUweZ4W~IB4IoJ}Bc<7-GVTO&dC$wOfL;RtTQ?a@te-i+9t{n*Vbr$?h zF%Ezth%{>F%f!(RB`z-#;*5=sZi{T(6)tuK~#ib?i*r}muQ5Qb2+u~S9%AW1S>r?Q1|PHD?my?KSlP?9rC`=+T?u&qE~piauT(RrAj}_;B1NSWdqOW2o}P#`I|a)FoU8AdqA;nojft@ZnB+(%e-TH`Ml|6w8vhC^H)XV zt?;YjPfk_9C5vkB2-PO^)Kkh%%k3y2yYn=4lvVSt1JEz>H${KACYJxI&@Prd{#Ue| z9C!$~w+{x1LBOzwccB+Q3W0PXVcl48jv9yT3*qv20e0uQJuDBMg~YZqC>I_5eq~~4 zWaEFB#8BB6VK|uh78hck`9&!4Q&^E^J#o#zPyz%2)5g z@U6DAge9F-fEYPud2l;ChE@bC1IavAwx*I*BE!nI$I5nIR&;EOAp(Q@teQro9#-i- zKkjucyCvbq*JH`@hA;Yq%H_|+2}}FTkVlR8a#i7 ziZC-Oikt1<`ZnC`T5+>*{z~L8HN|0SEf=?(Y3H!LwWDu*%W%Rvxg-Jj=7RVZEgH8> zGZP8qm1GK8cUUlPIms*-@Ar-G&adn1{<1+h*Z6vEpT-yJ4$OZo^aPw5afSmGZO*kP ziTv|npfT=6FNtdThXq7OuYu^Hc9^_J;=`ctSmK0=#7xT<1e<4r5}hhtatxef@O!s+ zW!XxgJR#z?SyFC;ly^nTcF!$1wncIX*VUZ{*J#sZjUD&liT}gW`<}W4e8*R~&jH`k z3N0WALclBten<91+}1Avxf{V89Yvx&!dY6iCkMz{zPkW<6+nja{;vWV_Ij(JT#^4w z-2i&wTvs8m_9rOKE0X9N*_fs<@e?2i5Wr=^{Z}N;7--<*W3$$0 zgU`$iAtRBDqFSRpP;V4U!!!Mr8Hh4l7M-6H$RlO{xoogSXO@4RV@HGv&0K}xW923H zEs@IjaXXCk^Jlv0EFBt2p5MER@N>NTjFElJqC6^6_Gaw}W`!V6aM&)Za*ph?syDLF z`4j%H%RbMfP7^T8-V8c7BiFzNBBS3z@X=}|$%h4MpfB=_k0yva%&=Ll3Nzvl_q;*T5NdB3_B5nxn zYf@n?DpS82XLv2BD7U!3%(yK(7x~_viNd8`(^{J2O2o|Y;`My#6q>s$QL@w~p z2K}nwrwEuGrI1JJr|e(l%!QIh{^xt&g)Tlb4z^9S8F42eqelx_i1SfTFCQF{b2FT3 zJxz{1zvDou2LfIjm6eo|TsvGy9*MXIH4h^*_4OaB8|$QmvA1eUCJ_?{h6L}BW_37Su#Gd z%twycxLuLuK61pIhd%=)g1yM~)4g-j`CeM#b4N@ai#ZIN8#q$2kzQYd9#97ztf@UQ z;tpY9C@hN<)jEr$)0YauJl4^Hu|kytLvFuBWJom8H?-!|!n_Vy-6HPx$g)4$|N2Kx zdcRNkmw~2A0y^Cr;4}3g$C4Y_7yxdl3m@ zUco03=jhhXV%RlquJJ9sA_H2L)$)lmy~UaKLZmTm>%5})y6YnwvGugYH&d-$)izS? 
z1!q+IC(g`goH6U1g$jDf!>ZJ$#QGk$mTwMv1MgU8OzUL_e&U?hN@C8S$TIbo|LP;I znmMK=lHa<#kPOHs;!I+t`)tmkkp<~It5Ecn7<{+daE5k`5O|65}F?$>0=m*u{Oq zwAsd)49#K8?bBLW7S5|15J?=`+KF5H_D)#$n!3K`d0^zkDXoQd0dT9eFq-(9CaqUg z5$`j%IgYi0cIZ4Q^~h~cP3eP=M&*~t>Dh?8dg|F`=^*eO;xs7nF%~$dVXGPccZWrn z`S=(~OfQPM|A-{!4T~kJS*yNZ7)#9BTFCo~gS z*_R6|6Cdt&Fp^GNy1#8-0%zSds+?7R<3*b;N0$$1`y~&5=Oy7jUVzbXbEc@_JF*AMDnkiZkhDQ2!iun^`Do55me^vj}m#X2OYmdU3EJ z-r747f4^7V2eGo}=NPkFFjY?IqR!LJfG#gcjnPwiG&yN!B)+TP+`*Cfs>0O4WMamY z5;DH?!cf!vKB)q-z?6%+67>9&q-Xo{gV;XZi)zw>AH z2*G)EOEMX=geapT?rGq0??`e;gwM|DpDB(yOTc+xI7*3H5=SX9kHpaoW^@|EH;QFA zPjA3ppY9y0$yQ=NKf%7Jv2tEtS`TO>%RD&E#(5X02%1NX_c=kuc{g?s*l`!(06{_P z)xc-8Y4;k*BaX)l)EV_E$>N11mo2Rxuw>`^F?8qr7h881N?;qFzu}^lvp|4~(VLcC zz-KDTS}L3~TPn)h`DtI4XliN-?d;@m`%hBNxgVMr`#PhZc2{aSH1Gd-<-CylqBAA% z5ML&DlPb45=eCUAu|{pI-?o}cC)Bm3UUPb!>aczZgp9mC?TH9kfD`}}@D)^nlw=O@n}5^Cx# zds5SBy+UgyaC6xZva**)8Mw@m`(BSiu*3!9o2lymj1KpGj7W z*UqkRKdy1+yjoZjajtHUxR;?c?{MO$(ubIP_?gL*YcWLEM%)*p1jR3#B4G--W0lij zf^>}j0#z^)Pp6#*Nw3I6(b3NvCz*)5MrybumG5zEeu9xQ zh^F@7Sb~)~(yj51ubX#RRCF7r`NU!VCvw{E*bUi6NAED&e59>~m+*Mq&LFrBO1wNf zhy%b%*RAYi!++N7(OYS4?c+ z-pPDO8y3>8y}V!ec{%9C{>RFg`$9Cnc^3jP=JQarZ0($L2ta(;SvWf*50gBi4X0Ut zzwt1?+7!)^hdyzBs-kr@&VSl#oL9Lr_tj@>oVu0#d&W7NtXDQ-)cKAOAX?UX*#7VAMo!}{&A;5A0zAZiY0zl6ln_$bLdHv z2^gri{xTwUUE(uhBrPd0l%Iz^(M{ZW^=19>ct5hcnAk~3CDX{$ON zPCIy(7?LC#n~%|zg;j<$=cTma%Lix?1J{zi`p{SSg2<#wH1t(IQ+a>t#_U(TQSh;R zeQaOj5JK}6lzXEG?t3tmn^H&dl|@E9sSJuX5@_{CRe}FOjKUrfquG9ZHaHpNLP-`8 z*3ZrmAxz;_M1+*vFRBpx|JvEwf=XXE3dyy#qknu$zD7M(^6Y2~D=~K+$&*V+vVDF1 z*v?CxqWl`}W0C+nR{kVxujSVs7)egXnB3;Jr=EWBi!{R;ODp4BI|^yAwun;1tG3hw zl#ze(xE2~6rS=VoHDwqgXb`JIf7+w;81It}B@q{`?fGb{t{Z$hbtE8wTMKdq1Pjl! z?13?N;Xc?mwBAgEsI1-+xay}F7|aF$m|Mz^8nYu^S`cwxbCzuQ|7d#~_^67j@joF6 z1W3G#5{)nDT4D`GYbx5t2J0^Dg}b_|u^JmHAR35RwWhEER49p?NY=|0+6OB3(Mnre zeJW4I)&ioH1TYCNLI7Xzt;APuj4${`f>8GVJ#+8AfJpoMJfFWG$=*9N_sqcC0R)&33=E3bNE&b;Rkc z)~?*aeGRfC_3IulV~EC7K#LM9zO`roisRMi4~{=r|VYBIZ(OPSP>+7F7{DAi?Vm-@Hc&Hd{> z_M4ef(V8yW-U@`Qmfp^@GN?YkbhOlhJB8?8)0_Vriq(IWDR-U)Wv_yH7%kLJ8RlYb zG4FQMlLzD7l{C)(G>|gjl2fLa3M=+wU0HobQ_ofY-mcaUa&B2SB~a2bT3gjPuKwKP zkm}a4cM0TGM8BFc<3+w2H2ownE*~X|IQcz!{`f|JeM{-sW=eDAr=YUJ$l6O+baM>Y zVolf0t8+_U_FKM8|KN3X-)q=IH!>n8h&awB(*Cq@f4udR+@^TlN3uxOa_L(L@qnEB zoA<}r4tFn>rkh!^lrLAI(_uU~F4vPBFnj&29~X|c&b#XyF7oRwEvvI!<|p&+bQ7UWXFmroc&G5Dhoh%+u0z3@1)*)iEY ze?Ax&_Xw%fAW+e2elZ!#)TkBia7#!n(L(#t{Vu31#EV_-)5zrN0a89&`VFk?Ia zMv#}jo*qTt1|o7Ux-~hCCI`*Emv)3g-=Nt30Gfbscc!9lqrF|$ z3iA`=5`N}kdeqG={(s2Dl%VJZ!(5>JAGG;@he$?lj28_5uDBcjFc6!A+| z*}#}v;|0Sm!b7f_KwEVu#0xSgDzWo?eA+#qKeN#|9SX1qYTbW^K0F0{!7s^7?860U zg>c|1x1m!&el$K{)^I1ptu^#g!`y`a4CBuR!$R{>^ZDcSXUb4t&uQ{2o5yW1eBofI z5E((A{g12AZ!w@iVx=Cy7Mf?qtyrke+H>OxTaxA^x9u}25ayt^}#)j~@L=C=x z59}L&wkVO~xD&lBnZ=Ts&2n)>=F`b9_plx>0@kCFGE9mW0Beg=<)809vda1-)Q_L1 z7$5#jQdk)>>^CGO?Z2u2?__^fEsj`tw15_Gr|H}Bk#!7B3b^{%~ z+O*>r1GQB2bX`T#}`7qup2X{O%n^VqM#h;1H zQ(-@{&Z!yE$`GN&kb?n=V;9?^PfRqbhGZ;QA6Bidr=FVfjv3fw)lNp>D@+zM^cjjD z$TH)FyFR)YQf7rJl?KYHEldibLn9qsA&rL79dECAJsxLlNA=SdJ0QhZ>v;R-t|OA| zv)?%)CDA(UohG`3Ar*yk45L#j8c}PrO|AS2Md66qy8_w>A8L>h#%lglJjS1@rTm%P ztgT$fV$9V@9$s%Ps`%Wmc}{CE*Ewp9oF`YIB*b=yNF@>lWl&`Dmt{*zp8HGrJG4P7 z{u1@YMgK)0KaNNA8+&;^?+TSHij$o4Rr2^@Sx)w+6zS56 z<%l`KtcyA2h7HC{}R}SaC|B zo!R7mpcSKh8M55{cswuTH>r=%*X&JwIR0~>qMcL(=;iSJSeBP9j1 zV^VO0Ldk^fpX-|ELLvYbe~e0L{6+itL>|s7Q29d|RUA}zEZ=i7J*mUP*h*K3BR*s>YQ&8QUe}l8>KEkTR%F zMZ2m{Ex$-t_&4EzR{SQ9Z<00PV<(Ll(EKm081?n|oF#kfIm>y`iUnPt@IIl% zo&Voe@W&4et4hazqL4oRk{>w91=1Fm$o}9otB*#!2860sH~nGf%kz4i{QUv#{5D$G zGo!!G!q$^R^Cn_gJ~Sgo`*ECCzcz8dcK*lHb7g(tMNO&v=U2!?I$tKXr=Un`=V938 
z0+WZc?sgx}dyjH6hk?V*{A;lB-huhV!JsTx;Nd*_dEPem z#x!w!%ivad@@waH3~n7veDGox#gK&;J0H=G8GHG2R~O7;OecRXZ{g3?EBJHWGX7LA z;?FJhe8nP)3B*x!AX*et`B`Oj8qw&X&t%<>6nV4}ql#EI&{ZTBih8qED`qZ^zi23B zoy9q^IE-qhRi2^sE&*pz076JG^7Gv?2&rF)mOsARPo|4t>3A6DmN%W9lp z^lO}~{7FvM0a`ok&+3`gue5gx5`pk{t?DaPpGh@CZcStip|9J!RR-clL#wRJ@(CPx zqq|t5rl2vK{S7UWxt-N~(q3l?r_c_vz5w>8Vv*@cEw5Nu#$ROrMfx&HKF4~(GOM~M z-Q3B)^G;TcoV1@Tkx~vy6ZyTwuRfbV9PBn3L625EstAd$SeZE(k1`yVay@v=Xm{^s zV9lK3e`RHyg-nu2Vik$f4QZRQlD-g3>H!Kt@AGbc0k<}q`64<>+&9%|-%lzJ=V zjBx7hH15w#y?M=?ZK<~+lY5JkP>Ri*XHswHnmJFV-b#@{Q*RfVIS-}Y$~aq=dK;yA z(4n#1X(Nzb?SIi>a8Z^t`6ygycfYL=vHJjcs%t6E`FDK0~hzF-oS;GdIJ~H)El_CBlQL@Zcn{|i>awMaB*Gg4P0E6dIJ~Z zQg5T^HRE4sG~AIZ-e}t0-)DJ~9cwX8QOfz!_{YL`%I=f@mG;JXE@`7?SLZ>YZ??B* z7iC;O|A~-wov#M*Kl>GI)jrG=ji6BWN^N25y6j+Y7f&H`Q?RwWU$FNraU*U00IO`? zkHn9rr1f&n?eT;cpJ8F`0i1d70HaDhi6o&40D~dw=nsI5pB;Kyot`-DCwttt&g{oS>G~KGQEdqlHvBgD6OKw zo1|zlzT~CBhW*(X#I(Dg#Ed8IldukYgeHPgPrx^Oxu>esU}`js~_)*9W5gG2Vr zOK_!#q`|t>g|(b%;kZPcJS)2W=u8&~Hn83%s|m8|3T!BCcL@B2S3MPv^j!mu#5 zqZtJAZv_8RULEP(xX!JhLk7#bB=2JePINV{eKchD;)ug|%?w-Twku2~^;6r!=G5G< zxdE>v8Lc4qN81N~CBx-}oYI>AB+WGsF(S%nHt$J><=!t*443!etcJ!UrR)M2-lmMo zIWj7FK~u9=C|GqT`^|Bl^d>(>h-`4uQ<$RqC2ms6-mR2cr(fiFqtvT*9K{K>=GLSNHgTFZ+(j9qzH@>+&>aQ2`kpgq4Y3sLD8y?RqTGK3lq9bY#H+fVL{?7wFA z^t)h(xj(umGcu@gM;#`=LvlKag2VQ;=0*m^3m!e4^*uJ$LlSQfIB$jKFBIUdIVj0P zMbiRhHQA9tHGzv~qPJ=V=0-qhO)F?;<3{c&Zm!Ap?Q8c&ra86WP^05oe4WeIY8^+A zaJ}Dgd1lC`3G=A6-yz79OtxC3tqj+TvXUO=-1R3B^5*IFN=0AWHpZ*wTdn%JW_$FjU!%iW_LM#&}v4PHqL z?x`GJfW!1%pxv*EBZc^*U3rZg>jpCv*>ySk1@HD``Jcl2`-(sMHYdR$L+%6iNB3lD zOR#7N5e&lM7|HSfEtA777rY*5o=`N#Z;f5XLNIXFaZ+HQvPfPdYu%bXxij*lrTO&0 zpBc%KA3jVp9`z= zqx=c1!}&tgjhl?512|6qY0jl`CP1AH&6V`zo9D2%Lif!Wegy`@jfOcE_nT`va236_ zxNgMA9a#G?*ZSo(Qf@@o6^cdhz24rq5XeX_M>?_l$Mn%KvpWLiKk`Jjalv?m(>FiL zmDjpYd!8G)(cIp%hF(Sn3l6+>*(+pjwFsqajd^qtum7fOetq?OGWEvSBf~Lu=0ygJ zV_;!fplEdDcyP7Ro;O@MoOFoW$23kxVdccEiJjd3lAJ|;@oik(qOH6J|AUnsplJ-& z=IxC;B0qPCu3uyt?$OP7$`DDl9#Vh?GN+%5SQv`w^m*L%%eAqCWEb#2kUA)FxpcHcwj~2aGc!&m(cf z3(h`4_8%Q1cf<>>IiAz*9r1$YB!$dA%!rD%3@hAe7qfUflolx8J7XZI-r5$(@2vOt zzIn>V`nT$v!_!6VEjrlJmD8-x7}VOVzeU2f`nL|Xyw$%s9Fg3Xt^v(@o#egU%h`uh zT8iqM_1b2=tM!usGOWJlx$mkD>5Fh?SwOKoJsqTs{}h8#EEN!zq%KHMW+~z`I8J zaAFO!{?tp<8N#9Bb<9c!hlUf@s+9%#cmIGHxWM>vXHlz7D5P>i(|ZcOHYJ)?^nGnY z5D4&y+EYu;mybzv1F>%_6Pu+Z0t~$Oo$DCcBp{NZ8YG@Mr^(;?&Lk}vu#!Xw(lP`@6S*-ad>+h{G+smM}m9{l^;}VEg7Tqog-dreWcbsu2tp zIQM>A1*6?+B;8&@H4U42syC=alA@b4drIZIrHSuEvZ7P{-R~sbp2&Cdg}g*J_mfs) zQBBdEkrhxnD@S z9do~sm+0nPw?DJf`qO;0{^Ul6MBm}+y#+-I%1Ea#*%43lode0%3?o&BaVZeiWA( zqFNG-@=Y_&ahO1(szh)trR78g3#s$co^AYu&i0({e_DPOdzj{~m@e>YfNN_$NBHg; z`0r-Oyas20%cNDW32Ch98JwIQ#IA4=wf6Y*+3`DK5dAyIXQe}~cOd5pu2RsGqjSSy z(8mB>3Q6DBsE}rA2uD#Um7E-_vq*O6*_or6k@JNq&sJpIOb7h#xygc&hV&KxWBb!T z3MEr0y4ln7Jt6!;>QY}6^N{2p4~%oLBf0jo>LaIUbTiJRYH|wV5|h*5`D>HYE5y88 zMqWXw`GcC69!?agrd-Q=5+nIZd|x@l>8yZXlg}TAUdu>Uh(~5}I3BJ8$>#o4uKNJ0 z*Y?m>)_zK@$xmYWfJcp?{#FvFs>HfvVoVZiRN~BJVnh;eQi-#YiPuZwG?h3znRuBb z-mDVuNG4t+iTO-r|LSlu(Mw|Y^CTW+`u0<$3eLOB;jY|QzAVW!5;*bM&%p)UX;;mK zbl(hTND|KFDL4r<`s|r0IHw*HPV1eC_GG2tBrxx@zmtNqlY7{{xjjEm!g+?923=-2 z!^MoxejlW`{W6aZhuO&(7QA{#qCGQGaPCaP`E3f$m}A0uDG8?}1?R3LoNH5X4sgZV zQTpY!=MpXo5z_M7?|?_QUvrXhj!VJ$+tJ~;;~q@5=f6{M65PjUw;`gs__^hna4t^5 zxjF@>IN6?Gq~HucCY?rtg`}IH)&U(hf#ZQ78`s|Nk zK5oDM>zHtYNjP_<;3T-G&*nCn1f0u{3Fnd|oRKLw32y7NC#B$I9TUz+vl95}0grCK z65QEmpO}L4%x@CQBTTg;`>2!OVb%qUl0bi*0xB$6D9mTK!oFQ()d2J;NO$4*l5nm_ z!4byoz_}v@=k#O3DNVxZpMoRo-hsod;EDFU%spq{+@7wP31mG7Qe9-pBH_U41}Sd8 z?mH%&MM*d_Q*dNyap3$e1!wFr;rNnpzLSC@3z7q8df+~T$Aq&e3Fq1r99b+KI1MQ{-##WBUlPu7DLBpI@ZlOijX(TWnl7R~!@0q9mN} 
zrQp;g;oOjd)Bl)od`UPTgGaYtHznbmoPzV*uaAkJB%D8`;7m)x+0Gi{_G{)b;dISN z;Ac__jz}cVxW`g(zH>}Ci;{3oOu-Q$#)0Ec!FhklQTs&!$`<&NaJGU+2S0{+{|dK7 zv+c7-i);LqlTmismv5I{r0geu2~fAc_Z$=cHA(nCJ39P!zrbHuFBMXS-I=K3JE0+DNZFC@Gt$Ni#r6>@HF=S+^*oDed{c_Wok`(cg|9SKg_g>4_3el_*oLJSz% zcRJuq9pweRGC4g-qjEA}Cp3~S3v~Ox0U0z8qKxednqP8LI$N@2OV0^LFI7Dr0%XIw z3oOp*d(A#oo%n$m#iiy11>D}s48@y1I%k*NHyG3*~Qhc=@ooC7@eD; zYRFI2Akh0`^bVT6VKckP{z*FUoTCDN{zUjx$aS(h6)l0oyjaEEA#_erSU3aNTl3Qzy$y=x zTvl{m;Q*=mq!4ZZb{^G+%vm1eX}oXbzj;{0Bnm*ZFH^hwQ`H#vgG*frqcowj@dd4) zWj;f18DLx)jq;wE=NaakMs)K4qnvvc7OMIGs=>J@_1`?qiT3MU#3`|$FxjC1zE!mA zsMBiN`=6R|rIr0X!@5G5*s^C}VhhBIerBTn$;p8B`JfmC3udKNBPVX{J{L@v5+{c3jA=qPs)9AKAWU7l&IPPVPQW7gnqab2(rY2p@p z`mf}Ki{cmQY=SfDuZraWIO41zOciU+rvx}wtYe`*@}ofPHdEnBe;A$XMf>kjI@h`< z=+A8rKSNhCViQfA(sc8A+yIIrg=zjgN71(ug9gLT!*=VhMco>Hf+$g|M9q4pYioIe zqu&p^>Q!otU1(6ztwz6|LhQj?(YxsLUDYZ;^6u}8FIo$4Vv^_KwkgQe7eey8?cWVf zLuKW|_IEccO!CpJg~a-NPB>b?JfN+dI6rk>me}}3PG8red92$eeOw#emK&RE8fLv< zhZP{~cJ!O{$>Fv3yG#0 zTkHRa5S5UfVKzHB-yRSZ{&yq!h81)a_!p-s@ImT~#eL#!=_2)Ed*odTsSh6;ss6RU zP&JLZs^?_qSyyN7ToSYY7pRN;&k21QCPCzr zudSS$Mqh@R<@A~Z0rToU8M(107T)cTZY6f5pd>+I0kgH{;@n6+a)M*8xHz}&i-frX zl4<%c=`A>g(4nD6?ZBPV-HDHK2@0$3)hy_ngoea;Ifew3mtQF|%EDApPHza-^R)aj zo!Xq)gi&@aP?^NAZ(u%Q@4X{2K~r>j%Gc}WvuK2vbwmdn#@}9Po&rMaXXiX`5QJkg!#O`;h;m9Yh1!CJT_sv0@63} zyJa+4_}wCY;dgrpyc(-~Lb+m1ef)6J9C&wvGy}1zDcYn|vuLCZbGvSCfbnHI_NAkf z=Zn8oxa7ch&I2Sb^OZ5*LOwr0W@~|mi=V1ZhC3x5q{;X}W@T#Dh=;JCyDceOjO7?NLeFL6}4y8OA z6G~}MQcB#R6y=A92(nkk8WtvF&C%6m{G_w6gea|k#oyoZV;}7MA+bOS8Z2Ho;s9Sm z0GPS~#M5wJ&L@KD83NFKzm)fRiFeD7V?8};Mkm498Na3;e7OZ|f@o85ff`8(j$o--laDT;B)%Z0 zeol6NI2wPwqm0*gi*>VKC24_hl4tN{$tn}Z>ONtpTwuIrY z2M#gmxp+wXM~6|p9Z%|K3bNTU<-Hin+RWJjZ*oFr)u{!63Hcu`2q!{da@<^ZIJh|J zJfSLWQ%X&!yIHe`V234fvuxl%caUOPu2UUv}RHcos*VOsN=L{@J{=sa6?+TMZg zPMc9-r_CX(8Ac)&-GuA8?1TSCwy2q=L@~s%Le0}p84r6Zh9{9Y$Pgmskj20K@Sc#TH~XNMsQM;SiLwrRTwzj z5(Q=^TWt0?w0^6cD!^lBL+PY${2!pGajD3?m*PwvIbWuUA?HjC^9(d2g?3}(zWACl z1$vwPU<9#20x%RmEeN)+8YI}=CAcc~ zt9ZYPgnSYemg5M%QrTg`5hl~7E8&m-LEK*oma$8PKAhOU8^0bOxP#+t1RkBlxyu1F z{St!Vmx?rqj!lFV@q7KT;9wJDNlaA`Fbo7FV(}^j_zpvW3d1`Ng~;Jyc*g8D zN)?7TM7Zt$*D$;gZrT=>I0%<=GR}UV`%jn;iX5OKX!2%{u{O1fZzn9YQG#&#DWj#` zZONF#9xSQ{nTB=RIjk#kDh7qu&4osE6Fc@-W)WSI;4s|2(am|OP`N$p?8~g|5wa|Z zi}dKewB53GWu{Tyq0Ju!*q%YDExrULD+7JX`O^Nl0R|HLMIkW>@zI{dZf!3yYj8m& zT>k*hcMM1<2_UAOwugY(a_N!C9b9S@!MW4%B;yJiV$ED`W#J7ae7D z?-2C!D&e9pvxce{qv5LzX5n;(ZcHD}$!8lSyNqZLN}vO0%tEEKOW1VtfPNW02 zO41Oo-4=0dju$+?mypH;Ik;v z%h4Sl^V)EtDBDlgi<)8Z-G@za;!&u(aTw9bVG}X?D}s8f0Lt&FN%i#r%7oOfh0)CR zGF{NX-6TPj(}E{6zAV)%nK-z7BM$j^88?LR@EtM*OG%Z4<0Zj;KEZiz<)_6Mt`m-o z+hDjVLT0YWlf(ycRv0v>#o*I0KW9>caS5YrUp_C5ah~3vsT~iiaLQ$oVcdpPw>5Mz zp_R3!4}}g*EVoAN^@4oWO-CS!>1B2`CZ~dPbu<+8J}EX`-jJaPVdiMzewxP6**3SI4>4CNqW-> zi{LbJ>@>)mLEtLBGs3ZRi^9w-)#|1Y3MIKxM3o7}s)?REMdE5EE?0G>Y)GUT90-Y2 zNN77(YelT>ggguCof!4eb%cc+4|{WCpRW_%wl4a1rg+Z`UYCr04sXk}&nNmb2afYk z#?(O+(f`*V=C4W+^8=!NQ$ft%q=^jLE;oodcOerqhAr9z=qSo%S>4CoL4=ya{&VF| zb*ThGXey{Qx^|?`kG!t*2^J-K*JFYN>%0^5IBG^vFGuiW3M?*KMh-eNx&O@Bc75ue z11d&-=MIz3=%&no_7Bc6NO4gyrldSqk9PRB zpY+f;sR^a+$l#UKWUj~k#>C2O9 zx-3C-g&?}(>xjOddSyL|ogth8UbV%3@K7cM!u?N<0D23*DIs*?*8?Iqw8sBM@Spwp zv%Vj)3V!&{FS)d6!M(-b8k7;t+7y3lHO3FzP?d9K&q-oZ%INX1kCBrozio-O_u)V(M`ELXK@tA+!WpD;W>{3Ce~>0`p?cBl(CI} z^82#;EWdBM&+_{&|0F;0Pu@j7DpWBm8$OiDz#Q2)%$*~Ribt4Fcan~lSzG&{@?w_5 zfUgN_uOhCq`I7eVI&SJ}CopqKr{7%9wpM$X2$g4)bd+z34D`qTET5bI@>>o2z|8*~ zfI(?ZFOW9c$~%KQZ({dKfnFjtw?_MBYu4ku12Yf!8w%zLVivubb*{ZkdtoiffxJ5*_=t6LP@uSZi^)x;W8?g z+t`=#>o!b0v~|VKoSi__f7t(Y|~Fj9HhQ_{NuUbHV(gZx15+<+k`+zQ1$y z>)E1rfuNV)y@8*)VL@w62M>YrOY4RNt-H6#Gg`+S-^$FMAzbr!r-i$xzX47 
zQQbvuwOW(1F7xI1v`5B{WUWI&}or^StbBW1Dd{3APVH>Yee?gX+Ok#?Ws+C^384dJ1y)n z{w-D3uW6xRM&uMB%K}t2LzwC7sfa4=At+ILvN?mImZD{plf_3-5j+ezRaR`_Nx5HT zik?iSt1>d}8$KxGKl!(xa*2OZUKUPwy`1A-qTPz3W|355=Bzu}Wdr*8Iu;@GHLle} zU2`_-t+@yM(Y{IA!d^;Bu!C0HtKRqU&I!AgGr_9HEJC$*=tMKS1*Fn#9-rCj34k_Kg1!zq29iqv5y0 zX~q%wy>rG<@LNCx-fzS&f-qMns7v5Q(YQ>}N>0C%68J^Y2=Q)*c+U{xecj*Cn!&!+ zAKic4tnXv^Tl(^s@zJc(&z>XfS5|4bhpf_Sc|OTmvpqt3bLS;iX|vs`@cPQ;-mV2Y zg}`Sa63ar}%0k^Ki!%##D+~1|S-@GSTUn?(V&OB;-`!Nx+LgoP2r~sqbC8qE8 z$C2#xciMG)6-J@F#C=$Fm?z1P(seokUiCdPQBJto8N*&C%AR)pb(tt(>s~cc+}qzp zC&zBhnE(VpfoYP$tx;^Om;ig20Ek#&&2FcoGC+Hr3DBFE08dPFCV;BLodA36SAQ;A zIw5_frMnZofDDbG9ny#F?}CFu`ir6?24yTw`&Y+v;@=#e6aVf_D`$|cz`-m%~w8sCHr0DEYFj7%ClgKR<=udngj4h++hkm?Kga$AC z0LjpPZ#oW9$OvAQFXtXP+Cl~hWN>n3-b4T=S5)8&M!^0Gie%|-9cL8Bm^cJ9nstd7uS8-eVL?L`SZ@?R;20gie4u1UuyrO?tO67 zc0Sno=O?(+fyM;QOi_uk;SpYs=TTyz!B8f{aKp z*pFj9x0lQm$=TWCFdf+Nq#OqR^Nr4Kpy>GYfOGo3)7B;mRCf>cf7{A5D$vf3LO?$? zAr18ZZMQN4(AavN-I>6yQn1|^rf&0dQ4pcBxEj?&r+8AZF=1CC+6>o!U@&WDo;VPQ z@$JYRV4E@)9JVpZ`V1ie;I%Sf^)g@yCmuXcQaazG(nBfq@XnAVJ;)Yypt`lz@IV(q zNFSPz5#$1Ie`YY|qoJ;anj7M!p+?Y}?UBYB{eo6)Vb7^YY*`DW!Q!ulH@D$2^e^D3DA?v8WG7HW!G;g7h;Nk|Xk* zq`*Lb3=xoIhyebdjZhJg2m?-(a@jf-i%f6>$AV>qCQIO^T81a7RZR^~LjJl{90cVO zEU`A=WTiM9_fF*;yb7n3Smwx&jM&Yn!BDxzZZ2%Yh+)L8Dh|c`T)z-36Q$5(6jx~b zxPWnoy(g2JW4{#(yb*iO%TN0~azu~+d6K(hadk4( z&js^nJJ;^d#49!Sm&sBz_7^$F$&WgE8T-p@=dGR}U-MrU$Xok8i};ZRwEdo?Jn=tI z54uleFH1be{_+@)09zqH)_t|eEU7kK;WIH1@4nTPCUl`vXnS)k2e5zJ1SUM)KEmA$c%M?BDN|W*`=*J=(2kijrd)3oPVnQ4tbu0hAQ>m)^ zBXX3H2#Y7z!1x&}_R`Ro)&)FpKX%7UQP)J!TiX}x^Y94!#@a_xa0`=b=?$rhj{VPZB?Ot^$! z3vbf3@RmS1v1UV?##k4g%>I0#&W=~RSRDOEJh(7@0S{4;b@@DxYN`Z5=1D# zn?Gy&&aV)6%Pt1$yn>7C=FjoyCJWw6ZfP_b<06CQH@|R4$Zx9AE8iR7$_p+0 z8Myo+>r;RE%Kn+iVTSjJ{q=6Of|g)U%^pYIT7zwC@(MIUJdJK8Wsj;}yejZ} zh>a8S$Rt?Jo{a8C(0f9%ZQ}|u+QuE9S^u<9ZHBrITFCZvNT;SiU6;xdsCY?Y+TBq|C$1-E@5$%L zb%h;kUZHOLT>LH+u=3q?_&$bD5~la)`wr8SN>Q|R(Z+O#H2WsbNV-c}?`y09g%rqO zI(ngfB5R!nzaf#QBF2a0?tSDtAGoCwA04%HN3!&mEtD@j3@TrG$MW{r%b(A~~kkhr|4jllO<*n3G(VXJd1gjeUi? 
zIEYcgKne~ti@lc3KC2W*C0H%;)iTKnDQ@qqQMV#vaAVQdfY~H01uo5sxJYJv$XX$^ zs;#P5d^YPPV8V~b!sc!*I#(j973<}#bA|+B|0_%fUrC+>Mxb0V9mBF@NidfPYu8Pp zM2FwgtE=SY1kQT~_3~|%^|DY!sx~BK2&Jy)?uym2G zTo@>C6K2K=4o73_;D{@X^(gto(8AkwS%lb&jJ2}Qmr`Z&osXPNvY0iYYYG(w1hu2g z@2ZvG(1P-jJO`whn#ShIbpzejM1JHiUq7eBU%rlb3k0;I3;yyga|Za!zhG_qBAK*p z-he>)A(DE@nKR5^-aD^QzN8@G>qD;#07~G<7ebH2Jw494zKvd-EAs_j{(;y2*24$l z?$U_6gyaRve0Tb-Y!CcwJPI(#FH5JT(i0 zc9UMB8gg^n_NA4?Rb2ARKBI!-1QSVhqKbw}14%D( z+Js15)_qCZr>Z~dzLyHw&nU^!5$4p?oPJ2;EX2tGOVudsVF-3?6KJ0dr@v(9zNjpx z{US4vce3 zX{y;?vOeCSV`qe9qcS1q42qSR+hWg1W_hj_X4wLC$@ZQc^`*8ls&t1xcv z5SiaEN`r=0*{vE`n`8cLj&{VHFpqxmWx4w#a+>{ld@nudw2LyE{rQCdyj$b}ZHWIk zdUUrqnAF7lr)!=u25IO6FR#J}v6S~|y!&{c&3lFBi8ec*P1Zc-QuP?DB~_MlZAe2s zKSo3MWcG<9u2zZuy%d#YK^roVL_r=PJtW%dF-UFEJ}0%5R6#v;$?mJ-y7l&+rsTmy zHGag)YfzYwotGfvCN)&zF?e&IV8qDUudST_M^bxt8Py%)A2?F=-H3Mm%NW@zT9KHO zU}l;3FO$!mvk~!vx0zF-$T2qu%=xnKH}6-NHGR|YcaluYlpTN&jS!L7E<1>Xx%Ls# z_>d8fuzzZBsHZ_nwl&BA;M6()PyB|gsDp_Hh5c3YSCUv0YY_Y!*8J7HApmGAO~Hq9 zgVcsFA}SOZBjWLSd7s8R%34OmU@Z#GYZWuZo048X-jm2kHPp!V{$NygeK=7eEcHDU34hNi5S;ZieTt0!j% zt1Iwbd(o&~hcZiRn#F<~FhBSA{xhH%pSuFIKJs-5k!n3eGsjrj-)?$6pk2|0RJtr& zT^z3V<%Ub14^|V0xJ7GX8<*iPVUxAB>GgS8{@z#JZ4u(NEUz)WF%X;B2y-6H46ger zF=UjCyv;uNDDt>DAf~S%3UGEze~O>nct=cM%|o6%Jev^i&`D?_$8qsvk!gs zWe2TfrVY@BG}(+S&IR`or2q_!%kJj5c?O#@S=K7xGuP0GVy;%T3}j4_#4PF0+7Qzp z3$VdRD>bw@R=G5-2>Xx+16EZ$Aj|v6xXzk(${NwrAT76;?8SiXX9ptevS*m%mea1c zFc7o@v0;j2e<^ZBl{7^p|7jSZn|Am$V?8euL{CslX+}~hY}ZTEjF^rrGKW2CHBM)G znZM+Pc&lFWqTl)v7eKb-5OQU!%)MB+lXmQqq#fFo>x`xN)N&qx482q@dERdsnR>~q ze(UNZzN+Ud&(Xf(E{Yy6>*-_(X)6J6FKWL6ANA_};wm;RfJ9Z5vaiYlhlki+*@L@d zj@U3QG46q)TUmZDboMFiM(Mfgn;FiHZ_H|?^P{w4wn#N>H1@KQ+N*Br3;XQA6$hON z5>0&!EBlbY=YmhMa;XH&WRshl3Q58&bt8j+iaf5;r;~pUkER}pH4%)F6-SN|SEA#v z1dzRa4fmS0>5D}^9*^sjfXTdOaEd4n@ z;gMSEWw@**0uN;ALCADbq%h#?OT=gf}z*z4r0w=Vz9j}HYY9@ z9%PtxE4pXkxg;XI zaJ7-mQkWMmX-zDJ0lqa~(VO0#*RS`r5)Q&G3UIAAq*x&vs%wO&s{A^u`)I>8pp$^#pr%D}@F_^QOLkF;wpH=mq*i?99NRK_w+zQnXEPXxHF<#+ zvx@l3x)jga6tjwXcDhTTQR#2IBda533Gdd5Re9D12+5s?YPER@1~QET_UT4ql@Q95 zrRp@oQOauJ<6W&5lA*0^YUZc8sX|^nP4~*rkftBW&#I>1@B>4?ksq_^LH@>?rtmj} z%77okTryjJEJghGtxvIj7}ag85iKSEb0z7>mEDG$uXf60 znBhh7jU^p%)X46)OO>m<5&@Q}#atwJvksYt%{1IVaFRgbGr#GLgCD_6v%(ViQHUAh zplOvt4EPZ=nF=#8g{6==aakxf5%1<#SSHXYJW*K|1Tk5iAv4?(EZMGBiqVOA6RksSm`hZu zw*=!Ip{zDXp75mP2@cOq^(dm;0#S-{xI>h=33;MVw~CJUXNj~vMTrtCQG5`ipZ8Kz zZ{!DYq8TqOB~UC-52MVw#~g02uO>ZW>dX0=jTpwy9L3KFL?K(;TItW-w;fddk;!Q< z3GItBS>eoeUZj#0NEs|x6HCRosCoK>HsPc!GQY_2oBezG`%zq$v^)HjY0y+muA@)+ z|3&P;n3)heE`Lyo9nu^TJI?*3h#i;HSP?rUH7Ry<6BPm(qEph4A#U8}$Pit!VmEA% z1^nO35czS&*(#X%ctVC4h5hm9GQ``5RME?mMTbaHWsj6NFJ7?$qwRt*?w9MS6J|%H zphAH`JLqRZQITG<3EL7nR)6_+G*X4;&e+3J3aenJi4`!s5sM?b&yD`9O>EA? 
zJN;QZFn?WE{^`82e%Yy-^Oai6S}1xjF04ltb~0y+V-HKRIWSP(%GHA5b$lCI$MpfN zZ+l5XEykVoEym!^o?L+<$qrlgmVYqED7WVYpjz+(aTcqyq}JF>$hU|+JxS*}`aA^S zJNJo=27ykD`SizY#2zSQPBuRvS{OMC!G2H<%KWXMId^VtBd2bsqFMB6c%&gQ&jzlajNW6TE)|_IT|KGpUiC`bFGXLahcpv%81D* zxpvf+q#2b#^4%F|o-fT+rc|BySlJ$yZZE^ap#!!Gp$@@SY`%2NpOD{Apa}X8-A3@% zKjh!XJ!hkIa@9k0>y=6I0W14`gM;>Rx2| z=(Inx4xpSyVCN;>?Dy9-a?07kDvpyg!0cTLv)B74OK|&q4qG-@u<=lP(VVy?P_kZp6P{GemV?Xo zsq7=fAjv6wQ|~~hz-jUeRr@NT0G~HrFoXsg2#>f4lpNqTlBd6aFfKat?o(+(G~Tan zko~6`oFww3Mb7ZaQNLO`x=f8>m1@GoWmLgrl1UXZ{~$xj6q6++XnqmXFO)$IH2+OT zjo~^@nt^tUy)BwXXPA^Z)<@tkM6kNj2*Xv^La`1;p$;uj-BuZ{s54wqcepwkE-BQ) zaJ5K5l^~}@Cr9I%TGL$W514b8`^`8ui@B#7aivEkZWV4O58uO{uRYLQduREfIq$I_ zWN2F$*6-iXu+Dudst(Voo6$&YsicuiqM%*)!b9{b@3HZl~*g(6|FWger1pUra}r67G`muX_=!ezX`$ z(%%59@PuvlRyf+xc*K1xQ#=CRFvE9b7=$Fp4ViCTt9Yt&j>B&ec=CKLCwli9T5HaQL;KV5O!03~QgP4w zLGwge-B=QfWNYokd?`vSjE~)3D!R~*4kMnmS+*X$Vm!s4N7hAEAij}rOWDYVQX3Ms zu7R>Yi`X1F9?xx2WpiBt`+0l*OqRg)f}{m+@DzW({AYRE0=a)OXkLjZ1ywO|>sx4j zyx{jw$t0R0Q|H;DdXi)c35Srwx=gC&&N-ll4YFGhiE0w@HVU%8keM!Om_Z#lb6N!l zexDGb_H*`ZOQ7ubI|J63OiqkKiaWb@zRe+b`@WkG#<}~daW73#O_d}16+(%NuRR#= zewE}TWjPPuIA0O7zP@@-dRv+uR`XR`)gY`tY(6clAIF?09p2vy@7J@wka^3TW;R_Y z^Ewz)rXRiecQTWig`;H_=F2R^Me4(t{$dXUcSw5@VWv{1vY~qWi{)N0s8G+`&enT+wdkQ4h z>2IEcE`Czyj~zcmaN3=Qo`ewk!?$F3BSlK?H``@dstuVd9Wm#S5xYkgK3RJM&Cj`b zI7jesir`_YEK*zTujVDV5r+_ZP8SuAmy6&sdy1U=*OK`WTn<;=BUpDCQacPODGh?E zIZOpH>neGWEz1)uFNrQAn%shDG9w|HxK5ou_k=Z)_B8Zz);mz0+-$@?N%5ZjDyYEQ zNG-yCUXboUVpR$Lvx)?V|2UjT_>XWRLF43+uZv()Ay^?+N+azSSs|||#efd|;s6dA zgqhsyH?P2vZ@Xs6fhn|CcVO~e-{a9%jh(HV>*8Du$1xpYLs$3+n37~e+Nzz3(}by0 zlk-mB#EQ<90@!`7Q=rmfQC8b@^|q-JWp4F~%9Ck{*U+OyUxmNf0R4omXdHE+7=UZw9%#fjREnEMzFgp$U*Wr=Be5(0*K17%Z+J&*jhD)3-xuPqIdQmwDo|AxEB|KZV=UZWBEkFGgM(kPC;vC@)TZ7} z``I_VBHfbJCTwn_2QmHi*hJxIJME>SiM=DQ*4>VO*KcGG?$7GBE=SUTReZH_ zb@Np{cCWKHx9UjV6|{b(Hr=Qu#cvl6Dx7vn_nUXi<{LMzSIgVv+d&w3`Bv@7k2gEY z*9FbH#W_SrH>QPOmR&x!_0Y@Fu70`}con%r)b{}`uv3m1GixR@`?bKE_!ytz&)TS4 z*_rrZ%KR4_s=C=TYrUMS#*lt(9?@+>n{oUqe>tXEm!TpGeU`P=U%o4LWi|~|KKB*E zEn?&FR|&1lS{ECK_hma1(jyp7bJ$L;-|qhV2*2%428G%8{F}+yhgxbLM)3IK_#uF;v8>{>SelFgSHhM3in)k%!enK!_QL@hc-0Y3b z{jyz$Rbdd*K~+XpyN)d-Ba7U@61a^toHpCF5#d+DP5UAtEls0m&Gma#edvR6W zo2bm7vLCqjC2Oia=(+tH>w5s9h6{#`@{K*V!MI=t zP^JhJmXRKT{`p@sfwqL{Ip5SD#$VUe{}9uAOnnx<%~C;g7*j&{(dNnrWcuO?9cZ3< zzkL;_wi8ph=^5wHr;rQUAXgr`3#ZL@5=uTNJ{$XyV2mK?u^CQ4K!Obgty`W<9n-z; z^#rAzfeB+CYBnJ=(~ea>uB|#C3^QcT%!MAbMmB#LPs@mgV|PJpEDDDpwEbbW(2eUz zUNi5XaU3|_B~Z?cfrgwT%wF092gxJS(z)5==`7&;h3YcuXIxaICfIt8hGcmVd2GqT2>Yh4yEC!bYDh5~c-| zGh|gg9V=bh*;-#B_=vN}3WjAqtMe|&dAz)+6W$JjOL318lPR5-DgD{2E~1+XnP^x) z#id1U3_-F72im11)6znBp|yYavZzLyR;c79vf>4;g(8sJ^U1+dLWV52h-byfXLIQ}$_MLYdd5naW`%x(GLR5ftikK7 zp=ESy4!*i0V5{|Ud~`d+dC4&xYH?7y1Nug<{NV#hL=Vg<|1l zLE30`$hMfNxhNK1?6-nq+J#Ao?bGB$Fnvu}cEb;9#rh&;)2$H6m7ULoB7^A`#81t# zL~Uu0J11e*t>RV8x;j4jV%8O7mNM%~24=fXewv$R%ZsOJhx`m_x>J5uHNDLbRxq*k znoaNWH`cU(zag`!gCE0O(jq^WC6MeToN8Resyhcu>b7{RSt%GUc`3dQ8#bEY!=0`4 zE=fAlB#SY$Ng)WErE*m9BBj`Zbap6HS?=njf(UB=k}cG5P0{T5rjnQAtltj!P_>PI z8EbC>G)CO#9X~F$#S58#EK@CBA}vm!>fjt@z7-5KD{O!Vg@7RrI#wwJfCoW`sW1>z z*f3%fTO1SaLyH8SYpJg86o-~z2_8JS{m89(V$aog1xq@y=LWN0G#oN;Ob3@Dcy%?l z7?$d}p#|`?Z4z)tm~}hHT6z6MFfc}2Lp=~<$R#Z0=j)JYim*lj0D`>6@fu?fp~%!B zrm*e$jZF~0u?px&)rUi&SbGt=LW5gEmL+Xa1Qo8e_n)1x`Id>z*BH64CRSGl@rZ(? 
zS;ytG=~p}Nj|i3Pw?Z(@JV7w^@?(X|?3)%lHea0U9rF40t;*(G zl^{hRCFoH9X@Us#TOA5iAf774z*os>`f#CLRVy~LaG|#9mom?^RWUWe!W(>LwRfV9 z+4&VlaKA62% zvgw5?^^juyDCqxk2gj~B<>Mr{+~wl)m93bLEqM%tg{zSu!i(_W2{YkBC2hg#t>LT< z3AeMRWr~zqAU~mYHj_&vCEpyHZN6x~IN zevB(6o~!OLOvAOi8Z;`YF#I{~U6i2IQQwiyTe8H90P*IN&T)o%kp*z*QnEn91`!Ps za++)ol1^F`g8s>}s#VwxZ>jy)2bS!=Bw+>Wo0Kg!z38=0(Y488fE_Qf^r9tB z(Z$K4Klw(e8BWoglSR*$qKE&d_A~4Uk#~eVk#hzShg_+k8XZNc*^Qv%$~2-oKp~6p zqx1)BUw~&eDv3)hq80QV%@CZML?POMGL3@s7Z_o$UH@sEX|vW#4t~hN^=P#81J<}M zVjo!#i@&}#KZCzM?bmIW#5oNy3yC(YM$D|2IU6r%`)$9BOS=EX$zwN8nZ(8#7(H

i(vjx|1{!1Mv zSgQO{R>k)<2)jlxb{;iI)Sbp6VKAn@j7t{EU?zsS$j6Te_NW2(d)QvfNzi8e&$&E7 zUWw<5f3kI-(rAkjar7n;Si1N?Fl8!dBA$v(<+IF_H(+jtjF!11z17b6SorF#MRP1R_$aEg@juBX5Fjv%k{auUsHw)rOKPk}7wazU!mMlnt0-2nsnoo--VhQ% z1=+9(WIL{4)k>?crma=l`YKim!5c}yB!EJ|8`M_fr8+TM@dg2u{e7QvW;UCMz5L(z z^ZD~(Gc#w-Idd-0dCqg5YtUyKiC=*WQKM-D<3@#)JQp0t^}MU(x=dt{SfLpS9Fs!W zUsqHu>!*^h@}$(}6hHHTXh(`FUBtS+LGc|NezSwa0K1V2bfm%a%#0>zD@XFpSzu0XVX(rAV7I|11p z2+T~l!7#BAn^mZFkGo{}O~!;x@NJE{yZNvTxv}VH>h8_m#-;BVm%fe&EGU}wH_B+d zR%O(5a@}NHyv3N>Xg+M7E(D^zUQE22XW{$zd28zX)+Tr=(S4cKInjMt zHM!CKUSsv9S=r14%DPfpOI1m4vQ0qh_AXI32Ob3!WuYXo1>s4c^7cAWTSg@l=F`?7 zbBm#6en&QbGni|8?`mMO^$$k)JE5T-J2orVdM0c1nfMM2v!eG$mN~vzA%Abn6o|q>cJJ1ZW(k z8^D!sJDNAYVH0vx%tYi4mPQ;iy(>Q5u8L3pAU^#oGW4?7!P81;PBx_-6Q8agS;SrF zU@F=93)SpX?ID>`kcKQFrGwL^v~@_47Kn&Jgbmlgzx3;Fq&9uE--`$dr*Cvh{N*rG zJ8SoImd;#wX7!1&_j9WAL5@cK4_NUyh1m~Y>Yl_sGcc&zlhJ)1_kYT?-;(sRxUX$x zUq2YzKi*h!l4QeXDv@8;@HsO6x~v^H56!S{U$l~oat+6;eOcKV-8emy3jZ$m-TJ;o z?h$NAdcKEy3FzqN7s0z}#pZDdu=N56@OTGDN}`3iY(p(J!i#*rf)AJ+_N%KDfOMn) zBv+J*3ta$FyF5>u?PKB!K%~srB0Q2-QGZ#|`Uo5yFv7NJ-6dbd+oa~60gd3})&eu} zrwqPp`d&5aK*MOGZZt04E_g2ji4?m9s2mm{BnZyEkfIIZFc`5m5k$mWkr)+@h3A?pWnpHoWeGF^@p1QHcMLHesgCL#MgCHFqZbJ{qL$BBL1`+9s;${A& znX=$yaO$7@HwW|u2sO4TDf991I3i@I@=K{k_=x$Vr9MVoq?Q7u*J#g9vd4&}P}NWL|rN+$y zlrV%;tgHDSH=ny8<@@A6)y+@O((^A^;(WGWek%+F=Nu=kl*Z}$r9mJj1{Pfi@pkmf zyCB-6$!=M>%J-mwMhN*MmE6vU6_59i(t=b&Rrd~~Hv2$*zZNL`Ay}gltbsR3MBZ%i zBE~HSxQ0J_gyKUX)D)k7#k8pilt7zWmFv-SCBB(o$2Tiai*FXe`26Q5iAOmRhr;{u zr}@7_6fT5&u@gm#J7Rp@K&?GT2iD3J{yd|g4KjedV*53!0ti(ZRB^@ckK8j>;5w-; zZIjq+A4v}t$QIk*E0kSA3`}AhDpUTbK(dZ&u}Wh#Tx;S9Djz&Tugj46?bXfFM>$Sd z(OiIAq*$_7iYa#_RI(n=lzJ<`dfQmjAheDv-9$5*aB_il8q3q>{&N@mcaj=3q%n2;{@S z`2BUsYCmPBX{gCRJxN@OKRkY@rCsXcO2!_I23fTq$H>@2#K-*i4pMH`zguptlvCfg z-#FGCUwkJ<8HLn1yD+NuTX$E&*_5l!>S}yM)q2CZB?Rmk*|GI$%2L9P#aTJb*wHdn4LoT)$(xKYmm9*`KJqLAmMAUt%= zA4QH)eE=s&Qh7i$7VlHSORsN}*uM)Ovk0l>cCM#zh4K~(+_zo!9S|x5h-7hlvK2x_ zZ9k=UV7-tyV$iB^pB@49efOnn4R!aVKVjb`v8f8x8pkDCR8FDE{KuEd@yw%E0GvL) z#q7!MUwFBh{MMbkb%%Fg`DRuA#(!G=!u0Y%OOZd$ya405foNA|cRtgizTv$0+*vF* zUe8`C*GJE}ZcS3{aq?TO*1!^YLzkML?q9~CUs#VReaD23kCmY|6&+>Z>LqJV89-G!D4w%dA`!gdu8ipXmzPMs94OH5s=H^5~>w2w?yPha$ce;%|DwJwQKl|pAM zyP+qDdrZ_6luNWy``rJNT2~z}u0q>VwZ4=4f6>+}sZ|GQOI2+BpH!Sf#b{uko~rgt zstq*h@y_?{hkpT9D77n&QA z{Y3&meX>|Bhm|l{DK=cG)Zo{1`_PMuxi|b44*rVTc?scWA9F7~>&-grTQ|E|S4dXp zpMq!Mzv;sTN2%S49H%Xr#?nt&o<`kT{>Jua8+CQ+W|UEX4z#yvjq~e0PsjnQsgal9 zh1h$t{o~^8S5{Vr|J-(?p4c3`n249sR47czQku-4OBUYvtu}E}(J8QBR;%iXbsaRz zU^(a~kd!)0z)0!7b|%U+Du0K_Z&Pk9$PdTD@oCYQAKi-ZA?yF>Yw90H{k8jTk92=B z{Cgaz(f-<2&>&}w*~XIdKKyodmVAjTb6qE40Y|^X@fAjNF@YgE zQbvOv=U@e}62JJHhoS3uDx31QQO+U}Ph&U-YlC<70*S6my-(|yjO$3SbbQvmJ7njY z(I2S3;wwdeb6o>vOQnSlSeI?r?E$8L%R3br!7K06U8PLzvPZ0uG8J7M@zD8N03oQf zWc>|mMLV?nZ)%n=a^O`rWl^If#<~uu0a8!h;xc<+)lb5ckn#67tQTs(;xQ zJ&v2qp3&*k>N=d++qW{sil9=TykHFoU-YX5jHh@t)qFCZ~5}u93hfTNl z-+B>Rc!TnBqwY1@R@?fGF9fyb7)#y~rki5lJU@qkDs_vz3BjhV$*%uRQ-*y_ zQ~oz~=6y|_xV85S$PP1WeffwjNFP4vH^5TCA-_1h zuK+DH>hY7ooN7x@7?;WXZjb)Pw}p1w7mG;aYpNa5S8cy`$T#Zl6ev-cdS)^| zF7*#Uh+jS-873B&B~z?QoDuq`RdeQD&=p?*uX4c$@df|rA*vvIi|cwrZ8KsaMkdXv z4DBYT;#TUNt2vo}B;RXO9>YL`eccg?0D48XkIH2JXN%N6$kwQ{(i;D~^z0m9I>}SJ z|K-6MnblX-?mzhi;J5y4r4M_kF^P4p>l+d^(!*5Vw2@w@KHBk=-hBI-)_vPWGSXp9 zD(Sb<&e8!5I-m03hJCT28ToZXGe*@9&G-$!H~4*L@z9LR_&v(+BYuTThGtyJuYup6 z`BgTMm){8P=|`3)JI64LLHttx2B+@SO>Y0k)YA}S=rAKM^=J(5yZ!pR5fkD=Gw$Pe z)6Zxh*HW$>{9Jkz+}?f7=Y3`?==XF_OM6!5bIlKT;}a{&b*9(|Tb#ad!-3VE1q~-V zlCiWFD5?G_ulpj=jwwXF6~%r0cF|R>ryO#mV+~zb0(= zz6M>JY!6$n5s$L_bLz0x3Q5&J{co!70Cb=7DHeUxyJu(j<0J-Z4{C~XHjOgsrt&wj 
z2n>5L)(b^BPKqsuud($e&9pA~xuPX^+?k>!wVxQZOvlnVW3^cteKU*NFQlxoS~;*O z%I*%R28>5}CO7(KA+OI!d!5yNJWplYyNBvrea*}!XRah_@t)=@VeMbNBK#pjSUy_r40%dwz9*slMSLm^SLU*IGeTIsCU&Gt{I(`8Ul<*%qRbu#zRbO`Qd zd(}vNFh_o|z*YzIPNctlH`K+ogH40`Eq3BYeJby?pByTua=2q0$^EY0F1q-H44jUf zAauvjj34ux#V^M1PJYYz{SUvF`0e92c?wxc6DU{R+Qh z`2L4nbGUw&pNo6tD#vAGb?aBp<7p&y_j@SIIE5U1d|kS~hMfdV7|1sr^#_sii@OHx z>Qi{AVzORcG)BDtD)wm-KkiQpiilX3=8AS8en%a4YAL*;pk^&=b}DirWr;2vRk$KW za8=2GJA|MI;$3+lRUB5lG&ol3LC=;T?wd4+HRO3C;M**`R1_w*P_yEtW>_DKn|B;B z7!`ZOY6RO1@e4c`PY?@dVZAZ`daN`SUV|fsi;If*taij8{8v5=oApJ~Tu%&}?XLfU z_)}8TU+fj_9wXQ=H(&f_2g{$3)8p6r&AxPw_M7e6?v;Lxvj*ni-|^QV`sHwKr@+`b zgd>exR>*)0kG*eZ%05JpJEml-b(b)(lUU>|60t&swIkfpQ>ffx6kD&v|LCx{86HMi zsU5rXVN|?=khjQ#RaE+9=+O^)aI3%ODd7_&w|c(xZN<$-F@yILRbGsO_c_C4%|bSbb-&)Cljb&_4TqxuZ2$7n+F& zWGm4AO2=ca;4d^rPC|)Q{FRQshG##|_$?J@rHdO>R)3{2@($S$93Q2;U{ZMU&&1a$ zo=cUxQe)&W$5|`wap?NIKAWTg+nPnyt-Zx=t zY%=5A!tW4s$`s3u7w%bD_>t0Cke?UiSAbo}54(3?!BFb)NRM~J2h+95l!_kqj;p(s zKG{(Zv-=}tQnnW*EDu!gLYp!92=@B6E(rK~7oHc}msvB^*IP44*djeSs1+x-#up20 z#h(*46Ok#a3$O=K7+=X3wrD=g*>i<>$MuF|%P7nR;VYy~Fh)7uCZ|_-ilzOWEz>`t*QF^==w3M59@V+hk@M9tTuuZtIhLG5Il+3 zp|uzsH1_v28?+SCz|AWrb3@`49cT=q6^=}#Lu?e@M~O?>DLfRfc!-U=Q66zshDgMN zc^r(-_|!)0l2IiX%WlHBp*e_i9DKklVPnYZ=1$`z#h-L=kAw1kM6#^d&qmQ5^xTja z@V$n>ZNGBZBEvpn?15x-!CFv9MN@-$t8f6VZD?CqAv@rEe*TdGU;DyQ z;)>ni6U|uBW2K7!-Jbli=AhL@4OWNM*^`ZiTr;Zv!Wjn_ z#^v^sJVwP>`hD)<`z*bHD`h&xB1N*F%X7rKx-oc8)hHv;$tss_a0sVI;yF76P>Ih> zfwRJ7{`T2;qnx@SuqxAJ^r9)`7cJ|A|D`Hs#g?^ZxQJ{k`_)XA8dGJ88`!7rlelFba;AH0dMpU#{K#E@fD8H}!kkrd-{T*)f zhp>=m`Soo|bb|CG)W;dt7RhS#juGW+N(mMR#0CP?4szEuz*euii?n7jq!WROGX_@#rw6V-8u1IgO5yE+`;!v~tJ1V`KK?ZVJnfr-tzw^4Sl%aQ_S#Q8 zAOLfnL|$bzDxUtYCbdOP6(uXWfXzrMLp}UB##%;8J8;&{`^JR~-2(-)hj$TSFzE29jGs zI5Y*Tm4{S4lBvfNLE7#O`cOy{Li<(f*&6a}#+Nzt_Nz+87VU_Z8Wb^G=KBJ^zbrf} zAb1JQ8X8nKM;RTUrF9f#Uqp-<2+Q1;!f3%sv}YgjKdC)+iyNA@_X zJC0_LiIEUU(9tSM5;5x1+yLHdIVCw8qBjo1qH1gOmmQsS8SL& zIVZ6YcWQ+n(wt)gv$my#(_3X@SE;@Krce`IC^JaU3zET*SGW>_kUkqdk&~Y ztj$`zC&1wdxGa^G$ILh@k9E+hR$jKVSh;9$~unCWB*Uk%7$PKmXn3DEj z2`e*@+#K+|q}E-)dW~A#b+^&Vm7`h(xk)6dB-}7S@3K;Q4yXc(M+UmEi#GllH@8RrTxGf9q z|7bz^|4T2Z0c!|Ni9R2OVD54P04lOqFs?)bWcl(r|CD6@pljI$`l%!;eCy(SjidXN z>+CCksTUbFY;{O~bH;Kx3d;$=MtzMnI5`)`SYDHD+_!#?(Uv`oj|nxe(7Syt{%6xD z@;>~oXlTc~5cW91e#&Op+u-AJ7iU7Lt5_uWi6=D@zb!Xny%%oz07dwh?PrlgfYX&5 zK2bvxN9i&JxVZ$41wETXC9Q#%FcrT;KtNM~^Sy!i19GlHB`p4cO1FL?&tj(S+Z0Yb zCVK!7f^W;s__zchfsxLTwOFpAqyuZ&cX)iP%BkoAdE< z`PRa0-HF_0V3F8B|4wM2!>`uBQun$9GK0X4k1uE&e+1Fsw5vvBnb-PV zcL#kTcMm;3oNx~{Kj+`uH$SLx$oz!y$(S}j$DF0+C(Q$C|M^+3osGKFgK79i6e*qA z(UarOjX!Crh^XlMvGKZ7!$V}7I3Tsn#A2B@<*~F!Pm8bEsOU+bjRjd&PXN0GW@8+U zWqMB3v!SLzhDl9BM{;YwX;|205$Jr2z4e{{km|Z)hU526ky~21ssE_^-L;2O@G&^X zthpZMItm*BZ#Cf^1Kzekp{!WW^e#&glsQE$RJBB&xksizt?@&x(CI?{s1^E>TBIzd zDQcy*&KcU44Vzyt(zeIkMOweT38kM5BitJ6&8a!;D~-ua<1NUt_KS~NKk!k%eco&V zo=O!C$+T_;Bix*8&e(4HUYippKx(d?F#}DE+X~U3Fs(~+9X|x3Yg5u1_BGEb3RoS> zOyAa7*|ZECLY7!FxMjST7v#b7{j4rQ2bHo_DTSyjMLA6IptPZ8p=gMe+Djf$%3-@N zf?!k(s?~S78egvdCe}gw{e;|$?VnIHN3i8nV%De;<-BD6-&j3pbjpE8bc32%y2<4_JqEg9#;ne&EfeF}=mzm~_ z{i=hCblwd6QTgk#_a>w?`rXKKAbR;WRuRc;BL+TXM!URZ_Ti zl$1+#Zn?FRtExez$E`+rt$;2j0fy~kjwX1JeT4kg|I~g(T*n!jZ3dhWe2NZ0LD|Cf zbG!K~D#Df{96LZalsUZ9%}Mc#cS<)~=w?85bM}1c;&vKJS|VrvCDPDZ)g_<0NCWTG zUA$9wu{J90lrH*p7gfoq8k}0)MRlQzce-7?Q+M&sMpYCwf7^eQbnaaFoBBupibbU( zUfb+`;!Nigl{9X;Zd|9l^VQ0w#w*sO>Xyq6RcJ%;di(8fs%rb{lf$rT@_A|Xy{PN! 
zl6SsZxl|8{uI=UORxD{$Y|AAk75_|E?9PX3>u%m^$aL3BxY$^Yk-yLpa8;kX3a;ww z<_wUE%T-1HP;*uLL$#oUT!H68j%=z5hhL{yy2<=%YGR8Tp{$Cl2cIWPsRm_-$$o47 zKp_ljBZK=`wDylC$Z1|gDtyxAdG_sJN=UB0j1EuY)Y%t(p2X=7y|z9sd8zssen-WJi>-L=XyD5M?Mgt|?K*k`?*|qKrVFD1$I~ zN=~oC;5niU34`Z|G6IM)!hRj)TqMhvZd5L53i*qkHE>V}9H=CJGXL}+%0Ob_B?Q09 z481-ee`#9&B*w&L+uP}?S`hSpCJ$&iF#z05Q#YK!D`gE`V^3SuZ(x@5S`SP#DqS0i zLo$CO%|#|-!Zr-h3x6}ur95}??B}_QX9m{X%`=EEu1XT-Ze*AgDlol*u|-y-sws7< z@jKN_ajL0w8!gqjZPrza{rMppe_w6C#-pA3f7^H!mrq9EUp8Hu>^7C5m|&)c0hk&Y zh>}F0#qcu23rvj+G~&8l!PLm`O2eyRYGk09R0U8Y!=0oysR;;lk=mhd8BVVX_^n2D ziLDd8BnqHYZ6R6Z0H+3)xk-a~QE`x&&@=k)^~Y<@)O~Vh)By=;M#D1oMaCxDm}*AD z%2YF=&D`qG11o02DJo4`Dp>9-L*$R@Au^ZgAp+>!Ap-8)Ap-Q=Ap-i`Ap!v1Ap!>7 zA<7_850Q9Ab%uzUbcaY}mj2Wb>9LWCQj@)mXWq+1iA+;wNo1NbMS=xne$?P}F#u83 z>wcr609}uaci^a`CdazBl1X8osXjG!i#Mz0Br~x{Kv>xas`IeWw?g$6+AXDmJvZ6s zJjwQvxL=u7bL2pFIlB;_{BYdf*8JUc`Jdx!TPt&a99=sPte7FvMpvv>nauCR61i=$ zdegpG)P4D%(Z2W;=T5L^seL%J?eOC;3YafA>N!p=3Ua_JNg zr`;&}ewc+w33lP<)Qk9Hb#Y7FoL=IK8*~Z3l<-qRZEW$y>f)A|kX~YwQ^Hh!cqw7Z z$K#9D#VwJaUShdZLV50;LJ2{Wlu#G9#M?z_{it$EOp|X+qr@~_LR~2F4e9I?=_Ssj z#Ii_Y_+Q`cO+KB=cm|pV@z*sn8h)+3jz3qa2I{$MxbP&GXBEeZI?uMA-)*!^$YoQj zVL!%;5HtkD#_^XSuTrn_ANKvJ|Xg-}{yQ^foLo(0yCM#bg^#a`@ zsTb&;#P%l8?MUhcz#U1wz_=3Aqcx{+kCN8Q7S|A^Co0mbBe$1Gug-2KTb#4ovBkN& zo$Pdu7M5Bn$^0v($(AX z)YDRehEe(?h{H*khit6QgP=l+(`jE1@y;-PMKjt6?}+mYYyKiq=6AfTB*j zP(h{ks^er}?vOGp9J$E24SlMqfPSp`DM+MUsbmT+Gpz^asSK}UcO##RJzXqIa?Z^d z&tO#Zr1%=ZDQERhs+O6tTX8Nh+_Ze6vKL8YPwpAU`C9Ja=N~|==QV!qC7lwysf3WH zXwh!#9-Wi zr0+bDviD4BJVX*}3#3`79SZ|EF?EOdvHZ4>(H4#|6FHvs9;`av5idtUt45sNuf)A6 zqRf?!%vdDXc!|XSU^BymFconpZV=*Ezrb$`;$vB#fZx@P@#zoY*Zxuax|4(p3J6kC z9V8SLk<32@510W0MIPPHVB~@0#9$=YP7TNs#Z^C`s2l)QBp#N8Y2uE$W`w{pVFpv7&q*)FFX|? zJkfD#20N_4xGF}`3d24Vf9&E#+NwD`+2(m8XdMWaByr=6;?y0l&_`<*bqo$#J@Exb zxaC6xnmvUf>%C<(hZm2;7d(zV#FOy_f40v>${9{B7V@`jyKr&iqXD7B)JH^R;=lh! 
zcxNZ5Kp?qzH<@UKI|OtX)r7&Knh>9UINefRD#5Yoi-B|Qkj(Q32jb7kO*r;JX3ZkJ zFTIAuN#YS{;OyWP=YO=+(em9DDA^m>yGz=Q$7pG(HFdE4gEJ2#ow4@Sd|G|16t+Si z(TsQvpXaPRl~pd9D5k1r>bkJ?g6CEHlPxqI4Iai$#6#6dR0bx^sy|zHf68%sl;c+{2MXwkgK!< zGXd2;m_LN?W#MPU_75`D($surxKrZTOR>O*HwHdO9}j}dy1TY`o#?D@OTHz@!M}!4 z6D9-pk%!YZYe&zSq&>|pn$h!ZeJPX#*}-~_)E7e%1l{!Hs*C>&phUU*5=Of^TicUv zlhq{~qqZxIFE6HqzoAJsIA?!zZ4JL}W4mRd0E6@ZF1C`AU5P&RP0tNQ+iz&bUHpE< z?;(DF@140(n2V%Xrv|h$zpjNE_<+BQxP0c^)$fOmBv}dZM+*DE^J`qR0ZU&wT!-;XZ z;rO^_uDIVG*Hn?1SXhx5=M5(&?y&zJ5D7-YFEI5?TDuvvS;+sHlyR4$(y&F#kXGs(~5E z!I$w)8XDSzD}xrMj5j6FwDS60r2x4C-`kijCw2N+w5 zh|f7ekKM!PhZ-?#wK<&T|NT7XB9Zre*9XHhE|`%$vcB24;|yM1Fe5TD8?SJ2ad_2( z5`5i8X=ucnRBoOpJuzaZ@g`(NYJswSK-m{025L77?4w87V)uixpi1h4GGN36%34xj z7%6LR5T~WJsMggtgqB1S=e@Dk1If6cA@H1#p2TQd)*f136cs#re<>;Lp+;K71syNA zlcFnz$``*?{GN%2Hmp6J*Hucja}(8WX%BT4MWw*Yy7&%V{I616)2^W|&EKoQh_3?7 zn?SCjuuc99lI+|Y*zf%Z7BvO+~F+O=}t4EAaSYl8!q(RX888YaP zkY6-TVjY>L@oclXe5|qh8Sj)+;b!~p-oK1%!1y`Jm*`ibMS-8C3e!IAZ+kIFZ8r&2 znh2Q@gi3OB1pULiBDUKUfSWmC}?dD;>66_84y32X9dJl)co*_4tFWbaf21y zAZNnx3}`;R!a0wqWWnP-r-f&9x)`7Qvf`JihRpyY=#9Wl7G&>YxcmU33E2d=X0iiZ z-aS0;V6vZ#l2z?>Jn_vmt(@uNe?!rkyAhPcE4m~+EfBdQwtu9tJ`pORY)cNXDp(a?@{hdmMR>h~TN@Ne^q+^9%C<$!idk>I+e8L8r^CllE+ zwBc*25w{Eych0k2M2Wu6nj3MA1Nw{-!Y4~d9;o?leCo=0#j2hW?V**JSAlL;jp73J zxRSNH!-grC8sR0=+z|%<$Xlglyn}hO?THOacE0LNCRWPbTCM%649HfbvnAYY7prWe zGYZ;888`(=V)YX0vRsG0$XRZr)y=s#Q ztz;H0Z|wb{1T8nM%W~U8T_|`k4DaZR4O=rqgd-zEI0LrUakk84wfIg3Wg6ZJ znysjvEe?Heta(5VvBt?zM6xaT4KD#g$ZJ1#!-1r@V;)N=u3S+RP<0e4Pvxo-SbF+| zplYxu5hxSSrYK=d<&(LPLMgB7Q?6(8Be-n zWF_jaVs?+0#uOr^*I!p&ld0TP5qur{E$~fu*`Bi3nMAGaF-2yG0mzchl?Ix`GE)x) zyVMlBC3!HpECQtsLv}vCs7rQ*TMlHSL6B)II-1#zSVZQUkP8h(!Y~rbHkNI#fD)-& z3*JKe%2*7$R3iQ5eIM1Z&Z=1N5PUT7E|C{cgzZ2gPeMy1Tgf_ir|4JI@1g~KAj?>U zdk#Kush2Ml$QNdv?bKfVZB=uUYWmeY_Zxk+I(6={KJFPx;S8hxIUSao7vsHXBk9Tf7seuRewSMr z1m&<__wyx^kj8iwu%T&NfVA8*YkZX{{w!nV$Jh9vQw# znY~gd!-HiTBfe(i=PiKOLC$cp+RKe%fY%XrD*3|O)5WQMb(OTvNoVuWfFBiDOy(EU z#-31YUtW#hOeBOg<(}M1?Yn2nq7h{Q7U$eKEQ59xVppMg=dj+XYI!Rl?&x8)O%|=^ zO0(oOGxioJrJbEK!<&&Y>vVj4o@81Jd*SbyF`HGZlWes^m!vIxXVKVQnAk3J5uI~&>1);z@cwZM)=LG}S;z|3r}V-46fJ#uhZ zTQtfwyG^;0=dip?lKl-Uk&rA&q2EO|*!1-fv0D!!+u z;&P^9eQGLBKIBv!^*!pco>#^iIQQ;81v*+_;g1)8b!O7$#GQ|?+IzuQb7m*h4Ce1t za}!$WPL0}jo9!_N)WnEAQ4I`X)cf~m3lCI0{4P2i^a79DjNO)tON|_!a4#L}4&Z5w z!Hjj|uzZSpbEMqtBikF?1RJhi6+~eK&6HU+A?vRZ>k=@{q=E|H*4a7OPxbh)pbA?r z%NZYhv&mR|n8sw;U^2hLWOH|<@C}x6;IK_|cLgCjt@glka>S@_4jBZ_ir1_RHY`|0 z)U7{rDETvn=W7~5Ys40-W@R{ikxkuUmtHU3S+FC;MYSGOK+)1uh_PUUmxiNHvqU&? 
zq93!>PU0B^B05;!C_sQS>ylkJztPQ3O*L%j@!H$v7A|5PNO>FgP4H3i-@b=9biKib zwNakB#XH-;mOOl&R52Y?w%PvKtkh!1d9rng-1E|MU(uHvWKub>;)+z(yEMFmou!Ea z@0;z@>5tnP7v6&njSXbsA^_hOm7Zog$S0Fyg3Bq?5Pvn!oq_m0)fcekb_%>G+|%X) ze2E75IRJcH3gEvX;mQls@y=;Jg?F~tFJ7pjTWWU*j&y4ud-|XOC@2N*S~I&+hh9#h zA-|v@#9W8ma{czCUIp!5o_*yc~dr_75}a6}JJ&HoE`A zKmUVZpq8p-#+uBkz&Y8uMm?@ z#N&k~=6{P5|0=x`Hkt&S>`6wO8K+ZZ0=H zc*yZgEYJhgn$1qA>?;W?eW7T-wztT#B-$&%2(Y{)I^3u!73fyM#Gw&6vxo56ME++1 z564g;2jO*;D>u?S2#e2BLWyL47ON{jg|d z>gfH2r$2xx_!6zWO3?>&DY<_ER(n5JlpM?(1y+%g&Eb}PP;&b+XI+s7QZh&AnMQ(UCdZA|F7r#-;Gk^ z+0H1PKS_^Llutn2iihI?>upL%glWA(?}OgeJjT{m3gqKQl1ranhBujCP8%bL8DYIB z5^47tl3_Q5CTYjYzUfYGYv)L}1 z2qY|(51ZD5*T^NlL|vp(wLe&Gh#SY{LSfW=8%=2`C)x)O0TH~IoyVFN2SWOV=pOux zH_J42LM6wW32O!4u_wMFoWO~N_HSPMJo(gak_40H6Dh*9mgoj76%S^xiHf zm!S1fQ8W25pq0TdCYT5|-AdJjS;0z0kxh|velP4Lqy9d!t`&*0gw&Pq*nf^lyL%O2 zmu(L;d3lNMU^XgE@}ApK{EGO&u$vI%cX52ekIndmdG=fXlc~?AOIA=Q62I{pelsKS z$DKn$A)5wZ|=)~gBljHFE>FwA# zG$UZ$9(|N+*wc}GvwiHvQ^?&ub_(sx=$X*>TqMtDJI_5QaZ{I;@z}m+m2pJhb1B(y z_#6Uiyfisan*1&Q$0@p9Ep8S zO@+VVr{4ClqXzS)eQbfSNqT-n$k8FSx2VR{sQR2tvIyKvW1KYM2)k*;L}(0BU%(=4 z#s#BbA8m+_Er2QQT{tY(TextLv<(cuQS!wv$X5BjAC!E-_zxoSv3ZgBPmiwKSUtepq=901!oXJ|j3^x?N^?uX(iOK5@S)j3- zr&S7nQAyljy>p}iEd_-C5aMA1>qb}t%9Z^m5POP`U0y{sjmRB}!ikF-mH21%<9l&V zlzkM8XW8?91NwsAs1(exxkDJx)kRap-^7oM)uJH6xyIG!2^!B~0qc?vt+UxJP{xRh z_azjw_Ya-}!Wsqvn-}2E{6JRENGzG#9i5rY_Low7vdvgM$XIRHHP^sf6K)Y~>wo6u zUSy$VCpm2EVtZN(esdGrwG!L+p~BLqkQ7)6-g(ZSe`3t+l_UnaSDfI*4r#7|Px9O!+SS&AR|v5L zNNp`)1Vp^J71tF>QEThlT2AK;O%GdRio(__63bf_P6TA$#g%FG&xYf$PAT=&9BoxB zG~@Rc&}OE1LU!APL4pmB4*0fKe=`yvuNFq_1{o&3We|x5SKx3EJ2zL@6>gM zcluUiLuCYhA?fL`~dc~{kvsHXGB!FvysV=&P{bp>ffKrB0_j4|4EfWges*Um_;bO9P zC;fo!6N>PcL9u?RUd5*)J970+p=&D9RgVz!PI>}RP;4AHtmZwS=A-z6y>z#ouQ0Yb zkNsMj8d{2!?q0qJy1>miMRHXUOPr6_JV-Io2ce5RR-o0Ou1lx}Ns?3pm`j?FFMmHB zhEP0oemL=q-O_3zE!*8g1b8dX7j8)b6FL|}G~*uRr6Rj1Oy)m983-F{Cc|hlEFF+x znN;dDS^>B;vD|@jA)roifteP}AhUj@)HnN)P!rUbDE!^D!s$!oL^7x)5=$yAu)Zad z#~V>J>Y~ReU4V8(6#O;xFcsgYvs+eEKa8cmKgG2Eg4`#5Q8}6fSs2R4VO=G1c0VYy?TKFm0E*VL z5WQpq^<7lRuh@)VE5?eaT|65nKU37nHE5h%SJbR;ZZBG?Z|*K?)Hjfv`sSgcsJ?l$Xr{hd zRaB{Oo++BbjcMf^{X<8NQEz@r)({OYRMK-ou?YdWl2o8DYOnt-O=%A`FkmF)5OYWX zbvfYH$TiBVWo?In^b$F{?qiq|`PCT;@HoV zp9T%@c}G3LcFWDTZd>W${Cnwkc{uf19^~e`FFQBK9L|ktU0EKkJ$Pm|LlQflzX9v$ zV`CV+*Ifst&bKG=ZNi$HMsOQ=nY#*NzDGQ*swYR%9&0Pyt!lqwXi1sc%`}IARqMwZ z&#fw+dBcbdYdx_ojMW|EjprUZSrS@$5bdw$@lo|&g^EfQt1@orH7-wn8ZfR*IwhN{ zg2oLnu`X}^G-O=au8Wgzw{iI%$+!1YVPWC$EPkr{P6|rFh$?#uYD7|1=y66mQ^R{inus{Cu3=I%kMcyy;Wp z%8s6FUSYUgjLh-0?YCrAPYN2RZM`MG=5W$LVpp`0t5xdQ&{GyNPV2a3RLx;VF{xLy zyKkQFIg&zKZaKVWq*2^X-YY1m3z6@#oH4L;r3Su!%I>B$mBUu={fFn(mi@U_Yl}VF z)A^HDInI^rChswMPTtq);VEyEGJL3$vRP7K{t|}wOaPHfxxS3i7eAM<&~>g(0pKuS*&Z~)1irQPAft;G5^$Y4H1r-A`agW?7PH%i?V z9L6VatS#GJjEk;PPOU))P>7|BOoQB%JFJZ@_G3qD;bkL(8fBNN9+b=h~f3A)4=Sf0GB)L8< zv1Da)5qF`}aTqE(7$*>I5MhU3pAY8Yv|&GZ4i{RQ8+|fX?+J5%BCnw-nFOIE@+Py( zPVNZAl1IgnRo>GK`koiS(S;d*w5XAh=8dl=h#m$arxwUT0?K3}^nhR3n=|`cnDWze zxPljYt4G914CH2)eW-l#DrYM8Ma58SA1Y70)O33FQ27$!1ZCH!PMRZJRnFj;!8tV* zRu54%6=e5}P?gAs?V(fz?;Q>BoHY*!ZoCAGo93`MZQGv(Ln;jF!=Gd>Yzy;6e5!?O zztD`u0i^jnX<2hU7<`AUqbd>?=5hE98s^_f_DC|$ zC=LtN;{?Tgg{15`*%gTZ2T{IZ#_ul@(v@$?Rvik$`C|{CXmYOWGVFLf)`&-<+Ad}fLgX(BE-2db1KEV9MywHF_WPH3GM@f1AMg=cFVmQS zZ=BWH5GhJ`EPz>T(Dzc%nD7!>R(Xd7hqb_Y)Ty+8!t11tIy^Y6l@BKKZ&n}B%@r&E zh{brDzVQX(`Cxp3ij@$*ft8t>@1xSdz}di^^HI)nAr0K~5h_oz*U_DSc|`I%$}IlG z*k*Ko;2D3p>jWKhB$)C*4_%!;6s^9pZbn3sHyL9Ct%FaCPm z8pP}zC_o0@s2?k{YvtOv4w1SjaE3rI;UFdPVaWBbB;KUH+Ak;MW-3O4lBB+x3gb=$EyA^)M#+@ObuIK8TYB*mvN^BiTvZKXKh9ysJ^~I 
zv=#WmXx+)LuT!0diGOk$YT5nfJu!Ae;)2d#;({(9&oig23jUEX=aj10zTCN&1PCK& ztl1uG?L^f_p~b|g3y`Ew?!@{hJ_Rv~h9{Hea^NG>>@07%-S4GQ8DelPZ#4*qGXdO0 z0DW42f%`@;puY2*wkHssPT>;lmY!KT3og!a^t&d2t%V?MloyGLN9;3xsR&Y;3K@eZ z3%Q7De*(qvc<2%4YK@;)pvC4p)dW1DE|e@dPfDbzR!?1Ge>R0QYx)x08q1(sy)7RN zioG{@MP~I8)G$le@Lz5XuZ|HO$vysZVFCPamZV zdDVsYNAbQXgORDKR~=B9{-G;*+pS~|q=fEYu6p05-#_iXe>L^qtKL7L-{0lFU(Nei zGD|hEV?fzo@lNSFr#Ij}o#_s@f*LY0laDZyOYlzJ#YjJOEPt5!CARD@B)%hwY<@aX zkPnHrBqE2Jm}!95zamC~)&D9&t#N2agdF zrDB56C!4?H&?i(4eZpRwX5IP%c0)n*lOyxdBy8q-7isK4cy~+H|qooZKHEqbb`Xz^4OjN8C zde46QWcq|=hp3#wF+xy2E)svJs8L3M4Y-nE&9nJ>Vpc9X#?^N~z&#_VYye54koVRKvlyY92(p5 zF8k_5xh-8@dOK`)1c$)as9|~bTpBeDM|EosS_i8FEg$8!eB>Q46wSx#p-?pLA%C-74vFN(63L9yuh)XKdW>M?oX?~Z+kBYyoBW@s}6W@dJnpKX%o7Of0 z&7TyJwNUX{u%zMx+)UyTPb|g=f+?+pccTu?ZxlSYB!2hlhSt^EQI3~ok z)yBk4nFJkBdJh0)rw)P0-OHN2)+MTp9dN-U+`w+erlzZ1ha0HyG;8koZ*CU^h34WD zPwY(waWSZA3St7xT+Zq1KMg)0;Stk<4}2h%I>#ldJVsSn(F_Ri5 z2fV1PDgOG9_UsF1G1@)f2&|QBUyb&EQ{6HUPZ!#iy-K@M_GGFH2|KWZrgOj5e%DY) zf==cAp&u_@m|C}D(W1^C!r2h8!(Ev(uPVo(=CwIBuN(BdN_hI=!tauKwPjv+U`7Ce zn30;-<1IC>JM5hZL!5ctAq&G^{tGp)EUEt>sDzr=#+0CA)XKDZU1^6@l`Oh{HLtUs zd6lcotFF<0o;J$7?qFVbt9jkQd{P}6v#8%PuiS67M~u+(y2Czmh0N>yMN#VM<5%{Z zT1CJp{+)95Gmsx`S~=?`y41_rqEWw(K)*HjiB|m>j$G9zfTO--snLK(^a_KY4x4!D zBlhCqpif0Ai|)MDAWHS7wM3L1mqQ%JcuNL1n857urMjr9%p0gw_8evx!Z|ROqvcHTIml6@;w$ zk04}=%;&u+2>FCMnbBRsVJOzLg6K=DudLxKgYD99>#ckplm42TbYvmy`WPB=RU<8v zeq^4W^mX?64-2EV;%wRH-#%II^IaMMSl6@Fx*ou=cGmZG%0KBPeT3_Vuru{_MN$LA zEAwvDK|B!3ljGT`vL~jmdHAmOjpHS!C?cGrkITq-f}l-ia=q#M+x$OE&-~JwLb)@Yc~ku6E%x1rcv+y*UP6=jh431l z%s+7yN^m(gTI)3Ac0H2J%>>S#vN+|CpckjeFh!V9`P{eLi0sC%A^s94 zD1V6)ywFtXNsf2%mp?^pEb9lg5F1a86S_sBmHtXN zN=Um3f_Skc^gw|aMF$D17$|u?i2*MW4c0({XZZKDWmVsbR@%>GEV-)4CAhH%_dI0X zD;roqg+V9uix4bn8(YA7)pxJ6sl6I3AvkwMf%bikQ+_uR`T!z_05d*C(y$S3DcMOo z_~g(3%BMM;u5G5xRVTEp^-EKl#`3qy*HQ&V-oe|d7JFD#tSh(5qn^}VbLt^Gb(hH< zrgOJfwY)#PD)v@pRcw>LDn7LVwPbaW-S&^F*t?`|@k9MC62135JF1S_SQUGdstMlF z7HCq0D(p2r9pn?cB~b}U!oF>i)g}aa zwwJ1V_4ZOK+fyNvzRV>j*`aMK|- zmE1(-1~dsfhMOteKo$sYN`&g<227S>VTW#)6qYmH`T^1))BQ^?>ovged?U7_-U9DbLh_nbm|^Mh*7@ynib;|UtC$@1$XI{FDo6)H=) z7W7AE3<)?p;yz=|C&KN75&aU2aodV^qwY;cIWb;yxR13aWGANVPmEgTuyQU@tep6z z+1B zx!AfiTZyHWhn-~p5hL;Rp~`XwkZgl$P9zIZz7T{mxmf320kM$G@A0~MD|Oz%XiDh3 zqx$lSl~gi+T`KRH;_pEN)vq^aPH7zhS~R*$j{BzEhXUG3u9ahmlS(5mEyu z;FJ_*^WGgI;+SU6=m=V$n^sF8_HLG0wn+8~3wcA=m$AR=aKtJpXiX?Yc5hwEUNX5j zWK3$U-ySd~IX)e6xA4#i;SHc!`_$W6iqkoJusXc<3<>Og!#t{4NEUYDX(;y#X0HFx zsQZNLA;+x?7L8CNh3JsztxZLXZmSgpp4t5rFX0``1$8?=#?6 z8Flw?pBOLPRbwuCs*}AbR%x#6=@W8$=7=I3T>ebR2w0?~d3&zXi+0l7%cMxeY73Kr zWvl*WL_B1TweF3a&@9rI+34y|E{@JWw}5T)!o4gi=o#%4v}Z6k%Ylw$zBOFdmwZva zkmSX6MU}CGc!@x?r{x+rdA7gU@MS-P!4)@Fv z1%zfb6!7O4NaA_f6V>3L$9og{FDN)&({FHUfq7?eSQzPJ!_i3%kAeTEuN$9>8fARO zr^e?Sx{G5-#&)bKc78^+SLk`lLy+t7g)6317_|a-v2Bp~-y2Lc3QgzA4nQu$9QatEIN?eXloRgLF1E`8&R?fACel+Ki&JEji)9G9R1hIh&2{QZ~pIK{6(yx3|-i2MJHB-!4jUvi=TC5fNXQ- zYmfmGI-Bk)3jh4tO$+ySC(az%axlwk6E%(9VNORP-MtX?;h{~kRO?|2np-iL3>!!+ zOPV8WOXcB2cCDh0n%J#4l|UyUJ)h8ldFoGk54#1f5$1b?4R6jdv`1_+*H;;{cjU@l zvza(0#`&=NDq&YgDbC?3@XV1?&uD*!OkF9W-$5){0q$6G@_ zm3Bh<+d1wD$*i!tE37VMHc?R+v7QxTIb!`;2xWyY{Gf%iKnsGAFrbvNl(Czp)R0BQ z|07XDz8W>;{{o+mQaOcO1xL<=TFl@&uPCan^GoG96qPeWd#Fx!h4}Oa!R|t&3Vj;7 zpW~G42xLV(^ar>&k6~o4DhRF8<<*&^J@kx}2TNns8ftbNfpl^X=yIaWwufw8>;qkF zxBl#B`m?=!>eGC#iz4F zwDwconcVvssynNc>W=BV`(v%Rv)&@GMw@mCQfeRdOBUdaHtiCm6nz~a0jjzb>bux} zEVehP;s=1)GBo>nb2USbm79GA-xX4$RKy|0B5^3Ju+Se5K?+Xg7b%)&Zy70Mxz$bq zQwZ`m*y+WFW4X{RwHt&kPgg486#W%^=4X{D^}w}aOQLM&|5Em$ee_XM({5qfWJqO* zNs$(_BUi=82M|Wq^UyGU98c35#{aM?zPKI$X;{33zwyPtrKX0(kMP%w-%!Ypqx}t> 
zCyq7ax3QhyP|!ZUTvX)S$B!AsrER<)e)kPQei42bH;li6OC`T+_|4=uFaB6mKDfA! zzqWlVZKDaa%Wa?ivPyfPf$xh4jLkHgX}6!1WOk=Q8LvWt&0^_}QOriJoFJ06xPgYM zPR1WwrrP}2axPWz#~Qi#8y@?OP89PSGyd30eG&JgJT>b}hrV>`%MN|%(ic%CurK}# z?vMUyL+s;T7Z;2~cMpqv;5s4+$IoJe_xEEHnbt_CF7$LdtA~m+`Q*3$l$q7H{ISnL z!=n)Cm8hehWG1xH&^LZ9dQcb*UH{uYqao_XFVpJFLGL>bk8Ur>1IKQAT(!#&3(gah|HLOqZ3pa)-n|t$m;a9ip4XHcOmkF zUQFh<9F##;t_k2tD$gFGH*E^c^ozwMOhacqBywJwEqHaO8PD9)nu%jmWt#CuzEmZe z>-U(EkY0 z?GJ8s>Xw=z$^XCCe3hyhReq=DpQqOhZt1i6q0L`s^Ao=glb?ZY&ZTBq*74DD8ZDKL zDKt{g*-VDopKe*G>*#@@1B2DTdwCOM4ioe+NTaLT3 z_WyJP%rFXu6O*zdf+SwGA0!F$C_x9`L0+%WLF}zB{wYeJe;xLCmfi)4Jj6df5wRmI zXcH~~r^AWVpEpKS{{T(hDH2g;!dz{-n5rETM@+)&+Y5H-dU4^kpWdd}=`Yl6XDt*m z8r|Cu%VZycl>7adoW2c3@B^R*6JjHhhtgU2*XliI?iePkz=S)8QMG2zg|V)dr5#7T z6!M&0N6EcDs28v2`Sj}Hl3G1jDfShEs=@^z!OS^D;TdRSfpgJb0|b&AO*GZmB^WMH zam$=9EMhEZuiAKI&3lZ9eLxx`lLD~r10W$8bL*>^b30YilzRayY=rpRQ(%X zP1PU&N_*Yier+(%)wu z$@1#;s*52Zm4t;eHW#T;e5|M-aMBMHMB*0~Ge3nD@eAb#njg#l#U^_}A%>Xg{g?Ht zsD(sMtyIbamZ|9OU$2n*)DRzA$d8&YD>O?kL)>xzg)UFdb}b7|YE9%lsQs+OCy_-A zHw$HP*rzrR*Z@^LE?T9?4p2C)VsnF6_o| zQZ{ve^H>mjoAH?B4J$2^nB;d@ zUTPQ$agke$GUP3|?e>_~Vag;ZY%v+GVtkkx`CW$`JYs*8(wVs3ZC6#QNdv*BSq;&j zbFOvPlZG?}3s4UNQ6P~%J2#L3AgB^~X$1-eT&&i##Ri;H-#+%^R3cD{&gWsK6^H7{ z=zbO!XxeJf`TKvrf0fjCb6I+Q;5+o1jWt6P0nG(HCu~h&2c%`9FlwMMS|I&YYN3L~ zT7(Qof|2P5BaRi7(L(2)_JK8NF3RHDQXtN@V-}h(y%qLMDF|35`q72THNT&ViF$lSv0gYlbpiPMktdjrQm#q3p`#1j78Q&ECOT#A$|4Ds> z39&S2P*-7CC#IRQy|RFnCwkU##F}|ch1FJb4B@3-el08Gxk{CpgRRh##dh-vsEeSu zaq8YzdHKWsFUtz~fVJ81!X|=1l8=`vCtCLJi)3Se5;&&?e{5ElCxK5^!{l7qHfe%1 z2+6XvE{rcbfg<3%Fr28ppVt-Q-InLlME3XLg5Y?Ck4nt@ zLRops)snDpY5g*(CS1v>nU5hq88{y)Xid?html3Qw3@q|1KJDs3_PHni}izG2)(G> z^Bn;>z!#@0^OgZ%?CVJ~(;Rd$}s&+~!dR@+Q^m%7lGJZ}~{N`+b!_9-t zIMD{**?^M;`WOv|W`R~K=a>_nu?xYI%7ld%u@>AY9%4aSHFG1@TlkY&2!Cje%q8Fc zSa_R&H5yY(@O-!=U=50g?v=+!kO!|bPOjKWvQk^dN8rh%ctiIJo3FUSYUk}oaLJaE zzDv%XrK4as*?*b0FUiikMh+kVRo?*w7bRak+LUc~4mTGc;&8L8L0D@c_?&b=x0t58 zO~&V9In4%R{M#+pt*!V-wV07C?rZi5Y0bXof7k3{r`a@c=KrJa-Q%MyuE+n)jVvVi z1WXhaBxuxVyhQO5T&=rgLw02s@Pfp8A)kT~t5$?vF6Fji6O--33bnRkYb$MSz0?+J zt$=t*07(Fmi>=}nw0`QYLB%Tpl>NQW%-)i$+Rx|v{r>Ue#gpgRdCoI)X6DS9GiS~@ zqp()^C(!no9bk>e&i@UpZ=ZdT7TSn@FD6uTBuA*`0bJ?g+TU&NX0|Dk*@o4H6eBBMR7BjLDXUWVhtx?)5;Z{d zi>^>p*$PFRO=b1*-^=5_r^SEEirHn`BEcj2Qi!Gxl&%oSF|2-@$^w8V7r^W85^D*T zsY~?S!WuYoo2)M%P``x=(VKr46~^LJIG;j>^eyBX3L*N2tg}5zDl3&%iDw1+7-Maj`M=2ScPcxLk+eeL|1zKN$I-$@?f(W^1-;N>W%s|T z-i9XKT9yBI(DJG5Qms||zeMZII9iIN!{d%|nfI2tKv5PTu+2Hr!HWF(FtYC)>Nk+&~dALDUwm$QD%AE(yJ~M2|~``LSrDg}{|kx^d$wvgkF;lXGmJ`69S) z0`tTd!;^EYOR$hE@Q3})p3l)@KBNK^jr@?o4o=J*Xt6%=i02sE8bY*UjFZ~tAnN&( z<>H%6*dB9}*S6$~xO=j*qRs4%ToRz0rfiZauTtWBDhVm4O=*=pOnoo@P`VLLjA6>S z|9Avt(u6&+SntCqERk>o^$ci&B=Ol5l6K>s7l>tKejWM6qHo9YRIp^Du2f;_#v|1u z>m+k4TQZTy8s%MwTWhn_Hp^^GW~6^q-x4L^n>vSHlKv?Y`O8kPOg=H$D@Z;Eladc^ z`Z zZZ=X<>O3XA%=%iABvLS|y&e*uk~hhE)CB z=23+7>P+`bVa46c-$;a|EBG_@itH}HpW2|Lu}GyVtd_>?pfUfhwkbXAyD($jbWr9O z%;PYoVG6m`j{AWtJ*2^g_>3iyt(%s}oGy$}F$%TY=tk9#5`fVojwdO6*+pa4_(CG1 z=DgyvO;WKaY7tkr$4aKXFVp0B-eVo6H`{Elz`0#0=wCsC#ml(~M#bv+5T{tYD^KCE zG2%dZS;Tq6g;#iN?X&xPY+H@s3mUA{SbUc}nOi*O>%n)1dn#V@u&VZkCzKk)1H6$K zAqyS*z2PhE^5pLJhTUu=*S8TZQ|_EPd?)7D#+~n2dB14%3I)GcAk-U4ynL1VG!h4o zL$;|`JKVMnGk%3yJaz?|2yD>0f^5|t{x>8kXd=p#i>kj}tS1-LoJ-PPeJlIp5$ z)o|WR-L!Owt(EpfKPCharH`1PtV3J}-c1BuHAbTq>x`8gA^N*$`1(-{v4j$)brkX* z2d~Fhe0kGV?#?n%Jx*i{UAAi!NsWP|IF7vJzlEekP#MGa1CekyumQZ= zW%l!g3tb~Oi_T^;=aE}yoDQ9x;11i5xWiWvguTO^vjYX|>^zt4)tN*~!R%*qWCfwJ zyU-VPU~c60k{f{}Dt1b5rSM_0E<}&O42PYYL|gsHm31S(E}87IeQh**(LBpU%eYAH zb@u2n_QFMNC+N8eTQ(9Ks!{ZxV_kqY!G4ap4bT71;5&;NYHUOcda8I3k>i_o`U|0< 
zY(DY7wJK}Fd2NY)b+OnC)yatp$(g%x-wMg=^yzDi)8{;b7_+=2If+mAN|?`gksl+2 z3%`&#kch#k%1|G?D2qr2c_{}WpqvVBW)CS6E2)Mg#I&+{8s4s=SSNmmj$$~0)8ZQV zV1kR-di3-JkNLvLUCP3+KH_`pi)3dSDiC!)&1PjV(tPIicxy^h}cmW6X z)bfM)Nk)^{yt*5ufJH{j0QtcVA6RLkL%<^UIC`(dO8)$g9G|Cyox9pWsr6avjJC-| z^vo79nDys2goggeC=zv|2)hbXDw6l(o5imMKUj%~R0+BgB*|EW!M?|~-#1C5Nwv|7 z9+`k3#T+I{#@}WXKwo!8oCi?ClKW6{$A0=kDXx--g+(fFyOM$<-<$qLtm``2@{@*6 zngmSV%LqwooEHh=&;QtxAMiAH&oa6OJU@wN8p)T_5IXVcc`CNX5+xFN!g&Rp>WYw;I;UzZAblg{x8V#6qnpMT~JRS)rUNSRot z*dd{bM<+=LN%%E)XzX4rl3@Xg7L@lZEGTY~l_i2QJ;;`YI@&D4v1XCTm7Ge%$wI8a z6lO{;eH+_o5lOF8NiL_H*_k(LOt$6eUsB7+$3IHn`cyn5p-;^_S$6ru1Bc2@<{&$P z_41(#Izz@SgYLTMWv$(5Tc_%K>f<83x5@SZBwIRi7nMLS4Z@N32fB$x$eA2TX2pYa z6nfW$Jt<>L2-(bY4vsfSQ`s_=0^9n&SlJj&FABk!d5h0g2+ot6!tNG&8B>%4_EIcB zu$Gm$#!$}pK{6^!P_d1wle|EzGV(V$Q31kr$pEMny;zlScmgmo?b==DW$boOcH55l zE;LpTH&)}p-~Muz#e4-z@EHCA=w_v2tG7lzcz_`E!47!G5n?)`S?6QPQF>45@ip%@ zw?&Ftx-lnZkX99mv65i%D84l=bBV4aq;%FNaZ&ZBg6O?$JSBOa`aC8{CxqQ#`R5!7_S$q}9&BYtE8v&kx-VMEvpaGG^< z966skP5XSaRrGI8vsENpN5y6{5RixPtyzwoPAB=6byM*U@)j5i+;EC4-H1h@y7+SRlN64?s zpI1HSo9AL+zQ7S9n zgX$d+tN#smC4gtF^v{T|3WCV)Jq^tYF;Xz z@LbMwr93moR#(X%45Tq6>Wnw&%qvyll2y;f+(Q%AxnrGJo@dWI%WXWjWQYcOhZf(A zxrLaL12neY9lB~77jz~DXO&nZJbz^$0#p6V2+SK<2)U0$7-O`s7^!JO4TM9w)YN>< zmmm2V3HR z&VLNEu{nI7YXt9=?{nH{>kYEM_|S{l6dM!@aoN<(f0QTmEBVb$D8^+fQj)J{en0KY zGq$NOg_`(MZodA`s$U5qsVpRgELLz6bBj7P(TJF`6XQ-A8et%E%@tz#v||~*LXj=< zH~Ek8Ol-={tv{9gP{Mt+RbuJ3BSz2I^0}Ol$#6YK?26XPK*OIUL0up7^K#?4dk1NH z+zUOf*Yrpn8-y_hFVk$fP*qy{#>eN%#P^yyQ{OA(a$&+4Y08-Q%()~x_BH6|R3OY!4gS)Z&luE!`eofCBO^y6@2itZ)#}^tc zhjVS8@BZn=USZ%Pv`L=kCtg5_?DZ=VnZpJ4Ra-(-EVm^^DIv+Y5bC=`BoO zbg4kR*4YPw)Znw3QqOuWQ|hZ3QyZVp1Y6OTDfOQjQ)5qMf_-##rqrug^Y^X&f~Pa3 zo<{uozOXwprXJqZ2bP{X^#bWBF?#YXa5qLyAp(T&MjpPV*L;_`gOOa{$$-TGFq-d? zP7ddNvc6AFq+Yi%Q|b$Sdkxr?f6D~>THl%rEnNsEKa__{c^J#XN#En)OnJDBhk>#h zRxuCagmOl^~!hJ9IsoYQE z`Z3otTz}-M;(C2lf%rlcW#{DP74<7B%C{C374^@`DatJ>$}1`wz^|gBeg*1QR5(!G zk5YdP#Z$j&um#&TGO{@1^!Id*Cp@tD1xA+H)VSOx1FAi?m*%#2=;p4M{?h}h=g8U9 z7l6+~@$r=Q8L1=VqqAq~TNzXH|4;9owyxR$gibe}f=r+8F(e>2p7~qH#DxgQH+qTtD zT;(E}39@AZmsr0jN@x5T1JF5QHiaHS3YYo#$MNZ-UFICc%-y!X3QzYR((X2!S+hBOQ#81a!ix}g1#g<-{h)~D~c48Tq@656U2-s@z zqeSqK8Zu<^MUhi$4NV@>HQ!;5E4D@Dtwdc2;SiX3JAMcU3rxWi*)@eE7p^F{coANu z`EIGk6i?Z0@?xrZo=icD*y$f9DGHGy9kJAq=-QiX&a=DqCNdNttqufKK>eO z$Rwz;uz{FZ>_vXAL61DELCYf>1(bh6jJ@nfq+bz8f0q?+XlnofyaDD=xRc$ zZRtO0+ZVU1ww=SwIvo*JT7$L{1yZ6(jOIV{!ZT%lna@K@-X?3lQg6i(_mmEgt=njx z%3HE7=U9=lGi#uXVkhIH(m`JoQRgxxVA-OEC%RU3anu+eb%;wo3tM{=8o*qLz-hx_ z{xB7Yw>a^d1slGvBgKKobun%L5tPwDL@KneVwSnfbk`%%4$JcF;J*j@4P_7f!}bsgnk) zq^Zg{#O=J8!}fyFEa44p9~n(JCD5N#E7>NKMA{*qvK$#h(>JYCJ+4ovWNlSGvd!$Y zeS{_6OAgyx+&0mNY((d5p?}Uaidx>3#)$`BFjlvy_!Ev$K<@5vE7J+9*%mP-YIP8k zz6Enao(A!R%K=Y=c|xbp)6qOheBM?^F;6(>@KnOnG3x0Uo=VkIDNiHR(+IOg3|B&q za&ez8k38Pech)AKj%kQdG?XYsEr^(7Q4O&;%V#jE)yc>By4gSjoE1 zEQ|C_5q__}X>R(4h1!?cyoW{2KM(VmJ-+Y7okT9w8l7pek}z2$lop-jqcxf1N90(Z z7P5}qG>=V@;0A1m_V_s^;O_Ji-IOTGjsV{Y_t^ZF)Ic9)0T+?soNlm?+)3dIq0;7yln)>*)OMThH9w64f`Eyth~nPaz8%3ZIQ=D zN!R%X#S2~cm~abn%uyWO+bvykAtOe*L>LVVtm)Dp`dnz?rsyAWhvNK>NoankOXbL2 zCcYPtcdO{fg5CXQ9_0!ZbCfdE2376a$HrNyXrDO$Ik?a275|y=z-MbD2Ei_09@hI( zdb>_f$$4=fvj2iZp4 zEy07Q`pym>tn?ihJb1CujK3peyUlnya-1XNU{7hIl~AsU-=auO|4rz2e#cdsoINDv zmVljEm)qVooS)gV3`7-cSD`r!H6uq@3>UPkI88TZxVizznbqHGazONk!mb=qf|)1i zNbkFOmz$_V5$ZrRyH3N^*7Sxj$0DOiHh9?$;;+%NmcNch33J1b$JGzLa%g#wA9jMD zs@m3|f^(o~au#5Pqhf1G+-%(h-PKPM8X_No)nDaS+VvQ>5pQ?> zUVdeFJs`in?)tg>%I&&aejV;=kzdxXfc!cTorN+)5{ja?>8BANM}NT68rsd$;OL(f zy_#RdU=pwq{i9>#-6GU{QU&?Y9vvx3&&QM21gMqzZm1+nbf@T1@+AK7Dm|B9Yh#Dw 
z`F+V$MyOOi$})9>MgOIffKMjv&`HP%$)sm>60%n^=`o!|yC#$F(@7}5lSxfFiII{_ zx>G0RrzPE}lTvtDqhoba3NLH4N++f8vPMtQNh!Rn(ZM99KP<8|`{SGd;N%%K=A*!^M7``~N&k_6PyhjV7-dcu zoz&JASpLTKG*>6rDXs#`E8P2Ye~j_J5{+42zkF*|>R1%(5v z&^r$rd~|WiF~<%$?)Vdi4m)vp>4=j~E(7e8s#E!UTIz#LPvxhdQE}#3XP;A5ReA1t zBhSC!!iy5JwcHc(bxI*}=#%*5F9zs3VlPS&QF}~Kv0O16h_T|2+9+k6e9#Qge0_bYsSg#%js5Nb(w_3gnzDRUm6&WngkF11TvUYrTh- zj&*%YwLNvBZm+9CRo&kHP*!LtSP*GAvRt2c;<4@UVlr(Msq-Tc<Z(N-6XWkmF;V1fB9* z8%}7WrYc5dI}SS4Q!|z8n;+VyPk~;$As39 zN>0$`kZEBbRb3-r?vp-bnQs&wojj9Us(hW*+x&(GVhA2QbmHt>m+_}gy}g$!`!27VOCMln3?3Y? zz#3~0sUJqsU7B3(mIiHL<)W-Ws$5fAmIKQ|Wx&kw;-qX0YN}d6inKwc{8+^gbEyEu z+SlBSdFp{Ehd-J6sI_qJJvnWd9H{rI#iXj!MJB@wwZN~=#Y&6;)n4(YI@qh7DkCr9 z)h{_znk|BTbKAl@Vx1xNY$_Zj%-0&`IZIczyijN_SBT#(hwmrlF z*2Zd{W(8mbry$tH3g8rIK|LtVsy)c?@pYaeOCKpYB1#WQ{-uasxJ=^TQ} zv%07Qr#XaRDc)E;Hdo4Ne}{0W-Ti0xYY0XwyM~5GKhMuR_4A^xf#J~)s-MU6c2Yym zmWI&STw^s}wOb=MkO^&{1JVtlnYr<-|0-RZmNoDy`(q^gcm^E=JOYbD{7DoiuP^HP z`VIA37cZ|9#mB#XhSyYkg#S2u)y#fIbDIdZInV?AYNjrEsqxoT8D3GRK^CXy8 zgL#)#o`jT8-l8Lw#cPs=X3iD*dHj1R`hj$WfUwuG3-yRqqgZFE!*Y|uKJ!-9 z!{WM}!-6z*cE*`~Y}tuermWdR&MUH^(3nO%DE1dvwljRSZTr>Z@Iufpl`}W5mG`}v zX)K?fz)sIU+8#@02sIPYBHDExayIUIMLNT5NnmOO_ez5Ji-(6+xy+g6t_sxrSP>X2 zUsBa{Rp^tCZJo0SjS_tT@Mnc!F&S`5R^#P(WNFiKV%^aN%Ic}QoMtx`%Ys@$I}qW* zwh8{;<_cF2aRuj9=FdV=z+<==MUp7|%UoZe`+`N4r^g)?Z6-ScV~0dGdQ|8~0gcws zQCX`Mdx*W@vfW`d7W-K<%E8Zij87B!f&TEp2sHch-y!w^D_8ZguMEt!T71JdIjqzK z4b>w$V8l`Kqaz@`o{@f0I~n$g&bKrQLPlZvxYLZ$UG;xnk|E#)B5}pMhC`*~~*S zEZwCtwn9=^?aKnCc1x?LY=lfD zu2s$5O3)KVtd^N+`ylOfL5{^#%GD5*lp3Lg{Q1Bl-hz!F<%HJeD z9aWO}bdg=G_!MoBAd%`K?;Jx@%_~B>L7`+uNLrxy#c^Qg3C$crK_FLMF2@Z*f8o6P zb45;4A>mx1MOD$es73Hd|Jikz>!1Jd=P`516U(lRD`YZuOjk9@EWxF}PEf92`Z88KMbl zGMZ%fNl{dPeUq}~|IXHA(tgW=0i z;A{0A#@q24Z;d=ezsQg>w`ToJy5aPzruND<`cqbYGPaH8pVI(R2&a%`!YwUdT~K5U zxrf%N?wwNYa!naKA#Gc961f>G$6gW|+q=jOGCYkXvXLjnt&%&<$;}ddTqrs?Z%DRZ z4cdF-FxP{mZmuV?%=88$gKufG=tD40!fL>8-K7|;h%Ze1tXQMvkJ5bv);}HQSqk%v zao$;_Z(v8ZdJa~XC(nD8kdwzhfg0h6%|BMM>+!`2XiwNaG%_9Py>s|I(_bX?sd6y-^f#=V z)m*veZQ1J@Zn>1k8wdUNz6!1@u$;)d*S^pA=ehU$#P5~ycUN;y@`vC)1``psm_=B8 zc(f8axUp^nsZ<#0nAB$LFx;{%K3KNcr0wgFJE<%LHfDRd(ML zC6%z7>vWhpvKOc%0ZIbEJw{?NkUdftQkebplr90Q*}L`OpWHLp>ee2?c^gT=Iw}Mp(mABywm(iK=^ZbF~<$bp>p$!oS`CIuAQNK z)C==yr#Ym?w$*%kz5p2B*J=#!mKs)wHqQ3-yjGLLekNoE8tUks*h`L_Pi;r!V3>QF z_`+cx;z_qJ!U;gsP`XH4LY1%c21e+u=odG<27&VAf2~ zO*#i&;F?l6#3j54s#6v#ddVs6w&qWdJ$A-{Xnb{!5i{FUYA?OD`g&YU>sd(nu5-lq z)gAF|cAuLRy}^DNPkM)a0B`jcdj-a+A~6NlBzMo*7Vo=zzTi>W%YLA&4NTR#p>g91 z9w&@{BBf0!ndL-9HK4RmxCqc&7*SAUN(zyIy=`yj)3lBTJ zOw=LF*9RGym{FMnMANgC73_!VENU4_vt;XxEy`SZwLvL?8Wv8)MH4gCA2Bxn-6kB4 zidlHx{Hbwcd!*k9S(Zw`J7*o!+6n}9;IK%onF$uVo^ zhid23uGY}f07$X1+9LOm`YYVgrBs13z1K1Ndq;1QcvTax!4pu$_%j_7&_~5ObtDTm z?alq89Cg0o<_^+Y$$Eu5CFSvsrXZc_6!g76HVb+h3EXO&;yMBTv zyim-6C=0!2C5nA7&f!tXoHF)nAt7fxILq7!OHowf-3t(^F|gbvz1meW3J zjAy}_=3N_^*v;LJirwrWyy>)kws45E0z_V7^I)U1;xlY;B5$$<;WR&Up8k{oOEAu9 zvO_41MX*`N^bPGLRuH>Do)a<*UdUczt#<|nE~=qg+GT{*pcOYS%&2rWVhWG7rn9eVyY@7N~S zbjmildRL@RKYxXHY?FITEdJWyYF6=kB0JH>ED&wXS!|{bZw4iy2s`Em=VDq=sWe)6 zJjRzH;yXRPfwKN^ao3mZZx#q9%8I zkH6a1$i?K*)yg^RGM+K-dO5Ou$`Jx#utl#B#iXdW<8Gmz9&2s^YwKi zwOf34vJXlPoS~-1Rs^!(S`ia1n;K;Wmu1{N3NYqcYhX=8wS7}LH*rwiSQY&XY#BAf zG9Xbq$O3TYg;-heHlz~|{I!)t{8-{8{8UPaXYJS&R0{{mX+vAB1HH$E(mD{%*j#lslYbRO-%OU-1x~Z+XHr>Z6&096H^oYP z56gt0R&DkpgbD91n)PDWA5s{ZjUrWh!l6gFdFf^m02*a^#o+OpgRoU_?L0_e6_;)2 zEE_u9B6r1l*G`&Z$1E#O+ff^!cxn4i(CaB5>VaR!7V#71Nr!wnbhE^o+TjgNX2 zKe05hR@4e0cF=#3RBiGG^X29@7K)Td2DgzxY>Q;!#ml>S*P<>oS?Uh`Aq3VyG zN491bVm|RwbglwZYb4s9>JWl&WbPL z2_IQfLjQ$xFJd7sIiCI}wFm+`ghJ(dD1@h4&}G{$Iu278Omat1VCA0%RZw=0u12~P 
zqc(PceDr|no;vc4R)kdJsZX7@_Jt#=Cfof(T~lgVXH8gNh(gi4pTQnI2L5i= zPFK7~rrJ02Bzt(C$6P9FLow^DxG%z!7q6g-7SWX0o4)jqbcKIn`PF&vqWQ6pHY_s` zXb)L#+BR&#@tC=187)K`=iBkts{4h@7)?{uZ$rf2y14A?3bjbSp3qXs)Pn%pN-aF` zP}OQb>nNi3a)i_z*67C43bHU&*EyaO&Lso4ju&@N6^3B8w2@1!GpCl8d{AiN{sW7! z(|cTrwdP7SeT5z>Yf~*Xwd^(zuR8fr6Fw7O>12mW=^JeiN`o6sizq^!j>6G`#7;3n zVm9iq?K8qR@c_$e6-MfgwRDq*7&C8=)ZeKLV?E}o<)nF=zx0n@Tg6jk`x*m+&!xH665YaQGW3G9aM=XYFjvgoj@cF0$QWRKL;Z| zEAv|!nyMCIG-f+olu2)d9Gb>E9#29GcQW)a!V$`HU~d;(&(ztT%MsL)=lrLVBY9}G zOE;a*rVo{oR-1c{d&-zfM{I*PT)fz6n|#TF6GyfwLmyKOTw`VJC8$`JqZ_V{HnM`( zJsW!=>f?vmBAqt!4GFa;J=NSMvAf!MGF%(r6P2m=Uh}esvZPowPWd`s4#RJ%ZNq|+irq+K+F(-bAF>`4B#fsJ zmc@p$kS%@a-)LG)nSn)i416CHNzZ?d=136TCFXtdNzM-2q&nXavsSc|((}l>=Zm;j z8OcBKa4fLsdM4|@yup@D)q}Ep7Y61XWARr8=9OA}{WevP$m%LXYD+8+hKNoQ;i-tB zBc`s-(7Caa|N6BAT#t_$f=30wC~n`%k@Ct{vQAJQ^##YxOr8^M!xAOFY~XxSSvFJ( zMcN~+@2X|PCwJ>*LuF+3OQHZ+Iq4GH79;#NjgX>0b3Ci5c4J8=sX5~>iPg>tjh|}U zGXMSPQ>2KzEPS*o8Ll41GOUk?^*fA~;b1PTwjPnpW!_?SRh-VkyvS8iAX5_ll{>#q zhOIa5lD!@Qgpm?E&Id;dpHIR|k%MQ;Q50;U?1#}#^HjXxJ28cyv3lUa6PT~Gw$F_w zz`u`B_KN#fTP05YfFhYeRZx14h|odlcOG)(?E2QA6gaqdMpiy4OpOY_!56whBUx?} zjy@N&Wx&1Oh?{LbGin8^YNmV#`^`Q zyAUh89wDScUA{LQ62t;uBJ1 z%yQw6vivu>pZ;vgvU~Ht`uqtDklY8N{Pl)&2f5q3hls;)d-O7bzq)Vf6lhPyX7??d zEn9wRa~+7fa(24f4?)c9;XIe&!0f)IO})xt62&f-Zc3GQzf0m`b-Hd@uR!9Ns8pui z-pC~+AhrIG`c($z&Y^CvAtkOinioQPj<9_%YG`A&i+Nn88dsQoUge8l_L`s4gqw zv7@@YR8l&s&y=4RJIhY-=b9Z|c^tXei3zm-G_{9Asgb$(Cdj^5Un~r;Fjg}X53aSK zFKzhjPnqG~r*&v6wFxkLL2&JIN~~Wn#cX_>{B4di7t|4x+Of_OfNOR-;-OE2D|qR6 z`S3_2fb9sQN<3tHV3j22?6h4~zF>qS>}q#3Z{fYesM%VRv#X)M)xmZYlB%3uI)7&@ zC`F|8svN}Nce;X&l6zsWQF7~NRu46QDobx9Ag|C84F!ET$s zHl9Nw!%R5|`%h&SQ!d=naiu!f$Yx=SseBWZtnaKZlCEB*+hEYzC4PFGy8W_cDn{tv zg@>E~@(9h|r)CK>>{aW8#*C)p6_742O) z96!#Ei!1y$O`E9h%vU_-9(a3-pOa^B?{J5$eZg>M`fU95&9RjYC%@q4OvINX4WXv1 z_ACs5Xcv255gIW+HD$WmU$ zu>34jBQ%fZNDa{v&y@8n*}zvV-zz6aUowQBW9zlL4W%gKQM0l%l7cf@19OWkzS_pQ zSIdy{pA?*1+;7oPTub=}Ywj4Gi+QZ2W3&|^MpjAI4URP`CEH4ZGc=m5bwWHcv&cKs zQH%_FYH;oli?1{=x6a}}UT1}pA5dQFIYALIu|Fu6b4dsGs3}8)QOHE{{BJEAI<$+R#*0UZv^LFJ;AgX z%^_%26q%S|b>#-jxvSO7c(wIs9frP19!I={1}&QpN?v4=vN^ zq}8@VRbN#@=US+9RiDRmG*pg;Duj$ql5B@bZmPuw)3mZ5Pt&rcteaM6Riv^SoQ|Br zdFTM+H*J#12vmYSGg3@^v{gDL-VZ}`Ud1Xe992^ljpwB>B{R@^qgQUl8m(k*I;#^^ zRwt%sr7AtNP@`ou{gE~nJA~hGr{Aci)Ruax?d;K3scu@6eM3j|Q_a>dfnU##5Rz21 znSn4hni)Exzsj+{!u(r1qJK{WGjv28x$o+T`i${u0nsiR*#X4Dm$Xdk&5k@d{|!@DnY zDEDIJPpvlPrYtDqRprp>L#J~;-!k1I_bSUA%lEin637qaaJL6?)ZHoDNc?umZxMq% zYF~+*uI9R)t6rinP2~+C@)YQ8IRSN_-hYI;`}&`v?sMGwK7X^^YXY19Ah)J%H_5Ga z+keY#$%nVgZP|w(%I)5_h%47ov;1pyd*EwkvW}V+GNFaMtDK=5p6sYuqYABgMp~|; zrcFj_N6p3nY`UYSGq7Sgw-*DymD`TMZ|~>!k4AOd%`K~==FP@O<+eAdZtn%7@+%UI zuI09GgSzeC&?Ub<=a$n^)7_zNUvbOtsIhEUx7_Vr>-c5mW^~jPZvR+rMcY3<%&nN) z@Q#`x+hcMYx<}ng_rzlSD&uxpM@>1mMIANDvZte_k|hT^#@HYu_$9S*PpK(4=RZ@A z=sUs)ve&D16AzRH=xm(fWwKwU`kgo-oQV{ANS~)koWWAOf}2<2tjLQ}(b!A?&hcES z>$w+_h*u@cn#Rh|Lk(va_?f!*?uG)8^*g4)ayx=j$7CP`wr)BRjF#F8U6bfM6CYDar_@wa}#s;v-tTFFMLgM1#qmLad}CBN)O8)^BFu z5x9D+Puq=N;f#r0rv0Qg>t>O9wR}M|R zhx-}a&*8FjSq6v`LvAi!3>Xf#fMa!SPh>@04TGS~`R9n^zPm z@=R3s7624Aq%HsTeh|B8?7NA==v=nJjMX03k%r*G(iwT^;9W&T%y65i8b!8Ot_txo zVFNCq0+vh(K&{v>G#)y6B%{4WER&2TLUBOKqY2d8?J`=PfpsFYHf3#W zR^fsgr*kI?7~z5v{S>HDl=0`gGZBzRWO}UR?r(WLCsuODH@^v0x?Yu7vHx)>{Sp~W`sx^ zn}qLS+*M=-lgUqYLqLP&W(FgRIG7&AZ!O7G`KMF4X|Sg$aE_!@0oSYpM=*t`tRv(aD_L*$ZJGo`n-u@v{9tN?#&EF7I6yEV7F%m zOLe^l8P|X87x((2J2=;nPJvT6{^8@W&m?r)5gLMRp?`x!BT~4uwZ7e zR24MX^%=o*2MA3(krgYc>I2iT?A1jDmV;bF0tJ?fxUS{8jq49wja(mby~uTSV}WHJ zR}oh|T>U)mzvJ4>^}WUT4RY=0c|nkQlxeiMxHvbDKgGrU@{5bD@_;v>-1NV~;(_|% zs6iLRos*N&P;mKvR 
z6j6P|t>4YXBD?9Qb6N{f$+l||D@A_xAyEQzLltAKX^Ev#H2``hmPgn0g{+MJKAtFg z)5;`)X*ztaB@^WcnP$>xXmKdo*Qg?r|Wn#@6gxT79K^)Ai_pb7lFu2 z{!N-WSYj7^FdWSk8w2payE4^XqZ*)VVkIjwLOelLg(1q!94nch5hhU`4O5p9MrOWv zRxVZCxqZN(vvH6`Kdv*lZjN6+gyk*bx|i!wE=hlmdxUFn3pB`e9oKZO#5ezwdo$m? z$CVWRc#!poIxjHo{5yoMeLGZNt%1mJXkaGL+_|*Lwt8Y>$e+C;@gDn z=9^&_e2vkKjE+BIWuq57iakB15QCcl3qzxx7~Mki{(1sbq4 zT9X;DJbI=Eyjla6M~7wxtc>PpzzPjm8T|}hScamjqOZ$~a7nfXtcq^Q3}}!3RRg|u zNL9uj{cUDISF}|FuF`<6=$y=e6QYwfV2cKv5FMWxur7L`2E0)N)r~xm~ zfc4R@u`ly*)`;8Wn1Z@#&zQSlJCS z2NMNl+{@&r7mNpICH`QW1R>mt!A}quZ{#e#QT8OV^{nV!R{#`*^~Opb$6E_y zRZcN^+y6%Nrx}X)UH7*aaRc9A57#Gu;o`9pPmlc1&5-}-jQMkT=lOTJZ5TLwJ;ZFRhJ5ik3yMzgfYtQRZ zLOsxei1;HD`|5q}?jp|XJivp4Rc#H%z=S%S2)7B8s_$)lA3Z((881ENvU*;qP8B9t z<|SwnvzsDhMc#30W(td83>5vaNcNfXa;S$`-3-eF_=*5-1P~H>{Y#k(d*$?Z0r))t ziv2Nf4pQ^xUj$ZSYT(n_ET8K(2Ki|U`GxY?ZK)UP>thuLNxEtUGCEq9eWNP7=d06t zeIBR!WkU5aw49)P8lO?7O4=t$8Vyxi9DKQe@6h1gUnp>u^lWDE6$1WS4Zc}}tE9U# zgFhnRGc@=wHMmNeoC0ThKm%|)8~V}fQmN@E?vPB*(wR)unW&`WGlTCI@FU6@u;f$? zu9CVWsc!?wIw`tcgCF=@HGoQbCNp@wfUnTtof=#vg-Pl?L{{<{|z+a0R?-<+4;{_T8%gjD7i2`QE*64E06iqP~UNxF_E zCN3umy%e!#N zHJ>U1RY@mg245rKF&1qy{x!Ht+9yeUb6+LkJ2ZIrCn|T9^ejoe8+I@6zpY_^#W#KF zdADlMB|lDY&pR{ZzW6(Gzm#tzeyN(Rq?o{TmGgONIlmJ<7ARNa2bWHf1|JE4SU4L^ zpHfYd3HfUA&85Y0$EU?VV1KlCB@ge+SRjSt%OM_UVd*ril z$>EO#i{uomUTu_DeaB3leD;9;Y$Ko1<0>hX0w-y=YMp!G1JT9t7tq>wN;j42vm4~I zvGN(JNU3^LGh4{~D4PKV50eLO#BA0WWm(&Cm3=PgY*8lcfAiQM^Q1L!RC`t0I?FsEZXKAwsYfM1Hnm1 z=A`6Tnwr2MqeP#^O8U!p2WUo!aG=zKx$QV)t{y<|`2pG`Z5=L&?343b(sJmKmw(qC zamJ5n|CW`)N1A@fU+I!LlGI2C8cmyo^)!BPs>~QoEA$UVlTENIMX-uYE&MQ=e;`?= z(OEAt70=CRmB#66a7t7OB2%x1@ycV~!CJ3|Yy$hGfKbh=szJgfC-DUnd%mps`r~&s zR2=QLIL#4?i`6v0$C~e70Q5@$42c7bB6c+I4)ZQA@vb=j?p&PSc=s9aaue@VNZ2jr zxy@hd7dUO{5^>s6B_4-EmqR8a3zV^&Pw}bI5X$)voCdLP%sZS}HY8rDB`U9U1XNj_ zwxzF5#d*#yA+@U{-gYGyaUrh9yGT~8+t@sQmHkg&7G}90Mh{Cj>1L*xI>94W^7%}< z^xYmWCeYlmk~ieVW?pz0PHq`qYr1LJ_;PpK2hynco>2i=NUOTDkQTo*D)3L*G+$?D!$&V)N+w(INR1lrisS%&0li zIaYG2AYH7f@eHBHre#75jfc*dD?c8i%0j0?y3HR_%@nJ!K(+C@eG*l5>w|w<-Bia( z-8B9^>effOAn2?5K`?1`P=j77LSJ=Khs5yy0p^cU=>w0sh=#|xZ^aOW9IN$k$<+$E zz62?>MZ>?6psE(SKyqx9!z^!!}C`b<;9X$v5L zmMVT2O%hBBMs@N$`SH9Eh!U87r%2@kD{pKq9L{4- zroR~K%UAKsYhEG!^$@ngDiCsfpu`E@ighWo18@3}OJbNvXt#tKN;RykIJRP$5i7Y& ziabl>K*%o9ZJx^>BgO2IY$Q<&Em5jZe)dVgww%xu8cD1TxGC0=7 zSRSno`?P$D*Z1pu3ODhZ18PinZdaa~F1=>9Zc$-fK#jxPcxsvQlgc19z#CR6u|YLQ ztmKtjlZ}2>qS24*)#y(EBj=q|Cz?G(fMc3HqI)P@vx@Ul=p|u2s+Su107_yw5;#rf zJq+E1A{sXev89he4Ge<*P+B}0{y#%u_9Xk`-jQ;ej|~s2PTckesz_qZMh;c2|?xx zbDBWU8ZYIOXra7|n?-~tRzlcfCmJi6@gI~qu4kF2=`yFOGM|)z8m_7A`iTrMSrs+? 
zm(X%!_Xs6?Z=>JkhZ1^P08X`PeX3Qz+8b}x)I0^vD=!EMPoNuB1_MHw8VnL@0DA4y zsIE;|yp}>poN%Wb^{2Odccb18Oj@Ik;V@vA8GZ!RRULQ8#1^iZ9{s&kRyU$MNGQ4# z@32vUbl&lEQRB!AqZ)bOe}F@DS`Qr7U!ic2xkTaMQ8+Y9w$Y*Ah{lt9dZDpjqp?h( zaTi5=7ZdwIPc&>AjdEdM!pB~a_}k%T$u|0PP)gryjqkViIPDw5XW4IUQ)uM<`Tpd7 zt77S&;3?L4XfETK_J$M`YoNkj7$cx`}n)&gP92^N3iyiSBGI}U7R&xH$2m_o*7!N0K z3O|X8z8k-_D_D#1(K7s?WmNlA0E|gX#r~vgc znpy=0_iIikT6~6JUeWFDXrpiOD(yxY)8P&m)}m;6*b}N*DVzHKQ=!A*FK6SQm)$Tb z%YQ69pXd$iv7*K$_5;V0gz96!9IXfDo8bz=tNTBzv9%-3*vm$r=bl z=%FKW*QcMyq9aK}4xvrj{HcBow5ncP0<5xQbDM};H-KYdHT8Sd@tialbD{rZm+dv9 z=_8;$=0VQk!c9T!&k6o0;@8&5?_lltwxZeCP6m6#m9%23I2wn%?{BL4gmEhS1Q6#a z6yt#fD;dkbC9_18OojL|g-ndD<}4()Er$MS8&UW%pncP2)a(|_$cxRyI0l?OXEe{D zTcQ*B(miU9)o31@$TfV`18PzaJVHwJZ2rV4hB5!DtD|NQIx0Mu7j!^HiVu)x#M5MB zGo2^UAn=5{;UJ8bn*?bO^Sa5tC#}g$-&mv=XbqTmjZvMP>>oJF0eZ{G_ZYQOPO7&c zp8II82NOJpY^*AqBR|kRcOdfOemP`W`fxo@T{Du+6&M0!*M!vXa`|mf%;09AQhryu z8V`>GX+mN&9zOkK+|QWU6V{Mb0)aX*@w=4YiE2xAVT$i-b zhUqd<!7v?gKXumAV&c9vJct|;>PJu zmhIr~GV{K;Coym3d(C&eW;c{3KL2pqPz~zRY+2mKSx47-h+e~pB^NzqF}`cw7asR95Anba?y$dzLxdlGNmjqO1T~P~HTym0 zILKHejMFLq$kV{hHu9RXk_I)(_dWLLE%#i13LBP%o*_NCkk53C}AOX#^Fz}D- z)hyW=naMDP2qBo7kKt>0TtTSPfWJHhNAWg)=o_b}W7KZ%vcPzuS7EnP*bCoL*v-1I zZIOOb*i$T(y!oQK=(t0(h*Dv>PJ>na2Yp^UT%8VAH<8{Adq0l6 z$`lzLNUv)9J!|^-4DPGjQpT){RrZP^OzlFq2Na#8- zOA-lE<}$BD$U2neKcA(T(e$KDB;kRf*Es*Q0@EXjS0nWA7?o@F4@%I#f8VOC=u$x4 zD5AaQWyC|$!IqIWX7`Xb0=G-E5c&y$gO)?!h`Dx(qZk5*N0l``OHRdDJXwA)u~xK; zI=+-OV;eF@n>$=QUIKEIkNZ*ri=@XCk%(OrT($#5b33Af-#OM?tU8=fH7eFn%@+;qB^cGw--?g12L1yFbC$6+709{Xmkj zA5bGK0_TGf`{5=CM~E5-mhIgfM%SIC*gB>o3{8ox?#;)dHZ&12dJipOG_9s&Px#VT zJEb=Q{6N;qA<8an_>?#s?(h<3bF~#BEVKv{)-l42rbo$CnD9uLaEB5=!rtd2Ul$1z z-eB=xC`>q!ejwr>DNIuFS0V7AZmk{#d05#r=j0_uKqKN`UzGQj&1L6;fj3MxiR9LpfQ7 zhBha}8yY(ROZ3l*nL&@l^MMcvr-C^>91P)%rkcmtI_Mj0W;1C8tF3aZVWnfT#Jn+c zl97KdlgK^wF7tq#4%~hu+s)M1^o*2U(J4`Y>Ldy;tPpYCgNm)3&_AAVLzb(8=wzL< z&U15?ak%i$z@zA3VcEgtVhF5z#X@_&u!c2 zGO8nPTV(d6u4f1)!OKWVr;simgvSw*MpjFA6IU^Eq(hRodCiS(IkR@F@S##+N_BEt zZMnqtsZUfc`csaajqDpL@p+yC7cHYHc}zGQ&5|u~ePG!hY7`zi!3|@&<#{}XEUtC; z>wm`~uM|1fV2B1Iznp3HVE?hZJm9Z9Hw)U zj4KSj$X<=FUo9O-wGKje#-J=g@^x1T z!J?yfh+bYW3~>8Jys0g9gba(IP3M~93N2M@($LanQW*}b z(YF=)_K3a-jfKXa*}62qLqljOvj-t(YDI5}!*ENR{0uGKp;A>8FOAO9_zRh@TbGKi z#1(3pz|DB7w)_CAuBG)VZD~0fxg_{syQ@RF)mkO1k+qes;NByyk?k&%wO?&zWDMe1 zJmwSj=KRlA+$&ac$~B^3Qi1mvUJ|%4C3>qTX}=-CWSH3wo25Vs)*xssqhYd3x;)|XP+EB^YWe%ebe#V-ug}WZQ_l>?*$$-e(QKr z_$^Mu?$CJYE_&9!c`uFlU zemoBxEugCXrd>n(nPcmd@urR@>{6YOk94d{7|i4iu9p$+*|N@>$5^YbPkYmYB891{ zqZPn+&s_@NgB-HZ3*T3GqwxJF4;tUCJSlv?zeBgD;QPx#x;+KodjwSByJq6&vB2{I zLC|PsiwHC)G3W)=QIn?mZzEri^D~C0^Yk-7_wns)(NXg@;%NYyJ&ys@-^NQMO zYFZ9MC=m&L;VN-PC!+EcV0z* zuT#xnH1|{e&B+hpvC~|m2KjGJwI)54JmkP(%}w;vs4M8{pzdjN1x;BIkCXNv_w+?@ zl3?s&e?wrMaMgY@25$-qI%^pnI!=tyWM6GjhD5E(G|2=GVju_HmrRQHD- zYUa-b+mV~i(O>oGjM`LZgf6EeqzBa(sUeu&ACrT+KY9(mYe`m}QOASsjBz}v&bUbw z0yH`uP%=!yNyIy&L_k$%*slCMCgJFW4a}=2pxZRgoSCl9oSANKT^CSQ(G6@nISA=$@rk_M5UL8T+bckg8zOQ`ZWOcmsP>BW z7UYp}Op~E`>{#}0lEiu4Rl!RqET%bV3$?$hC&m&lX{s$eJh)Cqw96aKlWGh1i>bC4 zG%(c`{RLFD#W`2NIEkj1HYqFrPUgtGqXR(cU})vRxn~ZBZ~Lzz1fR$ZYfexV78^hm zO%s$62X;f+BK$n5OV8?Zc72cP8UoT$T}3W5i3dRtF?#W2en1V^ZHTmtPfj8lpWvle zHD;s*FUh+8JZM@N&6BGCU$>^!zc8)-0;=mjK~KDPp*lN)K;-L_k}`py5*;==iHWAs zBz+>UJWms4o^PMxjY8}1(u!(6`x8%^U&PT8dF96i8ZD7mW(cT4Yx($$^1wSO+#r*< zyfWX>6G4SvauNf{NYNhxR z!;S<%|7s&UX}wf>iuv#qr~mN%f?J24t@ZXwSjeMw)8S z=XubB@$WpT27O?2iWn#LPZ8sM0aYmf&I`(l{?ifC_<)Es8e=ecshcmPjVD#!s{~d{)b9sr<+TW?s^6c-X09I!Ps+OyxZdSmN17_{`;v(&Zvs!M zyxO$#KJAyP-?0L!%DeO%$`hqT@Y0Xw^)0W9G*#XQJm~WF@}$a}lvdv2wDKMnP*vWx 
z%QM$+pek=6aJ}m{k2F=@k0ldTzv(=w@@`HmZ*W?97YV2;?}pyx^&Zcn8c2=j8~>RY z&!7FcFDm;c8*gGS}Qc~Yp{mWE18UaF}d7f^-DF1M;WLnnPa$LY67*B{_M^(Rf& zUvkp*=Sda%>ZTMu4NfccA_3L)*M+8y7iP90G9Q%27uf@VjW5D_%?DY$Q6*a>JH>Io zoEtBB+eTe-B_s8;TwPtB2R;jcF8N=s&tt*Z@Y(*gEC)U{et&T0t$}&v=+9sEof)}n z6wIsd3jFyYiP<*tBN9zS`uESFWsJKX;}`SKXZ$jnel0)DypQ;y zTjV`SZoW&B!+A?Lq*`S$$D|5tl~#FJz%~8XLIgeW-SA~~CEhW|l0`6fo!?3MqHzyi zI-Rd5-wCIe`A-UJx=4PQc|Vd2RgX-SJAZOGc^!zvsz61?00Sv#Tl?o8Z5VFOO1R-mbnB;lw9jipFPV^2=M~ zOVRlJf6ToLd{ou-_&Z5vU?7AuAi*H0qXZ3t8jaS(5Sf9DoPh}j6~zaVHX7;A7lj#! zf3-pv{+}3hg&sEofJIo$i$KaVi%$yv;-R18ExxtB-3)h3){- zj~F0(SsjOXY9(~D{~I^L}E z*0qENg%^$H8qDpm&&7+FM75hpX)BtlMy&&pXU{vcb3#5R!~4ixEt>=CFn<6es0znC zv1h67Xq7j7d}!!gW5Q@IMh>1qbl70Flrk6-`8|!!bUe@Z+@wK~)3isPk_U7?1#;8p zL)GsjjJ!$>$Al2wNeQ#Mkv*9V43r#m{R~JE4?z#EP--w5L)hRr&a9V#@a?9)nQ@X) zNoK_bEovA>l*%w<_u^CP|Cs!$@X@p9^-VCd9kcHi_!kB5P^}cx$~MXbvHDK7az3qG z^IEEvZg97P&&$45o zO{KBiY}QKI#gq+~6jHxqZdo^k)LnkLKzAi0f0;C{Cf)_4LIUNEE+}t3p2Ed}IZ0d$ z0ZP^T{>k2J5B(Qo869u2(uXnU=E54#fwuxlhMtqu&A}`Ido(fgJ9e|YOM#n0ea%cP zNJ>GS*#-6QiGq_D!4fH`MzC_+$xw?a zY68%kk#JHSxDsVH z)6S(29X8dIm9tOo$=}ISJ$Z?Q)ssJwQa$<2t0^jZ=j&8Y=14)+lV5mG?#W&(D^uW$ zD`}!z;Xd7Z;z24V|FbLQC09k~^PKF%!^3$<^r5xGnsTnh)XPB{>9`Clt#HXFKRCHB z9ptILm?W&eyh}>;<*R?B`r_*9%O9km>dWh%llyWOeMx{X4xOoPtxykYROX)1)sMOT zVIpH7NCtvnJ~p%CGf+>sD!?tFp%aPdO7axAMFNE)qKio>aOZY``&|h$W3holvy$MR zbqa9r!@xBGZgv-h+QWYZ^^yBdR8ITsGdE1m9XcmJIOGz{rnZUduz4# zCnoGrtrp8Qcy=rwLg_PamNX_@`a)?8E}d*l_}_q4DKsAGYV5$h$;O7znDNMN#wluF z9Q9x&WuFjH&m|JbSo?4BVs23x^4xI*s!({;>b_*3*fL=>?;+0R)QCK{i`PK%Tb@`p z<-u_Lk2IzR_i@Rt6VeB=^H--JE9ipkCMm4Yu&4yHPJ^K)A^M8KOjGW<k1I+8-M6I=MVukO~*_R!&{7c zxZPE(_J&QVij+=C@0br+b6uTpH&Vt1b?j(okY^t#mq5qajS)k>CE$v&(VGeV#;AmiOEnWGiVR_;gVnQ zt)pwC=8lILeoCH#;3rbCg1{i9AlSPig``&{UW^67^-@p~!2<$;2q)c^=M4x?0u{6T z2Z`m3H7uWTODuD9GvzhsJpIT^z&PMvY!y-JL>qN3(kkT;CxOBD>%1a39-J*gb0`Xl*|(c;W6?Q3VtkKC=@IqrBLv* zuBul{#25<&Q>37(dLC7uh=Nu&dkGZWtJdzs@}3D%T79pzkxZCqll1JO%l75*Fl}NGZtIzML97TNmU%lY$EJSBp-Dynh$u z$^wnUz)}$q<@B#_Y2->>9j%E)v%2u^udy?7YIR6=w$(SOnX}ov zdJjFgnH==Lw0$D^olc(W!C(PQ;l3X!)q~jIQ$4s!LfBZifAjmv9vm$^xd+E+Hi3KD zjVjb?wcai%Ywi#=1i^vPgbew}?Tk^!Y5YkHK<)dA7zZd~tfDnI=WnP0V*EZ&TJ7WV zU^s3UKorC`OLpCZl8t=d1@++rDX7nu!V2#`pzfGL*>Fi8GEW@RUgRl+IOGde{n42T zggn$$^@UwkH}l%6dhZoq#?7f8fp7w*Q2!r!b7K9!Cr{P?w0x%O{}m~P@bs?wUzhkW z7Q%0rf(rDd)USxWYcsM;Av+jJU~^uQihsz3rX6>6r+(SrQ#d6O{#mgEORRl(c^5BO z`!Qc$qTuhi^JMrxB2U5pFA^5~yGSYcpZZ&hV6wX4|D6<6@UQ<)0)9_&J2B@w<_LIC zk~OKVRdMi+WN$7@kVM0KmNe4#39$Xd>d5g$p;puUFvVxH1a9K2Bt8ZHQ@Z+_EtvB194hI3WPa3Z* zt82Guk8DGZ(v2UA{)XM(ZskF>6EM{g8k;>8Q)&ODX*MqXsQZn2vDqw?MdooH!X*Kk zIgz;h6|ffqtod`yt7BIP=Q~acjG0vAH|MQP+l6oAKfpiR4!-4|dnfU6 zcj2GRzobr8={uDD)#6p^42d%mB6*-|EEKILCO-j8ea=H7OXzgC<+khW>9#c~wT>th z^SNp~q03~?U?Ix&4FTf|uMrPK?#Du5 z2S4T7052D(uc~0&i|!vCyxMQv%kla#EiB|pGz$V7R$KPC@$o2ZHt6vddHq=Z65NS| ziU*tLvMTA(x#{H3=~Y@Ze{=|!rW>f`h^Fo%mRd%=jbU6hw)^HwpNY3%JBP};f7CXe z3-}_}l?Rt#qvr_}w`dQE?-qyOg&6AviW>!s`9z{=$0V>?ysxoM6R$}@olYrEH?wyL zF>f%B$ZVJy^?oJ_!iw-=EqF_5p*pk-dBpW>ugrhBN?KX_q1(fld;w1`l9>1gxCeJY zSWXYM0Y)J|*4bv7`NcoJgf3bV$DX)^n=dqB=6J_`lTFurvREO`ac~CwBXLn01g=Mav|nHeKTv23-bZMM zLb?1BFopa96`PWod}v|KJTmy07^VkM-(x)RJ!`~?BzNS(G@BsNtqs6+8=}%9bIMcY zaxC3?Pe0}sK1p2@Hf}657sYp*mi)#oxWLbG`;8A~G?|4L<0qNulk_Kf9|?D2qPWrl zG)-H9CxBwUa|Dc+x8tKi#%A*g_RrY*S_bAREOjC1-`AHD&mP{hzQF&Z`rPsCZJY#m zgYRq634a^-^3nPx`m4FPAZw%=A);?sh8ubD?E8Dx{8Z1Hg&Sf<*VS_?bz=VmhR1Co zQ?HO~8Z*kLb-Su@csm{$SZRam!g#i`XM>mYY%oEPCp0MSDf>J~1D`;Qc6CPCgWW9s z@f$i)G3M=uv9`smgc)jGPK)(D37MLVGo=JH|H+`GiFp~dU`OWh#It9P>VfJX9qxhZ zSsNE(z_#9?%BpLcZU4NLEmB{1I0nvRF3& 
zocw20zPbpTY~AY0M5`;;o`~g)L{~Ti@#yjFUtiFpE9+3ocB7h#uCAou7kUA|AWP;) zS#(LgG#_5MouA%w@p@Ak0viZh)=sWkH~f{$c9T}E)AY1>_PITqp47AH6UnN_`07UA z00FhRY1P6Q3V~UHQT1Genl8vX#rP6}K{ah5N5-*&_~#>gbiR$va<|U+9LGfeYd%>n zJu-F%jJ`z9p^Fv>jE}$jpcN)b%#$Rsa?}vxpfP`if7|Y|3~#rA2@J27@XxY#$h4~< zU?%{U6aSXwLL7yRp-ICjqKaIcG!`40v=o!25e~3LN~f%+)cMu#2gqvwXztGiXt=84 zmK7&NeGrqDE?1csq-agEbhA4V_ZWYjC?dv08UH}^Az9M&@M;e~jDNsT88QsK|FJA| zmfVowC;9R0k$lmOQ(en8w;S3|9QedMPmRCDuhjU`*j6RcU3TukjqZc>Mo-D2m zhQH9d4jwj^aGO%3q4-)dME;ZK_kg4o-F-Jm$}P@j?!n>q1a2etKvQqt*;*$eqfEQcf9*SPlV0tA({Oq&RboS}(p@`8Y-gWPvueh)c#6`C8mC ze8LP9H?oVDh9?lsCE~?Kamgq&@B2G08QO|6A&#m@znNWL8AOquiNiCtwQl6Y1z>^Ev<94$C@88lq76{Bf#WowE2p8#* zV!0xoDX~uZmtiNUx1ACD>%vb4g zk1_CEcY{dxv)KCA+?bL&Dn#m#v0(IzHw1~nIgUhQFbmUeWQU1`9hxG@=W>?=L&2xY zAEp#9Gv>x*z6h0w*pVPrq|JrMrZU|K!z(Oznv^?FigdHlyqPiq{5Yq@a6lm}V}N3j zyAY2jtJpu2^x{pStK=JTof8puq*SWH&{!qTP!(=cp=V?s_eoWlnWz_J0sXy&;tu5( zfd_xq`cN@zCPQW|o7B4ne2LUU^H-~;&r(GpCmKlA{w$^Nwvr--g$DViLbhr-9PC2# zv(zo-A}|YvnQxA>Z%AR-4daE}kNCHAq}FdOkRf>x_!jmGhY>dC-HePRf@!(lu*Ysr zz{~@&-vyQ6IYeL)f5UFOdH6ADZobnQQG4>>bFHod#glaHJ`cs8}{4 zX_#BMhuo5X)&7o#zv=$CHN(M3)Ztk!o89F7anpqQkmqkMlwak)a|)4yOBaT87iw^< z?QrsCk_Z()QuZ_68^98>%}(9;+^4;~p986o?n6!tK!y@pZOHFvUhQ8H`$h{$620Ms z{6XYP%r9kc8dUbrZtrbP;iGOXyj~zi)X$e47al5}zo!+%!yV4x_re`%!85}h_Mj_# zxPNG{=h9ET$_6)f6G_zGu?g01Jp1m!w5zQV7eA-=@)oZDLpNSu72bo0+gR1GC%r1X zJH0Bib<19P2*1zmWrs75#0I>wh{lZ#+*?Hj@`-`h;kK}B5;l2iKh;WG2wC9`x25}P z3C&m9YW^0zvG_t2N^gMq6kS>rI?rcpS}o}GaTTG_Xx__5r|7-Vt>|H8Rvb7-5C?js zTo;16k4I8|7e^9>dxdmn#tObATtk*PgQjG4?+a8et7KPR$9YNjyB_p~qsLAVM%=D! zrn00Zw%}Q)3M|TD=}L*?3J3ep52-H-i5&Bv%IC$i*K{i{`!!1+a{n2$4YeK) zmKSU#P*u&0blXbUqO$l%-Z!AfThSkW+6QD61A13t|T(e_kPx6uPQVm^Hs$gI>Eme?&h76;BLP21IFjL z_FA+92_-sGeYPT>VdxfS1V{4M9~{o#YqYuz=)MR`vL<$pUK1Zv1_~eH#1gIaX8y_x za9E-}@;g;5`{yndo+v%=r3z0pnjaOO$h&wUmpE6Hp|#<{zcYR+>|!#aqTC1qVkKA_ zaf6dFts02pnFxkiPnP8W4WRx>|h>7y4F23<7alB3rz@MQl&6gqfv*M3hD5&@!ATSHhC*I>?!2%@uv ztF-MB4KM5aKarsHX$mIXIwm)950O9KD(e!=KfK3fYzePV_jcB&Yo%L^o#72m(sr$s zIB+d5fX58&;k812t7Fa%<)=zxY!tEvQ14t!5_?G`uLjn-X!g~EiCR^#wXU8xbB)v= zbzGmFgx{Hjzfr+2wj3<|TJ2K|MZh>qHx@co5B$;bIL^k2bgXLqmSC?Bw9hrjfKFguZNd((v6-`43A&Rp0Dfs~VAi)7?c%M!lygV$`b%EFg0j%2$_Z!P z;o`Pdbpb0>O}u?6Jl!gM4~6~ELgG#)*?h)^2@mMFF7XDxaYd%Q*xheNR1+UtDHJT1+Y3?O!$aK%JOpsg zW>n@sriMUnr#0zCX=LgO{=Fii31c-_#%fC<@bOj&h}4sI4X1_gHf{0--lofjp+A)5 z1ec}J@5Z^=#yG-?Km@K8LP?Q34#J3xw`K zP`S3|jjD+6Ad={-Jldc?f+sAF$GHi?yE+aR5~krbn!TMZ5JYKPgL!ssz1L{1;>Pp! zrN!@qD^1cH^@h*fRgtoz#yV9*TeHdVtsvw|l}{^eEdHDbih|lI>R88mh_=snj5=L% zaeBm;L1^rQhVPY#?`*J+pDzd1QvsfTk?HKo~!hJTjz@vzqr@mnlZ z1&X(ovE9_h?UJb7vr8F*LqAHlNz~QU+z1zzEEp9qn#zhB=LoBq7_~1}lSZ$!MfG_2 z@Ky8dy7p|7KVDx}+%$Xpet)0aTnPQZuz+R#g*3$^EL~RF`66>=jTRz$BKHt5o#3-x zwd}KfnYLeko@slDe+x+8!|x{k8T&JB4w#z!*PH*P5oV`%Zw4j9rc%A_^5d}5-}JYa zwW6={o&kk6>Mi3C(`>dT;osK!#loe8EfF~r4EpUM-PiZzS`>AhK2Y(vLVl`>a@Y)Zk>x76#B5}^OLKFoBp|7X{LZQD zN37UDVqKZkh)%EY86AP>96-Ge?%L?|7Z0Ax1Lp<9zrw42t?U7-!@D8>*CW1Z(5hO9 zBV`qk@JgTYuZZs--tdR$dwhiXU^aM+{VQGI1L|4{T`gxna(%&83jfJyHTK7|qy2~< z8}U6S`d`^s66!{K2$MrwS@BhKGyR6r3hX}jD=1J3%z>Z#mZRKXj2>7UU|$jmF5%+o z`MjF%S~-x02qhZ4CD}rFQ+ho6f^JPDbj#X9Ptb&4nn;)ljolaE`#mMdkF3)hmVM(#YL{<+1+gqBLTgX%F2-Eq~T!I?T zZ>X)k#rL~YRpXCbMaaJ34XXr!(v`A5g+DqN&#vv8L>^~I>Ht7{=%9en{Se@7_g4fi zlH*= zxgz{vg)LaFsJURP!a0!dUynpzctrTC}J8?BU%R-tfo$3O)^F9`YNXdm|SS zK#9$QSrQ`RVK~mcgr_SsWNmbO86RiVuBtdHLM+#Miba7f=EjDVpCPXzWZGf_YcND! 
z9FO%C|Ldh2ED0%SBb1v%mBZ)p2X)oUg9uU^qT?VHwUIYQ&xOh>y zEjR_+mOQ1K@vfAuqe8GAeKdCo59V_Z*=($vqxJ7!lF!6?$Ys9&Lu~6iSnJ^~j7udGe$Qmv*m1v7DAT zh?<~#JDc4>J9V;&>=l_XEU7@fmE$Y6y|YADp`r~ze@(3@a8~<+L zUvkJ~yqsOXIIJXEtHhy%kdZEInmUXf z-Cl0zbHYbkpL5-WD&c)is6drkry6OcRE7w7#J{ZX=aoxHxC{Lj$eQI`KvB>0@hIe6 zmYs`N`$+=@<`9iA2m&H^!9k&6Uu2TK$`>i%)g&is5opNJm3;9a|0BIT}mrOsXHhbZQ3=|{jD_Q1k`oTS-bGM>=avFUIc|Sym zmIr$;xwYh=gu5v_$o@f_H%Rh0xh>dgZhC~e>w_HUbyFFnHPF8aT# zon_I{69SQYK3?UJR*D;_yOpz73Ar|^K|;<-a7c}qGz6cGlx-$@S*h`X9=-gWzKlNW zcz8A61q@T!1F)|kTqR|Lvu81sVXVt+@n$x8GaE<9V`*lqFLO`B2fe(RJJsJ^{LOq%{=926 zHJC2%l3w!b^e(wN&Aa3_Mt5CCS@AKTozsU6*+bJIMZy_=OjyIsWZp)m?^1-%V{-^V zb#|$U=_^hTA1Mj4FB3Smm-_*5W<#R&-O|zy?{VU_wWRaa>~^B2wj1li9qGZbqVyWU zt~lr@&3r#r^o{Vu&XQMs0kt8P4v=XQ3^3NGA!he3Im;hkC!!7B#$^Z1MK63AUzug= zy;$*}vQeK)0J}i(=ktaHBB9>wIIN-3C2r@qb@qJXZOU3SY70TDSnulPI-cV8c_XaS z(+{$<(I^aYWf0n^5d!&hoz2FV1-95=_=uZf6h)A_IOm`_YsnFo%I=LI?`^h%XZer^ zpxwyF@Q7%AAvCApe z;et**GMib}i1taM;4z`ABAluGz)85?P;bxNRp^M$P<&!lU1L#!~+>9GbF;lwFyL-SY{AGZB@rH*?D(3 z*;ZBVr+Jr*VYW1!`fO+wAkK+RvBnP@6hLfAjNf@y$+)V2s2&|NsV98ar>B7Jd1A;( z2#du;$k$5u8|z}{%et+5SJ^$b#W3*K#u3nW0^?%dPMC?6k{h#gWcm{u;Y8ALKlc!g0y-)8f1hM)=MN5 zdGw$?DNDG=yUlwCV^G~Az(cq8Y^P}N*w{pSUz4!f`-0TJZ!as=-tp{{+e_@{JjDN@ zCWq|l)+Bp~wUaB#loDQz-9K`;xi9QFO}R&l>}m*D}7VL7{==~ zDT5{ai;W$%TZ3iclIO0HE^t?5D}sYFRyG(OS-f<=OjFQlX$OaVeQW zLhguZ+2S@H8y_y2A(hRY+M}Xd$qSC7l3P-hOp!_m8LBF2N>;M9%RxEobSWzoV)djj zix4Q^($C*BRiY&RF%cz^c8?Y(SKJ8!V?zWPsPApbB>W=$)co~n*zwB#CS2l` z=Cp_R3QoJD`byQ#HVf64r`j2%+7Z2DDRE$>T0P>sqM$8OR=~r>JP^__>(9TEE5>Y9 zoL7)@KBqw}U=0Xg5#Pny3diR-U`Ko((1DF(=m6>`q>hi#_PmFr0WASai|=d^jfpnR z-%L<}CJFszgL2*WvB)^ifVf6AwD>Zl99}KIpep2)eD#(1TJXs(V4`mDz-B|@*u(GI zddta3*#y}bOg)27tr&wnwga43C-oC*C*T28|CNam!312@Xo;_V;iS2|`XCKE)QE!eT zOP4D&h?GAED&8Am^o&j1vqXUak`}Gq!ofj~tVHv2 zsKnh_d&%O)-V5Xt%6Zi$UTqCM`SI+H)>{P1&1MdWjHMTJ%0F%gBcAec4m1wYFP46- z_Ahj6qH*g%w_dkPt6k0$YQKA5KcIco_Jww9)Xs^-sj{A`9KEHA#fd7;RaJC-Dc9@vSDytA zx{VtP!+Y)F_fZH4CT4Q}MSv*XU~fwOa*o>2G1ho@HH&h*woP>DsJgG-UbicBr_oyk zo69E}R3pe7MHQ>}ZcEsZQYNlu*_6ji)s^yV<2ZWaKR8b|%FPP*yg!nfAm zb6FyuDK~X!;p=!Q3%zWUN3FMRKunyzLqJ>7WT1nvVGLv9+5e@g6&smRG$T?of_lT= zOlc?hLPu{wMfN|xw%JNvVqR#qok%RwgX+~Ep^`Kw5#eH^>3j8SOAQb(-tk9narq;6 zb#0KXh_B3yi?2w`3=7?y3SqnJZyEoOG|1n}Uj(d{@$9-$+_#K>$#b@PUMatnTH@k8 z+YO99xyFZiIxUY3es5jAm;%x9FZd(lm+8@)+MEFBw_${7p zmqalS;6A&858R;}_7T&_)eV=|un#J7ymq_Ywh$dn*565(pI}{#KL^QpXgC~gNu6*s zSlx=Fd=t%D#v482_>q*op0ZF>on%3$X9@NTSIe~?VvnXLXnSx5ymEUi-FQ!JQ%iHJ zwE>n^LVg9=u&5wfprvS?r zj(^w5;QWZ%6gW$yi6?0yT(W?qh0MDYI8^#y7C4pCcEoYJl z!Z^g`8N-C2whMew$KS*0oW5;deLj!o3vBOnbY2#|Ed15&s_viQrrt=eg8G=l7r9Ed zRFCEgF$qofIgQ`0c)e&6yM=5)G|x+QZM9nA4X0u6TjXf`9^JbhGFU~ztREuX`!mUS zXe{Vzar~aN{FhowZ@MFmx%Aj!h5g{C{6rje^6hda1@-(u(*39h8Q~t%aN<1c{2#iy zlxNH0+AR~fB4BKNhJXy~AqaJa!vIkb=&q{BMAtLv!NK0nv@~r+-(at*a9di;j{V2+ zsz~2w?CK3hEY=&hdh?#VajG|1gII6U)th6yVGXL;W3#z!&$xn>-j(%2fiSE=+KPcy z-hu&D-r?<4-b)5{{7|~8tr*~aWicsZy?1zP(Cx)IYbd#Wy|0WtF6-{_4eEo;_3Z!p z#%|-)K&&dr<5-TeWw8>e{A%+1SZ%3hhPPU^N|{jqRcoZG;ajS_ncHHgcl$ilKh?}O z>L2)wJD39^>;izogkANDP!y5Awe|PAC4aTeBXHFX2s*Gjh&fls*WgDuT*0Snt9Lu+ zNEO^*n5e?3syHZB$m>4pH5#tOYnOVxFZuecIcRG9c5h@N=W?repDmAGr#B)7qS!7b z44-5nQM)v?5JS~Ba%b)88KBX;=Sfzx_=wk7?h>)d7=BFO8ljgOPM%>SdL&biUgsn# zyHgvGZf0Jf2unM`QnCmB$qY1tii&gY03TZ^fM4=J#@7)zR#NeG;Yw z$jkwp4R(7kG6?U7b|l|_J}UVhQxWyWeJ_43McO}07TF|4tU8iqp6w&&1=+Q+WSKvm zT;^v34oR6MpCrpXtjdUjgMPGej-K3pZ8RKLU2I3ZTpt+4l8I5o7NR`w#$6EXJ>#}6 z`s1j$UeDOF_VB&i*Y5f5uC<3}?Xoq0a7WIrw5nxm{}t-9_SoC#Cqrn?8{B&F`cMo5 zpJBlZy#li1=;gAUz5g`^n`@8X=W2fco=4Nx9t&+)`_-I@wnHAP%t$#@#=u~k9yQc84XST@Sgp_H<^fA1S%r@_o>qEZLQM2i-G#A 
zHruiT+I?wNS_sAM+x1jU%(VtYhxMV4W6P;SY7i`R{YU>VO7@?75&d*Nct3<6YHm>7 zy?d9{Q(EdP_8-mf-!U@T;nHM>_o@zGWOcajDLUNz{(~dC_qZ=Trfc-<6utWny}K0C zk$YRqRQK*~tEx)Hm$CX+I5OG6bEJcUs*mVpP+JyApbzn3eTZF3LsQRgL;4K1k*!|{ zLqr`XacZ+V8{NTy41LFIPMJxET-F&_XQQ19j35LWWk`1K2YnVkTZ2Oq@~OZhc~x3r zNh>Clq0bnHhRMh?Do5yP)E3ElkyFJCt5m~Y2AA<#*y`6Nw2SWOUgC*6XU}B+KiY0f zUob<)6rwq##uTR^YB;+VKoltGC+nT6CK~smvTAV469N&llil&UUL*^2x5g zQOy?PW7SM;Rm+S4YzWHQsl?7sytpm&S(E4xrr7C$I5LbP>ba%#R#ux6T7|#fhjPw2 z`}^UNW1|Gz3Jcsl-N5Y*uc-7>@QwrC#w5HA-x}U6!5m8;Z1ly}s0`#Z+EE!6Zvoye zVkV^Lyc{H*=j)o1O(`?7y5S!)F6ZvF|&MEh=lj@`S(ez&-VlNRBn#D;J7Ub3%kZQH)i zW^d8H`apD$lS=j|{WX1eH+wHUj!wHVIJmiCfy=q}oyLZ|#9qfxl z`DwI=-yf#`5zo?i=sQ9cm(ZHQ6=!>39t|*$oaV+mM#FhR6WjBtWc^w2kfuaMpG^NS zkkPpIopsGGuKjxY8*4vVw|+9U?_7K6#(HLOuhul4wyiyM@sYVzRONW%5_qFw^0S^_2V0rnQF`vfte8&@-9>8Eo!VuRvFDnCdqu^Bdcn8y4R8xgaf&(cbYU zdoP#(A@qZ1TjP5`Nh22@n%k?)*skRr0)KX1hd$=eClI|gcNS8=3)ie1zcJsfY z(;9c$;NDG4+a_v~xnJmd15VI5nJlTKxK%NfL~1TsZ_DK%xw$ed4YKDq(@TwhwSLU% z0vT;(8R#1tg~I4v@C;yLMQ8%ORQIuTOh%#!qjv|DAs=|L5o5;0ymZ^Y5MNQ_a7(aF6*9^N%Se zQ!rai!N!I(ZASO`CzEeRYVyrs^8NGw!^t-xG5Lm8ElW(lwg1cMw-Mex1f9APXt6t# zIltQyTim?bhJ=FQIC5N4o{;h)K_J(aY*q3L2I<3sd0q_KqK>QZVC)iJovZm? zT9IQzC0qR&t+HsSWugRWcGh|pj8KUz7)>k~50-3@B>@J{jdDgqU{nLlBm;|h5q~(p zZWLqCJ2(V&&Ylr2d2zVN5;uq};ScZbk|it&t1%@(bdz{+3;)&UPKsXEN31l;t%as|llG9CVx5k)=h|(^mt*Cd@L@zwd><2)%(FthS8#YB&xJ4zq>zcHU{8Lu9&7*HgFx+ z_~O^K|2~U~{l(koVjzk;7ng}WNqu6!E}JfKH`?-<`M6apOxcVW>``Z9Lf_P%oA>Hsb-gzzE7Eup<^b7RY- zZGTu~^X;M5j`_?;0Ky3O}n^ZZZB%o@~Ex> z%hdHDGb#1b-h=<4y^b$W+1~s3Q%ZXeoT9zfZ)~q)7d{!?`fDY-_Gfx=))3&~rm+S# zMF$&@Wz)E{vCY--|wV$MI}J2o$Z}5TQdRRs|YvMJ?Hf(22#j#@-go2{vWTg>rcolF7+7 z4Ib4D2T~aA9Trm`@h~ZVH#kJD*z2k@p8d3_+2T#Hf5ret*=z-s&6YaikV9P2@0owY zEeVLuQHnm%d=eGF$%XYW+d&2O`*rQbtOe|{L!aSTBuY7qzzzxy_!f53d!6GB-lmyv zV1;k~_DnHU57{rRj}0;}vK@@`(5Ily{2TePy~?gQk9L(@>t0+knlCZ93f;*e>b(Cl zM_Q2Naln&{if1ry&EH-v&@m#>th40#G4&k0h!zT%k&eg+Ve+jI!y?DOWLq3@e8!JF z$}f)x`B~>^;BT5`*nCTqGEOoHx?<^Pa*(7~mfZ|zDst@j=Ye>rTCdFN_wE5v!?f{? 
z|4L0!xsUr~XRORbn_gAq%?VwPZ>hFo${;p#Ip|lk6<%ym`z;Pc`>od_gL0Xf!5m{^ zzW^6(^)<#hGaB$_ob9A^UhF+oq;*>@EQKG)wLK{K>I+_b@hg}4g`0DlEF3M}VV(w< z;tdkatlfp3&Li2Mjiz5=(Q75~_yIY#r8|Q5+O45}=F7)B<4ZS~V}~hsGgXlBvdZe= zlGiAVrg%rFw|Omng4x1Eqgl_8Z5ERuZRuJm9%--`X(d4?ufS9EgWHGJAKWf(Q&7Uj zjw>J+YZ9ryi8i8+TXKaB>_F4j*_amWyE=z*<|e6tX<${_v8U9UOG1NmQ4TRJ0!zVa z>=58Kn6FZj{+}l46{KYXeQZ^uhmXdCz03llByBVwd;oo+qUmdT^{6iiq6Ht30D?j=9lx^JUVj;Lo;5j(I%+i*Tu%V`^En8qEsuWA0NNRtn>5gB`XzVZu}-PkVLIfgR7!8a!;89D z0R`!+U^q7OVkT7*cz#viCeca)Q|fsi-!WNup^*+g0y)gr@KJg)8 z^Jnv18uu*Wc*8u3z3$TOM%H5&5TcShbZgJ#Ewg%Ji(>&2KG%RbuH^wmH_9i<~3GXxM`+&bQ~pa20_}2?PfC%9I)cT%-o7* zowoBp+`Q;kHK6k;0;x*e-Jyo%A4q?b|vPyhsF(|Q7=2w)FQxYo* z65hDhU_8Z;EVGh7BMC89P;eQ)x5;mEe0EWr8i)rVly0;PSPMon^|B{_pwM|A4=ows zh^$wvtn0~A!>`9jw3C&XPfVm}mPdU%kX*x2ID~T1tP9C9vNyvP*xL<-y1I{@vrCy8;O2JnWLl)`xmIy@PuX|Khd6ty8U8xa(z906 zl=(VzEayhJmpgtVDCSfT|CxFeZH`Oi*>EhCpNQk=`SPS}g&c7{ZE?Iwjx^ok*i1?$ zM2llbB4t@<-&HSTu|>XYv8Sa|qATCXYQqx|FzxPSCT9;>UnVp8un@mX^C+Z(2q%{j z$(&v#GP4N}6BBtDt@z%ke5;#Zy{EeA1kik=RrPZ#&70U$NI3|#RYo&EfT1EUD&!## zdV`C(+R%+C^W5vQsuZTFIC z+qpP4c#abDu$9gp@F6vC)iadzSB`WhJC>gUpDdoRqh-!A(qrR(Fe|h)0KdaRsSWxFEIz_t~UN zhqnRMF~A%kPn?V>%NO^#jaN2?Mo{R;K(LS%=dXu?yILHdk`!q9_lz)@$G;%s#2ZH6m`K-aWl&R~SuA+enN+^@CgV z;aN-gRF7oU@G~Y27rOACH9XEvRG2-7`w&2Z92B31d#ASg@*8*s6wD6hG<)cLnHb{s z=u-Bl1@%RaPbi}Oq`nDz0~nh8XYH4q*lj+S>k((9nosIMk+CDOLag4YSf}0C@cKw~ z*SA>@A&SgUw%@z5ol4A)ZvYEC#lvCSHjPvT1mKNoqG>Zbfd0SUc!OO^+=txB(SJV@5-M`!)lqTTSC$-6UY)@ zVCPdoYvou^h^f!@LZ9*MU%f?Nx63lkE3pOg=usa{VuTaVewmljk|&=1Taw6{S>dB; zK_@qwo2y~xVqM=3vM>h`oN)#L_5=n237|`(<7S#|Q>{v1%HP?6CJ2MCw3SGpXmxym z#JBPVi$ceJH>qeg!oQ@uv)<-IP@v!Uosld!EJET5h$Qg-o0cR1Bji8fRZ8Y{>&6gxa7}ULoO^bMaxtg3 zht4vtbJrYkJ02SnbVnb0@5tRBiQ{&5%@LdD7ik2~fztA!G(-%-ul!yl&j@^VhSU6u zl{RO;CkueI!O5JuT?CSJm>P8b%B4@eci}Ha@eOw<&tsUm{@5ocHH^tvHO@Yl&iy0t z>>=A3p?duiNxu$CNJqY zz9&%7gL3^~JCBdaBhFcj;YyoFjR!c3=8_v9@jnV@C`jd5Y;4d0;OM4@ow+74O(l_T ztu8XhuBR&P@p`eL_!pMc^82Rx?ZS2R%;CSqyee8UjGx#*u@YsVd&RSJk>D(Axc6pv zCiJE4=`6Cui zC5(vmpcvgQpbx1GM>JvhU>Ry*?0ge=?AM@KsKEL&hyyO&Eqx0#V~_Rc0}u&-Af z#2W1wz;rg@^_Y6>hlV*i&R%$ZZeHlSolWV%TfLF%T;8IA!HND#)Pk)v(C3s5wEB!G zUwgOh^>N^e1w)$H+muE9&f;~!KJi9x@z&XUV}o%gbH!Y0^0 zRP3}We!M5-jb5Yt>ur01LtQN8BvPvIU=ALMRkB2Me!~ENSb^YB(R6h2gHQwm*$TU1kAF}Ie9E_ zKeyaGE7S^#;KKp=cDz^RF~1;={tGr+&fy=z3hbK5hqJ)a{(??(GAnnfQA}tJUtL_Q zZD-&E#x1xWj`(l`N*-f;PHFM}1!r>r=_avga2>wu;Nc6x|G+@=j6u9vN}h6k*csOq3xilkf8|IR8Q6okRwybHZXs8( zMf2qdnz^x-Q)oA*(D|G~7osQ&*`#9}GcWQnzIS1DoG?jkviu+Rung=5Sune!ec>Qt ziRaS-##9)cqirB|SS}FB9+eT+T)K-};5CA97hwI_bWH%crTj7TxM5eCs8w070 z2*T{Cq?j89zF8KFc=lDeQ{Z}_iRON%+#XZYyc?KgeH0!jDpwcb`PkW({HpB$fseg7 zhTvD7oLaR-7E>s)m;#Qh9tM&*m{S%Rk8$0sX*o-D`yVmC66N2oDrU zR5t)i@j%G{5`j*D`ac5EHdElI%5MYFHZy^p?hpw$dN_t@=~g8V$>_r9T*BkJ^dpf0 z=6LoFdhxVrb280BNgDbz)R)lHB&T`4WJIP->r!?0kV_7mz0}9m(?XYzGJ+AEcI1#*w(K^hj+d7t2VYV$EmQOxw@q8%1 zEuK%*W2Bt^Cz~;*7n)}z>NW>R!nj6-0PqBiYnU+;oXSg-H6ueOgh(H=hO;HI3YqRA zxw1ObQ$B;4iHAX+Jm7ZBQKB_Njn~bOa>x?4W(7UIOZb{FFv~?PF+W+iWbdjX=eif) z?*tHzPIid&-bXCLt}wScSRi4z!hV$ynWP!3#T^m=u#XwLkf7M(`NC5DSvXJ0LjZH{5{b5g zVQNj05?*h{wap&7-tf9hla@8yYBIcey5W?bBIVLY5q~1W)WXd=r&8~C(YEPj;}kQaKEWG>dfd8E1h#9&UORsL z?%=?hv1Qlr+qY({bT4>r>@@Q$cDLc`eA_~&xtC;I2bJB0@i)m7IZS7@He1!hv^?{J zZI(Vp$*+=$u^KQc*+TN3wDM4Zb>%&u_{MNta)#ZuOy+g>|J0m%Pefo#e!YDwyRK(P zr?wbM_D`un%2j~5&)3MX$OJyKGu=h-S!gQUa7ZC~8Xb{vR>P==t%X`vnAqnEMbjH} zoO?G)*GhBUof*!MgJwgHHL@R3SNX`wk|(|tdD3S7DnXclP$*w`*p8tmZ)lYPlT$?q zubRGgkIH-^{adknlD=`Dqpl);nO|Ju_L{d3mmtk|I?IeJ{KkyJvQb}lUTv@P8jEzT zPCR!M2%-ln?AXL3jyV}^#y+ET0|r)vMp-F&r5l{`5Pp`2@PZA*;;LP4CHtiXhw{3g 
z-+0rX(X2#;c)NJ?h~@mkg*8_xT4$_EGy>Ad$v`x#7b$a9f!eV5w-6$J82bp!!mc*q z6Q^g?Q>W3d$)B;&y!%{{@cPSJ@|DTK>&BoazcIn%&v-+y25%s~h9EqiUA2MH{XU<> zTZ)Udhb~~;JH!hRZ}5Nc<2UYe`$v7!@g!-yEXp$4#-(~2Ay1y9yK@$SD0dLQ2Om^( z83*Se2DIqK=Ipbj42d~|rL?Pvf1ylC_9NLZ*jr@u_SqYYPb0K{iyp5RtYYWf79P`N z3%U$@6W@l;mij~I$4-wQZ;un5GQ%q|kfzcFrG*og=z%37 z;@yRt(Hzp2OedjYn42;`o;@dZRy)uXuN%&4dz=3xH%7D=HEX0?Z5#I(hIwr^@+x(N zyHO7EGYZ%jh8u<8`*i#ST~GJPL|VQVwle_v%`+I$kuzn7X~M{@NvYsC4P456bErIX zOrn|-X_vC@a7i2brMPm~7qKv&y>%^Qm-Q04#v!u^64t}(!&e{iQ&sj)vc-N^be zCCu;go`xo`Ud*@Ai8TrmBWo`2bb=XkD8<`zFl9Bbuu5tz2 zCDOs^(h{QenRG?mK*s}ySi7Ej8L;pW&H9wOOgjV@aimjE_)t7Jf&-QFvVp$UY~ls& z_iu4T$dG0^5$%AfTI^mGEhwm4meXCac9OVL)eu80L{?FHB8#LR=6&@Lz&~rg+*2YsdC6^?&IhR;c zEPJY(*m=f$@Uy!Ssa>VGk$rP%JU@_mGY%Oo{)|RFgS+px&mJY^z43a4zf(2fWox@_ zZ+yLvU~W${wY1VVjYho3)(Vzgve?3F)g})vslufs?yq8rPWShDiqD{7wS_q z->yZ+t0(({KF0WddIokb`#ZW$-$qO#j$es!T&6U`(X|fbNs=l}bJ$QC4|1yXQ@& zQH&o_c;O(YR`;~LFS<&reL@mOe}D7_r#W7%cOtzymXPPKye)^p%QV*!5ggNn3_-$X z8V_COFM2RftNpQjC@l!Lqx2xDLI5%oZ9BLl-e4RSU8vEbPW~BG7F1;~C1x=wuPjI0Ci4Q62IdI)uBno+vae1n5%u9gXoK?4}NQ&-)Z zoM(=pTLC1$%DFjoqmTJ#IsS+a%lE1ibnMSmbnDG#$r*GwKlRyX5J~sX<|aPt`oP8q z%M#2%ZOZY_q=ypw*bL+*`e4mfu_=-pjFS<6kiG*(ZEih7P-g65p$6y9C;NN9|@Tzd^lny34rVX8;(C7!Ocl&6lMjCSNBr|%qd{}neZlb=zX^4&!EV~ z*IveW7AIf?7h2AAW}i@DGfzXs554O|Wenz+f98ZPTwQ%!t6PZ}>ow-;;m^`zXPZAs z{=a4a+@(BA-wlb3h&w@4GLR}@@-n( z%OuVH%CN55jm-2|2G6_Y*{F7#Z>z*Lh0Z%PL^iXC!v%x_PG&Zk*@Gob z2n#Cc7|0(bbe0kE33c@$f&!YO`X%Ae>WF@j=>8&7DR{bd|4u3K;2~mVjFS>|a==C? zi9;fzQR53`#wO)~TWYK`zv2oo%~kdS_FhTPpoQXiUbdvz*Q$l(%)QjOV_6`2=FpV~ z;#In~a6>6Hdpkj}7Hh8YIg(RPG{Ysu>$D%Ly9rS7F=FZ_vNB^bnwzOh5?yi2M)VyH zGz_1cJ?Bh81Ta+#-^R+tMwq`sVzi;jD$^xE;umC0`-0SdZ9v)XK;_JS=3EX^*^J=5 zfJ`xgQ;uo<%AJbhq1e==b%%gRFvWW|0Go-T^vsgd+-XZ86@KBTzU4AU zIW!XdlJX!MFOe)Z;=XpFBqlQ`SGYE-nNFo#U~s!`OvjS#%#AOj&N9%c#G6spD4Zs= zHKO<&l9&ubWHOve@hOTXQGA|Iusx~xL{lTW&#vj!qhJpOl4w)iew5?^QSlf=DjV;N zOv)`SzN1xpCSXkz zWlPLrhzRLUe;x)W&uhdM*Sv78{LPSB+0mLuWUas~coqj0d8SLfyz(p&q)*_Rcjvak zGz(>x7>*!2W{YX&)f6!o7o(hEjAaidD}?dta#DnYlcAj{H-b?8)FoQ&5@w3|2u+q6 zPYbUvHU9pLlnht9Y{8r5l!LX1oYU38lXF zLt)3Fev&PCbpoyzWX_AZ4fCa*)?YwhywZG6#xId!Ot}=Ji&NEE0=BbLjd`-$qYpat|~L`i!Yd;PBII{3$0e36oZ| zVAX;kk37nRZSB&Dk&{4llSnPmGu?laW2vdESzG`892GU`RMe!tE+y~3o1_#RCLese zSxRX&P0OSSqpvqomJ4-P#2fq(pA)H}qPXlC?V%f#Jc6XlY+AAvjMX#?W%rm(X2DRk zpG?ShDIH!yq<8C;H`x*8CD144HlIs$8?-Do^FuDP#h56Nu|cbCkdd#v&921U4ZKK+ zxt1*d8{T*JTFg-#;<{F+1m4Pf?RB3b6+fYh56cs&_&xI}p8S=Q?Ro~b?K?!{mZ&wP zij7vn^F9P$yE#wg5D7V>9V&5|4=?5(pm(fvfIA41YD zNjv9YwY@RprHlmYupt{e)P`PGTaWB{bdizEmGC$N3vu@Hr`Y2q&&UKx?B)n>s*J@I zl0FtBt${%%k%TFLAjzqa^m{qqNbUtJB#CsQM{l!tW++7U22sByD^NL?O;RVAn$0)< z=x8F?>&8B_TAnJeu`tyUEKXHDNmZS~)C{{>rgFM4b={B@rnms+HoIV}cQ6yxSPLAA#sP1qJy-CSfksKvg18KOQ1jN>bH5Gd(C1G}qR)Hlzr$ZLzV!}v5`LrA!lF6G z1^&h>k@p|D^&q0)Q}1h{z4ER^bFtXF>OB4y&GdxwN{b4D!%K@YLN4qNw2-FP%&!n# zVX%K`QD(4DXK%Mw*U2ce^Q+Qov17Lt-RIP5KOmtae6k@AJTDva6J*dH8}XzJD&onX zqy|g=*Xn+4RqWPkR{YRrCccFMN6SQdL>!XpKsTZ zQtoex5+DlJJ1H;rJ23|&&LSZHTyAx45yDe4rx?u0-ItUsx;wW#1Ufxh?H^QM3qs!q z;~BFnYv#Mp1>tjV=5RMZP&rExY?=8cOH;sTFUxo<=+RzIgi|p#!JhEoXx03lv|TB{x--=(o;<3hGq=4KE{urM}m-#LJ0~7m>`H%Yke@=a`<- zh+i)n*DvTp!{5)J(a!E@<#W(vWJvk%C%5HiiUN?am?NY1J z*B_DiCjMOMs+cgO&fdhABAWF&3ZRo?mfXl~(CA0=zLcSa!zD08)(FXJ@xpNA72_0j zq^nZkQQ1fIhqZhtEJ3L zA_d?bM} z_5fQuw`${l+Gz23$Ryqj>Eu+`tA!5;9>NwDZNYwjO*3zn+jo&@3IkEUyVUUJ7*p&< zpLq6_&x%b7t@PwvQ?A0 z9qT#Kn-$a}IDY)hfjCp&;swLh?QRwW;M~wzx)e#dJgs!e6nFzuOx$_S@0vP2Hf+hY z$Q$t>F_uf=^k9DpllpN4-?+arz^Ae$yCM)4$PCe`T7V2q5x3@7Q6F<9L#p>2Y=}<29e=i{3dg_j`08 zFsc>X4Bb*ZXEaKCQUz6qZk2DaiNb7Ev6hp+6aBV)5$%^Q@ci30dX_Cr@QHwd-P3k; 
z%IB4ZjzKHx6Po3f17Pn;ksLz9(W4Vr3Q|Zak_b78J&V7{n-}WUk|8859~G;QLYH9= zM)B5QW=(@SHO9@(?GN9dZwn1E7qJRL`SOWci>$AeE*90Kiix0PZ*_+2H4fnfZC;1o z!Pt(P!fAT=JW^c^fkE+zXX>_aqq%zo&YDkT%3`0jl09qvM~OXaZSHQGZ9T z*dz11TK-g&@<%t%c~58?{-UIn4d#X1bB~-KovX{ukRaqLTkv#q4Sdtm_8IB)+PEsb zPn3PnB(hQV{hFjvU2Xn^-^7G4Bc$VyMR*-6(@VLF0R`na7Os)U)vC=j(1=DSe6#2} zXYeY$k|ivu0%ZAH_4I*wYHfkyG5k`>K+DiC!2%O?#d?#dmlmSQ7a|Z~sK#clD+jiF zJ#3fV9x3>eocQMZjJ3!gQA-1B?qdC1*7c1g1c(42f`$&~71EYlAqv%n9b-Uypcp|{ zdsr-gWUq_3K&|Fmpi0&Q9vL;wHHUTgC8hV3m>Bt3kJaWbR}iaDHkeQIf!v}p>X7vr zP|^dSlmSzFdTy3c?mr(YyV^ zhtorSscnD2u)AW;Dz79n8WUfxk}pkr9~oubKg`eGL2)(|M3u^|ov55|XNzrdYl|2< ze#+j|tc1lTEE1-XR2B)3D!P>|>zhsJCh`)Y_G+4i$a}EY`_Y6hleq)W3~*~s4|TrV z+OMiJ91+cZmws6AY0(ARe~Yq;2>R#?tNWRk0;lK@^^8L*k0G0>6p2H4QEH6IVfk2i zvtGG25FNc(ue`!cBVV?kddB_)daCoy?Ym@o6$zS)qMjj6yZINiQ%v6+F>9LXjc?|5 zGP}s6GpTRUidcsCC6*ZC(!|$#i!ah1y<7SYrOfg!ZBA{P)O91=Hu7Uu`PA70 zwA?vr94;EUu$+_FN;Z1sz`>zkcmY0O!J#Uv#LChuZ_Tlr`Rajt#xas0k{epHE8!OA zFIXm7_Rfi~Wx5V3zIef3IS5tUh7p5;Q6p#PC`6=44y>+c23G|Chqcfj z89>Fun!poLYzRvc-_5#<6)k749M1R%Glcz(6{i(*z)5acf!x^A?fl0r>o7N5`Y!Dx zq^jD^OBiDj+M~1R?D3+ah2wojrOR9VHQuAfes0e679R`s;ylRva+=&Q;^5xO&>6x8 zDnysR1risVhX;ErQi*YcEz`hn*jaUhx!fKO>iD3i$}f?TZOR`^D%}-H*VyDsB-7Ld3wwO+0= zxez5@x;6kF<1fC}Id_uGT!d__qs`Nm;s}uszp0`L&Uqik2voO2bM>~07nM8b5+E~G z+++(Rk9m0B7fq%(4xbs~x(MhVp&uRC@$Z!VRo8!A@c^v6Z+=$OTex>lDNN7hZ)uQK zy2HUtR-c|w@kV)ODLnRP;mO&@e}~aMh6=;)&{R&;p`qj%dc-(dvHIrpP-0?84<6NqmHhY`0AV5oLSrDj2 z-P)5FXI4-ub|zbji>3GmDw4`t6n z1Kggo32oCJzcMNE8@2IeH@Ixe{AgiSAZj`Nu9H1FL9s0Dyb*%0KXI^Hiiyo*Cf;&d zT?0j8ca<6cIM{ka7WNkG|E)lx@$4y23zrm2jnKk)_O{<5pC7t0bYW?9d~Z=jM6;KD z1v0XGD`X4?OX_R+5h`;uK< zK^xM=2eQ43W`D?!X$Y7^$^bs{50go!b~SAalmEJO9L}<2ABQ5T@8XJynx4L3;^);`YKwqFJX) z#kAn)MS}H)ChXD#GKkHbETF2xOzO_$5HX(pvqbe*{61Md3_$$JtTwEvZW%KX61P&GN_8Y$cvGx5c z$&wZ%J=;5>))_s^$`?J$%fS#D9wcs-Av6l+h4KaL(gUPZRN8_x0cQV`Dm+2I@#!m; zenSGXN?cYeChNR6s5R=?eE_s~({CtvkdY{wlpU0E%zHyA#-fMi@(3;526_m3bAVCy z{PIBD_{{qnK2WxXPYKgcAj~73t(v2+`Qr8>tF#cc~Z5Q5TWj>WH?{TJ_xXhp*9NGi6%N|&=FDNl|e2I7GVkrMn#wy zsDxoOT59n|M~f|5wNq;q(b|ciMC&D5>KCsam6kK2HY(MqXnxPL_u4b(oCL7H@B6-g zyr0e|dG=a+@3r>YYhTVj=j?ONRfm9Co}UK>@7}@3d^*#`dvt9ZUU%HHSZQ=-_;Tu4 z*HIt0cx$2`=kfJCtzYt;NOK=o$!CGAY5mHBr$=TC{Z{v}9q==}5It?!;jGH?DF@zzVfN3vvo zAl`aV+1GvJ4#ZM<{wQQA0`pE0n8V|(fBaxa-?)cA9O{|BPrTLp&BuM?O25HZa)&7g zVr6o(eijf@1QsC!Yy;DS1E~6EC>THB9IUiCiBzl8I}S^9d;PI9E-V5(eeqc@ysUca z@!0OlQyjhHg5Tx|>oX{oshG1?y!_>xcw3iJTe+vW?=U}Rt-iyKQ#eiG422OKqWTWI zP+`5o*($z8;ngZ;lA=#lSfMaq;Rh=9MTIGam#g^IiXN_LpQ2yCo286aF?%a)PiJ+F5i86rhE#qK+#?#}BoRH4Ys-c3~|MFUsr133f5IOY>{Y@$z?fpg_E+ z@dG5?fO9q+T?55@*6;)z&XI2~^zMH3e~!LgMMdfqnB43x!??p!s&hm zI;;l|;m1CSWpJr&HWo9*HIEG`#4%7JFm5|r*U#yrL@2Dm*d0$V8IQs|H1ykL+fLM z@QrA>pMuIfkIj5hoSZieL3FU~{XD$*S1&Q}K5SWKG{V39!VZr1Pv4FsJm+kyWKSH} z_~9qKYqX@VycP-kBwp!?PR!gqa&TQ~_XEy6TkvT;2#0vlGYWIZjQdno~3k-vfpsD%n;H@mh-WBm~Dv^=#TKUi86< zIZqi{P_vN!4jz1f?}k7UM{|6wOI4WwCw1Qn4M@jJ$@-dx_LcX5M=se-{Lu~!PP2B+ zo{zMhh%Kc=FEnTcv&SVtK zM;)?f6jn0}rUTS&KJ`8J`zu3S(&5t%IO^jx;K9Gj6|p4=9vyJDwrq3T)^F4`v5%wg zB;2F4>3}_V}t%ui!h|E9tD}5S|ACg|{x0E;)e4VNMKTBTZlV zQ;23=oAJ*kMNwc~`wWjl(a_d24;l;`(pFSmI2N~X1ek=^< z?NVxptpDvCI~bTLlN@SSoFZ*h`Z!@u_n<)_2@%BO%r*WqiY@D5%q_0uY; zbny?UZe!+cNV3w^ZCl@>6HrmqL79JU4cg$;8ufA)Du-Er(Vc3sR)26`d45j)LrQ(C zsK>1%&$eGeaV31PZSclZ%Mjs#Gin~I08z8}i>3+6VyzV8_gJm8vakGsTa3-6i!Y#y zX?K-MZn`$Z_jl4_f7{lr!S+*&X?**s6__)}sA^(-Ao~~40au>PlcG5LD=&mSpQPv= zl|Ae;_=p4oUI^n?T5Y0FYSF=Kk7FHD)`i%6`Mfnr;h749MEtVHr2szhh$|$cpdGg+ zaUUht%~=>Xbe_`#)Q1_!QyV{RFy0pg^)ezkQKf#aD$8kA9jjz6eGP+@bL!ccM6&Ax z-j>5_Rq)RA`Petpo#{peGK>D8jX^G$pdNVcM!9%hCUW5-cm`_`PbQDK@G74pEUp8J 
zPVYvri2472Ttu8yl!;}+b&{R}TzqtSTiOa}#z`nznY zzK)#`QNv<^%Fny;DSCA+5RFK`WlzxLC4G6#csb|UV{uu&^+fgZh|*E1$M!(0%sAon z3cjm=X6)>E=$jDZLvo*}7xmyC1TILui0yMVv3-M`?-htE78vLLgL6~Kr3m`US7PGWw$G;>h@ZfC%srp*<(>!p z5L|PHH=P3JeQm01e~tl#QHT{Zu0Ub`W_upS{}1ux8E(}xv${F=g2g%0`Wz*r(7mDD z+M){0Am}SUl!fkrS3bQ8mOtlMQNsjlT|JL|Uex;~vY>XQi?L+H7lqCtU%L1vptS;^ zYABU!(dD$NavsO*DV2*bK3loVz%^%LMGOnpw-;TA5U*${8}$mVAe{{c%X{1*w(#{~ zkM%ti0qy&u7Kb1St~-}~@u#A>?q*+?=Uj{4c&|WxWeevr8=L0S zj9Jif%okYNl+Wp4_0^TjB5Wz#hI@eJqmZHwua_|q1Mr`SJf4y5MZbV9m3FG6;k}|o z%Mdv#rP^%{NIWt+=I>a+j@if;1SNr*H=!$vyHe@fmewgy1phpzAx5!=wA-qSkQ^D z=#9tYzMp@L`=>bW;Nh;%KgJHpSSKGp>8WWe_TXvYAfB}0$4u@}Hz{R1!eoqYb?pah zavtC->Zg|6wiHK)`1j98falND>imnEw#OF3+>-g@>tZ+cfWo1WFFQFAN2l>TI$eOH zQ)fk8Y|?SqpY-euJS;Zvz*x<&T0B-cG>F;M!b=#}1F<(8Zb%tv#$} z;~*^0JsB*n(0@OD2YsdHiG9E{AND!cZXlv~e;}ecR-21zV$Z*D1g0W1#Y@&ib;0+I zronuOYA{}XiolQUA}<`lE8~Zw9elxf;izE2OTjTO%`e5PhUO1N^Up;e=$t=XW!yFx zwSuz0V{3aImOpC$OJDhFT)lyHMOqnZA7bqr)O{KBWBsHR_d?=4>hq$x9L-)1Jai4z z;)Tx;lrC-qTElVmLk8gIX*EPVs0{YMU_r5Me>J=Oi}5J!lXx&sy*>DCL^S`it>`U; zea+>yv7*hOIEPe^dIc{Bz5(^N{)qD>nL&O{1@V&KFnjqs824%hSpk+m*sIo2W_^L~ z!}rmk5Bbi`-ZjJ9nRJKO`(CZvap^L4Y9THoezD|J+&{%~uOJmWZ`g!_9z6bmjcv8D z*=2Yk{DcB4*k0Ea8-(jbwXsw8uWhgMZ>&oOS#Ept+WPiuQ+O{(TG_F&&SJQ+j@=Tk zFsbWalh=%QP9i&`tR!m1S!RO&vGYAwq%h0i>~XIcPTW%&o@0%V9XKJjwf?Ag$9L{1 zsEchmJywV>Jz76(>&a`c>uxGLnS8wD4A{$8;J4Rp-d2q3framY!UT21zoAJjnQ+bJ z+$8tzKZ4P!`yn2{;R-!gRDfLE61C(mn8^91oSAXq1Ltolx0;S-;mL+B3@n}z<}Ih7 ziaximeBLsgtm6$-v7*lH4h4Af{Ir~cqq4PLhyEe8UYM=*0nieR)ma(tam8(u@z>+# z*7|0Bt7Iu$0{s_IfBPkA8o4KpR5u{d{@9eQD%DrI1Xn#gCoC*$I{Ji#`{Q*PQ%}L$ z!z$DQaNpPj{|WUK#dtCLn2Rg?%|p+KE%*m2+}C_LHrB&mID_d^R;Xc$n%&xNYPR)L zl#q4JS%rP&SC}&KI8L@^X>?Gj*`jRCPJtHdB%HkjW7k^s@wxx=xQkh&$XYMlrHla7Ad;U?blj;<)7m)XI+g_Fpt{yg;wjwx33ABJ)yY$e#v@V zZ@iIL{NBgN?mMhd(Pt_QDV(TqlEU*8HYmIh$dP0J<*s+Do746rMcdL3n+PM_8FJ}5 zCCB6IQ8>9Oo_ok=or9aoW^^peJF##Cu3h80dtVpgnm@vew8nR43Yrg^fEFm#>BDq7 zo~pn)e!o=P=DzhE$dr#=_jr577-+bNx5R^EYDW||*Mr0tVQNQsoAGsP>|h01(L55f zy>EQqv&}DzZ%ZE}g*4r6f_}?p$*po7C|mxhMp8O?m8A;Tl54@KGxB-Sns4qcBA z@Xcs1Uf&TL3fYqjnLenUa)Cmm52BpWYg0)vWIZ__vb9SC`ML5R!F$V9(U2ZyN|gc? 
z`^f3o4B%NBEEd%|_j)N`idO6+Cl@O1c$Gm6Pi+;i$A*ce#Ym#P@QUxs@8IJKLq9#a zbk3xFd=O&M5Ri)|<@KyT6Fcefl7aftQ=i3Mn$qj|h3U3&!4wv9rPsa78ML+6Yx>?}otF>gwKPzRmJbmMalUMvP>2g;8HP0+_?j*h+I71gnC!$Jn*C7WO1-rsdV+LRo8|qiv$UxK&coi` zo>DK;3P;qGju?WS(Dg^9^s!@b(@#o|$lJEJZl0XJBkAOC?505A4<5LX# zPRifA`qRGBM_w8Kc%J;~OMmw|zha3`cJjc*wLiW`1JCi06;kcULsotJ1s~V8U+@JE zH5YtcyYzy-^@#Zd%$JP$X1&W_NcU#XnX2Yx&F9hCaFL&xVv*=Jf2GW4$Rm{B?_ zrH&cH(5`uG=~^5p+QyAqrw$5lVY;jzQ#2CX{JlXO1A+&Ir7x+m^9>FP_+Vf~Do>A; z0S^K5r$cZ%_NkO!{~{`_D`5H$W^R1`bWsf^uSGSOycX4j0j+Fuh6xE%5~dN#WbT~B z=RGpO4Bn9)9h^=Vn|T|Q%s!xG&H*Je&7xX0B_rV6fBBFmHa!yO8@e?Ip7+jB4QS*sZRO#=hLc?lC%YOyffHK|C$<_+Y&B+LbIoW^ z4KSl^&emCVMm3e85jr%N&;e&Hwu}zcbX7YAP?$d&B$aUa@n7IJ>b9!IOfqy3k`Mh;6-ghMX_^J+E%R;)X{3UB3{tVyi8pp*i6n2RZH#wT{uAozJ zeevfB#975jlzfV>y)GC#rLw)QK6c9fomN3x8fURPv%pruok}k%c_b={%PUgJZHrAM z@6E7WRTS%a;)9>|ZLA9p_Y96cMzHS1D+*BR#<~!U@i^)O8 zmo<4OnzjuMs#^d`KQ5}N_kQlqPwKT-4nL(~`Ts+Hs?PdpJtk(|8L&#U`)SVyTz+c( z{buyZ!PEX}lv9Lw{iv?)SF$l|I^4`eXHLAy z3YBVTL#2*J$=1jl-IeMmA72)rjVt2HULWZe8$AoOt9~j^t;C~_0m@8U3VG9Cl&?=d z7BR6nCmQYR&g)>-8f3PvZ*-fbe9dy8PwAhuNSAB#moMutA9j>XXt8?m-Vg1soQ06* z=b$p1N>qKkq&NE%-9tN{*R*QW#BvqyXMEW$uJYDZ4C3@NA!22Ih6`A#S+d5g^OZ^B z^V!5~?ZfP8wIlguEDn}b>7$+5@zUsM%&;DS0UCKI47Sz(;U~8DWsAch5l7Dnerr{; zsc|s2Oxe8v`Es>~|6J2nANoffV6DLRG^xvVF9 zIHm7uQW;3Tk@5k`FI4hDhm)N>-Yw7ZQbqYQDSwiZ_kU~owGdLjY&RT8f7cS|=Ns~@ zxBtcZ?NYz=|A*H(+drV&e_p@#M?UEbXmfw&&7nEpvklC6+EVmy&PP62o7oK7);`sY ztEC={9nxKU+tPJEgYJe*ID=!2@U5La%sK&4MwJTP^>1PO-hldFhwRqdAN{B8+W`4S z%0Ec?RlApuFN<Szrk7h;3DYL4PyBbz#&toIjs~x? zuDH?IlvTXWs}ucJD4U2@QKR}rXc1b?01rBNZ)TN)_h-J@2Ns!GTSjCfJpNf69zO=< zSVv=>va22o`y1%0hp8#PjD}~{*tMeLa!s-l2~d0|6=Ua)$NK9BRNUYgY5^W`oxfbQ z12{^sa20N@B#8G9x&^Xl6PJ!z!>X*k@1qZNH+|(d0G2o zLJ-afvtM%unf1&zIwRsJTRS6Le^-BsqjU5OGW`iw^wNsA86zvMS4i6{j#j8bk43@P zv*3eRa0w2f&Q*`A%FyPc=4sU~H=-#xHt(mZbEqm`)nQk2H~rVYKHj$JJf6oBRPw_4 zC}w~qP__dtW_f#K>{?IZJ!T!b^_@@kGj%*!%RFlRGyDDjbiU#7sD<)lY0gL`A8@pb z(?O>F3ma9JQ`1HTXx5cKq&GrSX3LLMdJE}Z&^lG62TZ!;S2{LHo~-|$?C*b$)aA6b z3L32IFcr7g?aEfsugWVMKp*GovD6o&zT=d>&@Sq(^f`N&@_#P}d6+srvi?*5^LV_L z^4C-TK_$P-gTQF;!oA36qv^j>LyYN{^`X;~k7aFW$j8 zfnUO^;-L!Z2^C=561Gj$s^M4XkNrHUV&km1wdy&HiGEeh&H~~int-aE&GIh>ZJo9i z&9p=*q8jzXD3tR<)qgiUZu5`R*F&hMgL?Y@$>w%8m(ADWAGJPja_h79JXHCoIP0I^ z$DsEx4)yc#*;CZt179isEa9@=xG3%eQzMVzsg&5ydr<|oV0z(&Z{rY_35Nt&;2A9N zcvfVI(&BaqD`ER_{GUC;-)0Rvx=X)F%Nx!)aw5yNG`T5JsP~0jOxA7l4^;=d} z<~NRyl@xrCf;T8ZXXoxWKdJs*Nc)T(qKo6bp1QDE(PN#gbo~eOcXf7sQ_b_SpcVF@ zu6pv8Gleq|V{;Zr_@x?+lWBU)W{>Vq6KKy9*ygbg-&yro58;5mo6e;5MX0Z`zrJ`E z5}RR*c@ps4$3r@FXti*#!dJC0&uidF5x5|>ZoOLdw5#=>)5D}}ec$7<}b0#zXau(5KcQ3*V))cjX}4 z`U78ylzj1{ zG3%0#FY9J|PQrOA`YNZ`Ow|sxbl*&Re@&}-)Ti@wF}_mM6(}WBq3K4(?>GooC*sRk zdd7dSqEn3Dj&rSSE8?6FpCiFJALe&d_O_n-3sit^Qb zS1U6scr~AHssAij-*pwJs5<@~leI;=p@89(dGPi^ITk&HEt2h!Fk=sE4)VEwrFA$1#XQ=FB#$fL*{k z5LeRrT~l`sKWFo^{{`3vWq(tZP;GCWuFpN$#5te1uRmaZn*_i1zy1)UzSX_i`w@Qf zD{_2_eC0gVCILFF1!=HjE#p6!tKyX%VI9Aj@sl*I+B2wW4`s)KmaeD*=)Lv{DOX^-k+)Xk14W#qB^Mrc;Bm${zL zM}4wUs*B&t=_VVcJn)Q+e#{3k)gG#ECpmWs-`bfT0(B2KqKbzi>bZk4da}8~jQWKm zR}05RnXY31HsOBzN=D^5qSi9%qtEOmZf4XQj;LNnbvdH^!w|IuB`W`C`^&mCng?@+ z&-!9PcHo-c&V#M>lkW`BS^F<+Ob?yNQl)iICGAjF`R$Zt`+H%B>SwBNHe%HP&7;{~ z7n3%cozrT~O&FBFLMH3G_h!T3 ztvbPP?U!A&L2A+SV_MTpYhHVelf3gpJv+*C<@A{6#d z2n>9E5!J7vn(aL{HME`8hmPmDHo1*D?p8Ybk9ReUvH(|8AKY7&N$;;_I^nA`AwAs+ z-muF6}=L)Ex#J` zn-0}+xTbyZdR)`)MWuB5wgzwc{+WcslL8a(j?taI~V8jhnc^L z`OR3BzO)tzF!y_OA6`=6vHtZEn{{VP8LFp$0@Sy9TW@elv1VX<(i|t>r;b%h$G5Skw!b~*dg@J7cLvp+wR@9H z`)tS2mR6ejZUJx78o4jR>Bl%b6{>$2%UZ><-h1KOR6nbKG4(rjs`c0=UTJuU3UB^a z3XQ#^wY^cc)mq%iGPW=HQNMEHmbQH+iyFgY&UaN&1G?Sl--M&9+g`2(Dyd{5mAw1> 
zw{d{cH*MhlZTvc&Y7c~+R!pfK&^BqW0P8&3ZeKu8!_r2b)P?6GasLX=aGoay+y@Y2 z-xeH(-Sy>M)kI|*sO+JB$~g9n{OSSZId(@=eiP;A?o7Ul^_ez+JpH|$H{%ZC*q{3s z`nzg(<4xAbWz;8Ysg=0VHo4wN$^CQoqZ&%|2cpo~`sN+Z&g=*59&j ztfq2)yTy9sIodu@qZu?W@z> z9mUM6#=Z?#^zR>R-$~R_uEy_xI*k0{{_^Vd1S&>Q#d%b5y+qE*AzAZcw%iO zK8~#l>N|z{ZrQEATIzGnpX?KxcvtVI)G=|VIE~?o>sfjp- znQhq#AJwu?J&YqUXtfY)Y=^%TZrFC=sSFp~fxLxPAlcez8O2K>f>@rR#6bkH=BZ zOPuPjQF?Ydhnf0sS<=7H%enu@+^##8^Wm{dmvcdz%6%)T_!269 zbv-NVbe~pi^ewjOtKNyeAoX2CeLvlmJ|EkAH1#=sr>-XzQbz}MoUC;0c6~16d&`Vm z`!>F9TQ5kv$UJ$^VyJ(B>fd{cRo|8jmLcI7Kop7DjA2h_Mw&#Um1-Fh-bn@8_r z5`euSI4Co1n)0`gbj5Y*Sjih$nfq5bFCZhonvX^^{{Uqxe;oly>tt2-?hbH9-l;gdjHkX&{>-|33w@kWGnH?+`-#3B4Hswl+{{m?@|Q|w$l)GWlN$SKX`kFK=#BQD z>(~pSJ5JrdqV7|^mAULM%AZ?sv;-ZZ?JIY#{}}4$L(JBDPktMBP=A;@%U66SI;YWo{wUe!KD>O1&%^X=<#m;kA)2qqgs|-qt7#M^naUFIR?{g#?s)7|UG% z+PeKu^y5y48S3Dpu3BAQigedzRksA`Co~-)9X;RHho!uruIC@IB3L(wK0fN(mA7et z_038Y0^4U&*J0H4_a|ukE{_4QU-_kscA7Q_=(Ts3*0&G!&7!^=mA=~Dj0Yd(^#JIkFyS6EQGEwb{?A=M-KA!jNwQD=+ z8~%(RYG2J>Z>?r|i$BCzjOqJvRY*fV6~BC2uf8y?#VN1iM>D_B6}CLkLHpJC?*3Tm z6uu%8k@G24-Zxq=@(0LovQTDB>+Z_YN{_|sA36;@_Lp$ukDNz8fxRZV<75L=sde+y z7iEu=a)I_|$Ot7t7WU}F>b^5+Rlt=@lZo`lprx+jFDlK{7UFup^w-UgsqAUS@7Sbh zwf7;+ZR4g7$hx|Egc@_x*-lLmu#Q4wI#&;lSt=_pV{9p8FlI|Q#(qZmZIj&cju{qJ z^dXz7R%9BEGg-bO(}UfF;Hyk}w^!Z6$mUNiw(}L4lp}vIlhXN>d~2tgnlFKf&0DH} zMWH{l9kSv-PS2UukR_jOt5{uSTbesL@w`bTc5=okW*x(pFuTJ7h*1?ezYUJn&Q~4a zyEIH1GN~RyVh?yY^q%>hwh>KMD`SxXcaE;4BdS z?P1Dd-Pb*sx-u4aaznyL`c2TbG8)^`KJA0XzrzR2Vm-#}#s>*1`wg~nLO~A|+-{?9 z)U;CErVGL8IG%^obb$1aHLWggOxAQG&@2u9rpCTp}Uy7MZ-y`?4(@q@%lV%)^ z`~2spu;-us5ufhDyZybO7*e1ha3-YnJ$cHn@`~P zV(pW_=ky6@&U-lIvp#{4>KA9Vs$4pT;Ir>y6Pb0~QR1_@hW~}b(Cpnb zbv4iF%zlYqs;UJU|HN(1E{{BojGxx6@^h?a8Z@o)*MqkASd+i-0yV@sRQ{;a?BSdH^GDNpHG zZOh*P+NMw0Fln{Yr|RG0Ojr6g{3=(TL*6v6o`2Ojd2l}3^aa$ydhici41^tB+cEvp zA1$o!w4-EyhS8^v2k{Aw*46d!G(OW8n-I2!KmP^Z_=p#j7R~O%!*lpKx>u0-PdEAF zOKHLze-V*auAqo3f1HhBeJ3lDB)i6l{8EV+SE6R$`^BhZh5_>6CHL_)7^{->Ipwd;=5tGcZ*J1>?&+q^mUTBfY<-i%ECf{iXdl z=GVt|yidgccZ_7LH*nW0dk~9I;z`EH6_kfG9iTPmJ*%#ugh`*S=^oN2Y1$%vw?1=J zN6=d|t>Twy+E4upH60*5SJS+lm6-vWwYAMR(!N#j?`Ha!Um3BUe30$y+`{T}+=6xY z54P`0%JU29)Ty&(gH3;^QoXy&In0he(P56q8pXX(AzBGw;%dU0q?QFtruN)uD zOarglSA05+lcDx^kT%vbznnu=OI}H@VtS>Km(!=FHFjG98S*NBOkb}lgnsDrQMMQJ zG}HLY&i{CtKf9jxb3Gll)O_wbzU(}I$VzbjH;cd)mUIJSqz7H6N`()?Oc~AkX&Sz) z%1pxTvK$}G)RLD|zG~3gTGby_{Wm?7tsniL*`np8Evhnw4nCOKjyq4(fijf*il@~G zRL8`k?*FxI&QSMq;2NJ*G{JU^G`1=Ia*c1xQ%XmKwyo6XppB%L zgLdRM{ZJo&)bnZ897J~`xApdY9Nz)QbSg+g{pL7m5Xa?Gt>hUle(+URt_1 z4mGV`3|4fjqKA2IUD|XNUgTC4Y`dn>(=-;ZGiOzCu25Iau(Ya<@5IB~9VRpcV`ntt z&0l!0wI8po1{?79m4|+Sk6UjmR4QU;6bCn+Qmo|hUbsDxc^zgJPcVizw||!zbH=Mz z;^Q<0FTAVs;~o5{885ig5?=U?#gY)5QTl2dL%wAW`_%Y0I28HT@zfNLaa_ZYYHQQ! 
z?C)?*ds_-|u{sc4OvDoiPSLxSXbhX+92#RPdM$0`5B+wam&NJ(P@ES|MJ8)_-9Yn# zdoJM`WEy4aDYNgryE+D&G1-LOn9_fukjhL0k2E!k8Z@o?Pras9zY2iPj>mOeU!nFP zYv$>;z6l*6>MPT7LDJRwS*QT%9WSFC(+2~34MQiR3U^(LDqtys^=hrV+Sx@l#uN3Z z`7lXcS>BsTIQWW8hlBTJq7J?)6LIkVOxVF=KfHqvWC9L8nDIOKP{!xr!>7myhQ5}rh=!ja$D6U^}f(b_5<%n9ts2dzn?BUpS%~8MW9E6=Bp*98tW?k-5|nwT4j_ zjr__!fy%s{8I7Fw@l#1jIoGbxbcB^3rs)pSY23le_Wh9V`}7d{ei1Jdq}liPtD$Rf zP8s9p?l4J48Sv+vPsZTIG#$J=!<4qG8az#tg7MaBm>i6S@PYJ}iox>>Fe_rJ3kBQH zZNMzc_l?$tHr7RkV@g&s&4a8MJ{5$c)OgIboOVN)?IL;(d<`F`vCXEKu|kfeo<1JL zOJ2Jf&l@Zqk4Zp|yv6!h+5pQk!?1QfZsWB8=Sdw$IBUDoJp)QdXtwJNpL)EB`1&PnzpsJH%zD?;i-{o+gIL|u79trt|g zmuXv7-HJ7>?7DxP^9rG<^A>EA6n_M1|E3&YO@0G-6N4dnvRklt?35R4e&qobdcU( z$FCvn1)V)!^jq&WSUuRHsMl}C8ns^XQFhBbHFuK6q|wW$cN|eE?!r3oG&PS%uIOHf z8gr9fu!~U@aeI`-sC$;%ql(KAbqIQ=YCm=ERyY=#P2Z8@6z3_7)f0Hn^bwBL-`&MS z?JjTO!!Mg@{Iq*epgixha^Sxj6Eb26!`d_8~HxjHAJ7FTSz?E#!}B@ zB`z4_u7tCFES9&T4ED=2{SBaP_47bUW|BT1W0f*NP3!YhIS zD{*?_5Tg&W^l}ZhiVNt6ujnjHrq2WloxNJOEPNV)!W+UDjRnH7Nb~>buR>P}bHE-x3qCCcz#V z&$1KXWX8yDljFr&H9x2;WR-}q>63TU;9e!)O`|+(<=IuPguR)6J>nMPOK2#Bm(ZxG z*LogVvUapkXq`*wFb{eT#P({P9_+)nI3U?TXWVR;WK1T?7~F#=-DIO1T^jX^IM8lE zyoCer=?&U7De2?2w2z)V(ncR>qxZDYgKYGdcndeIP_iG{=yz@Ot2X*M8~vn>e#l1u z+D6}Iqi?X$*V*X#Hu`cKeW{I}XroWH(I?vIkv94e8@;!U9%`e%c{o@1Pi*u*ZS?Cl z`b8W4l#PDGM&D?gen zF-{44c`epLyF?2uoVD}vJlSJB-lQCxP;3RV(#AT+^~!Ktn!^{h3-cXPjh1)U`RlX! z>zO}fr5`fh7*Kz0Hh-i0YdXL= zBBbdC#@E{DYE35@f3&96tl-mhf_^On%`#aY`9hth=-%Jw>~Xf1y?v{!Zsg1Mjw)(- z(k7{E1pNV~tY%)JSK4yf^t)bX?o%P+=c+at#-%eQ0Y zVNYLuz&D0(q-EP5RM)4~nCboGYp!d2jDMcjEM$$G(sY#ZwK@3!`O(7L>f`b!`v322 z{!muGh!|ZDgEo1r!Nnh=>5m30VV76!dBZN`mvq1`tpe|yA_FaYN6q__vjBXUGiJFOQpNKNWix~`fqh(wmlTTSTvjV zTwSHxFKgdA4oOT~VJkD*cKtPQ&bjCD)4)|&)<&=OdGP5&Uhl@|P7cKfBe8gI#uc73 zdxP;|ui>MQedXKnn&DvEw!G#HK81-Z^c$)sU&`;cm>oS!voN?LmafwRLpQv9L z^Jlz;E6O{u>(*Vr0Oc1yuKJPEwU(J=EZ?u`F!Q7yQ1Oa>Sko&18qhX-RC*Y6e&nwM41%lQ!RRrJz` zFuZP!JbIvdWc!%vz`GHICadL7mU3ej17qmv2LGlGst{EBx`&G#={)m&B7w!mm_U?&pDT4HNbM( z@pRW@-sOu;z&Ugfwf)ml*c^;aF1Ai%e0xI}_6F58%L}TlQ97>CIue9_7NI@Z0_os0 zwUyO*LA;lk9oxhA6EnnKJr1TIzH9HRkG)hMTM|{V=R#(PH3A>UveNY^W@8|Nczk1+ zO9TY93}SV)@ONM`X9lOoqu1if^Q-~=-VFcMRpCN(Ve=WJT3dX+qMz-jW9u-cV&t-5h2pYZpc%-lMLU8(tmwrfzGQLWeL zs_Y3uRbvPURfP)N!K|_l#1&R7758`uw-f(jtvnUwSl6AQs^&-I`mAQn1i@5UjbLK& z6iQ(W+AD?nzi%A|W$f-8SgP--*ijH_Yw>#)7F$&y`+~T#?YhtLq}_&{6V_GxEleYIQEN*G<(7y7gyy|qh2sx|p1=#w^s&y}L z&+2uT(gSSd=(uyQ!rHdU7mO8+28HY5#~`37xQYC31-7dejbaX^f?mu0v^=wG`7$kE zl9Rt%$?vf<`B=$2^R=z^d)L9f!!Cy$u1m#Ao*g*-bd&!3fzyvP>9-D?KGdYQ4xIk} zcclE81E)V{($ALky>j>7171|%US@#(Ck!mVOvwk%(S4<)4J~HffLDG;y0An*P9%0YeQFAhg~yySm=1fBOK`9w#_ zZ%M23&sfQgprv=LMox^(vtZ{1x!4KS zUmbZd`jw5}mwfhza$auIr4NRWKHP&&w*AO3R*%5NTqx@;^u2r>L2(U(hX z)`>$+BKx>ao9^GRyzX>Z?itB7Pe~qV_T)_ZSE^QLgU@UNO+l!CP1=k0T>(cp6e4{} z5Mv|NUUUM&V0+O-27QP90ryNGFYOsS;15d68%_HK<7ZS`OODyzC%aevqP=#?k!nu% z+18dUKPNvAYGbEhcZXDaKh7JqoMkX&E~uxbYm!v_f`u^)|y$HPRo37w`pMioMIj2Z;b6n*s*B*Uhk;r<+K z5_?Lde(2vrbr=2|qe7MKbP0;T^&V}r2*99gllE86oClxFtnd|%B6 zT|o@#5y6-@7|U0aMql~@sh_-cE)!F?m+za_y0Q&%pQ&92!0I`nhK^z>3G90@N2&EL>=xaVZMZ) zoS@^URqL=&!nY-UtmK<5dOrDq&No-`Jt6t-m+(@_7m)P3C4PnQ-NIK1e~a+TC0r=s zTnQT`JX6A7i`*m$Pn39{gk=(XB(x;oD-tf2@J*>lO87Dfjo;ptdKexnJ^rFNOIIP@ zx}ic`oKWHIJe@)MzAErUwU<26!NTjnNhx=R$gOhO(Ij?1e6-f{ zoP>=Mf1ZRv36Ga>v*bHc_(LQtlW>rP+mF|Jw@7~9QJNomj1KpeFk$owKU(sgD&g@G zUn}8x5?(Ih)e_zy;e8U`EP9_1zFWd~CEPCI(2?4n{Utn3!jmLiA@#XX?739pqY~aM z;oTBGBw@FNZ%X))ggYc0eylF{KnX`m7?5zHgfk>uAmI%X-XY-{37?ekMG0S*aE92O zlzLba|BUcnN#7yy5o4e9mnKQySNNsER|!8_!hnRgN&2rPd_uw(B}_}WUBal?F-zJj zB;_v@zEHxwB|K8XlO(K{aEFAKN@&Kx4zX+a_q4w&B|Kh2lkYjnXZXaSI{!lwzFMLA 
z4HCXCPcZ2JARq4_f6??+suQMh5OzhfQ^4}+XMAF|Bx#U6G z&czZwA;+(g(jSanZ%Xki#`F&sE=Sw|DOFO)Hi1yEyB6rAlHGhM|Pm=f- zrT!x&eXNA1OPH4Y3na{!dORe2vB>W$1N|3vr$#ov4Gr`vOe@Czk;*Vrxmm%=|MArc>!`1ul= z^4=GFs$BJ0?kcBI{J&1}8~N=bx5_17Ciz~J@NG%IPWangdLjqvcDPzXqkr>pI==Bl z9r|QGSS!c1`z5?u(pS5VcTb7@L`gS#w@ALA=sQWm(GtEW`CparZ3!(2dnEi)LeCg& zpV7Ze$~{u*(n`-Ol00A0>`J0yK?;eEm{mhec4H}cD*Us}?i7D#xrgeG63 z%nO4gJlQo*9OvrCA4or+-eBgJ3LWSPL->l@9E-i7QIhNXyj%{Jtm6&TG#w~ zxA^h(gi#4QBIL70FS3DlL1X&0kZb9`Dd;~us zKnM}Sga{!@=pZBsDMAkcwL~fb^@9WfEr!elv>_x2z8?Sr3iy~q>W2Vt0N^7;_;I{RD2R4!)rxK}Zq0 z34s>@Q9_E~`w$TM7ojK*5`+Yyhv4P=#nJ@tU_gN2A3~@gO9&Cdgd`!gA0WavAw;SG zJ>Lg-egKFPx~lC;FNJbVDdPY4iFgdT#g9?&rX z(9IqAX+jUda~8l$@DaixK!gw_BnUl(>iZio&~& zJ^Uc8CrOC%on&6T8{E@z1HgA9;|Tr^K$?)k=1WiL7l0%oeJh}c;JFRZLGbhHREm%$ zB(YDz6HNdjcN10v62AfX)&Nq3#J!{lJ%qr0q<%~4en4a+*)0I?KL9C0x(Cql2`NIy zr-0CQK!VWyZ-DPJ#u2)E0fDao(H(&F*JS$uJ^c1gs2JcM0tgQSB+CFPeoMep35Xs; z_yJiymDbHSwt9aE=&2#gZ{$Qz10)D3LX=Xp{Axh{VBnU}@ z|3N?x!NZq81PBR255b!Rga|1@H^KXRK!A`WbQ5|AKEAoXgODV26GEMUFd>>IybJJr z2nhWP5dIhtA^1N5bbk%-@}1fVLQ+vjkUA2OQZN#fAJ_anzUib0o;yepLWD3O@dBXZ zdxc0C1xTL?=pjT;0|c%o`%6Ga0+7Co)ZL_h4e+fY`&)qb_l$cK;9my_6S@hW#~DY6 z5;_Ro1kV$I03kw15bkS26&1f;siK1cQiQr&>$UjaRTBlRkv84Cr0} z2rniq10?um72i#SUjUM~0>b>_SMp9$cLUOo0etHK0YVQU{5Zh-1Rz21{0R^uBnjy! z0p33Ye1r(0n-EO_ItU3xbuwWC!1FX9Oh|47L|+1=2|e8a&&z-$!T$>5{t5_e0{GV9 z3Vix$K=c`aeK__j!Qi`GCX)fat}5z*InFI$1)R;Gab*0`Oi5 z2)6)yR|67+?rX>{21J(t!apT@6Tq_ykhlvFS_9}_3-CS$NUZ}T9|r`U0E7sE6u{RB z@D3gVErdWZAVlaXCF=#G2$2ztBSiKjMTqPLh!T=M#t}S+0{n*o(uV^QM*+N}0iJ3= zbR4N4k_rHlHGqy&0HHckK|u5jvJ(KlvjG0H0qOI|UH}MP4Den?m;p#$&bSC5(hTri z1qfV2SO7>Z1VmZ^={CY*KzJ#jdpV%zMnL3dfd3Xi;8sBNc7SIUz;_oQy_#?jAhZUM zycgj6Eui~BfcJNRa1szDB)^B-``yO@{G$Mg6UY)gCo+x@CUhtm2P#GAA^1)Lga{pq z3V=#Z1f&Sv1kWTu`aD3#g@EKmfY4=t)HHx^24M~$5+VEq;GGKyD40+7N`UWbK!lJW zbQ8SS0FnyLxOo$Q^ zgyfw7|0+TPkh%*H`4u2R@U8|V?*aIJLyFL`2H?M!arXf{zXgN{NkWR?xgQWw@BpZe zKQQ4jK;TJ0=+A&Cp_`CM0eT3bPR0?E8vy>N$rAj}0(uC6=g7VQNWBP1z6|hg0{H$0 zNW2P2zXk}r0f-X1-vW5k06!sY5k3QWdI9OeJ&-U65E%wY5JDwny?`j8gWw+l2ob`B z2q8*H5>kY2LYm;&6A&PT2w_5!5ZMdhDFb*3-2`7bz)uJfB7~@l+naHOFd;%neFu;x z_$nB83?NBJ6THU)LIkfLkoX>;o8Uha;5!T836VV;;GM%bLVyri0O%%o76H730HH%s zKV?EYAdvt>ehu(G07$-1Y8${uNPR%|LqL>}B%~F51gc{@AVElZv8N?`D8N$*z%>FC zP6!`P>brpSv4D>60eVgVBu)hQ#sUI_$PWSClK`PGSwfT$nhx+r0Dc0lIsmQ$;Q9?@ z3E_nRT&_XkO9093fNnzg-+&H6H^KWEz)uJfB7_b?lF&`)A$UIrczOXoLWIykNDxwl zG{N@;;|OU&55e~(AVi1|x(S}I0DeM%kRYT9z8!#uk`iRu{jchl;Q^d?@Zy9Daiv0q zy*r8b{lb|aO{DT)Hf5S`M)T~cO*7}r_D!8KYnBw9aOF4peJh=MO#GUi#QSd@IRDz6 z#C!7c3kr({4VGZAN3(-M86{vI_pT~f_gbeu4 z@)59n1eDjI3RJr0^B9nqkb%g9qWzLCAp@PR-2odFChX+*YSCVm$1lQe_ei|w!c7ysJ|&+@uEPy8Q={CyS5){$%`9UdOBPk9e{=UB`3$D_%kb+47Zs4tVO5kb%n2fsc3w zikA?9k=N-M@G4zG1d^`fG5$3#VbLI4{aHV-QoahwO8JWCG@#4ZAy`wsPRD@Phjp|Oh+K~S0PwQ*YRk7%}ZEVl=HuqFA#YhqWwi)^Jss~OIVPT zS8|yDb-IN4*>oMBXLt#V2B^RIzmOrUGvzBD^_TK>SWuX?U#I6wx`g@JbRC~(cnJ&p zmoN4gs4&04=+}In;Uz5SColQ)b(rr?ckz1OA2hglFebl2gD{1o7Zw#2nvir+VPS!r zH}Om>EGWp|UAjBJk)v!uetzEnR(^N--Q}5bS$2LNR&?T*;vqx#7*;a8RC~{G!%IDC z;gaLoHw?%8l+VRbKF8^FC!gbVdAfX~QI~HxEniq<=W|?6-kzRcU^tWRTP{G5JTC=Ir)4^ONgezFoes$f@7SlXJ`4^XC`1 z)1ADT;q2{a*JpYITi-eUHJ{6C`iE|DlRw9qbmI%n8_rq2=2)I?f95svdc+&vaCUi3 z>-^^UpG()2;dN-N(CZi0iPENg&E?9s^Cqv+XU4y=kM*ZyF5Ss%&Rsqwba|}5Q{KsI z&M9xtuaE!sbf)Hba=A9l)t|@3T>Z&0XKwyCyq2}sPq(;9H=Mox)Mj{EVz);he{$vN z_RrqF~_8+>3=8bOCp1S{&GxFN@T)LCj zoV|X!#dGp{gy-a)yyl$p%xm)N@o!ION{%OI>Y)3#uCcv*j!~mew||bOtX-cO|0X}% z-Tck;zUzD@m|<@5Tqv5&f)`dQ;#Iyq;$ zlb0)tS$%r`v&-x2MW#H~->%PGH_D|u>tn8)>@3|~o_F~0VZ(Pqi$ z@d+276yDgoR(NAym+;v2seYSX@|XWa*I$x7TO=K4HtN?SywTq)yy*iwgvXho{++G$ 
zRCOFE!;bJQ;71GBfCh9Beh@^^!e4_mXyGwK$rH2(-Uyn}E%1&4>JPdVe&`hP>KO70 z-ZPRkprf$s)Kv8-TKGE9f{ut8*oAI}KOdE<_M_Elsp_cFsYN%xQ@jR{KVPR2Xr&+o03W$AuRYgP=Xe| zZ(6FVLJL0$8pJ+)RWW6Oj=;M?3tD(U3Hgf_J`)^7d*CN#rmC#tC<{1m7U`ki5xx|- zF&AD23eXXF%53T(V@NOjIFNl}&G2hL+P7ACz#P(#ec?%<1TB0w@S}xKIF~r1g@=F$ zIv-vQ?nO7k-Oi)kz<)P<7Laz*3oi#7u_L_t=fnpsd>7awZsF&E)Jb8-`Q$m~!UIA2 z0KyXf1@{Ho1^>N_d`7pz&z5t~qMPA=0vSsPe+jl=E?hX5I*b-B22r%|$3PuV9fq%} zq)gEf_(3542xt0I)l$rbyMYMW3!gooHc&L&yPA6k?S{_=ThYRwgBG;#F&9#w#SXk{ zfwA8T?*|7k7yb^Yfn7Cs^Ha~!!e@g#wD8Md3R?KzpbRaXR+FlNXyHMi0WEwoh@kV~ zzt<9HbSpggBJy9%;TwUBNh0tKA;LO=e1I2*sY~bv_|%2mQ)mxdvzTzvLHM4Fxxdg& z@Ci$46VUna4VO{g=m`7+kTT*xCH3$Xv z_e67#3qKQ~e>}0P=FDGnf1!oDfdaG}J^-YTP%G&pKoRD`-`~W2gcip59Y#OgA%k_Ls>+;al7 z@C%?BE&SLE^aW_)gJ3^exaXf~15fU%IT!6l3m*##(D`s7khU#I?~UGVw42pe4n zFM5MIjc$OSdDG-iGu-?(X~bN3_Pg|JVjuRuZ{q;h@1-7ME_~Mqv}41`XL#3s@(|q$ z*L+R<(GBn}_;nCOx56W* zQWj_rTnE;mg%6xdzM+L3=iweL{5WVuH^Un)aH{k((D0aA@*nMi{{=Rpg-34;_S`SVDTx&2Z04sq-=yfcIQR z*!2Ae9A4p6+t9)ngD6_K0jM$5EBGMjg%&>T2I7Vm zJ_i(`g=d5F(Zav5=4J3Jz>m4`KW`-MXyMPmYP9f|pb0Jftur2EuLY`Bv)UCL?o%)Zt3!Zx?`GyX{Hv(ysBk(JCQC?%Ym*IDR<5ZK-QTWP6r&^AV zz^~j*n^?pxy9SV001f$vUUH?jhPMbOi4F z2xW?P!Jh!vc*2F({{cVIP4LRcNGG}xzUOi5qg&y#o}eD0y>Q->v{w_j_u(>-ix%Ey zbqo9@7>v1k%Bco|(PAII2$Y~B@C!ilL~U}aQ-S#7ffoR=AB2Bzbrbx#)#_>b6d-my z@bf_YX@R>uW6j~Kfb7qVz~?_poxy$sd>7FDBm5Z9<1~0DXv9t{{BLWnnsEzcyio>U z3O0&A@P}Y4ItuUj6Yj+=eB);7(nQh-Kk^*?3%XhOdD=;IKKvEfh*mFfzk^nED}4Q* z$=^x1g*R-a4M8`K)1rnUZfomJMbGVkvV+^1p(E_|0h>ukywUi1NBp&Q_*KcY@gAr9~fpAi;1AMPIIoOkc!ZGGRH4IOLS7I1{+y{qLpng-<3y;1VsO-2G=BP|R zaU%UJtg>r6u^ZAErBWu z7k*xid4mhTC-reBjHu3d;=<#k4=(hPL|ka96R4cHa6w(>0Pcdh>(PFToPJiT&;G)N zZAlz1Oe9@#VG0?ByJ4>8+{aCDO$++al5%*y73H`OmTt{_;QS!`g*>5Lc$P%QF`h7> zOQ4Fzjj&i(#veDs>Yq>_x57%FG6!05PJ^$0&VI*DuvAaRtu^&w^?d&;k;EMP2#52gGJkLvY%+~L z<2Lx|boL!?hf8NNw(Y0`Tg+w*aT_f4W1yOWn_<%hfohwS!#+gWc)kP&5d$uqNFs6J zVPeLG;R~6sxUc|;!-a)OSKI_^5V>Ah;eHZNxkr34<0cN*E@gXhH%xZXZhQI+r~b^m z#GP;rkQ1$V-&L|#7%%dO@dN4ao93ghxIKR1Mnh#40iBo_bnoRlf(tW9JT7dpk9mmOVBh_;i94W|`1=4adN@B&t_}n$8>CWt;5=~7C8PW^Se_{zF&Z2@C3`_4(KFOU+C6d zSn?OG+zf}F3{-Qdp9mL|R9qN*ihYL*bCD~!2|9_tO}ODS<3hO`Zal{^*cpd~f91Ht zg&F6$uH(Yo7nq;8upF6)3vDDB7Z$rnUvc58OY{L3rV=0SfqSm7yjQk=VB z!{6v%d9IIe2FXJm;TjT!yP=0vlx3mm8e@eE2M`-B97p1D;dqjOJK<@PgbP2q!7+sk z2a|2M170GBap9<&%wb$Onq=UK@Q9D|WLJ(0c$>(45I!UIDHncroBrU!UL+nD_9qFr z15PGMxNshE;=#gnnEGyzvVO)qfio)ysU+M9C%qrU_etRJ zY*pI&oHk)l4c3Jl;lmH;Kdx#9scuBh^LBWvc95DuxevZshjrn?Bjh?REL}H98G0~o zaCv>k3U|Y^M1B@23~fOBlnWzCciaJgB=U2A;ZZV>a^VeyT}@umd@a3qR4z?a*co(yp5@p$Yw`P9h9w7Nnx>i~|g89;E7v!$K{CR7czd zTM-$52TX~h50ncRwFy!yaN$OhiVJs>tNra&v z(I3i%m5BuxHk07OUr0w>m`>tx;UkiW3spP%i3{tI6}YemNyUZp$thg8h4^q`0~^ct z%IRkpJP$7Xj6~zY-lPidfRBk47e=>dta0IYqz^8fN5rZ&|d6E%7yO`CoZf^R^UPl*@g>yki)nzg?Mpc zQtu#@feY7@@P2$w8~#cnap5ge5%{XWYKv z*nz8v5qHDx->?rj&)eZ*BA=@fo*zL!sUy5fs^G#~q(1J0B@&p&xUl9(#uFFTClhfi zj3@FOUbutIpO0MCuume} z^(Fgs9Q$;Ye{ijc%8)HKKS-Fwx9QBg!M=>%7sHocU(A{IB?+v zl88H@n>cad_uJ_wE?i4eap7Td3K#xDGH{`nNFCvz9Xvmj=Qi-Qo$S9MJePy_c5zO_ zO}km=9)b(Ekwv)hwS634xbRDI92fpT(sAKDz1#&y@25P9c3?XX*FoNUx53H>7%KT3hM9P2Rxx`7W|5n)gG~-LT4K_N%92knjH5WOz z;BHv!GV>6(!Z}x&i?|OqzQ(drAC9|4f4*XzVc)wP7q|mnOK0BVKG@+tq{*1B1?eJulVD$v|!g7XSReu;`4rhb}tB$x6p3K30+zV~S zV09fACguuO*@n|T{5W^8s*MW=6B{mU|4Ojxg9{yGA}(A&=HSBLNGdM$>E*&!k-_RX z<@O@MYB`bPPUs_fzG2&ireGC?o8WX(5f|N`TV#9^TqQRW|IVV6fk#gZuG7J}< zAW69JCRv0Fixmr2>v3UqBIQnagm`c-ELW28!Od`9Db0m5qZq3ZTr1&W5{Y|ZrP9GF z8W*bQU=@oCE0Z|f0tXWLIlFK>=}Niq78!;Mvz4XKxUexv#)U1(dR*9+cyJqxDM#OM z3oP}f=4P1xZOu)vJdxK;!VgKdgq(hE#0%hVSh{?$_B)<%50TGrc;Kh+Xl{pBNEG#j zuT)?k;li<`J}z8HI^x2%6`3=*us<1t3kQ*8T=+Ff!4u(tO2H}(cfht)g8kpIV}l>P 
zAFM7=Zi9)$G?M!8y{e2kF04T;xD|eFp`WFw10$<3Z#bWuV8QCaT-%vr@HQEUs~W*7 zcWkh7N;$mIfWAq2BgV=atkOrZUU-hk=Qe$CWD~}7H0{H~O<6C_FHovfQXBU`S9A6+ zE;O{z=DZOuC5ciFcat>S2j6X})v>^gRvgQe8(T9^Y{9C^7}|tie#YFu9WbvwSS5T* zpW%s~!OD$$VU|9O$#=|K*oeq}7k)`PQZ5`r;_*aSxi9VD7TB{N#{%wvZQ~hp+y)mA z(^!sixQCc=VXiNj=eV#8X@d(_k?y$ALx$nPqr`~|PZ1aHg+sm!R@-m~JW0}}93C3T z{!7GRCh@=a8AM-*xUin?c6f9M$C}iKZx3Y~aSNPBQg9bM`4!uRtFMDq1rjxmI&d-Z ze@+SJ9LD*?!1%*DMCw>!nc-|ZZiWj-PzQIxuM>jR3UOF#B;%`r^5um_37g?&jJ?trU_Y_A(W(Or#U9})li^RV2vJjU}PGh9WwvaB0s z`A(~2gk5yE!wtH-;S=H?+p)o_197sf9cCo5UvM>!eobXxOkf<~mt=+bv|#lk@!&4_ zn8w!U;#{GcH_09Jnx*B;p>}{}|5!a0d)L&b1yF zeouV3u)qn%Y6ja2GtaPHxH`-Al*Hr0!z2M09`N#91s9$mDY)=gl8OtnpJQFP5e~S* ze8n9w?l+d5$$p2X8?=iHv)*K0@tI*GJV<1JdEvW0t=s}z+-CmG;&+AMraO!YF8qbq za4)QImodbJw}}H6J|RwArE`oE7cMm4AOPd{PsGuDd> zBZwUr<|huR14A;I^SE#)Nx_AO$YI26ga#17E7mIjt&nEsV1`{N)3d z!M(6)O@lJeqds(zI9zzD7R%s5M{R-&?~_Dan7_V(?_T0_s;~@k<7W5{@!-NP4Giiy zE}TFza3}21ka4U@e_(kl+nCHY!lO+LDjN5~JWc5n$EF!}Byw)I!|g5TKXp9t$Cg^T z3r=grKH~i8g#FvlH|jWG*|wD9!Y#y$3m3FEsB~Odv;*TXKc}Bna0_mM-8%6%vT+AI zLge|O7lwCcU-SCK2+MUhs9{ncP9p1ZC(Qd9+m4&ydqlpUN7$RBQ!f057#47>!LZLc z9&sZ~BvqswE+(?9@I2{Ax$wS}2&9)m<0`7uGiHwyOmg=dEp$oR?Mc=4ngU84P zT)3$>{lSI55aU8#%R?`T!iAUgavvPkm+j*GnFw?AXKqu+2s4Ql7dDDFs5ZE;IkDq5 zSnx~c5^jdmh!c0h3q<-XEHQw7QZDRBJh*VR?!qnP0_7f1&QhTzHctI_esaa97k}+Sc4jf3*!_^-RZ8umKE{rDyR_656bCYudE__UC$gc(GxM=FCcA@UxGa6a+Sj_@wI zfD6MPGM{nbMiRc7pW(veq&O}-LoB%P0J!qD z`gVAl$ak}NVWq4g%0amW#sr0^WN|p1q~cB(5**@x?P-JW8$#4!%B^rmNQlb7J+NtL zh$@i69DozTLR2&^ObicE^>N_`*+W!E+zKz}3{ib>AFOH&(Ow6^^+evAb;I@%n%m)Z z-F>ioF1;LH*WCv{$*q+q!t!}S{GVwNenZBvE}o*?pGjTbg39Kw75 z^b@8Oq5o^>51elb;qzhaclaBT{qBQuI}NcA5u9T&b46{5n|@^cngnV4{)t2Fx$7q%)BqH5#9 zPe~ju97MX}!Xy%pJ7HK1ea4OO08#6B4F!voXP@H28SjMfxp+Po1m}}jTI*^GCy%)u0|osj+_+pA^4&fNz%&`w^4P`&#!j2>ww?p&Sw1o>d5GyV`PCDXV zcyO40zJl??=_lpF2j4J;xUg&j#|18oBP(zlTuHX!!c1}s7iJsHe8Yu#h;a*_Z-Hfq z2^Y>CL;rB$Zqf|*NZGd>=eTh9SoS3@JV%mnAM7?>JI3uWbOQU7a$&?|<{K`IB3|4K zClVhnjG9XSw&wJ+3Lc4D;K1qHm^)ptk-p&;xOEeA3HQK4Znmp;PCu*Q=AC?A1@~Gu&v!9?(Tz`%5bt0dOFvA_hMI8?ubB;dai7@g! 
z>%vVi>Vh^OOcyzK5qUl)tVEtrM`*l6cC#`XqyR1qDiEroap8N!f(vU9D{h6oi9F|Y zz_A7CKjn#V14+b%bt6NS6Bk;ETb6|_hzGa9G2;DrE($vo4pmnu7ureqe#Q_ME5i2h z`o#=q7iD`Ww-pOjOJAdJxG=CJV}c9Y5WCcY?oy$AuAgy+rq?wWW|R(9DU_=+p{i7L zsP>(ru)-Umd}lUe4v&^&8$FCUT>NIJGUG0|<1N;Kdtmh2jB^BI4wGU+RRZNsIPG1^ zad#!g=eg(XNjE*wg-9b(SFc^_(Ry5RISq5jW1 zIpIMPMIGS-V!?&eJ_=RMaN+mFjtf(W0~ekoiMSU|Xh+|0;Tl`0O2dW5_Vf)G*7=ye z;a0f5Bkk~7(hc)=3RQ-~e2)aYPoi;Q;m-62H^E#z7&md))z1FK?eO!Sv@Z?~z1Rmw zm{+h$KgLBIb|*Gmc$e65Vb=bkY8Y;WMZRR8;wES(GJk|gx;ursdtkN!S{);-tGgAx zH;`?o9SfX0h`E8g;DW(y*HPvP3?9NchhxnMYw7NRQ@>)qQO60rUx)JdF&SH!dj!WY zE;JD*E?l3$nB&5zZ?$qOoHL%eACCGhxsi`cQ~*h70C~ zsu|RALDM|Vg^h`Pmy~ciSwS7)LXwJm;SF*e7uu2;BV0I|WZ;Rg=zMK#&9LACwwL$Q zOz;7b{-}kasv+^ep9bgY?uHMD>=(6&a{`g`p&fQ!%vc>~yI|sS#tIj1Si^CRyWt1x zm}j^ZR@}(E!Y!~FS%eEqY+}1`VJu0L`tVC4eRIJ5MEd4|)7_!`Em7tJJWr%=K3HqB z-ha4PcMqJijeU25v4zF9Yjwnh`n3rmqmTv$bS;h^8Na$%8cnhU29GxeSD z^PB8f+zx-bMgMW(Pd@rD4mXly+zkic;arb9V5@YFFWd&3-{(Ab28XvFFc!GV;F?a7 zap6jmf(!FKWZQ8Q-1UTQ$31XHCj0a(?=L~VHBiOi!cwF@E-W7qrfj$c#$*Xoc5(Q1 z)-aWfC&EM7H21=Vfnn-6<-+ViVakUKOOb3|mW6E%VJZ)9g9SpvR5K}u1BlEG2V6|# zGuXmR(v>>G>|tSQAZ~>H$QWFhGdxVqz=ef~3l~-(Zd}+wFSo-<8hDgbSCGD!8zkkuk)DACj)Pusun@gQgT4ouBq`;YQL2_rk~m+E|%jL*1=#D;Y)| z4}AJcn3{vDf??{tNX8krz~ZJbbpbcS%|+?kdFsQJ#lrkQBX+|RCFvXGUig^Q$Ax{P z7$02NkJxb+jCwsxIdC(qN9N$dQl;4*To^@CaR=NR&0NJj@Z++~&kKwv{DH_EaKXb8 zUQc;pSULJl9V4txq>dGan6+^;!f|hMyimspw-Enz1V+B4j}^T0He*N~{t&%tNaSZ> z!s_M2R3hcV=41{o>_S%Hb~yVT`p-Ga1#4Dd{*-2XDu=1NMEWVrS%o^(7fvA;aAEj+ zVd@Dkj3D6`dG7`0Cy}@b4thUKRm2@IwL0?*_rPa0IKFTd8>TGeFfKe#T*EoULAfxt1O3E>E65_;4U2UO zQ`>MeJV<2!3ZIa}lnaM;=3Imer;zX~>{B?0MB*-ZlE|DC-XPJG3*YI&*h(FkO60Sq z!iS_8St^NF04z! zuksohwj+_aaHC%ChF&7S*C5>eIoBKN2rm*VF3cnyabaK&#ugWb5vgN>6^I;nHdxWl zwSY0Pz%0GlkEJ+%VH`=MzVI93#D%TP%*jMp4VbQfmXpp^@=lMLz@;pB~4n@%`?ljbg%&&{!U zgRz1Oh>h!l3%sN;1%6{u+L7dO$WR}q)i|6?!i0KAK@wH4E^`OYedFb_}XdCMU)F05esgG z3(m4HaThGCqG z$rD^S_zLGeAMek@Boc)SXOY^ta1M#XUGO^Tjtd`OWp3j_<8Sl<7d9pGIUnHyams}* zl7b7fUDJ+hGjx(v%7xvobFRUKDa41nVdxF5z7Y=5-615huZ3RSg$HjkKJD4>(0qq; z-EGDPI_@%txNuZD$1?7OQ=;41~fl?^w+G6l7AGfa9_EB8i*tD=R% zRXlZsLkfrUcR%@VOgND&!i6`;dTGZLuHGkJyf$1;F5oL*j-ufzJe@fP>k!$-`tTdw z6JYLQ+BW8ai%2Z>g&w_J_)zyJ@SWGR`W0a#;=eA#(R%qrIFEFvon&}HFBd-0%T@7k zwTj5tx}k?iJHo5FUx#f=hN~I0V}rej)ae6VM3xo4QcCkkI7Rm)n5Mf2{urgrr8zK# zNISyjud{C2kAta1o|g)%ln(cQUw15AL5@>Lc!0>Vhha<^#)|iCguBXy^S4KH@;%z& zs$98nWx~y{8!3SIfjfvS>xDJUYy<^lDE z*0;k|eLN11BC@Otn#(hnC>J(*CtNiP<6H{gtr+gVZdQT)h>T%8+(TqtY48m3_Y>aH zJspO>tJN14*Ijs>I9QhteqJeD`%9UW z>HY);Rnf{#u#)Z;c$vuWP5WSQRn8BzZ-8bZ?Fj4WZii!aPlWSzUj+MFv_8kfO+=P; z!;8B6;OT1Y$NQXT;E@{P%JP8ElEJC5Y&$*!_WB@PUBUang>{)v_#(Kh0q2|y<_|p7 zko^+Hdr&a9m2sp_9=N6n#~!{O1~+9c;RZO0$XMB%@i-Cpz#J`^pY$gwE}XxGp_QAV zjmUTkySE8fwW%X)+Lrxh6F; zbzcv&wAa>~4OZ5@3LO10^YS!*&lDaY(&xjlU zFNC{w_rUwQt4_=%B6X~A43Tvu!V6s(FRm#**t#pnBja!QB%Hs8p{=Vv96|hjgA2QH zEXzKEzYuBj6kPQw=V8iI;Bg}5!Uwu%Kzny>+=MfGa1Noq6FwmF-hNj*<3?oJVbGliJMBHgFt~buEJDbT@q)uG$f)mgR4PMn<7&K9< zQys<=X(tIDAcuJ^;Dt|#v{Pad$2E~U$?y#E*MUKkwK|1ib0T%R!`ZsK;0Yq_2s4R) zoTu=7gNR#TKO)OshsBb#@+z>G?hd$0cQ?GRyPC?ij~ou*??J$?h_pEa7N4e-3vIf0 zhjVpb56e&2>Ii%4J_nxG-3#;0(AtcKAM0+1lZkBCdU%IOAHrwy9Ds-m2kP#CYl!qM z4F=BAJQ6n7-3F)V?u3VR_ra3iYxT{r1(Ed%2kI`It-A|eBC;+YEa}vjg`IV`!=H4I z{DJE!kv1K0rtXX3B_i`be0I2cok*QwaE|UzV3{Aa@*Z#@apvXuFuX`)S>qh`HIZd4 zKk?j;hzrLP+3ySBS>3bE*Eyguwsq<#`ysk`tu zBFjF3ugur#G=QUtlqbUdMEc`_H;A+oxPa&3MCN>b_!W^lURYqER$dYICsIBJrs{qQ z8W(Bh4!A;hH+(>3z2S>FcM)-6f8B*kboapEC3-t>jqYxEnn*vd!eUFgPF3LfA^e0$ zn*-rgB6a4%GsIkx^SmoueL-ZNcwqF;TKfaxN+QduWttxhCtqPq=FB{Jq$ zV9~W&pKHURMEWxWdWiJl6wI|wUl+9LE}X3UB6v}EA1t|Et6v4SBC;QaV~LDUGCW6Q 
zT|W5w2E9J)OQbvr?$iAWEWA-GkA@!;>01KatotdLYm-)Pf&GZgfjMvsk@X56>MnfC zt(Dh@pAhM%a5jwXI6-Ky1zgX4(Q z@xYL6dY|DJx{ra|bw363Zl``m4pkBMB>uj^tGYjd<#%Z1ad4dOeRkqRwo9e*yomUZ zS=gUQo2FfyONrD;gsXIS!%Mms-yNxDgZx5L@GyWmkG?Ra6<{o1w|VOb*OX4p_KPlP*k_rM^JULV#WGPYJYl1LvC z;V#`hFzA3@AJ){}3ct}^9b^s@S+5ITA~NPaDLmGA7Ts03L)52y%+7XT-1tu|1;0+?n<~hM@2O|5e{B2A$W+qhNcIv6koCFo{SV;RfA(u=oXSU1s>X?gQao-97L=k@ZGj%HI*{o&cBV-tS7dT1=!J7d%R&{;$w*RdW-pse2O`uX`d~ zrMnwm(%lF1{iZJqYwE79G5$o_x5008cfuXIdtuOZeOXvjcOT4mLn}AK7P{Nv_qw~_ zQQdv;m77|9EBs7%J4_+z@-tTWP%n4g;`KL?{o;esKF!UryY6F>VwPl44qN>Szyu4l{vaImk zbghoC5s~j&7xvOU5xR)f7w#c*cyGZ2Ti)aS8QccP5_v6|2)F3&fm!Zr<;Dj*&mr=9 z+?2sJok)EAb3fGUK#T6eVdS`l&!WR=#D_cK0V3`AVD3lM8O8AqYv^u=n~Bs> zkC{J2+yW;P>5mhh(aXKC@Du41`{nQ3f7=L@>Jx4WU~Ln0zOA#W&U!jq>g=Giht2^y zN9g=s=YQJ<{Jo<8#1Y^-ere63l!L##qnwa$WLGJWZ-P`F$kD7aU;$_hP!90IEmX-| zilUsq&d%rHZ|Vr%5FPt6my{5k?H~&SsSJn=8e zoqtj8`ipWm<-WHgR9Bfy`Yo7( zOD{Yg?}|{-FWgV&ev=pO$1jRd<6pS1evVLSauDgaFjYszl6I<^vhrUYRYNsW4WB=( zqA)YDkpQK2)PHbt{~vD-j&w>L4h{T^5{GiT-8_)#|6gThiV^;kBKhb4mRtXSc2g=i z;h){?D%kN)Hq7v!RqpsNZvQ{4pMTX3{U7!h<1^wf%D;*E#|LCQM>Mhi!vj+OP5x5< zSk8J!wC~i*@~20n{_uD5<&TjW|H94RPSZ{$+IO=7& z0oC+=cF7mhvpvPsubwYgpq%rheV@H&`wq2AlxT0)ZtbgU^&bSc@0UL~pS_oKr+twB z$|m*yJk2u_#wH|CKKA8X#@LLEj7-YG*o5c*k4+d6KcdHocst(%|IcpPF#VtSmz~7T ze=hppqFXx4a|MTx^XlK!#=#9_T>QiOZ;Y#4XH3LGo?mB_%+Y^(b7%w2bp<2*Cq?}8 zUwxhV)2vm@7V7U4s`GVS>u%iz0>+WTbB z$&LFl4}$aYb(O#0ui@i7F~p~B7mt^CP_D_na!ojw1xWqA?-b$WRxUWu)Fepm_iNgz z6sM;WrY7^9;TR^QH642sK2TK>EKA{(Rcdy7j;D z?@P^bhedf-_+OhB*D3SdNKEAUJa^##{N|9!EfZS<|DxpIpXbl#t3D5de`ih~Plhvt z?Y^A+5JEvFfuA`k;68W!WcA#BUrygYBZg=j_27B?TD3p^{r~!uPCEzlnvkZ<#6|*? zfq%Kli@oPJW*ZOqZ-IeO^*}6?_-L_3_+rC5X*qN#h@7|)0rfuVQ_YSFFPaahl&YXDOtH1g82>d+)|4k#H zoxcB@R`&nQYV!QsH~*02;cWYF-tx!-dGh|dBboQ_`u}pM{vREmLVx+H;7_Bak58e9 z7v7%xQ|pG!I;|I_-P+Yb)(znP+5sQ>%+O7-8n|9gM_p?&F$e+NkG z&zE?yKQd2VY@ZqAug~~Aubv}^Y%t^VPx>!2PHSHlphJwPHay|f|5N{Y>iiG4{C||> zi_Fh|`SRt{`%{ElQTgXTylMOQKd6>Fcab7`{PoqpeL2#q|EnYFf8b-(+hgwd&{*!`0}KBh7xuUB ze~QcFyptNnO=`UGIQ^q}Sw3g>Fe(_rc&aHQ!SW{Qf9{Cf5jmxTwmgq(Pc8p=Tw9*U zrTG`z*OsSyvH}*D3jQAI?H7vTRCmjC1ai))m}U%X|>lJ%uqmMqzx-vYAn-=BN`r^R1vO7lNmIsdW3UzEsl zl9ASiwX%m6l#iokuTwjgkK$=3Rs%l`S8aMFcUCH2r#i9Kn#Iq!aeCK;$d(@k`)-w6 z_+i$XwU%tsWblKeo^DXS2Ki+UCiFlhaS0t{SX{XQ}ja{fI)L z9qK=7{cC24`8Q+hPELBOb@Z^0>Z#fjhaBmeFK|pqQI(~G$?CrAxzpPCY2~Uz?gRw* zrquau*V%Um1n$g#Y4N>{@&}oriTpOlDw`#owu%5I`pLOjH;D0Lz7e9Z8@OK?Db*B z#kCR_ukYAyVeYZzrzhR~Y~`gx@!4;_(`tH~N390W+I;8Q(l@NT-c0^&a^(Rbfiu2t zKI2rKO3U+19@Qf8di6R-OJym4;EeCrw+@ZZ7h5I%qdbQ3_eL5Dw|?@;`iEP?;vZgL zzUIhJWtOB5sqp!^^xR|T-8wtqR<5JRP7Z(AaAxamQ}fRXQp0}Oy!gY_Js+i?cVziB zZ%faO>%W$&x@OeFFE+Q_^eE5Kqwn;*b~H3!{zkvPG1&aZ5_ z-?sYL*VRqN85yUts1?TM8KLgtmCgJ?k{pNM`z>^M)ymdUfydsza(Z~LJ=-!%l+CfS zOWm+xB|E(yW|;Qjhy~>bY&3N~l($v;rlk&ru6*DAq2)lA6VFofrWIeiIeYPs=gr(W zxzDT1y_0e-JF#M!%X;?oa|IP~D!wa*5gzjt=?FOP!i7x;Ma;KK774k~>1;mrrQ@PpUF5@)CPJMH@-&F0N?3b@GG_`g$I@j{ShC+rJOV3o=czWxcjweo4>G#72 zOMLmKyk2-%KVxc+x_LyEIqSM8g%@Z4<}^{*!5G_eyIy)&RSl$@{prJ zzudjhul2^Lk|Fc&{c^g@(S-xYl%4wari};p*S&bD-h+iL_GLN#^UiK>#7&5uV9IVO zeQ?zD?TwpOePoIWnpMggZ!Q_T@JJS?xwLP5j~#XQe{^H)+02w>B{RS6vT5SRsvGT# zJiWTDZh8CY$s%)ZwR^U>UH=xw(k_=h@`q{Z852^rl^l9=SG)M|nQe=FdhJ^L#fx3u z9MV0<*~e#2W^C=TVb+Z%>+1bzNxJn*l=H;>y;&>fFez95%)M*Xw8QnMZCIUqccVS4 zv-G;wH>}?eL3;`fKN?VOU(u=M0zO~+abT8}hss{qS~l&2-bmsG`6le0vhUKWKC?RyO8=qpw|j;UEjV;v$e4l&q0XS+zAzq0c{Ts-r;7qz8_(>x zHRx{n>o;%ybm&s+vQO@AT0i`w{f*`on%>SnIjdv)_U|{BSz6`t3Z>@k8NMLiJ2cpH z@?Ozd?&K}?9$3=fTh+So#`JoPygi;hy5;LKR`JEI-23u&FGoUf z?xPi zHjG%da`KRNojyF0IUw-PpuP>let5k0c$`z#ytT&uYOZApxpQs%8|_1!VK&EaPBwdo}7qS+HhE-gF_$h?e=Eb<9ADbdT9Lq 
zMJ1iRqfZWBImf!=$FCah7`=Gxmg(KQRf(Gt6yCA#gyo;i4cMIB)~W5RfoJwC`>{p& zE8k6V#toga%5ktsla1fb{lYW8<*osXx5hgPS^~R;S8v{5?dv$d?BMnhqua%$^{DwQ`WPBg{QpHcI$>o*$4X0W{zGu@OI3ZXZs)AUR<@(x8@c1 zEp39Qwb*E?bp1_7zF#vgXLBqlS7GMlrFDA!uxXL6<&mF0A6qeVLC02$E2S<=AGP_l zI`ew%y0dJ`{hilucUou5f3ixam0jj^OgcMj#{Lz}x8_J&wDQ;L*EY|Jo4ICLwpvO3 zlUAI{x@}4Hf}mq7_jf6Ae$3o@B^FPJwuQxHTR(YAfuUR6xrYX|`ndnC=b)Erqkz-j`+0@FFdgYweWg zfmItGc+|92#_2W7zbHSo)Z=Zxt;)N0#|dw%?XJZ$N6eYsdqJ&*okv!6Eo)r+t@*dh z&+j!OefQW6F?;%z__WCAeiu6*`RZZ!1v3*S*aw>DU7a$p>L*897n-prdBgsL4Q}Rk zEb8>==SIgS&26$_;F52qM6H~&q~7WS4J{>Z1Rb9qy*OGG=-@0kGfPZS=f~av!}hG} zqed*<@a0dLHTM?3Y%<0MRxSI)(yySa(#U|7gCEVm6ly6t^Znq76Iv{3U>ZMj-KY{n zi@qAOZ{EzMoe!67w&qmj;qH;Pxg&aL`cQG_$gW?`jNN#zOs?!X zPCPETt4pr>Q<oZ%6{0S4fJeU@=Yi)&n z_rg=N#NYX4!RxV$8uUH8twc=Abr&cA*>yBvJ{qy{$S!-73pSbtrv5h&y z2mIK(a+!-mlYg%H+vb3Zx7}IZSyj2f@ocd_1Sa15vE9?tXEVQ_Rc_v*vDd5K-*Yhj z+ue_PB(#}Spl_+j!zoMh6>7J!)cUN+W5<11EAP{z2j?^oeS6~Sx(6pN4%s=j{f#Zt zhh+A--|F51$NOy^H1hf2^ry8N&Z^St(HG4p3{Gr6efm*9$)OS>F&Mh70#tE*?IW460_$Q z9aMSjP2aRv17gCr)<`u*G)Qx_J{_XAFML+f7aMaZD9^bqA@Ah-Vs=662hV1&d+Vmj z`TC6Jrr8Cr^&Xl1N7eZB#U1?KXTb4SOw(E&xOXS(l3isAWlimzM zTwI;)j|IgEIqeS*{w(pWL`FOUJPH z(mlU+*wZqoRARQF)y@Zg9TYIK>zF)_3q9tyn^-x_`1JcRD)Pu&cb9!L+B;3t99!BB zowwJy#NB)Udli0|VLa04=RkYK4vxq7pWNM__4wKBIY$;OGsswX$iNFhjZe>Mw6|cJ zYHI^5trq4wGBkNxgNM&Pb8M^-Sia-~>-)u~Wxu*S$K(BzKMhEEVC(q7mvxEeFRH=6Cp>ppAnBKbZb+zKpPP^XgmD(k1uDvBAcJ0fSKDFEfyLI@W z+iGbD>)PebbhUMKJZxQjEH-9(Z1E*eAAcXTVqccHA#u+}cAj>HKM)odVEQ&+Y|P`1 zDfNafUaoSL+iAAMT#N0Pd`&G{u0qQdHYcXEbuU-3%XdxalgHI#&l&y(!kOxE6F*b4 zTb2B%&D5+{9xS)54l_P@b>fsPx%b8_4Dd|3+rc>}%)M@yaqm0pPLvx|I(<=~H{j#q z-O8G?82851d7Sz5-TYbR9?Q9NbWdYq*LObXayO_-{_r)*+iGdRN1^p&Cd7Pt|G}rv zvaTtT^^>)pqMg4>$Z^_r>~6;ns@PSN={0qr`>U6VnN0ojS$;j6ZFxrSy-U)A2i2)o z__t_FrA?2sW@%pN_*m2ItESs`)%ARC>w~Nb`?4gI|FXcjscQBs6WfHuTpw=Px7ZPP zF8AIQ?MK@`54WutK61z22ZL1OV{d;^Y30*OKP)isn6rOLa-~j-7yPgGt^~fRD(l}+ zmVgw3poW6@EQ%elZGcijX-klTVE`!uNX0E_nv!CdOp}(Pq5%X0!ca$nsskqJlGp5x zB1Hs(Y;}V$id2*_h)Z3nQ9$A_&inrNwJB*Sh{)#m>&x%vEq6QTf6qDh1*-JotbIPsxVf$nQx)lK&)lEBlK+Cxb}lXTfaY+ zQ5J2RJCK5FN4|H*zF*Q_HZC>{6fPqD`t_^(_~V@@+9ja*0jl=sR<4afhMm{#<`Z6x97&j#D_4I4%{oKB@qcZ_{` zT*9=K4IT7JH=-ae{_tu4xsC~)$jtGZ5AS~Kt!_!{_D-AgWPR<-E{3SX{bPh3KHm2D zO+$tZaWpKueM7!Lzxn>#U(iu<1c@G_m!s(Oi)~%z3CO`KT@x1iX!qM}-^Z0u= zyf?YC0f^on8~alARK0BYbp*q^X3ZLD&7BE@BH}x(UwZ6|u_8rIPFE0|WU~NoyjAvU}a=c_nwfSZ|#2N+*ox zO8YGLkNL@I(=#&#e(+l}A71bR%eaKOKSYU1kB9#p*L~c5n+lhFd16_W&%D0#$A=ER zvFE18S3Ka}_l-Mw|^DNhW+vmKvAo073 zWtw+(N0EoO42XMh>pQwrX~Q#fRa97lpFJ}bN;l6xx!3-z8LI?4$B`ZPSx$Xreeu(; z57@u`zW2Lj5ufy=7>+FXcmL_dM$O-UO6hE})f6{Gty#Nv&vQ3LKW|!ivg7Tl727|5 zd+q-GjXiIkl$P+Pfg8yp##=dQt!LAw`Qo2%wsej9>MwpqsQuy7@$0`B`21|usZrWz z(}(@@`%|f@|EV8%{bQD>zH_^KPcFFc^^&Jo`J-*mEnL;*>&J}m(+9M3zx-=G6EZW3a z2JW|Z@{W~R?ioJU{CTA}Epj&|iOAj8lWB{G643~;yW=gkwL=g0G|WMzRrjMs6=CK| zI=6L@)c1$aQVOoQ>wU$6ShDJ71N&WFC&sq=uZOQ)fw}?%yIo9-Q7eA^VD1yEIuEp% zf7nFQP7JI1vO{1EN!zd|E<{rV4pw(2?gP*3Q;qdPPbb;?3WR})+>KXqg$`y6Ir>^` z&5_mKw4zykNydMdW&3K@N6OPq>T>5MWclRY}C9|}a*HqVldRv4cu$@UO+H+e( zFN%L4dClXE$S$7Qne4cCQLh67?hVm#KwwBAmP3sd3$AhX?{tf8^Cnx*yVhp}k3Z+; z1Vm;IB=_2PEGdtKpBb?;N1hOS*FN#eVcnP5y6t@Hx25bx@ghn+_h^@X<%Zw{Ej;y~*42j+OtK_w|bZ z-SBtK=P#H)UlVn7$&g!re(ve7wOg-qbtrkX?&l#9-fh3^cyPgj{(Yk@%c5QjL|iBE z@0gmu>b4R%$o9gSZz+@#LVvb)lL%Xai6 zCm&OIr0cwj`SgbM#PN5fx$aObYQ3(8p_#;RE5B?1YDb_Qf&X6!(1%a-FB*7>0HIMV zuI5CanbH45gm08~`tJ>Dp$1>&X&~t9h1ASLiN0_=))R7ngao=1kSGs)C>`wST1XCS zA^#AG(h8EPj8GZpw-Eg}p?Ozrq8PW+f91xf9setbJ=M#0{M+&WJ@&a>e%j@yU4Gj3 zf8Kv{JBPngU$wr^ui5_tZ|~p`pM%3Hz^j&rbJ-tC|J12dxhYeoa03PmxGH%#ANnZ| 
zG+|vDja+tB@o_Ht5vPTxJ48R?yzugbRRbLU9C}so@muMqvP6?iCgZT0mcu_@uP7fE zO8?}^letNgCUJM(c_)V$Jse{AToFEgFa4AUtjpx(YV_#Q9AXa9b>Ekbmy1ciTrTIw zW5R?9+?X+AIK)lj7>2>pquw0iEL{?w&PD%chwIMJ-_W|2ko<(>A5K4FJ8`&a&KJk} zSq|}@LRH(`(|M96tkZ45C;nm1U!!_Pj|Boji+rq+IqUCt#D;1?KkYcAzx0^WP7~ee zJlC3jJ_BDN`l-&-aY6hlE;%`wqwT4CKSVnsPX6KO57AEL=W3;&(%6>#glG@R|J6i4oqt2y+2n`jQ~PtZ z(jU^_v(ZoIv#W*v&p$cT!v0YF+v&f2_7m{|Im8S+kF#S)vw`BcDCd4x25Yv!DY{M4mT=t*l{Si{r{qsTiJ=q+LiC(*s zv|}ADM^lJ~bDcNQe*@^JE5-Qj$;F0Tdi{@(9)EOxA&925hp#GbWTgGO9f5WP{!j=s z`e<}bE8Wh%H&Cwd#GBtk^V|2p;~Y5r8lIv2mT{#RGzx5K0EhOiH3D=^H=TFre_SFj zQ=)y*K0|%eH@?vSP7JzCiS|YN?EKWyCl)SQ_}Iw{^%0gYUCgamwvMili$+feh98#((|T1Db|tg{qxU#E-Buv$Fm)O9`knjgZ`~t z_9TB#w6%;}%JXpfL%c&SH8u6JK9g$QKYg}D@AuIAtCy1RP}^<NcrdO^cfZ%!`9Dx!~1`3&9lMqXREaBaQs93)9c~= zj~qFY3-uo#AJ5~0rDq&sx^W2Y#y_(_Y*hZaQ)_uXSO4_hW@u}T|E1{vg|%x=hVxJR z5AlDt{*#iDxQoUAKg!l#(f-$6;r?O&Pww~a<(ktz9PfMgjO4>L-(H&D;S=KCa{JzW zn@{xme$+z@T&}TPV;W$_35$p?)t`{Sba_wMX;l_0UF-!}|~AhsuQSTOZSNn$A`~lporM_Zhl& zuKFQbTJOW_wysalLtE=|p>|qd52wF%-uawgton$D%3Vkb^&ff;bgq5qc~SU2RR8zv zfzuV;c&WYs3TVB?x&karl1N0jx!RF`gR_7zOye1Se{%-{ZhdzM{H~n}tpFaE2OPo9Z$A`l=;&P7t841@PH=i(Dyax7o+7&jdy3enRbz68A&e;l3*r3IncGJWgu9 z#_96?i2~*C=ga>EG|nMKpihbNg{Y+_B|yFqdmTPGKqsHC--R^N{t$@*KmB>y4LHi- z%TlCIm+jhAmLl61ecj9VHHge?db=lHOUTo{9Qgy0e_vC68hV@8)SrMv@MT_w{0+kr zH351wpnPa_7`$_v(yDL512WlE{sz80;;5qhIPjGZ9H>0By+wt|7sDe*<&r_W1^8Cs zTMH>yj>`O!D^+q%8)H*ZTGQ<9g<6srq$IR*JC(L>`T(TqA@RgjgFKPM%6 z=8Ork(r8AfSLSCaa|`m-Dbe%Qy67>fk=>FJB1WB;nLD4!Q|9Mnt9AOjTQq`Z^K~iF zMTPlEI+aG9r_{yf<){h^bOqV^SXDt@l2Vr!S3ESD;S18U;^Dtqe0X$fa+X@BD$LR9 zL2v4~f_z;;u9}&Zlb=;EkC7MX^vUtz1yEY6&d&lz^J&e-WBu4B(o>t6n1~|!9Q}Mo zKVJ(b=jG%N9~RBzEAwzNB}8^yV>i0EaRqsK1^Kb)U0+y`tBXF3Ut*kuexey|VS!4m z(-jnsEmUc8^lFv9s1R)pMov*?ZjNe_dj5=phtz;Pam1(*nTZKQ6B1Oa?4eoN(M+x~ ze@>Bd4qrSqIX(n*h(=s$gOlcKlv90SkxoA`Kf9n2-Sfg#r&bje0)o`!LiJyZFrMnH z^unCtoLu!BwJz-9_!1PP=ucJ`t8Oi|8Q73Do8I$N2mQ)9^D zn_FyIIXox4ulUon#5c2o!P07hoRaqa8~DOlLN;{M)p|W-8k|6m=cFt3nz8egh3Zg4 z8oged6d&Its&UPt8V6C0pFU+`{4mMT#P}uw2(=ZR3hLC!@hw_RP1dND+8L0e-_Z6j z$(e0WSI^0VRLap>a(t*wKz0U0@nOKECgq6&}kQVh% ztL$&8(gs;!L|+V|+UP-7q}3J_LXM|SzZg9*6SGpHACamh*&~%hhs9=QXAX}|$W}{Z zGe;>$#3o3yvqngiLq{k_DHl+gZw+x$7&(`V*rC~pBNJ646JwptA^O&BZdx_0?XJDS)&qxY-U!hR6Q&!RyiyoK{9OEi0o0ruO4I* zl*6)zN!7z+M-0y#6^om{Lt`_Qqg1h3iKDU-lnKg2_3-mS_Dm@WQ;W8QsD(13KxlGk zbah)#@a^Znf|bP>1EEpm(9j623HaZBAY zx7?lX&TuQ-8n@P6;x@RA?oxM|+vH~59(S$V=dN?3K_-$&v6GwA%^7BeS!33kOUwqd z(af4X<{GotTx<53>&$+0y*XeGnu$ed5n03*BEw%k1cZ34wN{_C&g!?;TLab_*2~ti zJ~qITYEd;)Evc4Or&lYgwbh2|(rQz+r`lWXtM*q1s)2>(>{`3QUTQblJ$A3%XZPC!cH$5@7>C3mbEG>I4z0uB zD0P?|9*5WAbNC$r2XTs=j8o#2In$jAr`Bn3mO4#NkJIb)IsMLnlek1K#wBsdTUL zpdATXlA$dH+R~ymLzS`0Q&m${Ta|9juqvz?tJYd#HCT<-QfryjWM!=$YmJpkP&%8z zDp(DxWlLBCYh+8=GSmgT-ILjbQwbk|2LR*YYZqwL|Hr7^atG5a5 zF?P9KV>jAad#%0RE_B2=mTza4pwe~t*CxYNt1g(g1NO`@;0Owk0hS5>R>qZSQpxROI2m%SJ9a5l?4*k$L zOLz?_bF$6IP%WX@P(7jOP)(uuP+g%2QEh3oLDT{Td5sbCwuf4t3|?zWc$-5lP7SX) zb?$nqJDx^diAiV@n*gw{Q8#3e0xd@0gi-flv-0_(uSHq^l~P|l^`S_6*Dz-=w; zLC`4zk23J51#c$sHys6tJG9;K2)v3F-;)M-mo0p~+;bH3dv!=!C{x3cdD2 zqb1N~1GLx&{biuJTIj47+A4ycDxjer=q7n_1Hu} znaZ35GRDFJ(~+*_XBR&|+CeuF^&sy+Z|Hc7Fyf3U#w4}KVD-|0mBJD%Cl$g95o{R) zYbF6sGFY^9*fa&Knih7=0NhGp+f1--9@sZ8ESwKG`eEe)uydqRSSf<7V}Pp!_D%+i zmkt}HsMG>w1MFTYET0Ls&jah{J;Qbht3twjxQ zFMPbksK-1SgqBdN5gJ1&EQcI(Y6i4M1M5-3+mBM-g3!?=Ob;Tbvfqri$SSrPpf#mU zehIY--e2`ih@=M*Lnb_?8j}}VyRgX$EHaDDOt|mT%FjI;Z$5Z8ZH+5B(?WK! 
z^YF7;Es&$L8tj~*2Vv_r&#(YCJ8jP@-Yf&T*vzRl18 literal 0 HcmV?d00001 diff --git a/crates/zed/resources/windows/bin/x64/conpty.dll b/crates/zed/resources/windows/bin/x64/conpty.dll new file mode 100644 index 0000000000000000000000000000000000000000..555d6bf655a7cb0427b630f1052bb873c837a152 GIT binary patch literal 98304 zcmeFadtg-6wfH}I3<-~Tf-)KvVbD>cK}`v2VvNj?M9#oOqKIHchzb!EHH8^S1td60 zVLXm%HIa@4sJ^ zIgkBXd+oK?T6?Xv_o?~eN=KH%;mGD+JnnF;u+{fO`7b!>6Tfu&&tai;IXUz z^?kot@Ycc4Pfh%r*09g>GSAZ*4tQQt*ZrQ)xxT!t?$rBLzRymb$#qi0Ap^X`!!he8{TvsqKI!^Io_&s!-2*d^8s*r}o0ZnvRS$8^ zSNBaBa*_K?hohf*mAX2%lL``xvolGq^Cb$|W$l8gixiitif3myhEQMCbs3H-u8!3i zjy=-Q$_&SgR=Yg#*OuXEB7ODP3`c|Yrq^$iFT+vDn}!QB99{*eot&TH*e3)0Eq`YP zXEz2(`|mIKk@lqTLJVntZil1pta-Dp4qokW?3zOYbvka~dMGUuw_g4~%gW~P4&sGi zOzN7I4v@;%c2g>*OIP{2f!lDcZPhaM$r@i2qr?0K2(@5*sB7eN|7pNSsSve+J zPhVM2OO97*FY}bhpBMRWhok$+pF12@`Mh7s^CQV;=YwaeGB;Q?bTrU##QD{aRr^<2 zPs7scbH18ORD$leUqf4ih+Z1wFuLUssPmfIZRo_swL^b|~`)?&Z$svf;Ht{b10d}J<) z$KwHGbHM1-jhA&JS2wCWb$WPHhSXUoCEan?jLZy2OWQiK#x=dYrLFNO{n{?QeA~i) z#3`^sLW8873KT*mBP59n;!scY5iduLGnm0r5Z^+7hd zjDvb4!+1>(x96HSQ1Skbj9^|XqfF8^bI`qVo$e{tK>+RbhBkdXCQGfQx^YO5K1VOw zA)VN9D0q^->=1Zax=>3F#BxR~<}7Kek7VfOUxo%)eJP!JWryC`CP=KW*M>JMs;v(% zuFQloy&0kZ)}z^#GJfPs4Br;%k9*4HQ+=2Frs~V~*1`^S!+H5lbgX299%=S?dE+;B z>BeUP<5N(gHP=a=mQ7moRBlQ>s*jvbi8~hx!rFo-drQxJfcxO_0S3UJ6Px3%PDh5L z-dA>(*5Z+}(TcN9$#570TQ&vtaCb(qNPdnAAIu1ONt;Jn?7aPz*)k4S@W8tXa?z@K zV#DqCXIk%%O}x(`)4`K793?w_YlY?M(SXNmZhMpAprYVuRmP%0Ksn5Yay+@gfy)TRgjwyzi*vz$ZDgrWam2Jy>ZvLuR%<7%z}V1kx>h; zPy=o>smseX^zCFRSgbSG3XMvyR1%0f{|JzN<9Rb%hVVF5b->s}!r$$L*w=Apju|kF zl~g75xJdmAscQaona^1n;;s<@p!z9IlvYI+dx~XB8I@J4En^$y?oE{2YW`Hpg%=e# zLPvAa9if3-iXB?Bv_sb4C$e^#*O8TO6-etxlk|&JQou^m6G``^l7657n^;ImsQqO~ zBzUZVCf&B+=rZ%j6FXTqx}^M3BuIJlz*oE-#M95^shg*gHr=>W(Vkwi#XRpWdTQM7 zxlcxR*#;p}-%Q_(nKQ0p4)3MGO_9OIbLJl8wlCt#_8aZKP9L0@+~Ey{DPo7}ZGIK> zw}9 z{PB9!^)+u)!LOuX#Q9IM#&RhB2say@W&CDHUWo7o-FQCk`iBf2EXZOFK~Jg890WdC z>0FDIx^?42VK6N_Ll^5te?jS(wKBo?Y3&)|o{Z3Ox-mTN8cCzTUb0hxFhW4+L`cbH zyh6k33~2~5oeMcDE);-U7sp*69h9-J&bEtwERV)MfmH953V>0QR?j>_tB z*QBRc_f3CpzFohnRp^CbY2l^xGPh)B+*SCM!?8~Awe|Eq^nyHrD+;eLoL=;;1XtvZ z_PFx~s%dW1TDDO*0NZx!a1J*QOib4@#%Z1Cy>Zv`sQ!BNq?u%IY15kTkqm~&U>H!b zZ!KbkOq<9ANVm$IF18u@Npg|SJnT5UbK0oKic#(!bAFcjP`i;;O4gAz!jN#e7!$xW`y3ZX=&4=6XLp7{oF<& z)cfWMp&A{~Zq&J&2Q>|)`%J^tcW`C;@tBk#di>ObsKCkb>@&YU^&vc>d0!2ax(>x% zn-56e>4vNSaG6Ew8R)bsnGMW7CdV&?f)Ae=RujgKWd}1*29^#B4q0~4>2)+|ErS54 zm49u<`i@q3gIt##IyN(>NxS=YP!Vj5F8e0lv$VqGL0kR?zwnyT@52x8^J$MaJBm+) zaTs?t<#P3Bb;aLq&9iGe&c2>+U&q*2CzzMkPO+bt+Sd~MdQOG~4btC&=C*8Lo)Gjd zJ9Hs%1&fy*x=60>WrxPgHGkQmd0azPt@AUjlGoZbTy0-jnW_F=k*INylX5pLJ2XkI zS1miVPOdfRZ&h0BJ45H^Xsw?O1w>;#CF8>@9F80~zqV$lv7XN5v?0l>x06mne@Ez~ zmYuhqM&h#Pc$9WZ$k)*tUS&1eOvkLWP-C_|Nc42r~mJ7a_t$=oBq9MPAGK7r*T&nT6{}e zNK}jB`x$D{gBvX!IKeZjOE%dO=y~%+C~NKgBJY?7wW91!gEx5(S0A@1eb+0Q^tkJH zpQ|>`NVb76DNM2?s;@3-E7@7H$y|`Ay`}B8Ty0I8P(+i|pPbi;37wKhqTjox`Y!WL zQMw`;u$G@0KseFC&W$3Jd`|xXs*f8DLSiche)|aN= zRtp4zRZupeP?&*gI&dQebaSGZ&c|u#zr(oI-S<# zKs4LxvN35InSzIPLAUC!lceMO;42UvqYC?@qmK1wnKjWh_W`TlILe>Z?a%tqpB2-y zV*aeX{;ZFxOFq)G=7Lo!x*%MZ5$f+R``apV&o7f#ryCoS8j0T+7I)qIsVEW&-QaGX zY9eRVL~dsJ_mZf;t^@J7G-SL{V_cL=6?)W{jTG9Yx9rxMFL%mlPc4`b9hFrRU09%( zzY&}vOVcartIEy@PNpZx^-WdT+;N%K`X&(dsQ%QHZ$&m*uav+481J6H97o|Vg9VwW4VWCmnK8fWJ6Wy8NnL`cS_z?mCnt~NYIba$Fq7- zhySn!i)pYpHc=Y%(4o+fxN8tov(s@b7>m1lK65x?2}oiueLEzo9)aW*f#e2(WHyj| zFBnRJp|m#)D`+%Id9Rgb{1P+*uRFx-@e|L+Bh4lH_aSiXWJ60n%uC-JfS z#c#)ls;BSBd@BqrU=l>|izZMHh07&&N&3jlOM_%aIwY5TJ0$6~{V&I-0h~1S#)-Kv zg_9S#PvWHWg>T1+s^|Z5d>VnJF*Ye3E7Q|p(bHim{|;DGJ;0(hF9yv*FALR#{gEOV 
zLA`_LKJ)d+lV}=1P3w)I&QDSZ`aAbY1pRsEw61d22uwo zb-h8kAPtnlbWl$G4p3A*0tyUyXYfjen<;{kd2p}=VxfVZM8|}b5G(4?%QwzDSBHcX zOr-PUwElA+8yGM)3OoDrj#U5ktT)rxSy8NHXMkGC)V@`?>U0zdmVtGmQp`c*yJaQq zyUI7+QZ1DJqL#)bah3qlA(s8jcojK~Z6pITSAiZ`CxU{@DlX=$@N!woMxJO>Z=dLr zi&Ed!QZu1SsH?3{h^cUGZ9b)M+(PD}JuI)@{$Xl)MT#C>C-s%=*2{)#Eq8Mpi24dz z9V}>NE#x(QAJHxvPN}TCu+m^;^C=UeXg#9l$SZf;71>LPYxiM|!zy9^dS5)=+!pFj z=T;rfB7FiYT&n_vkkqCsVwsMTQE1g?%ckv8<7FPVEE&&{I*sEPMwU+^xUe^}e8FTP zkh4~5ULx?`?~y+GjVlZM#(cNmc(>;8^##!^y-rR zB5TK_WbKK%@jJaAt@~{{2 z%Ep$m7rW^=>V?&H)!lFH=7qfgE#*m%{H;yei~Q~1chk|U^dVCfQ)r3&&F-cv)lG|n zQ@s;f?vDI4ZLKQ=4c-2LrJ5i%hqdq5Sz7-10&Q?* zftEk1P#Zk4kmpjKOL^9L)_I=B^E964@H~g-MxGnB{JU2Gpf=c0m;B|zI<&!IxuD_S z{eZd-Hsq>Der65F&(v@j7R?%Wt0yo#SdWCQ42Hbg7nc%kR>3a07|Yd@akqN1OIR5U zc_k%ep2}>qc$0F4D5pr;NZID_@ZP!_vF3wC#ev0Hsj?ra~`6M2XjL(kD1nn|K>MVmM7W{C#NpdTx4wlHCFLj%4#ij)!Po`0pHnJ$!XVBNmLB_{b#veVaRq~c<)Z=Zp3)L; zEPY1S$zs-)>Mi;Zmk|%Es6pAz{g{0p9HN?_=vznxX=R)FUW`iexp`7&l|S6LW{+?WQs?ICFPG)c{ou#A`|-yRcXUH#!l^zWRJV*$v}~*_RHgNZ{V%ajeFFns4SIMmQlUR zon^foZ@<)VmSiFtrOIPSDx06Xc$hEzRhHHw3d?1jw7oSnms8=m_Pr+EluMtS(2sC#y>giI>wh zEoO)MH&uK(omPSv0i0gD)||NOOe$EZTnjQ97(kNR5WWTTcPL`XnPJ^1dvKd%Hk{L> z9vC`Z?iY8x17+|s%dXtQ;#T$2Vtq2s;!zXw*9KR4++z3Xk423u?ndjS*oR6J4_>j| zcysI|OeVNau1pvmPn1ZbVwJ26 zCDLFPWzDxu$s68_e%S1}Pimt8qCG+BVt2M_T~Z3o)r0$>N6I%=8K|7ELVVIwX{gSe zBF0u}i8o?=o!vhJ4zbg3Y=*Zv3~w$KPSc|z{wB%FaN zkETk|mNR|x=z{n>tt zMD<$&#_PT*Cwx&4tV^JN5z{gE5@ z39h(VwC4%&P1HoL-DSK`6Pdp~P}EivS=5#CFYD3U4}D|#7mo93YqE3enGJd3m=3pR z>fsMFLT@M~+4l`fa=WN`o9iQmsvTudsV^%G4$#AG#Jxw-j!_qt+du@xW+gH zVhghz!yW$iPlgAiUf;5J7=@*JlNQv z=q$74L!`+*G`A~s<4(^Cia~*O{fJ32x>`jVm@nYzG&XjhL1l4QKOE71BgFjNA3P=Q zI`D=J&hv&DcTFcnTtYb99mdwU>q(MIKB|e3*VwGkR39#N1p5n${6$+7hSHEznTDBN z!I5ZwKLZE}9%zXN3qZ>N84pdyBfd?L;xBqV^e);aYZ^5o;H0`Gp1_I1hw8dk{ejPT z1q?{nm%ky z`0G;7W@BI6HH#D=s}nBW#cO|LNiHmk=`3RqKub_z1}GDA)bl?))41x!Bc4WewVc7C zY*--+ti0qsE3W|!T$gQ-3E;y&Y!6TC*8V_t8<1@=yX3cb!fbgZf1>|bQ7p`!NdbHI zD76DS{;RU~yP6>j_rzuWgx{TUpTit!{iXE$n=j+>?ka{-NU%V8*DY`7wVRE5V$g};{y{~{HB&RrZ?0S~t<+EON_5EvR9cRje<;iwQg z)0%%v&es2u_Wr;R4SA#yxIP71tpZ7?AIqYVN6BXD(g-egM|iQv;|S)oKE{1)RO zjBqo5q_7HGKor-jUetwp?~g7^@UUYOQwA0WOIn}l7q;{!(F%l0eIq6WB7#y3%lj^(-Fkeg}!J@V`}5A zv55{Drx*1Ei|`&lEl!)F=kPlU2R$Bi4HmwZFM||+UdBlef07Y;2d<~a$r>butR#|l zz9vXg{Cjv>2OlQRrxlDjOdq{7dW=tEGo^hd&GY8R%nYLkURRV>;>VN_lz?=HxmOv# zU}8VprDnYzds?}$o2|#w^lO=f`n6EK-v0J*ePEYAqNz!9t(BpudkyrGsNLdHyQov~ zCt-cot7rSZdQ4tf{A$YO)&dRX!Cl4Due#)4#<*PKwIV)#KMX{!&bM83+M03R`iQSU zdpvW6?*tMU{@X=f=@B0*tJ_7Hvf8;UzwtP6pE)iH${N(j&wDS#QOVcYR7olR;_n7HRSZ)uhC4^lndQDv3X- zbN$j`3pT?F0`y0R}^6N z)Yka8?Zz_Rq_@AHt!I6%m%kY-(;*aH4|jNVqde|9e${i&X{DNxit!3cDSw={TERayTa&S1Yhg|JRB_DQ1_x?R4^ zkiB|YV{YhuJvuR47o$-sN3uJKc)0ZVUH9lV-$r5G-74-=!WW=htyxp^=+b<%3w2pH zmePT^)_jl|5gzLZ4NjU-{!*#TC^_NVYI&{Y=V*g3s%3RGDW8SZ#W?$O^v-swh~r(> zjdoJ!BvTtzyi=CMJ7w*%CWDSF)9w<+v@vVC9!G>M%}uR8nid0=9`)nTht}vh5wS?4 zFLZ4jtbTv+a?l$(D(<=y^jh;JRw2ac(#t=dhtUA{2{A{T%~@-iL#GFfXO&K4-un-0 z7HuTk{G;Qp8ddycfQY-wxZhYcEY8*g3#Gkn#L1GkfT}$sBW%A1DGcgJQo{TsCHM_RP~!t-<-u{j73T(wt(YHm1LnNV z)DSfUfMo|F3z-BbYQw$I{6q+Yj(`Q4mU-hHuunHw3xc?}QG(x@D8RJ^M|hj}hw}H2 z$3irqWD`6tdPRQhXOBuz;w70rE90)aaDEwi)9Frmm)6Yo7{^l(4GSB54taiF$J@gP zv$ZJtt|Q_c!wd6aG;d3H(VE*NZ$rKwZX`;zlF=L6x{~r1>mTGP3lXwx1+m^hU8DWRCT$I-yXX+u$UE8otQ~>!=Yl6z z8I}1}WxnhMxyJb1GGA`6f^`zbp9^)9{p5I?wq_zep97!PMHjyad~vPrzEir1-QB~fE+M`v0^C#sCs(9~9B$VJOB0dA8&=+J%XsK@3=Rhehxx3zF2* zy&fZvr7x&>?}SA+`iRGiDQ|7t5X9@7tEr@D4?1y`5b37@<4=XWFNxPjo@%4Kcg2!6 z-S|y0?~H)}U1(Hh1Ij4#UIwSiFag;uwj^WQS`pXbMY$PTRhO(o)(IU56%?VE$J=>T ztQ*l%DiR_dP+tZ+XEhaaDi#`dRc(htFn%d*pb9HYT@_mDEQ{xhwh)NSen5|eR?C|? 
zwhgsZnA`ao2xPOnbaXY^Xm$byqgjPFPots-4BY;XKyP9cro=&8;Ie z*D|77?bcF33}xQ=$#|=qBbq7(s%!<88C8$oE2CvlHgh@5P=I}s}34Rg|1u=01w`jAB; zPG_MOw^A>9JLNt>S4TrR1`j46=L5AWs zGM~Y=y1&^Ynsi3!1f3C80I`G*&7ihr8w7IMr4qf<(?llouVQlU_2->3S-G2JI|{6+ zSXk3`VNI2fP=o64I`M5ve_PsMdNsyAB~Js!Q^HM3cGpLKBjmc`s6|RO)t*4{Hr}Pw`IzGv#h!u#t?q}RB zK1GD4#inKnn<5@leqd8eU{gzAQwW0ZVpCsYHWJasY`>4$uG?(tDq&OO?X(0#StuC{ z=N$WWUp95Qm0|2*<;GgCr`fL$V^bqYEn_UT=pZPf^rT18<3gv=&VAenXanP}ubC>^ z3RT4vZO4jRC#+YwOfKEjitJkk|iTFf>9On5&P1vE4eP)AY`W8KhtNy(K>+N*I_8;r^^JvCv&PJ#9AtNzxuzK(c7su{;+;dWvd1f1TD$TqX#ULR|hQZ2;aFR6$ z%|2?9HNc&gIh0OiNvD|dD+S198w6I_G1DPdJ*9k$_ z0jWGXsD<}R1vt$gIm*bu{hEPF+_GP5K2Dgqn1|n1eGFdQ?c|LpvtX#$LfdW9Ia$^y zm^QYK5rn21-$rl)rap5#?ZzHTB$)T{sO&2Of9oq^7@pnhm9iCIDzB{gYIjpbrC^u= z)mqrf!;SNEuQe%HTbN)`c7a?eDR4?6b=q{clG%7>d)aAp!NPN%jprKjg5RAoylEa>FNpTp`ct ze=@a``zHzT2}rpA!IJ&0GQrit#UCj~Mw#JFLe9J3qoGp-_GwIcu}qljamQ}%%;K$7>o;nt}zEibgn@NQBY+8^^y( z`FBmEa$bF;azQ;SuW3XAcvNkJD~San254|4F+jxr5XC@1wQjsAWSFOB@1hPNsNiLK z%SZAClD*z~GfKD^kMwi)@Zllt=nQ|(>>;J^6ekx zEq^j?+?MeudVQ^!F3PtOJI*K-%hW6ty$klbUpIzH`={&1mn?Q!%udQTn+P9?yIxx_ z@FWaLFY+WS;a37gxr8_7>g8XAIwbhbE2bpOJW??j#t^l^jh?3B1FT8m%paW;3~k_^ zXsW+{BpQmv(&1u-q7MQWB&3E`q66+Dr&%xhM$|b;qR}EJt*W3Nk{(c%+~Zj#*U0_W zHR?P_w4cD?mON|iwAEbAu_p-b8wt@@n0Bb1B}}D<+Ep8sQaLQMO$s76HebdSVaTIQ zvO7JyxP!Z!*Ayn!kGaC6U{H%a1yhodLD9LMeaJ+@#i-8d`hnf`Y9bH*RTY_qCKNsM z6?U>gf#T0EuGenK)94>mj$pxUJIf=n4-VgNd?VG{S?2BHv4)u=EE* z4maE71{GFbN2Vt#o|FJB*ePcM2)-|?kh z{<(I0d$x4zKOnFfmkH$^%30vYtT9F5jyVBEwzF&8k?(NK zE5bX%QqkWd4wkT)i#cuhf@7t}_!AQwg78B_8(Yj7mrK{F-GymdMA{&#q~Y2BlAk?s z*Pq(RV05u%bdMgL@~^n-b*MQAQz^3B}o>4LDCtOabnDr2^TWUzeHGLmK z#kZE>aKw(~aT$-Y&(O6}z0cx(-%+s!;_{K-Zy+uPY{7KZcMZcxv3n)!;s5D)Yyr_*%;%YmeQ}bc)ZrfpUWHQ6IZYubK^&L=zXfKj0>&qtL5QQrXAy@8 zd2=8xGU#v$#wW<|+!Ptsm}yja{1K>7TmBfdVtzPTY|YqgYjTDEOss|dM(_6sBVjGf zVG{FM_Al}(52EEOATsM-oW!YC+rFjMKOCXe1$XtK{kmst>e9nY967o+ehHx0^q_-q zv_kk`Je0jUBbGV3jb4_@q!k?9J`E*j~je87{}~k7oj}B%5sa z*%O6qM zjz&m|+;9C)H6=a6Vk0S8veWGTjg)=(X)$o0tm^p%_rflQ#j?4$>o1CUF4QCEOv7iJ z7%A;1E5z@ZWE$zCrQLp=r1j+++D|Sfp%4DtG{;~(XD&U38<7>q0>?_zTPeTi^PAF` zh)-gL?~gv*BTX#ZyHDZL`5y5L)|y@fmu~!1V&IJR?Pk95nuO>!NJ#1&R^xSQZEz1v zq+Sf>hPr|O+|G@JDIqIPhn>_!ZrmP-Mw_S%`6Gc$GLT4#CJ`5ngfew8uLxyot2Xu` zWAuPl-HrHZJi{?ch+OZiUQtY87~KkzR}v}f$nUIPHI6H_+=tYven4baq4`7R8XjWe zDx$dnEeoNA2S!i#7GWL4&%aA$k=fG(1RF5jsMe)^ULj;^;O(?8FbrXz7|=;O zw42{%!$R|3DppOCt~|8~Y?^4DB?h^$wC^^ryzFY$R32dg5N!sl?%}hLp3qiU#|}JG7OH;hLWQIg=(^ zioro3T5->Nf{K0NJq5n-`y5q}8ypet$qybA?kT{Ze;e5F8!{aZ#_~D3<6NFa6jXdf zeMa8!k_^Xe-N-(58jZCg3u!UC%NUedmUkyhyxYpCBFv5X=6x8MD;9Da4|xKSQy0j~ zD{zdH;j_O0T4<8=GVgMEUrlD|6yamio}j;)h2}Gtd9DcnYy}PE6RU@}3S2d!lV5*$rR`}-ul_$E25+zu+tIf{p z?F~F|+k<(ju3nqJE4yC1txan+b0gJ*q+@!dWu>%WRC~F7fLqqlYvsj~I^so?C$>{@ z9px_FuU-h_37u*!NS;-t6-6`5`}fFbZZp5ZDBJxflB7R)NdeyqNm9|aO*B9aLsR7O zH04^ckU7kMQPSv%;rp#2WGX7aiD|!>-AP;ueZ&Xgga)Nz$td?$^9iq43bXSYV#?m@)&ncB$(hJ$uyk5zj8JbY->l2tjuZ zGZ#f$7@(M&7<@J58=0=CeHg{rQH4J2H(nG~*KcfaH@+? 
zO{?c{&4iD~==%m&csVJ?FU%pPqPNQSSuGl3; zvu8Uk8!rUt8!8#$#?Yi7t`OpMo5Li8N@7E?0VJ`sjv7QNzAPwuawn#^{{G0h-q9cV z(3hm1^yw4;40+jWj-AI#Z!@vOAT4;6ZX6}TJb084aR0dLcv1rlt8p=l8{Vq}YcM)x zsQ0%)#24dIPD?O}Zwy3k%a6N0mOD|R6wi?NMo0J9Bq5*AT}pLPcZu^BoFykG$*#0y zH!{*E%8Iz;ee2{THJ?`19=xi)WoP4LiA-*+;rt%uXQ)S>jAoLzf0*xE+YfBYi!o|p zR}T^`b3okn<)dmmj^|t$b(cD)fq}^K5nr#2BBerL$5kr=^WytW!uPWg>G1jN)7>^1 z9RSuQLDl_LNS11{Wha%16+*kWO#uRUk}#U1X#kLY78WpAYcA*mOd1?_%knGS>l6v* zH{|Pdra=RWLdRL~Fwm+4ms43 zH$CysR${)CjJqD-DP_U?y`IcbQtjgUP+dyk`kY7wSkLWid_VC?{J~X9s=XWOoba1>i3_)P52WYqbvCO|d<@%2Q_cbL8E>SYvR7wb zf48txF@e%3!ay85NSRuP2ohe^$t#3~I_5-c9-!dK5W%K)Zlj4^Ce~E1JMQ`|G%WhF zwu0~|fROqsq#kX#n6Ej%Q9{F03+|{mtKk4zec5nXad%oP?oX{|i+c1O=sFmZW#wJn z&Uq({>;hKa`yY|oQ8N-N?=RE~vCd7%o~b$6!_}Q%Acw=^{CO2xZzG@xv6v8zLYB?M2*DAG6LSFC>3L@&y;O$tNR8)Z`H4@`vu zeLqo*J=h-}7Hs3c;;OKLj?6wbd5hy^w0irdS`M)2_G#5$Yt6EaK_)UQBYUj9GP%)G z?a_sgQkNQC7Ftk&U=Jde(QRWw$mlly*c#dLXSL;{7*46HI8eSPSmHOvaMl3_9>iTQ zJY)ekF##M>I_|2rfxDMfe`Kg9hDbU+kwbxh3QuY1Bs|RvI;&}#O$VcptCX~cUh4Zo z>ak!KWZ}e^$3VOVp z?<$^J3p~`+C1g)QrPQJO{N)=LUKDal)eCJB=+WeC0jfG%+(l5qR+g^DF2+y6sDd+5 z$H(#{?rYgR?vIR(EzpH4$lp>`+Y@qJtlnZSIIcc&UhAS9j0>J8tmADzk`3JUyL@bH zPbhZ0kStpDo5Cg=8Nkl%qOoo^HN5i1=VFP2%+}Lb$}jXJPvQacN^qrxR`{x(APPkj zRA$aaMy5s=ej9Z@_yR>EnQA$+Wqt2tD;`(H7yFF@-56lJ;5W+Rt|>Ab815VqD2%uz zFaJhOt)~e|Dsdq~qI*#%bW+|izaNpbl3!9RbwCLaTc3zJCkhl8F(VoK*JrRxNo2V> z<3f4K9`xkFxH7w~cn5-4p}pu0ZUP*PZ&;z}}hdp0Yw4L)52W5%&bOM_l2OV9{ z9h*=F_~ZCiWD^SJUiLe-aVW?)77=o=B7{{R`GG&`kNdUiO;n% zE$0K3Zd9xlB^hFa?q9;lB6MH#=X9bQ&y&#oFGvkUDprs`cCk&^g!Qr$zqqwrsC|G% z)+O8vtsrNepKrBjy8Kz8Y1zF1O@B;9>>Eg|f23MyS~j~wHeOk8$(}1h^wriKDl+fS z>rJpahOo#qr9r5Wtp{?YFU$6BXS}r28@ja98`oB8r+Xhjqj0!vEycD?5uW=gMh~^E zxxPxf#Tnm;d9zB~-VLl&5m$(JrE3VqL~B->-~2>6zN86@NRu#{TCvkMsd2}K%8O|% z_AKkf7*;N}AVM3$%8DE!Rpd}9b62fWXNqmEQ8_NoOPRviugm}?{AWqe1>yI8{AYgr zXWH^slo+$PT$)hrr3sJMpjuXuIZDj|%nE@|TmE-`15q!+Zpum>-KWlc+i8KJMF=;g zqG%6USST}CFKnA7XNE6{}_Rh zKn;Z`n9mfLm4Y^1*pAZf*frQFUK3l>L*iS5g9YiT70JeatJsuM45lTP#gyK1JU;MGO?N7ev=dyH$a~j>Cy!{f=P;K%P*!!T0X(T+Tb7#-+Uq*wZSlH zo2cymTk6KX;1s`cIr}_tTb+{_ea@sy4E#{b0pi1P+QsH%SsgL(?FvH9??Q$E>VG*$d>N2{Ky&DTqX6#kdvEkPOPuc)DSfBTT=c}#S)q3izF z4Lw1B%^xUG75Q6;wOdV_3(x{n*et}Cspb1}wZY?ZExDbL>9T&Z_5jTM?VdIk93$DCgvjoS6>tP@-1XS1Ua9@{ z^3C(J9}@@UPCedU&HfE{QvL&3>p(TJWVvX`|7YlUH2cJ6Y+CY?kXF$XHI-`HhurjP zqVy_F{ujuQ&Nx?IrptdX;kYQG#ky?Emp{beF4-AS-l;8@kTOvarj>l8hri5Oc(xw7 zf}MuG0+|o8$iNqIP+NmoaR}M*6BBO|4OzVj`MCiqQ)25mPby(|b*_tfzqD>04Pl-N~4GqO2`lfP#!Oq|a z5rNZGB+MM)v(wDk{Q!j3?X)LAu_HEIGL)(etg=N8&tf@X1coP>e&;${nB0A1z{Oi8s#tDkJqVuu#yg@ugbcr}X5?DO__u$egg?$9ZD;~lN|{LxP*NW*?~BG5XH7}-~-t$43Nz?5m(MQtu&-_;C zNA}MUldg`&eM+{^aHP|V$1;vS?cQhcMuXBnwIP|SR?KeGjeFH*ngIy6LStMuD$FR; z{JmJw#k=hG8C$HKG`IHMN%J;!T03dPBVQVK<^SfeSaf*PEJ?#_G>efV6dmPhwLE-h zD0%}GGsPqnJr8v1k!p6#;3Hmz4)>(!aHJ(d(f9tA42JU!`*q(?^vza=e;iisAJ*%w z_Uq%Sn?DL27LWcPWMQu``)2x?t|ws4Ri&q%ME(##d(LdTATVAp`LxIP$VQo~ekIHk zL~p2;(_Yn1nNarMg?sLnCeCQytwP%07-Ds!vGk-v+=kFM;UY4%&<_i!FgAlm*4ViS zQh$M4N9c$_>JoqPT|w%Y{xZ{GN*Fb4WzULr*-`*v!h7;1=hT$NgdEajlnNg?jS)|Qe*bPFA4V&NZ)SktNjPwS?sG+PsY3JvxNXv7C z^w#STC*~idH;H&`)jog7!Y_5Fogef!mThL;M(XDS(=#i4V2Y{ruG=ExFgmyrERoI9tMtS}bC| z}0+)6R2T{-8m0PcCw-IobQSZ}k+b3(=TGmo{G^c}`mDV%svN;;- zGSb9NTPJ;6LLDnb+Rj1RY746P5QHd`6#h4Y)>gGrDsvI!zQsIq6n(ZMdQoPShT>J) z1mtJw6XM{F+@cf;4LoejflxM6M5mBq{Ik)$yB zdjLx-v%4DtS9$ zjl0jS=<%Gjs!{Be_<%>0Xd3#{thxZ{v4Clr&Yn zgcEEG7^qds$SdbgHBsLBmW^fSy$bebo-|Y_=L7k*{)8W>^P?INL7JQ;A}m0eM$|Op z_}2;0#GxvKBTyt3ns;IP9K4!K+S)pVW zM4iJPkaAgSJ|G4p5?m&Day}3dL?F1V1d39@;3pgrLlf_jp4Akp#Q{~{__S5E(ZMbc z(2imtbK-svubC4vRS`iAy`@z`UZYs2S($s(JW5|>+Q{QVOIJUYc7`qi0o{WdlITs@ 
z$Gzb9Z@30+{4!3jDaQ!A#}GJ$f?T$Ic9{om7Py7Uz$pr4wKYJf(upNGmgifGT$Jhd z#gjH4%hDr@5cP}b;b!wKi(8_an=5XSe!>n|d`Z9mc#1r;!IMs>f0YCH*HKq;%B%GS zyZ5_*&m0aca5sOa`be8LI4ikxtFy8IKpgce{qeSOO&6eQvO{INz;GonNW?u z=_ATN;1d`6YlN1BhE{P5Qq)-{*ocgSx)}0=J)i@ozx^gre?lilIP}SRipnLYkkFZA zE@&p|?1|G<Wq%j$xk*3D6}7$VB1No{zK*H z6a>jw3{Sb_9{Z_uY@(`7zPjfxyCgSslrgZ3dO2A`^_Q4yyWa^Y0P||Vj? z^jD>~7Mt6T%weyP6DOjs*0*V2aoFN(d(#)=kJH0)Ua>CAwLXp)-s@#uS7q(*Y7>f5 zf{G9>>^x{fBHUJFh}s3)g>QK={ouIR$b)|waZ%!Tc#8*0%39cO9LtyI%m=O$EbP<&D%nrT1+ifFm2p(~8;jd7G<#wvmFZD|<9GurR*pFpLkC_eo8G z9Sk!H#HlPukEgVRhma06#s-2ISz(#?^W=|C6)k~4{_mqD+%ST1IOmAtFg|LJ!$rN? zDCNnjKYeH{{E7bz-IjS_9c#r19Xv)xr~^X~BP3i!eR>dibkNVlH~dUedmgvz<@*+* z5h>qj_77OD$mCC-&~Qr-u-W{ln$9JlN6ksEOh?pUIUW(?rR{frSthylJ;`LMcQOG6 zvOltvX_Q7B3@e{PLwlt9Pw~@cXt#5$1jNT8LJUTHW~3e-{-T)9EqXwUv}fV}4NbiM zEi~aq>y=q}^IWXZR#Cu;Q5X6)R$Yit{61uG?QqCo z)Dg+RYuIEkma2WJYLiML+)&VD4}B>s->Vq+jH6}TH_LD8Pw9the5Obsv+|kF^=xAK zxC%_5)9gOLMRdk$POu|RZlATIm|X!MsGXFvJ1wze9qMO^ovIZhI;&M=oLPLe_z)qh zje7XDf*g?<{iuu6oi0MGRlkMn?G>NcCu%e^A5Ohe<}+UfRXI_9Bqw`;d*%GZd>{F> zQU!=SovpyEW{-ZGR`s$!Yih2ZHD$P-wOucpS}>nure@~rMQT^%VD@O~k(rFscq9sZ zoc#lcUf)`5pQ3Xe{JV5`oBG}-UQs+0OGe6qa0;uF>fw^IO(b=AN!$?}C3GKkJ!cnr zT#4{0pOBcNihM#5EGXN|`z6H?p*a_p{+k9BaEbW= zwcV%XXUJNUX{_|IV)HycMu5@4x$rS+7gca#q3kr<$5^ja)K;lW*wOrH=&N-(v{f(X z*5i`!$}V0%gQ6Dxwe>JJFZ7ekf`0PD)I}tO#>QM*-<9uGs`K42xE>HtibXMJB{`A( zF)VchbWGMn<}22Kh{L{*w}JZq4eHeFVj?FOdriv)b+@0WOdq02H;Cl4<|Oe};+%mW z>Q7ngXfc2Fs59DdauK1nTX2g`wQzVKP17?=5KbGj&yz(D6JC9a7Vu%+aQ`ki1>){9&G8$8!|F z{8|1GwHbvE`6=C2oFcvgd(k56lTwa=J7BIyw^Pg`87m~^r9%n%_8(24P7b(xRnb0f zDgPXqTsAvKp~M}f7NcD$2m6bZ5%a+dkT&bEM!hpE)yPcpu2_WfIk`>|1=}JgvgEbh zP~TbQGP}wqtI9MgUZhi>SJj8s1Jxf^9~xdC5Eq^!;Ej>GqR#i2l|V_rYo$7;A6m|> zk1Q&bSp=Rfk=#8QE15_ZWGAY>?XLW=Sujo=|EcO=#nYbKeL6V>jmbZSPoZe*P-^3l z99^Ig`#(eLJ{t#%S!Xhnd!u#!{|c?R9MnjiZdI9v*7^S%v|b`;eFIBAD7lLn3zUbU zb-tk0>{kRO?C+G1tb;fp*lhlnltKfl6?#mfouh6*x$->PC|?*p47-AV-yAt_P%W8Q zwhO*l`r%i$`GQvr3a~2pAWT8c35;$sC(wz*w-)5+#=QImSpj3T{#3$#&LI)n@_z|% z;cs&Id|?Dl%eIxneEU&J_i6-KzA#@f`PK}QVKvA0A-o^m4i*kms~(lyu)S!u9T{4#xCh73$WF)`3(Tt7 zl0ms96p9-x9hbN;^Z5SMXTB@H*8J~ywM?1nPdNQ1`6GW;ZVQL0>le?p0RbE!RQ%qzl0d5=>UP4saOg z;xHhPuP=kWIQHmac#25Pf~RWwW8vsV3rF({O!q7cN6aRLBN!DYm`LoL`2|vQi}_5h z!clT^bCa--Tmq}H*yJcoUTIwNh%9J?jPRqZDAEl`O4-PQ=EPDYqu$5rxv;5E`!;_G zB!gI9V_m`w90YgL`Dy^)g;K+ZfoWo{IbbAjqt2NRNn(zc&*z{9k1ya&h4lvWJv?+$ zq4`fG!K0Jm(35oYzwD!-3r-MwnH%OUXdY_KOm{&=3)qqWwz2h2YiQ|y>!hXy$ut3o;JvVk^MzwA(!^*BHtr-t9o4{vg} zcW1}W4EJ~!9upj4bcMS!gD1$hqFmvFqhkZYJ&S?^D+aPk4Q6v7ty3K_D%LfCuMp@Y zPF^eIMZG#ni77rbd@w`1w_ST2*O~aGP@#9D{y;djS9Xlv#Cr2zHZI)eW6E~LM|&k2 zsAz|O$02|CP^NZgH}DyQjrHL@Z(-0p)8{zfAzS~#Jtxd}!hy3p>cfYobE0YZ(AioG zreMN8*0fPHd-0qqZOs+^m{R@tX3>|^gE?|JJ9LR82JuMyI69_hdT=7YX9ue#AMY~N zJC2j-IXiS575E7A4fl)<9w(Qxg88aCUW7P9)t4R0hHg5@CM{+y_DUwJA&QSyrR7DT zYT{(T?dRw0+@Xo%QjV#Y6KV2r;bihYWWvUJd9Q?DVLuQ9o~}!9wEUqj^F=U@y~6AC zg0}n|21GO+sNy+3)r<>;b*mh^#CpRr+_6mQZMZudhf})YE+z29^(4aXeefuOTgUi} zuVNE>3(JK38)eHs?I&v`Hym>JRIXx#HRMHaC-5_pCdMVPAiOj&I&`dT36;@jMR+;l zkT4xjIp)0_B#d#?gy;}UGNx<^eKo_8(&!2BVasNh@*gE^cI*!j(*Vp_5R=jc-@H}C zg9JrxF@Kernr-kezV`3Bt zYAjIql6v{2KL*QUe7%Ia)9bX&J)lemw&932AF5;sUy)zy&mO<4{=oPJCM93nsiJ>6 zFCh)}FrPw<4<6&F^Bd(2Co=o6*_T+KCFR5H#Yd74V#}hIwW^jr@S_a~xA2PW$;LX-i&o`{`b~e|nl_<4=%e04M4y>7&@6J7xPKLfi6G zN1+_?R5J((XSMQFOO)o-sM*iuB%IZdC4-aZtQN8y^qpjRZi*~>{yp!=mE26@-%aK! 
zyyIsAa&O)dmNd&SJt>-v!FEQ$#Ar5YOoEZ#F^s|*3yukRw0=Pqpw)l7S_;(zvDR1YXc)vETOV$L(3wJDf zSzFVuzASrm$i)$W?DFB0Kp)kVcg^c(hb5Q4#nJsjH+a$%8pZ-yK>%a^k2p&eYr1+_ zB3Ou3`J+Ri`{&Ky9}xSASSf+!R3?)o>r&M?Rh5O%WKGDHR83gt__PImf8Kl;ImTX@ zaghrabW5Zb4n6DZh~_2qwR=CEEQ!~Ye-bQ39#jh^)!}rg10(%Kd-WY(>t#3QYqx(U zxQ)nKCe0jNAGu&o>}mImJ*Zmexh~;Z*yx*w$E|l>JLi>}Ug^BrM zcRdL{&k{+-hYGE}WOZii@Kn5RzIAdHV!i+yw3MqUBD|QQXua&S*%1$v_bfaEUQj08 z7P#y>1c_tC*ApFdvH3Sd1Tfwl;|r_=*Rjx3BfZ&Nz@cPfj`z}ffK{t59J@9_?5KOw zWLbud{x{$#oxJ<3$CUK%|Gn%&!-Z`OWuZr0BK~}Cz-T4g!?I|QC0R)ulNAmpq-xvz z-k*24M3BWFbSK{u-qb)Gfm&aP6@yu|*i)dU9y}1o0H>F;f2zuGeaZ|;o|dGv=?O;m z_9|mYMpc<>E{gDNKSqPNmK*c0NT6`VRBq#;S!!n$D^e0q`-SXbuBAcIi*?2KS`_@2 zo*#7uu=dcuxa-8UOu<|9i1P$$qlyj|zh=LFD*7yM|A#!hbTRWg!1+l5PoXSD1tA<_ zigD6-S!saIx5&o#%Rqwq^l^hMwHPH%(iY}a2ikC>b+n>TIZR>0E3I9ME4WPzS_(g+ zlu|?Tdytpj4_qLw5S4}fv)A4lkEb8FV1?h+nNQ4zRQL`uxeak!967R8>cRL!$c_0> z;^-O$0*ylVV#Q`u3(cFD#|hA^bg_q8_Pez!^XbPThQK<<{L?qMlND66r->8+y-QtL zf8Xh8qLXTbq)@+F((RW-12v#8s1TC+mDtnk!x#977YP`yyG_av%?K7jxC$+m?I#t9 z)Bf*Kwek%|=y{x<&p#r!8$@{5@%=fjr)e#_ecFApUvcOY7iQ)(aRNUH zW^ln_N8Cs01t7PM=0|XWSs!dEwhVfoB`#YH1{khx-V_{Ve*Qax+)=jasdSo^7}~Ac z8zgALnPH!R&nMH(>x?8;mHq*~)j0!&=5G*CE1?s0mPZQlY5V2N^CGVkPLr>+I2Ou^jX7JgAVCLUN!$?j+OL_1p*(8@dF2y-?%r%*b>6c%ZA zU_G|`Jj84zI}iQA&;j1ZK1_DdvWqlT45g&Qcr|n#EBS>tn4$g(&4rxM>j5hH&ZapL z7yw3~JW8I+)pPJHuv&t@%6WQ&fYKGmXXSiJ=3vD6CKq{>D~}&=+30+f-wXu+5#sBd zv}+EdIW<-V>=0dBgFaVZ=Ip)#qH5Hy%^Uhdr4hf%v!3g1ITyM2v)3b!8>IZh6*mhu-e3Dfty?dlWA+=k7b{kRW!}!)kkxcAI zXZSfkK`IahBI#pNG_*5V#+PM-h4OQ3_z>II)uBPE1@kqGjWMVCu8{V)Af~WGVy%T% zMUY5P*1ZHj@>swmOvw-Bol)}Djb4jrg$O601v5>Xy9AgWC>3|?T7kXGyCx%Cu}mC; zTFbNib~=B=1GPEKp|s8xy-DU+`e}Z~K$wHMW1)1$T)E_($i>`Jp-A*}a~*Z69eq*P zp&M0t2N(0tq<1=pQmL#Ayrbg`&M;V=w>cgfYramcvK9#QVOTb6cbPXTFoO`~##Sky zq$>(?-o;eri|>d#PogS?BhH*}vnRQn;(?eI9~C>iJJgqi%?m_=Gc*b>B2tUJcN}DY z&uaqk(mboMmm)-(aB{tK0}19DJpBqY?r_BoyvPilYi_1zW~TK2NBnd;C-GkX=*eY( zTe8!8$2Rk3?)aR9`8=8VV(cF*803iHl>niuz2)ve#9I)lg|F$k37?FxHIAeClqsQ= zxx7@ply8c5zeoMiadDzo7J4yJ)(OUiEk&ng#Bw=T?FLxKE*NF893A>sGJ`PV6|iOg z2|!3f(M>G*l1T%A#!3puGeUgJG&EZAPbu~>wo5sX^xk%JGs8ohdu0@vd4$=)W68t$ zUDt!Vc+eS+XN4|ER$G$RV(?T-K4z7)pv4enK^Ax2L2(P}Sih3p0==8~ zB<7M_Q}ir@hX~*lJXYCE61el<>58adOjHbTiqi`bC?*-8@^IwG~9^}bZ zo17yUK2a6`grg!N&PT}rJ_g9^h;z{{E2o$HrKEH^r|?F7Hr+XiJDGKz&MWLYwOLNT zu}x;$B~r38J40Gdy*KiHjEkAIbGAG25<5%Y9}_PrSPY$*pWB)R#}3*t6kga9-{eIb}HwS0WUG~}aH_!GD++mn%S}*&2{Op@U z17Bdvy!LzJU}(mv`^LsKz0_3mm@G5pRwIpYkjx>{DG9qq+9{nnU{<6*2*#5#%_lF z6rj|@d%o#(p2VFsy2o)RqX-FPbE~y}Fl7F4GJVqN?Ae)2>gKl78RNGfdC7F57A~Oz z0Ls|T7vJ3a!0qA_aFaAt3x6dQqDi_v{VwRIwS2^*;e3?RkL?L}<#sxM%PlSmzEgKU zcfS4Y?%*}yE_bK1g-0e531RNmk!w3}-^hK{DVu=mYrwopp2|0cutd7A%^##|t^06+IyK(-wAOcynNQ?z zqQ0>{W3vxd(B7SYYTS|S2*2Gg;{1zDnq`|#2Xp#?9r`JGFYb`a^R{xiAY;KmJ^c1F zJU-2%ROsU4rcxmd?Kx(Ce=g_zo+O~)gEhkI=ZcNa;t}~Ov|mvh9H*qM%-Kd(RM?JA z=lL`%f0q9|cZwc;#N&mh%dRL;WxQ&0gWq1GxIwnDdF$zXyO?Pv%NW^sS+mc#c01hE zJh2jhd5pW-$;`**OtP`(ahI@KV>2@@?)sgiFw@4eeOA4XM)yc^-`R#=D{4xT%r~i4 zZrsA#Cac<8x+IYqS*u4{q$Pnxz4WcUPa0M%sbz(Lx@;WJhE)PdQAcaFE=H2;WkRQ% zr#DkrHPbq=^zm{Q22HYD;d@CWN=x3j>l1E~BY7y}!Q7~xWwt8e!LbM+i{qsPX9RC` zKYWg2uEHe>rsxL7m%R*!bL}>AJu&+CZ=LzeBJTLS0Ou#w;3uzUE#2BMy_F~ z(|7*S!FxixN^1wM_3WGpJB<#U{||NYp`*jKT61yyfG=?-dvgo=USX@__qWe4=Xmz6 z-Sd}V)t);PNiBQx1*dKo=B?3S2s}OuY8=lS09a3 zKHv5Fu^Ey6tAfXZT6+88{9}{IMFh>4tkjo**l!TuJwE4;MpE@}zVJHvm@f6^uRZsW z@EeX~4hqXD5!p0!V3)VdI?}l+oR7?t1|iJ| z`H|qy)%1HLVq!`p6(s`4Qwq5SIKRfMtxQlCc3ZAgvWyn2EpU<1OLF7_knJq!;b0~8 zhXdv8ErKPnx*Uf}Ba4nZS9@jmZDsYM=d0!0xIMRc&nyQA&{)ocS^4P(oFIVQ*<LNIhk-eC8s%7ABn%t_t`7uv_EHTv#c+y8Q@wi(7!OSC z7oTYVHgO@LeWwdl{qbn3`Wo!dMVqSE*7jHbD|?GkbXI>Q@IdueZ0<{rau6XLs>!+1 
zUzfvytETD=xF4xWEemTkATQ;${dNCj$nXrAg`W3ioA$(`yIzO*vf$e%`OoJj5@Q~x75dcl!Af%3iN=mp18Co@Nq9S*8`0z019W4_cbj4miVEj&b{-_Veu8p#;%P5j@;9bywLeO9l5?iPClL; zrG}!Ff8huub3pFrnQ`p5PFEUPdhm3XoNx79<<50FemLgxRM7)oZroa}TRPv)Sq7H` zPV1T&DV-_CT_WIacrMaOD`Q_gBqMyRCUqmVCHl(88CHea9ttyMpqz)ouZevq{2LAS z*HeBGllmKCe8_!?bH4#voqv&+)HNuc;Y_uh0pMJXA8mO^F=pgMW7VmAM-JlQw1n<1 z!!e_Cg~m#hI6%1otf0v>P4`ev6x~sR)4&@2Wma_3w+LSo24-c9ES+ob7C)I_W%Ol{ zPW4*O`<=gvm^RSLX!JwpRe6^K8mupJb#*P0OzKC@3%E+R9O5>Bm)& zJ4sTJ@~F89(%z!ViRR}TD5JGniC$FQ?3Qbd{I3&pm!$urM4wG-eIWY}1 zlsiY6M?pMDRqp)Uz8ij=*0X1Pj3A7jrB0gg;hL0Fn|j@uUM45gHu&pOT$8zWN!_`B zI?keYB(*gf*ztSFCkrBs1XSzMX8_r6lk-ICcd4qUd@LOY?tD<(s-EJC$dN$b*8uu% zdx~2Ew(m8sg`UJ|co(u5T9b274~?TkXs>pD9REA+*mjw&+j1@+df!dBBu+;RKZ`f# z#&h%}Y>NK4h}K*LaAv0BiRjr~G-nW;slo#_IZr#Y&J>A;>!jH5nlnhhs7d`WH64xW z72%qk7wu?wMY--R+^f6_{G~)0Jycm4xbqyug`?jmaddN!VAZ5LL#!bm{T%OHm0Oqd z#Ikg0`*9OUirc*vLWL5Six@SSSe$xN2m@JIgzh>+!tl~PLqZ3aVv>{PwUF1s#3YeP zhq|7wJS8bRLPrOMs)NMuyX1gM1XI2PvXXs<;7^`?>{JeNQr1ZS711g|@o%2GoP&|k zA-rm={`nS6w4Qo!e~;|ydth~PhP#GI}6v;2`3_n_B}bu>4?fl z6B`WD)X1?9q>BDB2oc-xC!wn!?j`QPmD`*IX{CyOfgjsKM46879JY-(mz$7*L(6*@ zppn^s5l#K~v7%Jb{e{v_Rs@mnN^?g5NE6huMTSS=V!rZSDuC|Hurvn$xyIhLe+xqtTql zqm?gpoh#uZqk{-#6D1H~%K5_Uqfb^>9_acKEFe(zqtv6vyz-Ga(TYN4+OnQ}$)|&~ z0^GKl^gfBz_EsbITK%3f>lK}&Y=mj*@iR3`Om@y-`wd%hCLLq^{Rsn5V0)KG$ng>D zKYCpJ3$_2V;U!ywf$i54VChE+lleWMmuYv7+<_tH!NW;~=@PA?*J{BXMKXDrOXG_luY?mF)It;gm z5N67;%R%X3qMlk!05bYJsb4|zYhLP_Rxqo5#`)((qNBZIx<9bvOX9!uvcUFx3n(N@kU2txXGuJ{xMu0?<$>*=l!%goaAiVI7Zj2~ zr&-X*4T(r`&o~Q6^w?5~BYp08VqF^Zi+U6J80N$kkzILZzT+>Q_QaoG9^IdML-cT| zL+)}^z0_xeyX!-L_BFz1{`S$zov+E`%kucVJnrSe7rEs*#w*hGu0Q=+!~K-3-~BAI zw83|KF5;M|BzmZ&i=7u)cHx%h#3hI~$qi9mtgFkN0a}AxCe^g5gkt17#X9@5D&(8o zGKfEJzw}u>lLA+yqg|gB?H7L%dd@N%{A4$Y<0rx%E5F$Vx3KVCTCMZ^99ss{S7n_m z^JYP$V!2jqP-oy31d7^$C^9Y0`9%g{=X4Hj)~0sr5AxKq8IhfQhb1V?o5d%(%8;y3 z{fC}qn6J1-n?4XsZSkiraORTFUAxOp5I&m24yo<>(ki}swi8XL-afWlrz^XZqavY% znSm}b;tGmXlGg*ibJ>Wr274VEx-Iwmb8{V1m@oZ}eXVKW>t#K#DrWjSrCq0TfB!I+ zp6SPHQ{NjQFLcz})Hh@YW^L+MOquUvwC}&X$Ifs*W!)3gon5@9;yuot*oVTJxy#M?yFV%s z`nLKJ@^eqWcyu&zUQN?AH1_K`*!#>8WpphuWs^xPq>c9Z6dbT+UY97fsFtCvJm zSD<04AP*kC3b$aMdRcw-MW=_qHl-bkYFWE%bG0M#v&uf}mEs7{4OKoQ;J()sM6hNYo z%nCTdyNY?PO&3e&QJ}?>snF0qza7!=H#@&$T*d@};HeJ;FJs}poO~yk*58cKP))>d20W$rIFt>8(XsB^30i7FV z4Cw1wq8KOSn|J8uzWFjv+YSl?8)J2RyzhQ`=d;dUsr`YvolYf<2DsQ zAm)uq#Fw0PlCPx=!9>-z%k(_@l5LmidG%shDs0u-5zsPil3R@F5zk0k71Bg(TRO~g z4xMJlbkYW3D-q~^1Tj>vmbGy%`EPS{pWl<+=h;eHS$$ui z?|V>!3*Tv^os0D*#Mt_pz#Tv2cxbiT-{~n4>G2{US!H|R2UJuaby(orB?5R!#5}%_3W9Z(~5SgM+g)aP&zD^D7d;RTr8mv zi1;xuyLFK)7p-Q@dj8$}sHiVjqrR*jQ@*}iCnex4iSXWfDWP>Yt&=2D1(E)`GFo?R zbbn2MwI69OR#|sn;Lhu{Jc6W4Z;JgGifub6Eqh#Vavmy?Zt#wIpcUyu(WV=@i;xdI znp+xVGY<>ooUb`1JnL|6&QpQy`=#caKIqe8?jk(WVy=UYktKuy;eN*zs9 zLMem#-|@(x)LNhOZS68fsb3;vSW4AMibAbP+~1tzPq3i-xK9%JW8}E6Oz4(_=1bm% za5=t2AhO+8_FgE5p1r{(Uu4a@(a*iB{bgnnHS&dSp`ue2gnr8ryX7mdgAjs`p?zPp zY>@}&!-*pgoyBu$YdBe6wRBP9%&Mh1$skwj1d^QV`A94zs?{k~OD8Atk538(b{wac znfa~f@CR>8JN<#}uksQ__+&zU_xmK7EE#m%MBJ4@MLhYcU@-E3V8>?-;!t4wL*$VJ zlQ8WSAr1z~AoVKw=OZKX;jrDGHVHFZZMuO4r4|0#oJZjxvR*sEw0tm9dlVI#e&%#NHd_`62swEM$6oK6r<0R#IZw0C1py$9OiNZ}B$G!++ zvP>e5`W0$KCev_lkva7?<)1hY5OQgIS$z_6h6CHbqRYA{*#^aPx|+6qBs3R-Z@P&^ z*NWPvxNfj@&ZS4B^$*qM{5TN~JYc3peAx%~w4{#SWT#VdibPhG*&>h2VK4(B9dMBf zvJ&4U`Q{KNkY%)ih}#6HbaKHb-6%)-XZmGYBciyqvh#SZ7W65NA<*9<5 z6+TCgE&Z&h_{}X#AKD@g!guI+-nZT&^-sX&my2qap+cXy{DWX>9DsUrgF+g@_6i4 zr3U2 z78hJBQZ;>CrpM*_)S2kB(kaRjw6Iol)o?7a7M_M8GP%^Q(_Fx9ZNB8W@agO|I4({@ zLG=i!GC&r{Oye2nA|U4>Xb=JN?D?akumIgaBYg6b?Olcuu&(E?+J(ei@2T3!6(!QuyB_7>EPKF-?@v@-666Ek 
Date: Wed, 19 Nov 2025 14:22:29 +0100
Subject: [PATCH 0213/1030] gpui: Restore last window close behavior on macOS
 (#43058)

Follow-up to https://github.com/zed-industries/zed/pull/42391

Release Notes:

- Fixed an issue where Zed did not respect the `on_last_window_closed` setting
  on macOS

---
 crates/gpui/src/app.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/gpui/src/app.rs b/crates/gpui/src/app.rs
index 9d93de53d06f3130f28b39c7b21611f7996abf89..c042d85a1239dc6723b6501b27690a9f593a021b 100644
--- a/crates/gpui/src/app.rs
+++ b/crates/gpui/src/app.rs
@@ -1410,7 +1410,7 @@ impl App {
         let quit_on_empty = match cx.quit_mode {
             QuitMode::Explicit => false,
             QuitMode::LastWindowClosed => true,
-            QuitMode::Default => !cfg!(macos),
+            QuitMode::Default => cfg!(not(target_os = "macos")),
         };
 
         if quit_on_empty && cx.windows.is_empty() {

From 1c1dfba7e3a7830457bce9310d1081eda5de399f Mon Sep 17 00:00:00 2001
From: Lukas Wirth
Date: Wed, 19 Nov 2025 15:11:37 +0100
Subject: [PATCH 0214/1030] windows: Bundle freshers conpty.dll builds (#43066)

Release Notes:

- N/A *or* Added/Fixed/Improved ...
---
 script/bundle-windows.ps1 | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/script/bundle-windows.ps1 b/script/bundle-windows.ps1
index facb8d07ee65e04bbb12636620b23c9b1137aebb..67dfb5082233395c00f3575d8571d8d5f8520740 100644
--- a/script/bundle-windows.ps1
+++ b/script/bundle-windows.ps1
@@ -189,8 +189,8 @@ function DownloadAMDGpuServices {
 }
 
 function DownloadConpty {
-    $url = "https://www.nuget.org/api/v2/package/CI.Microsoft.Windows.Console.ConPTY/1.22.250314001"
-    $zipPath = ".\conpty.zip"
+    $url = "https://github.com/microsoft/terminal/releases/download/v1.23.12811.0/Microsoft.Windows.Console.ConPTY.1.23.251008001.nupkg"
+    $zipPath = ".\Microsoft.Windows.Console.ConPTY.1.23.251008001.nupkg"
     Invoke-WebRequest -Uri $url -OutFile $zipPath
     Expand-Archive -Path $zipPath -DestinationPath ".\conpty" -Force
 }

From 39f8aefa8c6e683d3d6eed2b3b098ee4231f90a8 Mon Sep 17 00:00:00 2001
From: Ben Kunkle
Date: Wed, 19 Nov 2025 06:44:58 -0800
Subject: [PATCH 0215/1030] zeta2: Improve context retrieval (#43014)

Closes #ISSUE

Release Notes:

- N/A *or* Added/Fixed/Improved ...
Co-authored-by: Agus Co-authored-by: Max --- crates/zeta_cli/src/evaluate.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 14dc0f6c0c105919b822b9077211a1e1d9686d04..d808e3d743d7009ca66a75b3a349914b0a4f5447 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -478,7 +478,7 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval let scores = Scores::new(&expected, &actual_context_lines); - false_positive_lines.retain(|line| !actual_context_lines.contains(line)); + false_positive_lines.retain(|line| !expected.contains(line)); if best_alternative_score .as_ref() From a42676b6bbbcd35e819aa4037294749dcdc7ff92 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 19 Nov 2025 15:56:10 +0100 Subject: [PATCH 0216/1030] git: Put pending ops container out of snapshot (#43061) This also fixes staging checkbox flickering. Release Notes: - Fixed staging checkbox flickering sporadically in the Git panel. --- crates/git_ui/src/git_panel.rs | 10 +-- crates/project/src/git_store.rs | 96 +++++++++++++++++------------ crates/project/src/project_tests.rs | 2 +- 3 files changed, 59 insertions(+), 49 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index e2a4a26b320284fed727a7f7e60acf807c39abf0..6889b880623da021bcac64b8cd96d70db817dd5b 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2713,14 +2713,8 @@ impl GitPanel { self.single_staged_entry = single_staged_entry; } } - } else if repo - .pending_ops_by_path - .summary() - .item_summary - .staging_count - == 1 - { - self.single_staged_entry = repo.pending_ops_by_path.iter().find_map(|ops| { + } else if repo.pending_ops_summary().item_summary.staging_count == 1 { + self.single_staged_entry = repo.pending_ops().find_map(|ops| { if ops.staging() { repo.status_for_path(&ops.repo_path) .map(|status| GitStatusEntry { diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 4cac71c6ae3e2eb3f3615821443db7c82e01d810..f111ef17a0d17f0288ab739c4d02f51148b431f6 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -48,7 +48,7 @@ use language::{ proto::{deserialize_version, serialize_version}, }; use parking_lot::Mutex; -use pending_op::{PendingOp, PendingOpId, PendingOps}; +use pending_op::{PendingOp, PendingOpId, PendingOps, PendingOpsSummary}; use postage::stream::Stream as _; use rpc::{ AnyProtoClient, TypedEnvelope, @@ -255,7 +255,6 @@ pub struct MergeDetails { pub struct RepositorySnapshot { pub id: RepositoryId, pub statuses_by_path: SumTree, - pub pending_ops_by_path: SumTree, pub work_directory_abs_path: Arc, pub path_style: PathStyle, pub branch: Option, @@ -285,6 +284,7 @@ pub struct Repository { paths_needing_status_update: BTreeSet, job_sender: mpsc::UnboundedSender, active_jobs: HashMap, + pending_ops: SumTree, job_id: JobId, askpass_delegates: Arc>>, latest_askpass_id: u64, @@ -316,9 +316,7 @@ pub enum RepositoryEvent { MergeHeadsChanged, BranchChanged, StashEntriesChanged, - PendingOpsChanged { - pending_ops: SumTree, - }, + PendingOpsChanged { pending_ops: SumTree }, } #[derive(Clone, Debug)] @@ -3062,7 +3060,6 @@ impl RepositorySnapshot { Self { id, statuses_by_path: Default::default(), - pending_ops_by_path: Default::default(), work_directory_abs_path, branch: None, head_commit: None, @@ -3190,12 +3187,6 @@ impl RepositorySnapshot { .cloned() } - pub fn 
pending_ops_for_path(&self, path: &RepoPath) -> Option { - self.pending_ops_by_path - .get(&PathKey(path.as_ref().clone()), ()) - .cloned() - } - pub fn abs_path_to_repo_path(&self, abs_path: &Path) -> Option { Self::abs_path_to_repo_path_inner(&self.work_directory_abs_path, abs_path, self.path_style) } @@ -3331,6 +3322,20 @@ impl Repository { self.snapshot.clone() } + pub fn pending_ops(&self) -> impl Iterator + '_ { + self.pending_ops.iter().cloned() + } + + pub fn pending_ops_summary(&self) -> PathSummary { + self.pending_ops.summary().clone() + } + + pub fn pending_ops_for_path(&self, path: &RepoPath) -> Option { + self.pending_ops + .get(&PathKey(path.as_ref().clone()), ()) + .cloned() + } + fn local( id: RepositoryId, work_directory_abs_path: Arc, @@ -3348,6 +3353,7 @@ impl Repository { this: cx.weak_entity(), git_store, snapshot, + pending_ops: Default::default(), commit_message_buffer: None, askpass_delegates: Default::default(), paths_needing_status_update: Default::default(), @@ -3381,6 +3387,7 @@ impl Repository { snapshot, commit_message_buffer: None, git_store, + pending_ops: Default::default(), paths_needing_status_update: Default::default(), job_sender: Self::spawn_remote_git_worker(project_id, client, cx), askpass_delegates: Default::default(), @@ -5027,6 +5034,32 @@ impl Repository { }) } + fn clear_pending_ops(&mut self, cx: &mut Context) { + let updated = SumTree::from_iter( + self.pending_ops.iter().filter_map(|ops| { + let inner_ops: Vec = + ops.ops.iter().filter(|op| op.running()).cloned().collect(); + if inner_ops.is_empty() { + None + } else { + Some(PendingOps { + repo_path: ops.repo_path.clone(), + ops: inner_ops, + }) + } + }), + (), + ); + + if updated != self.pending_ops { + cx.emit(RepositoryEvent::PendingOpsChanged { + pending_ops: self.pending_ops.clone(), + }) + } + + self.pending_ops = updated; + } + fn schedule_scan( &mut self, updates_tx: Option>, @@ -5058,6 +5091,7 @@ impl Repository { .await?; this.update(&mut cx, |this, cx| { this.snapshot = snapshot.clone(); + this.clear_pending_ops(cx); for event in events { cx.emit(event); } @@ -5390,14 +5424,18 @@ impl Repository { this.update(cx, |this, _| { let mut edits = Vec::with_capacity(ids.len()); for (id, entry) in ids { - if let Some(mut ops) = this.snapshot.pending_ops_for_path(&entry) { + if let Some(mut ops) = this + .pending_ops + .get(&PathKey(entry.as_ref().clone()), ()) + .cloned() + { if let Some(op) = ops.op_by_id_mut(id) { op.job_status = job_status; } edits.push(sum_tree::Edit::Insert(ops)); } } - this.snapshot.pending_ops_by_path.edit(edits, ()); + this.pending_ops.edit(edits, ()); })?; result @@ -5413,8 +5451,9 @@ impl Repository { let mut ids = Vec::with_capacity(paths.len()); for path in paths { let mut ops = self - .snapshot - .pending_ops_for_path(&path) + .pending_ops + .get(&PathKey(path.as_ref().clone()), ()) + .cloned() .unwrap_or_else(|| PendingOps::new(&path)); let id = ops.max_id() + 1; ops.ops.push(PendingOp { @@ -5425,7 +5464,7 @@ impl Repository { edits.push(sum_tree::Edit::Insert(ops)); ids.push((id, path)); } - self.snapshot.pending_ops_by_path.edit(edits, ()); + self.pending_ops.edit(edits, ()); ids } } @@ -5695,28 +5734,6 @@ async fn compute_snapshot( MergeDetails::load(&backend, &statuses_by_path, &prev_snapshot).await?; log::debug!("new merge details (changed={merge_heads_changed:?}): {merge_details:?}"); - let pending_ops_by_path = SumTree::from_iter( - prev_snapshot.pending_ops_by_path.iter().filter_map(|ops| { - let inner_ops: Vec = - ops.ops.iter().filter(|op| 
op.running()).cloned().collect(); - if inner_ops.is_empty() { - None - } else { - Some(PendingOps { - repo_path: ops.repo_path.clone(), - ops: inner_ops, - }) - } - }), - (), - ); - - if pending_ops_by_path != prev_snapshot.pending_ops_by_path { - events.push(RepositoryEvent::PendingOpsChanged { - pending_ops: prev_snapshot.pending_ops_by_path.clone(), - }) - } - if merge_heads_changed { events.push(RepositoryEvent::MergeHeadsChanged); } @@ -5742,7 +5759,6 @@ async fn compute_snapshot( let snapshot = RepositorySnapshot { id, statuses_by_path, - pending_ops_by_path, work_directory_abs_path, path_style: prev_snapshot.path_style, scan_id: prev_snapshot.scan_id + 1, diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index f3c935f3390305c8c78074439084f20b4d1562b2..d4a3068e856e1eaf0aff19754d81141956ca8fcf 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8611,7 +8611,7 @@ async fn test_repository_pending_ops_staging( // Ensure we have no pending ops for any of the untracked files repo.read_with(cx, |repo, _cx| { - assert!(repo.pending_ops_by_path.is_empty()); + assert!(repo.pending_ops().next().is_none()); }); let mut id = 1u16; From 79be5cbfe21eabeadb2d910d4ba7c355256bd0e3 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Wed, 19 Nov 2025 21:02:31 +0530 Subject: [PATCH 0217/1030] editor: Fix prepaint recursion when updating stale sizes (#42896) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The bug is in the `place_near` logic, specifically the `!row_block_types.contains_key(&(row - 1))` check. The problem isn’t really that condition itself, but it’s that it relies on `row_block_types`, which does not take into account that upon block resizes, subsequent block start row moves up/down. Since `place_near` depends on this incorrect map, it ends up causing incorrect resize syncs to the block map, which then triggers more bad recursive calls. The reason it worked till now in most of the cases is that recursive resizes eventually lead to stabilizing it. Before `place_near`, we never touched `row_block_types` during the first prepaint pass because we knew it was based on outdated heights. Once all heights are finalized, using it is fine. The fix is to make sure `row_block_types` is accurate from the very first prepaint pass by keeping an offset whenever a block shrinks or expands. Now ideally it should take only one subsequent prepaint. But due to shrinking, new custom/diagnostics blocks might come into the view from below, which needs further prepaint calls for resolving. Right now, tests pass after 2 subsequent prepaint calls. Just to be safe, we have set it to 5. image Release Notes: - Fix issue where sometimes Zed used to experience freeze while working with inline diagnostics. 
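
The guard described above boils down to an RAII depth counter shared through
something like `Rc<Cell<usize>>`: each prepaint pass bumps the counter, a
further recursive pass is only allowed while the counter is below a fixed cap,
and the counter is restored when the guard is dropped. A minimal standalone
sketch of that pattern (simplified, assumed names — not the actual editor
types in the diff below):

```rust
use std::{cell::Cell, rc::Rc};

const MAX_PREPAINT_DEPTH: usize = 5;

struct DepthGuard {
    depth: Rc<Cell<usize>>,
}

impl DepthGuard {
    // Bump the shared counter for the duration of one prepaint pass.
    fn enter(depth: &Rc<Cell<usize>>) -> Self {
        depth.set(depth.get() + 1);
        DepthGuard {
            depth: Rc::clone(depth),
        }
    }

    // A further recursive pass is only allowed below the cap.
    fn can_recurse(&self) -> bool {
        self.depth.get() < MAX_PREPAINT_DEPTH
    }
}

impl Drop for DepthGuard {
    fn drop(&mut self) {
        // Restore the counter even on early returns from prepaint.
        self.depth.set(self.depth.get().saturating_sub(1));
    }
}

fn main() {
    let depth = Rc::new(Cell::new(0));
    let first_pass = DepthGuard::enter(&depth);
    assert!(first_pass.can_recurse()); // depth 1 < 5, one more pass is allowed
    drop(first_pass);
    assert_eq!(depth.get(), 0); // counter restored after the pass
}
```

The actual change wires this idea into `EditorRequestLayoutState` and
`EditorPrepaintGuard` with the cap set to 5, as shown in the diff below.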
--- crates/editor/src/element.rs | 135 +++++++++++++++++++++++++++++------ 1 file changed, 112 insertions(+), 23 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 785fd9de00888a7f658785e689df34bd2cffdf8d..8801f20323338e28bb7ed62923be65db785af312 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -74,6 +74,7 @@ use smallvec::{SmallVec, smallvec}; use std::{ any::TypeId, borrow::Cow, + cell::Cell, cmp::{self, Ordering}, fmt::{self, Write}, iter, mem, @@ -185,6 +186,13 @@ impl SelectionLayout { } } +#[derive(Default)] +struct RenderBlocksOutput { + blocks: Vec, + row_block_types: HashMap, + resized_blocks: Option>, +} + pub struct EditorElement { editor: Entity, style: EditorStyle, @@ -3667,6 +3675,7 @@ impl EditorElement { latest_selection_anchors: &HashMap, is_row_soft_wrapped: impl Copy + Fn(usize) -> bool, sticky_header_excerpt_id: Option, + block_resize_offset: &mut i32, window: &mut Window, cx: &mut App, ) -> Option<(AnyElement, Size, DisplayRow, Pixels)> { @@ -3820,7 +3829,10 @@ impl EditorElement { }; let mut element_height_in_lines = ((final_size.height / line_height).ceil() as u32).max(1); - let mut row = block_row_start; + let effective_row_start = block_row_start.0 as i32 + *block_resize_offset; + debug_assert!(effective_row_start >= 0); + let mut row = DisplayRow(effective_row_start.max(0) as u32); + let mut x_offset = px(0.); let mut is_block = true; @@ -3850,6 +3862,7 @@ impl EditorElement { } }; if element_height_in_lines != block.height() { + *block_resize_offset += element_height_in_lines as i32 - block.height() as i32; resized_blocks.insert(custom_block_id, element_height_in_lines); } } @@ -4254,7 +4267,7 @@ impl EditorElement { sticky_header_excerpt_id: Option, window: &mut Window, cx: &mut App, - ) -> Result<(Vec, HashMap), HashMap> { + ) -> RenderBlocksOutput { let (fixed_blocks, non_fixed_blocks) = snapshot .blocks_in_range(rows.clone()) .partition::, _>(|(_, block)| block.style() == BlockStyle::Fixed); @@ -4266,6 +4279,7 @@ impl EditorElement { let mut blocks = Vec::new(); let mut resized_blocks = HashMap::default(); let mut row_block_types = HashMap::default(); + let mut block_resize_offset: i32 = 0; for (row, block) in fixed_blocks { let block_id = block.id(); @@ -4296,6 +4310,7 @@ impl EditorElement { latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, + &mut block_resize_offset, window, cx, ) { @@ -4354,6 +4369,7 @@ impl EditorElement { latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, + &mut block_resize_offset, window, cx, ) { @@ -4410,6 +4426,7 @@ impl EditorElement { latest_selection_anchors, is_row_soft_wrapped, sticky_header_excerpt_id, + &mut block_resize_offset, window, cx, ) { @@ -4429,9 +4446,12 @@ impl EditorElement { if resized_blocks.is_empty() { *scroll_width = (*scroll_width).max(fixed_block_max_width - editor_margins.gutter.width); - Ok((blocks, row_block_types)) - } else { - Err(resized_blocks) + } + + RenderBlocksOutput { + blocks, + row_block_types, + resized_blocks: (!resized_blocks.is_empty()).then_some(resized_blocks), } } @@ -8772,8 +8792,48 @@ impl EditorElement { } } +#[derive(Default)] +pub struct EditorRequestLayoutState { + // We use prepaint depth to limit the number of times prepaint is + // called recursively. We need this so that we can update stale + // data for e.g. block heights in block map. 
+ prepaint_depth: Rc>, +} + +impl EditorRequestLayoutState { + // In ideal conditions we only need one more subsequent prepaint call for resize to take effect. + // i.e. MAX_PREPAINT_DEPTH = 2, but since moving blocks inline (place_near), more lines from + // below get exposed, and we end up querying blocks for those lines too in subsequent renders. + // Setting MAX_PREPAINT_DEPTH = 3, passes all tests. Just to be on the safe side we set it to 5, so + // that subsequent shrinking does not lead to incorrect block placing. + const MAX_PREPAINT_DEPTH: usize = 5; + + fn increment_prepaint_depth(&self) -> EditorPrepaintGuard { + let depth = self.prepaint_depth.get(); + self.prepaint_depth.set(depth + 1); + EditorPrepaintGuard { + prepaint_depth: self.prepaint_depth.clone(), + } + } + + fn can_prepaint(&self) -> bool { + self.prepaint_depth.get() < Self::MAX_PREPAINT_DEPTH + } +} + +struct EditorPrepaintGuard { + prepaint_depth: Rc>, +} + +impl Drop for EditorPrepaintGuard { + fn drop(&mut self) { + let depth = self.prepaint_depth.get(); + self.prepaint_depth.set(depth.saturating_sub(1)); + } +} + impl Element for EditorElement { - type RequestLayoutState = (); + type RequestLayoutState = EditorRequestLayoutState; type PrepaintState = EditorLayout; fn id(&self) -> Option { @@ -8790,7 +8850,7 @@ impl Element for EditorElement { _inspector_id: Option<&gpui::InspectorElementId>, window: &mut Window, cx: &mut App, - ) -> (gpui::LayoutId, ()) { + ) -> (gpui::LayoutId, Self::RequestLayoutState) { let rem_size = self.rem_size(cx); window.with_rem_size(rem_size, |window| { self.editor.update(cx, |editor, cx| { @@ -8857,7 +8917,7 @@ impl Element for EditorElement { } }; - (layout_id, ()) + (layout_id, EditorRequestLayoutState::default()) }) }) } @@ -8867,10 +8927,11 @@ impl Element for EditorElement { _: Option<&GlobalElementId>, _inspector_id: Option<&gpui::InspectorElementId>, bounds: Bounds, - _: &mut Self::RequestLayoutState, + request_layout: &mut Self::RequestLayoutState, window: &mut Window, cx: &mut App, ) -> Self::PrepaintState { + let _prepaint_depth_guard = request_layout.increment_prepaint_depth(); let text_style = TextStyleRefinement { font_size: Some(self.style.text.font_size), line_height: Some(self.style.text.line_height), @@ -9394,7 +9455,20 @@ impl Element for EditorElement { // If the fold widths have changed, we need to prepaint // the element again to account for any changes in // wrapping. - return self.prepaint(None, _inspector_id, bounds, &mut (), window, cx); + if request_layout.can_prepaint() { + return self.prepaint( + None, + _inspector_id, + bounds, + request_layout, + window, + cx, + ); + } else { + debug_panic!( + "skipping recursive prepaint at max depth. renderer widths may be stale." 
+ ); + } } let longest_line_blame_width = self @@ -9481,20 +9555,35 @@ impl Element for EditorElement { ) }) }) - .unwrap_or_else(|| Ok((Vec::default(), HashMap::default()))); - let (mut blocks, row_block_types) = match blocks { - Ok(blocks) => blocks, - Err(resized_blocks) => { - self.editor.update(cx, |editor, cx| { - editor.resize_blocks( - resized_blocks, - autoscroll_request.map(|(autoscroll, _)| autoscroll), - cx, - ) - }); - return self.prepaint(None, _inspector_id, bounds, &mut (), window, cx); + .unwrap_or_default(); + let RenderBlocksOutput { + mut blocks, + row_block_types, + resized_blocks, + } = blocks; + if let Some(resized_blocks) = resized_blocks { + self.editor.update(cx, |editor, cx| { + editor.resize_blocks( + resized_blocks, + autoscroll_request.map(|(autoscroll, _)| autoscroll), + cx, + ) + }); + if request_layout.can_prepaint() { + return self.prepaint( + None, + _inspector_id, + bounds, + request_layout, + window, + cx, + ); + } else { + debug_panic!( + "skipping recursive prepaint at max depth. block layout may be stale." + ); } - }; + } let sticky_buffer_header = sticky_header_excerpt.map(|sticky_header_excerpt| { window.with_element_namespace("blocks", |window| { From 52716baceffe7a0ba641819ee18b25df1abadc88 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Wed, 19 Nov 2025 11:04:35 -0500 Subject: [PATCH 0218/1030] Bump Zed to v0.215 (#43075) Release Notes: - N/A --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2062b5faefef4d5a5ec52c0d397a2b01a525d54..f167ce3ff021521a2f829447e8f3fba5207b4259 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21233,7 +21233,7 @@ dependencies = [ [[package]] name = "zed" -version = "0.214.0" +version = "0.215.0" dependencies = [ "acp_tools", "activity_indicator", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 79892fefdd7776e2fd7f99cbfa6caf24bb174a4b..78d650793ebb98cdc9a52e50adc9fa57c7c24b4f 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -2,7 +2,7 @@ description = "The fast, collaborative code editor." edition.workspace = true name = "zed" -version = "0.214.0" +version = "0.215.0" publish.workspace = true license = "GPL-3.0-or-later" authors = ["Zed Team "] From f05eef58c46be2d6617847494b444ac442780367 Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Wed, 19 Nov 2025 17:09:28 +0100 Subject: [PATCH 0219/1030] Stop the buggy stalebot for now (#43076) Delay the stalebot runs until the end of the year since it's currently broken and leaves unhelpful comments on all the issues, including feature requests. Bad bot. Allegedly this bug will soon be gone https://github.com/actions/stale/issues/1302 but it's too much work protecting issues from the bot until then. 
Release Notes: - N/A --- .github/workflows/community_close_stale_issues.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/community_close_stale_issues.yml b/.github/workflows/community_close_stale_issues.yml index f1359b72a6afe6008472ec3d59d90091b6779a59..14c1a0a08338ee513a8269094b41ee404beef726 100644 --- a/.github/workflows/community_close_stale_issues.yml +++ b/.github/workflows/community_close_stale_issues.yml @@ -1,7 +1,7 @@ name: "Close Stale Issues" on: schedule: - - cron: "0 7,9,11 * * 3" + - cron: "0 8 31 DEC *" workflow_dispatch: jobs: From 17c30565fca7cba0982601d3d427f1f2da24fb79 Mon Sep 17 00:00:00 2001 From: localcc Date: Wed, 19 Nov 2025 17:41:39 +0100 Subject: [PATCH 0220/1030] Fix extension auto-install on first setup (#43078) Release Notes: - N/A --- crates/extension_host/src/extension_host.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/extension_host/src/extension_host.rs b/crates/extension_host/src/extension_host.rs index 2c99acbc7f4028598ab110c54d7f0cb4714d2c67..bf28ee1acd29337f21cf290e886a353b943d3ac6 100644 --- a/crates/extension_host/src/extension_host.rs +++ b/crates/extension_host/src/extension_host.rs @@ -1128,6 +1128,7 @@ impl ExtensionStore { } if extensions_to_load.is_empty() && extensions_to_unload.is_empty() { + self.reload_complete_senders.clear(); return Task::ready(()); } From 404ee538120ffe6b1c37576ab446983012e4c961 Mon Sep 17 00:00:00 2001 From: John Tur Date: Wed, 19 Nov 2025 12:25:03 -0500 Subject: [PATCH 0221/1030] Fix Windows bundling (#43083) The updated package from https://github.com/zed-industries/zed/pull/43066 changed the paths of these files in the nupkg. Release Notes: - N/A --- script/bundle-windows.ps1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/script/bundle-windows.ps1 b/script/bundle-windows.ps1 index 67dfb5082233395c00f3575d8571d8d5f8520740..a9f5eafcc670ad6d3f36eeee92c7e05fe80fb8af 100644 --- a/script/bundle-windows.ps1 +++ b/script/bundle-windows.ps1 @@ -204,7 +204,7 @@ function CollectFiles { if($Architecture -eq "aarch64") { New-Item -Type Directory -Path "$innoDir\arm64" -Force Move-Item -Path ".\conpty\build\native\runtimes\arm64\OpenConsole.exe" -Destination "$innoDir\arm64\OpenConsole.exe" -Force - Move-Item -Path ".\conpty\runtimes\win10-arm64\native\conpty.dll" -Destination "$innoDir\conpty.dll" -Force + Move-Item -Path ".\conpty\runtimes\win-arm64\native\conpty.dll" -Destination "$innoDir\conpty.dll" -Force } else { New-Item -Type Directory -Path "$innoDir\x64" -Force @@ -212,7 +212,7 @@ function CollectFiles { Move-Item -Path ".\AGS_SDK-6.3.0\ags_lib\lib\amd_ags_x64.dll" -Destination "$innoDir\amd_ags_x64.dll" -Force Move-Item -Path ".\conpty\build\native\runtimes\x64\OpenConsole.exe" -Destination "$innoDir\x64\OpenConsole.exe" -Force Move-Item -Path ".\conpty\build\native\runtimes\arm64\OpenConsole.exe" -Destination "$innoDir\arm64\OpenConsole.exe" -Force - Move-Item -Path ".\conpty\runtimes\win10-x64\native\conpty.dll" -Destination "$innoDir\conpty.dll" -Force + Move-Item -Path ".\conpty\runtimes\win-x64\native\conpty.dll" -Destination "$innoDir\conpty.dll" -Force } } From 97b429953eb616d98032e7a74e2c98451210b183 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Wed, 19 Nov 2025 19:37:22 +0200 Subject: [PATCH 0222/1030] gpui: Do not render ligatures between different styled text runs (#43080) An attempt to re-land https://github.com/zed-industries/zed/pull/41043 Part of https://github.com/zed-industries/zed/issues/5259 (as `>>>` forms a ligature 
that we need to break into differently colored tokens) Before: image and https://github.com/user-attachments/assets/ae77ba64-ca50-4b5d-9ee4-a7d46fcaeb34 After: image When certain combination of characters forms a ligature, it takes the color of the first character. Even though the runs are split already by color and other properties, the underlying font system merges the runs together. Attempts to modify color and other, unrelated to font size, parameters, did not help on macOS, hence a somewhat odd approach was taken: runs get interleaved font sizes: normal and "normal + a tiny bit more". This is the only option that helped splitting the ligatures, and seems to render fine. Release Notes: - Fixed ligatures forming between different text kinds --------- Co-authored-by: Lukas Wirth --- crates/gpui/src/platform/mac/text_system.rs | 10 +++++++++- crates/gpui/src/platform/windows/direct_write.rs | 11 ++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/platform/mac/text_system.rs b/crates/gpui/src/platform/mac/text_system.rs index 92135a2c96e5cb4c3587f7f01225be5b1fcd8b43..3faf4e6491e6588bdb1341d5a8845171562fa8a0 100644 --- a/crates/gpui/src/platform/mac/text_system.rs +++ b/crates/gpui/src/platform/mac/text_system.rs @@ -435,6 +435,7 @@ impl MacTextSystemState { { let mut text = text; + let mut break_ligature = true; for run in font_runs { let text_run; (text_run, text) = text.split_at(run.len); @@ -444,7 +445,8 @@ impl MacTextSystemState { string.replace_str(&CFString::new(text_run), CFRange::init(utf16_start, 0)); let utf16_end = string.char_len(); - let cf_range = CFRange::init(utf16_start, utf16_end - utf16_start); + let length = utf16_end - utf16_start; + let cf_range = CFRange::init(utf16_start, length); let font = &self.fonts[run.font_id.0]; let font_metrics = font.metrics(); @@ -452,6 +454,11 @@ impl MacTextSystemState { max_ascent = max_ascent.max(font_metrics.ascent * font_scale); max_descent = max_descent.max(-font_metrics.descent * font_scale); + let font_size = if break_ligature { + px(font_size.0.next_up()) + } else { + font_size + }; unsafe { string.set_attribute( cf_range, @@ -459,6 +466,7 @@ impl MacTextSystemState { &font.native_font().clone_with_font_size(font_size.into()), ); } + break_ligature = !break_ligature; } } // Retrieve the glyphs from the shaped line, converting UTF16 offsets to UTF8 offsets. 
diff --git a/crates/gpui/src/platform/windows/direct_write.rs b/crates/gpui/src/platform/windows/direct_write.rs index cb22948898fd496d6820e29088a9be7c5c502341..84539633c9e9c2ba2204d8ccaa94bd4156f8ea89 100644 --- a/crates/gpui/src/platform/windows/direct_write.rs +++ b/crates/gpui/src/platform/windows/direct_write.rs @@ -608,6 +608,7 @@ impl DirectWriteState { let mut first_run = true; let mut ascent = Pixels::default(); let mut descent = Pixels::default(); + let mut break_ligatures = false; for run in font_runs { if first_run { first_run = false; @@ -616,6 +617,7 @@ impl DirectWriteState { text_layout.GetLineMetrics(Some(&mut metrics), &mut line_count as _)?; ascent = px(metrics[0].baseline); descent = px(metrics[0].height - metrics[0].baseline); + break_ligatures = !break_ligatures; continue; } let font_info = &self.fonts[run.font_id.0]; @@ -636,10 +638,17 @@ impl DirectWriteState { text_layout.SetFontCollection(collection, text_range)?; text_layout .SetFontFamilyName(&HSTRING::from(&font_info.font_family), text_range)?; - text_layout.SetFontSize(font_size.0, text_range)?; + let font_size = if break_ligatures { + font_size.0.next_up() + } else { + font_size.0 + }; + text_layout.SetFontSize(font_size, text_range)?; text_layout.SetFontStyle(font_info.font_face.GetStyle(), text_range)?; text_layout.SetFontWeight(font_info.font_face.GetWeight(), text_range)?; text_layout.SetTypography(&font_info.features, text_range)?; + + break_ligatures = !break_ligatures; } let mut runs = Vec::new(); From 2a2f5a9c7a51ee471a2231d333ccd8f3218ce9b6 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 19 Nov 2025 18:47:34 +0100 Subject: [PATCH 0223/1030] Add callable workflow for extension repositories (#43082) This starts the work on a workflow that can be invoked in extension CI to test changes on extension repositories. 
Release Notes: - N/A --------- Co-authored-by: Agus Zubiaga Co-authored-by: Conrad Irwin --- .github/workflows/after_release.yml | 6 +- .github/workflows/danger.yml | 2 +- .github/workflows/extension_tests.yml | 138 ++++++++++++++++++ .github/workflows/release.yml | 10 +- .github/workflows/release_nightly.yml | 10 +- .github/workflows/run_tests.yml | 6 +- tooling/xtask/src/tasks/workflows.rs | 2 + .../src/tasks/workflows/after_release.rs | 14 +- tooling/xtask/src/tasks/workflows/danger.rs | 6 +- .../src/tasks/workflows/extension_tests.rs | 129 ++++++++++++++++ .../xtask/src/tasks/workflows/nix_build.rs | 6 +- .../src/tasks/workflows/release_nightly.rs | 6 +- .../xtask/src/tasks/workflows/run_tests.rs | 17 +-- tooling/xtask/src/tasks/workflows/steps.rs | 48 ++++-- tooling/xtask/src/tasks/workflows/vars.rs | 14 +- 15 files changed, 345 insertions(+), 69 deletions(-) create mode 100644 .github/workflows/extension_tests.yml create mode 100644 tooling/xtask/src/tasks/workflows/extension_tests.rs diff --git a/.github/workflows/after_release.yml b/.github/workflows/after_release.yml index f9412672b3dd0fc5e029b3e0dc0c7a93d8582aa2..2e75659de0bdf51f4586ef57770fd54dc4eeb074 100644 --- a/.github/workflows/after_release.yml +++ b/.github/workflows/after_release.yml @@ -7,7 +7,7 @@ on: - published jobs: rebuild_releases_page: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: after_release::rebuild_releases_page::refresh_cloud_releases @@ -21,7 +21,7 @@ jobs: post_to_discord: needs: - rebuild_releases_page - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - id: get-release-url @@ -71,7 +71,7 @@ jobs: max-versions-to-keep: 5 token: ${{ secrets.WINGET_TOKEN }} create_sentry_release: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: steps::checkout_repo diff --git a/.github/workflows/danger.yml b/.github/workflows/danger.yml index 054767e5f1fd86c2a5b8fa2112802e797ec10f6e..9d6054eb3e7546088d29dd9c6316a3494ea6fb17 100644 --- a/.github/workflows/danger.yml +++ b/.github/workflows/danger.yml @@ -12,7 +12,7 @@ on: - main jobs: danger: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: steps::checkout_repo diff --git a/.github/workflows/extension_tests.yml b/.github/workflows/extension_tests.yml new file mode 100644 index 0000000000000000000000000000000000000000..89289fbea20999ada413ef1801bb428f03c82c6b --- /dev/null +++ b/.github/workflows/extension_tests.yml @@ -0,0 +1,138 @@ +# Generated from xtask::workflows::extension_tests +# Rebuild with `cargo xtask workflows`. 
+name: extension_tests +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: '1' + CARGO_INCREMENTAL: '0' + ZED_EXTENSION_CLI_SHA: 7cfce605704d41ca247e3f84804bf323f6c6caaf +on: + workflow_call: + inputs: + run_tests: + description: Whether the workflow should run rust tests + required: true + type: boolean +jobs: + orchestrate: + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + fetch-depth: ${{ github.ref == 'refs/heads/main' && 2 || 350 }} + - id: filter + name: filter + run: | + if [ -z "$GITHUB_BASE_REF" ]; then + echo "Not in a PR context (i.e., push to main/stable/preview)" + COMPARE_REV="$(git rev-parse HEAD~1)" + else + echo "In a PR context comparing to pull_request.base.ref" + git fetch origin "$GITHUB_BASE_REF" --depth=350 + COMPARE_REV="$(git merge-base "origin/${GITHUB_BASE_REF}" HEAD)" + fi + CHANGED_FILES="$(git diff --name-only "$COMPARE_REV" ${{ github.sha }})" + + check_pattern() { + local output_name="$1" + local pattern="$2" + local grep_arg="$3" + + echo "$CHANGED_FILES" | grep "$grep_arg" "$pattern" && \ + echo "${output_name}=true" >> "$GITHUB_OUTPUT" || \ + echo "${output_name}=false" >> "$GITHUB_OUTPUT" + } + + check_pattern "check_rust" '^(Cargo.lock|Cargo.toml|.*\.rs)$' -qP + check_pattern "check_extension" '^.*\.scm$' -qP + shell: bash -euxo pipefail {0} + outputs: + check_rust: ${{ steps.filter.outputs.check_rust }} + check_extension: ${{ steps.filter.outputs.check_extension }} + check_rust: + needs: + - orchestrate + if: needs.orchestrate.outputs.check_rust == 'true' + runs-on: namespace-profile-16x32-ubuntu-2204 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: steps::cache_rust_dependencies_namespace + uses: namespacelabs/nscloud-cache-action@v1 + with: + cache: rust + - name: steps::cargo_fmt + run: cargo fmt --all -- --check + shell: bash -euxo pipefail {0} + - name: extension_tests::run_clippy + run: cargo clippy --release --all-targets --all-features -- --deny warnings + shell: bash -euxo pipefail {0} + - name: steps::cargo_install_nextest + if: inputs.run_tests + uses: taiki-e/install-action@nextest + - name: steps::cargo_nextest + if: inputs.run_tests + run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + shell: bash -euxo pipefail {0} + timeout-minutes: 3 + check_extension: + needs: + - orchestrate + if: needs.orchestrate.outputs.check_extension == 'true' + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - id: cache-zed-extension-cli + name: extension_tests::cache_zed_extension_cli + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 + with: + path: zed-extension + key: zed-extension-${{ env.ZED_EXTENSION_CLI_SHA }} + - name: extension_tests::download_zed_extension_cli + if: steps.cache-zed-extension-cli.outputs.cache-hit != 'true' + run: | + wget --quiet "https://zed-extension-cli.nyc3.digitaloceanspaces.com/$ZED_EXTENSION_CLI_SHA/x86_64-unknown-linux-gnu/zed-extension" + chmod +x zed-extension + shell: bash -euxo pipefail {0} + - name: extension_tests::check + run: | + mkdir -p /tmp/ext-scratch + mkdir -p /tmp/ext-output + ./zed-extension --source-dir . 
--scratch-dir /tmp/ext-scratch --output-dir /tmp/ext-output + shell: bash -euxo pipefail {0} + timeout-minutes: 1 + tests_pass: + needs: + - orchestrate + - check_rust + - check_extension + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && always() + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: run_tests::tests_pass + run: | + set +x + EXIT_CODE=0 + + check_result() { + echo "* $1: $2" + if [[ "$2" != "skipped" && "$2" != "success" ]]; then EXIT_CODE=1; fi + } + + check_result "orchestrate" "${{ needs.orchestrate.result }}" + check_result "check_rust" "${{ needs.check_rust.result }}" + check_result "check_extension" "${{ needs.check_extension.result }}" + + exit $EXIT_CODE + shell: bash -euxo pipefail {0} +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + cancel-in-progress: true diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d591ebc509fb9347daf41614c7b53f09a5ec0312..90d105880f94ee428f01746ed627f5c6f7d4e246 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,7 +10,7 @@ on: - v* jobs: run_tests_mac: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: self-mini-macos steps: - name: steps::checkout_repo @@ -42,7 +42,7 @@ jobs: shell: bash -euxo pipefail {0} timeout-minutes: 60 run_tests_linux: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-16x32-ubuntu-2204 steps: - name: steps::checkout_repo @@ -89,7 +89,7 @@ jobs: shell: bash -euxo pipefail {0} timeout-minutes: 60 run_tests_windows: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: self-32vcpu-windows-2022 steps: - name: steps::checkout_repo @@ -121,7 +121,7 @@ jobs: shell: pwsh timeout-minutes: 60 check_scripts: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: steps::checkout_repo @@ -150,7 +150,7 @@ jobs: shell: bash -euxo pipefail {0} timeout-minutes: 60 create_draft_release: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: steps::checkout_repo diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index 0604aad15531fc7f013b4790ba1e0efa9c21eb52..bb327f2c5527d353c9aad01c3e26edcf5baac78c 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -12,7 +12,7 @@ on: - cron: 0 7 * * * jobs: check_style: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: self-mini-macos steps: - name: steps::checkout_repo @@ -28,7 +28,7 @@ jobs: shell: bash -euxo pipefail {0} timeout-minutes: 60 run_tests_windows: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: self-32vcpu-windows-2022 steps: - name: 
steps::checkout_repo @@ -361,7 +361,7 @@ jobs: needs: - check_style - run_tests_windows - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-32x64-ubuntu-2004 env: ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} @@ -392,7 +392,7 @@ jobs: needs: - check_style - run_tests_windows - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: self-mini-macos env: ZED_CLIENT_CHECKSUM_SEED: ${{ secrets.ZED_CLIENT_CHECKSUM_SEED }} @@ -434,7 +434,7 @@ jobs: - bundle_mac_x86_64 - bundle_windows_aarch64 - bundle_windows_x86_64 - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-4x8-ubuntu-2204 steps: - name: steps::checkout_repo diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 2322f5d7659366ec312bc76b7501afdab86ff5dc..883be0c1905507639664e6d1f35b4c7e48d0928e 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -15,7 +15,7 @@ on: - v[0-9]+.[0-9]+.x jobs: orchestrate: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: steps::checkout_repo @@ -59,7 +59,7 @@ jobs: run_nix: ${{ steps.filter.outputs.run_nix }} run_tests: ${{ steps.filter.outputs.run_tests }} check_style: - if: github.repository_owner == 'zed-industries' + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') runs-on: namespace-profile-4x8-ubuntu-2204 steps: - name: steps::checkout_repo @@ -538,7 +538,7 @@ jobs: - check_scripts - build_nix_linux_x86_64 - build_nix_mac_aarch64 - if: github.repository_owner == 'zed-industries' && always() + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && always() runs-on: namespace-profile-2x4-ubuntu-2404 steps: - name: run_tests::tests_pass diff --git a/tooling/xtask/src/tasks/workflows.rs b/tooling/xtask/src/tasks/workflows.rs index 374a22f3ea9c65dcfc9743f77448a5c29117cedf..31ca9590f1d6c8c935da4056930db470913656c9 100644 --- a/tooling/xtask/src/tasks/workflows.rs +++ b/tooling/xtask/src/tasks/workflows.rs @@ -7,6 +7,7 @@ mod after_release; mod cherry_pick; mod compare_perf; mod danger; +mod extension_tests; mod nix_build; mod release_nightly; mod run_bundling; @@ -39,6 +40,7 @@ pub fn run_workflows(_: GenerateWorkflowArgs) -> Result<()> { ), ("run_agent_evals.yml", run_agent_evals::run_agent_evals()), ("after_release.yml", after_release::after_release()), + ("extension_tests.yml", extension_tests::extension_tests()), ]; fs::create_dir_all(dir) .with_context(|| format!("Failed to create directory: {}", dir.display()))?; diff --git a/tooling/xtask/src/tasks/workflows/after_release.rs b/tooling/xtask/src/tasks/workflows/after_release.rs index d626faf52098a41c531eab0f13f4d727ea7a7cf9..c99173bfe7183b5a3440804a18e0133270744654 100644 --- a/tooling/xtask/src/tasks/workflows/after_release.rs +++ b/tooling/xtask/src/tasks/workflows/after_release.rs @@ -3,7 +3,7 @@ use gh_workflow::*; use crate::tasks::workflows::{ release::{self, notify_on_failure}, runners, - steps::{NamedJob, checkout_repo, dependant_job, named}, + steps::{CommonJobConditions, 
NamedJob, checkout_repo, dependant_job, named}, vars::{self, StepOutput}, }; @@ -43,9 +43,7 @@ fn rebuild_releases_page() -> NamedJob { named::job( Job::default() .runs_on(runners::LINUX_SMALL) - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .add_step(refresh_cloud_releases()) .add_step(redeploy_zed_dev()), ) @@ -95,9 +93,7 @@ fn post_to_discord(deps: &[&NamedJob]) -> NamedJob { } let job = dependant_job(deps) .runs_on(runners::LINUX_SMALL) - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .add_step(get_release_url()) .add_step(get_content()) .add_step(discord_webhook_action()); @@ -145,9 +141,7 @@ fn publish_winget() -> NamedJob { fn create_sentry_release() -> NamedJob { let job = Job::default() .runs_on(runners::LINUX_SMALL) - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .add_step(checkout_repo()) .add_step(release::create_sentry_release()); named::job(job) diff --git a/tooling/xtask/src/tasks/workflows/danger.rs b/tooling/xtask/src/tasks/workflows/danger.rs index eed2cba732292e5851468766084e846f366b3edc..8b3bf0ac3a855f096da096dcf8f8204bd7aaac42 100644 --- a/tooling/xtask/src/tasks/workflows/danger.rs +++ b/tooling/xtask/src/tasks/workflows/danger.rs @@ -1,6 +1,6 @@ use gh_workflow::*; -use crate::tasks::workflows::steps::{NamedJob, named}; +use crate::tasks::workflows::steps::{CommonJobConditions, NamedJob, named}; use super::{runners, steps}; @@ -42,9 +42,7 @@ fn danger_job() -> NamedJob { NamedJob { name: "danger".to_string(), job: Job::default() - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .runs_on(runners::LINUX_SMALL) .add_step(steps::checkout_repo()) .add_step(steps::setup_pnpm()) diff --git a/tooling/xtask/src/tasks/workflows/extension_tests.rs b/tooling/xtask/src/tasks/workflows/extension_tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ee094fd37608c2427037effac3a6afa182014ba --- /dev/null +++ b/tooling/xtask/src/tasks/workflows/extension_tests.rs @@ -0,0 +1,129 @@ +use gh_workflow::*; +use indoc::indoc; + +use crate::tasks::workflows::{ + run_tests::{orchestrate, tests_pass}, + runners, + steps::{self, CommonJobConditions, FluentBuilder, NamedJob, named}, + vars::{PathCondition, StepOutput, one_workflow_per_non_main_branch}, +}; + +const RUN_TESTS_INPUT: &str = "run_tests"; +const ZED_EXTENSION_CLI_SHA: &str = "7cfce605704d41ca247e3f84804bf323f6c6caaf"; + +// This is used by various extensions repos in the zed-extensions org to run automated tests. 
+pub(crate) fn extension_tests() -> Workflow { + let should_check_rust = PathCondition::new("check_rust", r"^(Cargo.lock|Cargo.toml|.*\.rs)$"); + let should_check_extension = PathCondition::new("check_extension", r"^.*\.scm$"); + + let orchestrate = orchestrate(&[&should_check_rust, &should_check_extension]); + + let jobs = [ + orchestrate, + should_check_rust.guard(check_rust()), + should_check_extension.guard(check_extension()), + ]; + + let tests_pass = tests_pass(&jobs); + + named::workflow() + .add_event( + Event::default().workflow_call(WorkflowCall::default().add_input( + RUN_TESTS_INPUT, + WorkflowCallInput { + description: "Whether the workflow should run rust tests".into(), + required: true, + input_type: "boolean".into(), + default: None, + }, + )), + ) + .concurrency(one_workflow_per_non_main_branch()) + .add_env(("CARGO_TERM_COLOR", "always")) + .add_env(("RUST_BACKTRACE", 1)) + .add_env(("CARGO_INCREMENTAL", 0)) + .add_env(("ZED_EXTENSION_CLI_SHA", ZED_EXTENSION_CLI_SHA)) + .map(|workflow| { + jobs.into_iter() + .chain([tests_pass]) + .fold(workflow, |workflow, job| { + workflow.add_job(job.name, job.job) + }) + }) +} + +fn run_clippy() -> Step { + named::bash("cargo clippy --release --all-targets --all-features -- --deny warnings") +} + +fn check_rust() -> NamedJob { + let job = Job::default() + .with_repository_owner_guard() + .runs_on(runners::LINUX_DEFAULT) + .timeout_minutes(3u32) + .add_step(steps::checkout_repo()) + .add_step(steps::cache_rust_dependencies_namespace()) + .add_step(steps::cargo_fmt()) + .add_step(run_clippy()) + .add_step( + steps::cargo_install_nextest() + .if_condition(Expression::new(format!("inputs.{RUN_TESTS_INPUT}"))), + ) + .add_step( + steps::cargo_nextest(runners::Platform::Linux) + .if_condition(Expression::new(format!("inputs.{RUN_TESTS_INPUT}"))), + ); + + named::job(job) +} + +fn check_extension() -> NamedJob { + let (cache_download, cache_hit) = cache_zed_extension_cli(); + let job = Job::default() + .with_repository_owner_guard() + .runs_on(runners::LINUX_SMALL) + .timeout_minutes(1u32) + .add_step(steps::checkout_repo()) + .add_step(cache_download) + .add_step(download_zed_extension_cli(cache_hit)) + .add_step(check()); + + named::job(job) +} + +pub fn cache_zed_extension_cli() -> (Step, StepOutput) { + let step = named::uses( + "actions", + "cache", + "0057852bfaa89a56745cba8c7296529d2fc39830", + ) + .id("cache-zed-extension-cli") + .with( + Input::default() + .add("path", "zed-extension") + .add("key", "zed-extension-${{ env.ZED_EXTENSION_CLI_SHA }}"), + ); + let output = StepOutput::new(&step, "cache-hit"); + (step, output) +} + +pub fn download_zed_extension_cli(cache_hit: StepOutput) -> Step { + named::bash( + indoc! { + r#" + wget --quiet "https://zed-extension-cli.nyc3.digitaloceanspaces.com/$ZED_EXTENSION_CLI_SHA/x86_64-unknown-linux-gnu/zed-extension" + chmod +x zed-extension + "#, + } + ).if_condition(Expression::new(format!("{} != 'true'", cache_hit.expr()))) +} + +pub fn check() -> Step { + named::bash(indoc! { + r#" + mkdir -p /tmp/ext-scratch + mkdir -p /tmp/ext-output + ./zed-extension --source-dir . 
--scratch-dir /tmp/ext-scratch --output-dir /tmp/ext-output + "# + }) +} diff --git a/tooling/xtask/src/tasks/workflows/nix_build.rs b/tooling/xtask/src/tasks/workflows/nix_build.rs index 3c6818106335aac712bbf4c282107e735cd5c631..ff98852d191ae8b0edf29c6cdf52985b16034221 100644 --- a/tooling/xtask/src/tasks/workflows/nix_build.rs +++ b/tooling/xtask/src/tasks/workflows/nix_build.rs @@ -1,6 +1,6 @@ use crate::tasks::workflows::{ runners::{Arch, Platform}, - steps::NamedJob, + steps::{CommonJobConditions, NamedJob}, }; use super::{runners, steps, steps::named, vars}; @@ -71,9 +71,7 @@ pub(crate) fn build_nix( let mut job = Job::default() .timeout_minutes(60u32) .continue_on_error(true) - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .runs_on(runner) .add_env(("ZED_CLIENT_CHECKSUM_SEED", vars::ZED_CLIENT_CHECKSUM_SEED)) .add_env(("ZED_MINIDUMP_ENDPOINT", vars::ZED_SENTRY_MINIDUMP_ENDPOINT)) diff --git a/tooling/xtask/src/tasks/workflows/release_nightly.rs b/tooling/xtask/src/tasks/workflows/release_nightly.rs index c916013c0bc6f5ad89d56f382f3484097edcbed3..73cdbe3f3ee1f19a20643189b37c378d307af86a 100644 --- a/tooling/xtask/src/tasks/workflows/release_nightly.rs +++ b/tooling/xtask/src/tasks/workflows/release_nightly.rs @@ -7,7 +7,7 @@ use crate::tasks::workflows::{ run_bundling::{bundle_linux, bundle_mac, bundle_windows}, run_tests::run_platform_tests, runners::{Arch, Platform, ReleaseChannel}, - steps::{FluentBuilder, NamedJob}, + steps::{CommonJobConditions, FluentBuilder, NamedJob}, }; use super::{runners, steps, steps::named, vars}; @@ -83,9 +83,7 @@ fn check_style() -> NamedJob { fn release_job(deps: &[&NamedJob]) -> Job { let job = Job::default() - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .timeout_minutes(60u32); if deps.len() > 0 { job.needs(deps.iter().map(|j| j.name.clone()).collect::>()) diff --git a/tooling/xtask/src/tasks/workflows/run_tests.rs b/tooling/xtask/src/tasks/workflows/run_tests.rs index 826881ab8a1e248450a2f3c4a3a62f4d449a8117..31ebddc8b3dfff2bd93de7be4bce39cbb0a6195c 100644 --- a/tooling/xtask/src/tasks/workflows/run_tests.rs +++ b/tooling/xtask/src/tasks/workflows/run_tests.rs @@ -4,7 +4,10 @@ use gh_workflow::{ use indexmap::IndexMap; use crate::tasks::workflows::{ - nix_build::build_nix, runners::Arch, steps::BASH_SHELL, vars::PathCondition, + nix_build::build_nix, + runners::Arch, + steps::{BASH_SHELL, CommonJobConditions, repository_owner_guard_expression}, + vars::PathCondition, }; use super::{ @@ -107,7 +110,7 @@ pub(crate) fn run_tests() -> Workflow { // Generates a bash script that checks changed files against regex patterns // and sets GitHub output variables accordingly -fn orchestrate(rules: &[&PathCondition]) -> NamedJob { +pub fn orchestrate(rules: &[&PathCondition]) -> NamedJob { let name = "orchestrate".to_owned(); let step_name = "filter".to_owned(); let mut script = String::new(); @@ -162,9 +165,7 @@ fn orchestrate(rules: &[&PathCondition]) -> NamedJob { let job = Job::default() .runs_on(runners::LINUX_SMALL) - .cond(Expression::new( - "github.repository_owner == 'zed-industries'", - )) + .with_repository_owner_guard() .outputs(outputs) .add_step(steps::checkout_repo().add_with(( "fetch-depth", @@ -180,7 +181,7 @@ fn orchestrate(rules: &[&PathCondition]) -> NamedJob { NamedJob { name, job } } -pub(crate) fn tests_pass(jobs: &[NamedJob]) -> NamedJob { +pub fn tests_pass(jobs: &[NamedJob]) -> NamedJob { let mut script = 
String::from(indoc::indoc! {r#"
         set +x
         EXIT_CODE=0
@@ -214,9 +215,7 @@ pub(crate) fn tests_pass(jobs: &[NamedJob]) -> NamedJob {
                 .map(|j| j.name.to_string())
                 .collect::<Vec<_>>(),
         )
-        .cond(Expression::new(
-            "github.repository_owner == 'zed-industries' && always()",
-        ))
+        .cond(repository_owner_guard_expression(true))
         .add_step(named::bash(&script));
 
     named::job(job)
diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs
index 969dd35db07dc6ed315e0fa3e3ae2b69934e8b95..c5edbdf8439675b4264388001322c12f4f3026e9 100644
--- a/tooling/xtask/src/tasks/workflows/steps.rs
+++ b/tooling/xtask/src/tasks/workflows/steps.rs
@@ -94,18 +94,18 @@ pub fn clear_target_dir_if_large(platform: Platform) -> Step {
     }
 }
 
-pub(crate) fn clippy(platform: Platform) -> Step {
+pub fn clippy(platform: Platform) -> Step {
     match platform {
         Platform::Windows => named::pwsh("./script/clippy.ps1"),
         _ => named::bash("./script/clippy"),
     }
 }
 
-pub(crate) fn cache_rust_dependencies_namespace() -> Step {
+pub fn cache_rust_dependencies_namespace() -> Step {
     named::uses("namespacelabs", "nscloud-cache-action", "v1").add_with(("cache", "rust"))
 }
 
-fn setup_linux() -> Step {
+pub fn setup_linux() -> Step {
     named::bash("./script/linux")
 }
 
@@ -131,7 +131,7 @@ pub fn script(name: &str) -> Step {
     }
 }
 
-pub(crate) struct NamedJob {
+pub struct NamedJob {
     pub name: String,
     pub job: Job,
 }
@@ -145,11 +145,26 @@ pub(crate) struct NamedJob {
 //     }
 // }
 
+pub fn repository_owner_guard_expression(trigger_always: bool) -> Expression {
+    Expression::new(format!(
+        "(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions'){}",
+        trigger_always.then_some(" && always()").unwrap_or_default()
+    ))
+}
+
+pub trait CommonJobConditions: Sized {
+    fn with_repository_owner_guard(self) -> Self;
+}
+
+impl CommonJobConditions for Job {
+    fn with_repository_owner_guard(self) -> Self {
+        self.cond(repository_owner_guard_expression(false))
+    }
+}
+
 pub(crate) fn release_job(deps: &[&NamedJob]) -> Job {
     dependant_job(deps)
-        .cond(Expression::new(
-            "github.repository_owner == 'zed-industries'",
-        ))
+        .with_repository_owner_guard()
         .timeout_minutes(60u32)
 }
 
@@ -169,7 +184,7 @@ impl FluentBuilder for Workflow {}
 /// Copied from GPUI to avoid adding GPUI as dependency
 /// todo(ci) just put this in gh-workflow
 #[allow(unused)]
-pub(crate) trait FluentBuilder {
+pub trait FluentBuilder {
     /// Imperatively modify self with the given closure.
     fn map<U>(self, f: impl FnOnce(Self) -> U) -> U
     where
@@ -223,34 +238,34 @@ pub(crate) trait FluentBuilder {
 
 // (janky) helper to generate steps with a name that corresponds
 // to the name of the calling function.
-pub(crate) mod named {
+pub mod named {
     use super::*;
 
     /// Returns a uses step with the same name as the enclosing function.
     /// (You shouldn't inline this function into the workflow definition, you must
     /// wrap it in a new function.)
-    pub(crate) fn uses(owner: &str, repo: &str, ref_: &str) -> Step {
+    pub fn uses(owner: &str, repo: &str, ref_: &str) -> Step {
         Step::new(function_name(1)).uses(owner, repo, ref_)
     }
 
     /// Returns a bash-script step with the same name as the enclosing function.
     /// (You shouldn't inline this function into the workflow definition, you must
     /// wrap it in a new function.)
-    pub(crate) fn bash(script: &str) -> Step {
+    pub fn bash(script: &str) -> Step {
         Step::new(function_name(1)).run(script).shell(BASH_SHELL)
     }
 
     /// Returns a pwsh-script step with the same name as the enclosing function.
/// (You shouldn't inline this function into the workflow definition, you must /// wrap it in a new function.) - pub(crate) fn pwsh(script: &str) -> Step { + pub fn pwsh(script: &str) -> Step { Step::new(function_name(1)).run(script).shell(PWSH_SHELL) } /// Runs the command in either powershell or bash, depending on platform. /// (You shouldn't inline this function into the workflow definition, you must /// wrap it in a new function.) - pub(crate) fn run(platform: Platform, script: &str) -> Step { + pub fn run(platform: Platform, script: &str) -> Step { match platform { Platform::Windows => Step::new(function_name(1)).run(script).shell(PWSH_SHELL), Platform::Linux | Platform::Mac => { @@ -260,7 +275,7 @@ pub(crate) mod named { } /// Returns a Workflow with the same name as the enclosing module. - pub(crate) fn workflow() -> Workflow { + pub fn workflow() -> Workflow { Workflow::default().name( named::function_name(1) .split("::") @@ -272,7 +287,7 @@ pub(crate) mod named { /// Returns a Job with the same name as the enclosing function. /// (note job names may not contain `::`) - pub(crate) fn job(job: Job) -> NamedJob { + pub fn job(job: Job) -> NamedJob { NamedJob { name: function_name(1).split("::").last().unwrap().to_owned(), job, @@ -282,7 +297,7 @@ pub(crate) mod named { /// Returns the function name N callers above in the stack /// (typically 1). /// This only works because xtask always runs debug builds. - pub(crate) fn function_name(i: usize) -> String { + pub fn function_name(i: usize) -> String { let mut name = "".to_string(); let mut count = 0; backtrace::trace(|frame| { @@ -297,6 +312,7 @@ pub(crate) mod named { }); false }); + name.split("::") .skip_while(|s| s != &"workflows") .skip(1) diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index 1b3a0ec72ab7ad9a3a6c4446f5e08743d0212a2b..bbb98af757dd9b794ae7c57d6ddb4f1d3d10019d 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -11,8 +11,8 @@ macro_rules! secret { } macro_rules! 
var { - ($secret_name:ident) => { - pub const $secret_name: &str = concat!("${{ vars.", stringify!($secret_name), " }}"); + ($var_name:ident) => { + pub const $var_name: &str = concat!("${{ vars.", stringify!($var_name), " }}"); }; } @@ -76,7 +76,7 @@ pub fn bundle_envs(platform: Platform) -> Env { } } -pub(crate) fn one_workflow_per_non_main_branch() -> Concurrency { +pub fn one_workflow_per_non_main_branch() -> Concurrency { Concurrency::default() .group("${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }}") .cancel_in_progress(true) @@ -89,7 +89,7 @@ pub(crate) fn allow_concurrent_runs() -> Concurrency { } // Represents a pattern to check for changed files and corresponding output variable -pub(crate) struct PathCondition { +pub struct PathCondition { pub name: &'static str, pub pattern: &'static str, pub invert: bool, @@ -147,6 +147,10 @@ impl StepOutput { .expect("Steps that produce outputs must have an ID"), } } + + pub fn expr(&self) -> String { + format!("steps.{}.outputs.{}", self.step_id, self.name) + } } impl serde::Serialize for StepOutput { @@ -164,7 +168,7 @@ impl std::fmt::Display for StepOutput { } } -pub(crate) struct Input { +pub struct Input { pub input_type: &'static str, pub name: &'static str, pub default: Option, From 829be71061650a9572352e11aa17ab5a90bb5c4a Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Wed, 19 Nov 2025 13:00:35 -0500 Subject: [PATCH 0224/1030] Fix invalid Unicode in terms & conditions (#42906) Closes #40210 Previously attempted in #40423 and #42756. Third time's the charm? Release Notes: - Fixed encoding error in terms & conditions displayed when installing --- script/generate-terms-rtf | 4 +- script/terms/terms.rtf | 431 ++++++++++++-------------------------- 2 files changed, 130 insertions(+), 305 deletions(-) diff --git a/script/generate-terms-rtf b/script/generate-terms-rtf index ddfaee95a5a965f3b7db3a276827a4b488bc4c77..654972931f12ccda1058201e967d33f22699c431 100755 --- a/script/generate-terms-rtf +++ b/script/generate-terms-rtf @@ -7,6 +7,4 @@ then brew install pandoc # Install pandoc using Homebrew fi -pandoc ./legal/terms.md -f markdown-smart -t html -o ./script/terms/terms.html -textutil -convert rtf ./script/terms/terms.html -output ./script/terms/terms.rtf -rm ./script/terms/terms.html +pandoc ./legal/terms.md -f markdown-smart -t rtf -o ./script/terms/terms.rtf --standalone diff --git a/script/terms/terms.rtf b/script/terms/terms.rtf index fa318e07d70a1d402c3be7df905d1b2c7e99333b..f5fab23f4551fd0b3f8605209c3315eb470af224 100644 --- a/script/terms/terms.rtf +++ b/script/terms/terms.rtf @@ -1,302 +1,129 @@ -{\rtf1\ansi\ansicpg1252\cocoartf2822 -\cocoatextscaling0\cocoaplatform0{\fonttbl\f0\froman\fcharset0 Times-Roman;\f1\froman\fcharset0 Times-Bold;} -{\colortbl;\red255\green255\blue255;\red0\green0\blue0;\red0\green0\blue233;} -{\*\expandedcolortbl;;\cssrgb\c0\c0\c0;\cssrgb\c0\c0\c93333;} -{\*\listtable{\list\listtemplateid1\listhybrid{\listlevel\levelnfc23\levelnfcn23\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{disc\}}{\leveltext\leveltemplateid1\'01\uc0\u8226 ;}{\levelnumbers;}\fi-360\li720\lin720 }{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}}{\leveltext\leveltemplateid2\'01\'01;}{\levelnumbers\'01;}\fi-360\li1440\lin1440 }{\listname ;}\listid1} 
-{\list\listtemplateid2\listhybrid{\listlevel\levelnfc23\levelnfcn23\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{disc\}}{\leveltext\leveltemplateid101\'01\uc0\u8226 ;}{\levelnumbers;}\fi-360\li720\lin720 }{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}}{\leveltext\leveltemplateid102\'01\'01;}{\levelnumbers\'01;}\fi-360\li1440\lin1440 }{\listname ;}\listid2}} -{\*\listoverridetable{\listoverride\listid1\listoverridecount0\ls1}{\listoverride\listid2\listoverridecount0\ls2}} -\deftab720 -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\fs24 \cf0 \expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 PLEASE READ THESE TERMS AND CONDITIONS CAREFULLY BEFORE USING THE SERVICE OR SOFTWARE OFFERED BY ZED INDUSTRIES, INC. ("ZED", OR "WE"). BY ACCESSING OR USING THE SOLUTION (AS DEFINED BELOW) IN ANY MANNER, YOU ("YOU" OR "CUSTOMER") AGREE TO BE BOUND BY THESE TERMS (THE "AGREEMENT") TO THE EXCLUSION OF ALL OTHER TERMS. YOU REPRESENT AND WARRANT THAT YOU HAVE THE AUTHORITY TO ENTER INTO THIS AGREEMENT; IF YOU ARE ENTERING INTO THIS AGREEMENT ON BEHALF OF AN ORGANIZATION OR ENTITY, REFERENCES TO "CUSTOMER" AND "YOU" IN THIS AGREEMENT, REFER TO THAT ORGANIZATION OR ENTITY. IF YOU DO NOT AGREE TO ALL OF THE FOLLOWING, YOU MAY NOT USE OR ACCESS THE SOLUTION IN ANY MANNER. IF THE TERMS OF THIS AGREEMENT ARE CONSIDERED AN OFFER, ACCEPTANCE IS EXPRESSLY LIMITED TO SUCH TERMS.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 1. ACCESS TO AND USE OF THE SOLUTION\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Subject to the terms and conditions of this Agreement, Zed hereby grants to You, and You hereby accept from Zed, a term-limited, non-exclusive, non-transferable, non-assignable and non-sublicensable license to make use of the Editor for Your internal use only, and subject to the use limitations in Section 2.2.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 2. TERMS APPLICABLE TO THE EDITOR\ -\pard\pardeftab720\sa280\partightenfactor0 - -\fs28 \cf0 2.1. License Grant\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Subject to the terms and conditions of this Agreement, Zed hereby grants to You, and You hereby accept from Zed, a term-limited, non-exclusive, non-transferable, non-assignable and non-sublicensable license to make use of the Editor for Your internal use only, and subject to the use limitations in Section 2.2.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 2.2. License Limitations\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 You agree that You shall not: (a) exceed the scope of the licenses granted in Section 2.1; (b) make copies of the Editor; (c) distribute, sublicense, assign, delegate, rent, lease, sell, time-share or otherwise transfer the benefits of, use under, or rights to, the license granted in Section 2.1; (d) reverse engineer, decompile, disassemble or otherwise attempt to learn the source code, structure or algorithms underlying the Editor, except to the extent required to be permitted under applicable law; (e) modify, translate or create derivative works of the Editor; or (f) remove any copyright, trademark, patent or other proprietary notice that appears on the Editor or copies thereof.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 2.3. 
Open Source Software\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Zed makes certain versions of the Editor and related software available at the Zed GitHub Repository: {\field{\*\fldinst{HYPERLINK "https://github.com/zed-industries/zed"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://github.com/zed-industries/zed}} (the "Repo"). Your use of such software is subject to the open source software licenses declared in the Repo.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 3. TERMS APPLICABLE TO THE ZED SERVICE\ -\pard\pardeftab720\sa280\partightenfactor0 - -\fs28 \cf0 3.1. Access to and Scope of Zed Service\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 If you have elected to use the Zed Service by enabling or activating the Zed Service, Zed will use commercially reasonable efforts to make the Zed Service available to You as set forth in this Agreement. Once you elected to use the Zed Service, You may access and use the Zed Service during the Term, subject to Your compliance with the terms and conditions of the Agreement.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 3.2. Restrictions\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 You will use the Zed Service only in accordance with all applicable laws, including, but not limited to, laws related to data (whether applicable within the United States, the European Union, or otherwise). You agree not to (and will not allow any third party to): (i) remove or otherwise alter any proprietary notices or labels from the Zed Service or any portion thereof; (ii) reverse engineer, decompile, disassemble, or otherwise attempt to discover the underlying structure, ideas, or algorithms of the Zed Service or any software used to provide or make the Zed Service available; or (iii) rent, resell or otherwise allow any third party access to or use of the Zed Service. Zed may suspend Your access to or use of the Zed Service as follows: (a) immediately if Zed reasonably believes Your use of the Zed Service may pose a security risk to or may adversely impact the Zed Service; or (b) if You are in breach of this Agreement.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 3.3. Customer Data\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 You are solely responsible for Customer Data including, but not limited to: (a) compliance with all applicable laws and this Agreement; (b) any claims relating to Customer Data; and (c) any claims that Customer Data infringes, misappropriates, or otherwise violates the rights of any third party. You agree and acknowledge that Customer Data may be irretrievably deleted if Your account is terminated. For purposes of this Agreement, "Customer Data" shall mean any data, information or other material provided, uploaded, or submitted by You to the Zed Service in the course of using the Zed Service. Notwithstanding anything to the contrary, You represent and warrant that You will not transfer or make available to Zed any personally identifiable information or related information subject to applicable data privacy laws or regulations, unless otherwise agreed to in writing by Zed.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.1. 
Customer Data Made Available to Zed\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 To the extent You elect to make Customer Data available to Zed, the same may only be used by Zed according to the Customer Data type and the use rights regarding the same as described herein:\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.2. Usage Data\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 To improve the Editor and understand how You use it, Zed optionally collects the following usage data:\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls1\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext a }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 file extensions of opened files;\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls1\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext b }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 features and tools You use within the Editor;\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls1\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext c }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 project statistics (e.g., number of files); and\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls1\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext d }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 frameworks detected in Your projects\ -\pard\pardeftab720\sa240\partightenfactor0 -\cf0 (a-d collectively, "Usage Data"). Usage Data does not include any of Your software code or sensitive project details. You may change Your preferences disabling the collection of Usage Data and You can audit Usage Data collected by the Editor at any time. See {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/telemetry"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://zed.dev/docs/telemetry}} for more.\ -Usage Data is associated with a secure random telemetry ID which may be linked to Your email address. This linkage currently serves two purposes: (1) it allows Zed to analyze usage patterns over time while maintaining Your privacy; and (2) it enables Zed to reach out to specific user groups for feedback and improvement suggestions. Zed may contact You based on Your usage patterns to better understand your needs and improve the Solution. If You delete Your account, the link between Your telemetry ID and Your email address will be permanently removed. By continuing to use Editor or Solution with this feature enabled You agree to this Usage Data collection.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.3. Crash Reports\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 Customer Data consisting of data related to the behavior of the Solution prior to a crash or failure, such as stack traces are collected and classified as "Crash Reports". Zed will use commercially reasonable efforts to exclude any personally identifiable information from Crash Reports, but due to the nature of a crash, Zed does not ensure that information such as paths will be excluded from Crash Reports. Crash Reports will be used solely for Zed's internal purposes in connection with diagnosing defects in the Solution that led to the crash. You may grant us permission to capture Crash Reports when installing or activating the Solution, and You may change Your preferences at any time in the settings feature of the Solution. 
Once You grant us this permission, Zed will retain the Crash Reports indefinitely.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.4. User Content\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 \'e2\'80\'a2 You may access, modify or create certain data or information in connection with your access or use of the Zed Editor or the Solution. Such data and information may include, but is not limited to any of the following:\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext a }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 file contents and associated metadata (e.g., filename, paths, size, timestamps);\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext b }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 source control history, comments and metadata (e.g., git history, commit messages);\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext c }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 configuration data (e.g., settings, keymaps);\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext d }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 anything typed, pasted and/or displayed on screen while using the Editor;\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext e }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 derivative works of the above generated by the Editor (e.g., format conversions, summaries, indexes, caches);\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext f }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 metadata, code and other derivative works of the above returned by language servers and other local tooling; and\ -\pard\tx940\tx1440\pardeftab720\li1440\fi-1440\partightenfactor0 -\ls2\ilvl1\cf0 \kerning1\expnd0\expndtw0 \outl0\strokewidth0 {\listtext g }\expnd0\expndtw0\kerning0 -\outl0\strokewidth0 \strokec2 metadata, code and other derivative works of the above returned by services integrated with the Zed Editor\ -\pard\pardeftab720\sa240\partightenfactor0 -\cf0 (a-g collectively, "User Content").\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.5. Handling of User Content\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 Zed will make use of or transfer User Content only as specified in this Agreement, or as necessary to comply with applicable law.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.5.1. Zed Collaboration Services\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 When using Zed Collaboration Services, User Content is transmitted from Your environment only if You collaborate with other Zed users by electing to share a project in the Editor. Once You share a project, Zed may transmit User Content consisting of file paths, file contents, and metadata regarding the code returned by language servers. Currently, Zed does not persist any User Content beyond the Your collaboration session. If You unshare a project or disconnect from the Solution, all information associated with such project will be deleted from Zed servers. 
In the future, Zed may save User Content regarding projects beyond the scope of a single collaboration session. We may share such User Content with those users You elected to grant access to. Zed's access to such User Content is limited to debugging and making improvements to the Solution.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.5.2. Other Services\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 The Zed Editor supports integration with API-based services maintained and not operated by Zed (the "Other Services"). By way of example, Other Services includes those made available by GitHub, Anthropic, OpenAI, and similar providers, or those You host or manage directly. You may configure the Zed Editor to interoperate, communicate with, and exchange data (including User Content) directly with the Other Services. Zed is not responsible or otherwise liable with respect to Your use of any Other Service, including but not limited to the exchange of data between the Other Service and the Zed Editor. The terms and conditions, including the applicable privacy policy, with respect to the Other Service are those made available by the applicable Other Service, not these Terms.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.5.3. Zed AI Services\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 The Zed Editor supports integration with API-based services maintained and operated by Zed (the "Zed AI Services"). You may elect to use Zed AI Services as the provider for various Zed Editor features (e.g., Agent Panel, Inline Assistant, Edit Predictions, and similar features). In connection with Your use of these features, the Zed Editor and Zed AI Services may make use of User Content to generate contextually relevant responses (the \'e2\'80\'9cOutput\'e2\'80\uc0\u157 ). Other than as specified in Section 3.3.5.4 of these Terms, Zed will not use User Content for training of its models, or disclose User Content.\ -Output is provided "as is" without any warranties or guarantees of functionality, security, or fitness for a particular purpose. While efforts are made to ensure the accuracy and reliability, Output may include errors, vulnerabilities, and defects. You are responsible for reviewing, testing, and validating Output before use in any production or critical environment. Zed assumes no liability for any damages, losses, or liability arising from the use, modification, reliance on, or deployment of Output. Any such use is at Your own risk.\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.3.5.4. Improvement Feedback\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 When using Zed AI Services to provide Edit Predictions in connection with certain open source software projects, You may elect to share requests, responses and feedback comments (collectively "Model Improvement Feedback") with Zed, and Zed may use the same to improve Zed Edit Predictions models. 
You may opt-out of sharing Model Improvement Feedback at any time.\ -For more information on Zed Edit Predictions please see: {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/ai-improvement"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://zed.dev/docs/ai-improvement}}\ -When using Zed AI Services in connection with the Agent Panel, You may elect to share with Zed requests, responses and feedback regarding the Agent Panel and related Output (the \'e2\'80\'9cAgent Improvement Feedback\'e2\'80\uc0\u157 ) with Zed, and Zed may use the same to improve the Agent Panel and related Output. Zed will only collect Agent Improvement Feedback when You elect to share the same.\ -For more information regarding the Agent Panel please see: {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/ai-improvement"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://zed.dev/docs/ai-improvement}}\ -\pard\pardeftab720\sa319\partightenfactor0 - -\f1\b \cf0 3.4. Privacy Policy\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0 \cf0 You and Zed are bound by the terms and conditions contained in the Zed Privacy Policy which is incorporated by reference hereto. The Zed Privacy Policy is available at the following URL: {\field{\*\fldinst{HYPERLINK "https://zed.dev/privacy-policy"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://zed.dev/privacy-policy}}.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 4. FEE BASED SERVICES, FEES AND PAYMENT TERMS\ -\pard\pardeftab720\sa280\partightenfactor0 - -\fs28 \cf0 4.1. Fee Based Services\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 The Zed AI Services is made available with additional usage benefits (the \'e2\'80\'9cEnhanced Use \'e2\'80\uc0\u157 ) as described in the table published at {\field{\*\fldinst{HYPERLINK "https://zed.dev/pricing"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 zed.dev/pricing}} (the \'e2\'80\'9cPricing Table\'e2\'80\uc0\u157 ), subject to the requirements and limitations set forth in the Pricing Table and these Terms. In order to make use of the Enhanced Use, Customer must access the Zed AI Services through a Zed registered account.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 4.2. Fees\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Customer shall pay to Zed the applicable fees set forth in Pricing Table, together with any applicable taxes and shipping and handling (collectively, the \'e2\'80\'9cFees\'e2\'80\uc0\u157 ). Customer shall have no right of return, and all Fees shall be non-refundable.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 4.3. Payment Terms\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 All amounts payable to Zed under this Agreement shall be paid in United States dollars and paid Zed according to the method of payment, frequency and calculated as set forth in the Pricing Table.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 4.4. Taxes; Set-offs\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Any and all payments made by Customer in accordance with this Agreement are exclusive of any taxes that might be assessed by any jurisdiction. Customer shall pay or reimburse Zed for all sales, use, property and similar taxes; all customs duties, import fees, stamp duties, license fees and similar charges; and all other mandatory payments to government agencies of whatever kind, except taxes imposed on the net or gross income of Zed. 
All amounts payable to Zed under this Agreement shall be without set-off and without deduction of any taxes, levies, imposts, charges, withholdings and/or duties of any nature which may be levied or imposed, including without limitation, value added tax, customs duty and withholding tax.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 5. TERM AND TERMINATION\ -\pard\pardeftab720\sa280\partightenfactor0 - -\fs28 \cf0 5.1. Term\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 The term of this Agreement shall commence on the date You first download the Editor or use the Zed Service (the "Effective Date"), and unless terminated earlier according to this Section 3, will end pursuant to this Section 5 (the "Term").\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 5.2. Termination\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 This Agreement may be terminated: (a) by either party if the other has materially breached this Agreement; or (b) by Zed at any time and for any reason upon notice to Customer. You acknowledge that Zed is under no obligation to continue to operate the Zed Service or make the Editor available, and We may end any programs in connection with the same at any time.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 5.3. Effect of Termination and Survival\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Upon any expiration or termination of this Agreement, Customer shall (i) immediately cease use of the Zed Service, and (ii) return all Zed Confidential Information and other materials provided by Zed. The following provisions will survive termination of this Agreement: Sections 3.3 (Customer Data), Section 3.4 (Privacy Policy), Section 5.3 (Effect of Termination and Survival), Section 6 (Ownership), Section 7 (Indemnification), Section 9 (Limitation of Liability), Section 10 (Third Party Services), and Section 11 (Miscellaneous).\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 6. OWNERSHIP\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Zed retains all right, title, and interest in and to the Zed Service, Editor, and any software, products, works or other intellectual property created, used, provided, or made available by Zed under or in connection with the Zed Service or Editor. Customer may from time to time provide suggestions, comments, or other feedback to Zed with respect to the Zed Service or Editor ("Feedback"). Customer shall, and hereby does, grant to Zed a nonexclusive, worldwide, perpetual, irrevocable, transferable, sublicensable, royalty-free, fully paid-up license to use and exploit the Feedback for any purpose. You retain all right, title and interest in and to the Customer Data, including all intellectual property rights therein. No intellectual property rights with respect to any software code you develop or modify with the Editor or Zed Service (collectively, the \'e2\'80\'9cOutput\'e2\'80\uc0\u157 ) are transferred or assigned to Zed hereunder.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 7. 
INDEMNIFICATION\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Customer will defend, indemnify, and hold Zed, its affiliates, suppliers and licensors harmless and each of their respective officers, directors, employees and representatives from and against any claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising out of or relating to any third party claim with respect to: (a) Customer Data; (b) breach of this Agreement or violation of applicable law by Customer; or (c) alleged infringement or misappropriation of third-party's intellectual property rights resulting from Customer Data.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 8. WARRANTY\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Zed does not represent or warrant that the operation of the Zed Service or Editor (or any portion thereof) will be uninterrupted or error free, or that the Zed Service or Editor (or any portion thereof) will operate in combination with other hardware, software, systems or data not provided by Zed. CUSTOMER ACKNOWLEDGES THAT, ZED MAKES NO EXPRESS OR IMPLIED REPRESENTATIONS OR WARRANTIES OF ANY KIND WITH RESPECT TO THE SERVICE OR SOFTWARE, OR THEIR CONDITION. ZED HEREBY EXPRESSLY EXCLUDES, ANY AND ALL OTHER EXPRESS OR IMPLIED REPRESENTATIONS OR WARRANTIES, WHETHER UNDER COMMON LAW, STATUTE OR OTHERWISE, INCLUDING WITHOUT LIMITATION ANY AND ALL WARRANTIES AS TO MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, SATISFACTORY QUALITY OR NON-INFRINGEMENT OF THIRD-PARTY RIGHTS.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 9. LIMITATIONS OF LIABILITY\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 IN NO EVENT SHALL ZED BE LIABLE FOR ANY LOST DATA, LOST PROFITS, BUSINESS INTERRUPTION, REPLACEMENT SERVICE OR OTHER SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR INDIRECT DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THEORY OF LIABILITY. ZED'S LIABILITY FOR ALL CLAIMS ARISING UNDER THIS AGREEMENT, WHETHER IN CONTRACT, TORT OR OTHERWISE, SHALL NOT EXCEED THE GREATER OF: THE FEES PAID TO ZED BY CUSTOMER DURING THE TWELVE (12) MONTH PERIOD PRECEDING THE DATE OF THE CLAIM, OR ONE THOUSAND US DOLLARS ($1,000).\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 10. Third Party Services\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Zed may make certain third party services available to You within the Editor or the Zed Service (each a "Third Party Service"). You acknowledge and agree that (a) use of each Third Party Service is subject to the corresponding terms and conditions available at the following URL: {\field{\*\fldinst{HYPERLINK "https://zed.dev/third-party-terms"}}{\fldrslt \cf3 \ul \ulc3 \strokec3 https://zed.dev/third-party-terms}} and/or presented in connection with Your use of such Third Party Service; (b) the terms and conditions of this Agreement do not apply with respect to Your use of any Third Party Service; and (c) Zed is not liable in any way regarding Your use of any Third Party Service.\ -\pard\pardeftab720\sa298\partightenfactor0 - -\f1\b\fs36 \cf0 11. MISCELLANEOUS\ -\pard\pardeftab720\sa280\partightenfactor0 - -\fs28 \cf0 11.1. Export Control\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 You hereby certify that You will comply with all current US Export Control laws. You agree to defend, indemnify and hold Zed harmless from any liability for Your violation of U.S. 
Export Control laws.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.2. Compliance with Laws\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 You shall comply with all applicable laws and regulations in its use of the Solution, including without limitation the unlawful gathering or collecting, or assisting in the gathering or collecting of information in violation of any privacy laws or regulations. You shall, at its own expense, defend, indemnify and hold harmless Zed from and against any and all claims, losses, liabilities, damages, judgments, government or federal sanctions, costs and expenses (including attorneys' fees) incurred by Zed arising from any claim or assertion by any third party of violation of privacy laws or regulations by You or any of its agents, officers, directors or employees.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.3. Assignment\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Neither party may transfer and assign its rights and obligations under this Agreement without the prior written consent of the other party. Notwithstanding the foregoing, Zed may transfer and assign its rights under this Agreement without consent from the other party in connection with a change in control, acquisition or sale of all or substantially all of its assets.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.4. Force Majeure\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Neither party shall be responsible for failure or delay in performance by events out of their reasonable control, including but not limited to, acts of God, Internet outage, terrorism, war, fires, earthquakes and other disasters (each a "Force Majeure"). Notwithstanding the foregoing: if a Force Majeure continues for more than thirty (30) days, either party may to terminate this agreement by written notice to the other party.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.5. Notice\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 All notices between the parties shall be in writing and shall be deemed to have been given if personally delivered or sent by registered or certified mail (return receipt), or by recognized courier service.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.6. No Agency\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Both parties agree that no agency, partnership, joint venture, or employment is created as a result of this Agreement. You do not have any authority of any kind to bind Zed.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.7. Governing Law\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 This Agreement shall be governed exclusively by, and construed exclusively in accordance with, the laws of the United States and the State of California, without regard to its conflict of laws provisions. The federal courts of the United States in the Northern District of California and the state courts of the State of California shall have exclusive jurisdiction to adjudicate any dispute arising out of or relating to this Agreement. Each party hereby consents to the jurisdiction of such courts and waives any right it may otherwise have to challenge the appropriateness of such forums, whether on the basis of the doctrine of forum non conveniens or otherwise. 
The United Nations Convention on Contracts for the International Sale of Goods shall not apply to this Agreement or any Purchase Order issued under this Agreement.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.8. Updated Agreement\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 Zed reserves the right to update this Agreement at any time. The terms and conditions of the updated version of the Agreement shall apply to the Zed Service and Editor downloaded, or accessed following the date of publication of the updated version. If You do not agree with any terms of the updated Agreement, You may not use or access the Zed Service or Editor in any manner. Zed may from time-to-time provide release notes applicable to the Editor or Zed Service, and such release notes may contain additional use restrictions or terms applicable to Customer Data. Your use of the Editor or Zed Service after the applicable release notes are made available shall be subject to the additional use restrictions or terms applicable to Customer Data.\ -\pard\pardeftab720\sa280\partightenfactor0 - -\f1\b\fs28 \cf0 11.9. Entire Agreement\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f0\b0\fs24 \cf0 This Agreement is the complete and exclusive statement of the mutual understanding of the parties and supersedes and cancels all previous written and oral agreements, communications, and other understandings relating to the subject matter of this Agreement, and all waivers and modifications must be in a writing signed by both parties, except as otherwise provided herein. Any term or provision of this Agreement held to be illegal or unenforceable shall be, to the fullest extent possible, interpreted so as to be construed as valid, but in any event the validity or enforceability of the remainder hereof shall not be affected.\ -\pard\pardeftab720\sa240\partightenfactor0 - -\f1\b \cf0 DATE: May 6, 2025 -\f0\b0 \ -} \ No newline at end of file +{\rtf1\ansi\deff0{\fonttbl{\f0 \fswiss Helvetica;}{\f1 \fmodern Courier;}} +{\colortbl;\red255\green0\blue0;\red0\green0\blue255;} +\widowctrl\hyphauto + +{\pard \qc \f0 \sa180 \li0 \fi0 \b \fs36 Zed End User Terms\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \par} +{\pard \ql \f0 \sa180 \li0 \fi0 PLEASE READ THESE TERMS AND CONDITIONS CAREFULLY BEFORE USING THE SERVICE OR SOFTWARE OFFERED BY ZED INDUSTRIES, INC. ("ZED", OR "WE"). BY ACCESSING OR USING THE SOLUTION (AS DEFINED BELOW) IN ANY MANNER, YOU ("YOU" OR "CUSTOMER") AGREE TO BE BOUND BY THESE TERMS (THE "AGREEMENT") TO THE EXCLUSION OF ALL OTHER TERMS. YOU REPRESENT AND WARRANT THAT YOU HAVE THE AUTHORITY TO ENTER INTO THIS AGREEMENT; IF YOU ARE ENTERING INTO THIS AGREEMENT ON BEHALF OF AN ORGANIZATION OR ENTITY, REFERENCES TO "CUSTOMER" AND "YOU" IN THIS AGREEMENT, REFER TO THAT ORGANIZATION OR ENTITY. IF YOU DO NOT AGREE TO ALL OF THE FOLLOWING, YOU MAY NOT USE OR ACCESS THE SOLUTION IN ANY MANNER. IF THE TERMS OF THIS AGREEMENT ARE CONSIDERED AN OFFER, ACCEPTANCE IS EXPRESSLY LIMITED TO SUCH TERMS.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 1. ACCESS TO AND USE OF THE SOLUTION\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Subject to the terms and conditions of this Agreement, Zed hereby grants to You, and You hereby accept from Zed, a term-limited, non-exclusive, non-transferable, non-assignable and non-sublicensable license to make use of the Editor for Your internal use only, and subject to the use limitations in Section 2.2.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 2. 
TERMS APPLICABLE TO THE EDITOR\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 2.1. License Grant\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Subject to the terms and conditions of this Agreement, Zed hereby grants to You, and You hereby accept from Zed, a term-limited, non-exclusive, non-transferable, non-assignable and non-sublicensable license to make use of the Editor for Your internal use only, and subject to the use limitations in Section 2.2.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 2.2. License Limitations\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You agree that You shall not: (a) exceed the scope of the licenses granted in Section 2.1; (b) make copies of the Editor; (c) distribute, sublicense, assign, delegate, rent, lease, sell, time-share or otherwise transfer the benefits of, use under, or rights to, the license granted in Section 2.1; (d) reverse engineer, decompile, disassemble or otherwise attempt to learn the source code, structure or algorithms underlying the Editor, except to the extent required to be permitted under applicable law; (e) modify, translate or create derivative works of the Editor; or (f) remove any copyright, trademark, patent or other proprietary notice that appears on the Editor or copies thereof.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 2.3. Open Source Software\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed makes certain versions of the Editor and related software available at the Zed GitHub Repository: {\field{\*\fldinst{HYPERLINK "https://github.com/zed-industries/zed"}}{\fldrslt{\ul +https://github.com/zed-industries/zed +}}} + (the "Repo"). Your use of such software is subject to the open source software licenses declared in the Repo.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 3. TERMS APPLICABLE TO THE ZED SERVICE\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 3.1. Access to and Scope of Zed Service\par} +{\pard \ql \f0 \sa180 \li0 \fi0 If you have elected to use the Zed Service by enabling or activating the Zed Service, Zed will use commercially reasonable efforts to make the Zed Service available to You as set forth in this Agreement. Once you elected to use the Zed Service, You may access and use the Zed Service during the Term, subject to Your compliance with the terms and conditions of the Agreement.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 3.2. Restrictions\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You will use the Zed Service only in accordance with all applicable laws, including, but not limited to, laws related to data (whether applicable within the United States, the European Union, or otherwise). You agree not to (and will not allow any third party to): (i) remove or otherwise alter any proprietary notices or labels from the Zed Service or any portion thereof; (ii) reverse engineer, decompile, disassemble, or otherwise attempt to discover the underlying structure, ideas, or algorithms of the Zed Service or any software used to provide or make the Zed Service available; or (iii) rent, resell or otherwise allow any third party access to or use of the Zed Service. Zed may suspend Your access to or use of the Zed Service as follows: (a) immediately if Zed reasonably believes Your use of the Zed Service may pose a security risk to or may adversely impact the Zed Service; or (b) if You are in breach of this Agreement.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 3.3. 
Customer Data\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You are solely responsible for Customer Data including, but not limited to: (a) compliance with all applicable laws and this Agreement; (b) any claims relating to Customer Data; and (c) any claims that Customer Data infringes, misappropriates, or otherwise violates the rights of any third party. You agree and acknowledge that Customer Data may be irretrievably deleted if Your account is terminated. For purposes of this Agreement, "Customer Data" shall mean any data, information or other material provided, uploaded, or submitted by You to the Zed Service in the course of using the Zed Service. Notwithstanding anything to the contrary, You represent and warrant that You will not transfer or make available to Zed any personally identifiable information or related information subject to applicable data privacy laws or regulations, unless otherwise agreed to in writing by Zed.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.1. Customer Data Made Available to Zed\par} +{\pard \ql \f0 \sa180 \li0 \fi0 To the extent You elect to make Customer Data available to Zed, the same may only be used by Zed according to the Customer Data type and the use rights regarding the same as described herein:\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.2. Usage Data\par} +{\pard \ql \f0 \sa180 \li0 \fi0 To improve the Editor and understand how You use it, Zed optionally collects the following usage data:\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (a)\tx360\tab file extensions of opened files;\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (b)\tx360\tab features and tools You use within the Editor;\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (c)\tx360\tab project statistics (e.g., number of files); and\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (d)\tx360\tab frameworks detected in Your projects\sa180\sa180\par} +{\pard \ql \f0 \sa180 \li0 \fi0 (a-d collectively, "Usage Data"). Usage Data does not include any of Your software code or sensitive project details. You may change Your preferences disabling the collection of Usage Data and You can audit Usage Data collected by the Editor at any time. See {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/telemetry"}}{\fldrslt{\ul +https://zed.dev/docs/telemetry +}}} + for more.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Usage Data is associated with a secure random telemetry ID which may be linked to Your email address. This linkage currently serves two purposes: (1) it allows Zed to analyze usage patterns over time while maintaining Your privacy; and (2) it enables Zed to reach out to specific user groups for feedback and improvement suggestions. Zed may contact You based on Your usage patterns to better understand your needs and improve the Solution. If You delete Your account, the link between Your telemetry ID and Your email address will be permanently removed. By continuing to use Editor or Solution with this feature enabled You agree to this Usage Data collection.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.3. Crash Reports\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Customer Data consisting of data related to the behavior of the Solution prior to a crash or failure, such as stack traces are collected and classified as "Crash Reports". 
Zed will use commercially reasonable efforts to exclude any personally identifiable information from Crash Reports, but due to the nature of a crash, Zed does not ensure that information such as paths will be excluded from Crash Reports. Crash Reports will be used solely for Zed's internal purposes in connection with diagnosing defects in the Solution that led to the crash. You may grant us permission to capture Crash Reports when installing or activating the Solution, and You may change Your preferences at any time in the settings feature of the Solution. Once You grant us this permission, Zed will retain the Crash Reports indefinitely.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.4. User Content\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \u8226 ? You may access, modify or create certain data or information in connection with your access or use of the Zed Editor or the Solution. Such data and information may include, but is not limited to any of the following:\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (a)\tx360\tab file contents and associated metadata (e.g., filename, paths, size, timestamps);\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (b)\tx360\tab source control history, comments and metadata (e.g., git history, commit messages);\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (c)\tx360\tab configuration data (e.g., settings, keymaps);\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (d)\tx360\tab anything typed, pasted and/or displayed on screen while using the Editor;\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (e)\tx360\tab derivative works of the above generated by the Editor (e.g., format conversions, summaries, indexes, caches);\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (f)\tx360\tab metadata, code and other derivative works of the above returned by language servers and other local tooling; and\sa180\par} +{\pard \ql \f0 \sa0 \li720 \fi-360 \bullet \tx360\tab (g)\tx360\tab metadata, code and other derivative works of the above returned by services integrated with the Zed Editor\sa180\sa180\par} +{\pard \ql \f0 \sa180 \li0 \fi0 (a-g collectively, "User Content").\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.5. Handling of User Content\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed will make use of or transfer User Content only as specified in this Agreement, or as necessary to comply with applicable law.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.5.1. Zed Collaboration Services\par} +{\pard \ql \f0 \sa180 \li0 \fi0 When using Zed Collaboration Services, User Content is transmitted from Your environment only if You collaborate with other Zed users by electing to share a project in the Editor. Once You share a project, Zed may transmit User Content consisting of file paths, file contents, and metadata regarding the code returned by language servers. Currently, Zed does not persist any User Content beyond the Your collaboration session. If You unshare a project or disconnect from the Solution, all information associated with such project will be deleted from Zed servers. In the future, Zed may save User Content regarding projects beyond the scope of a single collaboration session. We may share such User Content with those users You elected to grant access to. 
Zed's access to such User Content is limited to debugging and making improvements to the Solution.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.5.2. Other Services\par} +{\pard \ql \f0 \sa180 \li0 \fi0 The Zed Editor supports integration with API-based services maintained and not operated by Zed (the "Other Services"). By way of example, Other Services includes those made available by GitHub, Anthropic, OpenAI, and similar providers, or those You host or manage directly. You may configure the Zed Editor to interoperate, communicate with, and exchange data (including User Content) directly with the Other Services. Zed is not responsible or otherwise liable with respect to Your use of any Other Service, including but not limited to the exchange of data between the Other Service and the Zed Editor. The terms and conditions, including the applicable privacy policy, with respect to the Other Service are those made available by the applicable Other Service, not these Terms.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.5.3. Zed AI Services\par} +{\pard \ql \f0 \sa180 \li0 \fi0 The Zed Editor supports integration with API-based services maintained and operated by Zed (the "Zed AI Services"). You may elect to use Zed AI Services as the provider for various Zed Editor features (e.g., Agent Panel, Inline Assistant, Edit Predictions, and similar features). In connection with Your use of these features, the Zed Editor and Zed AI Services may make use of User Content to generate contextually relevant responses (the \u8220"Output\u8221"). Other than as specified in Section 3.3.5.4 of these Terms, Zed will not use User Content for training of its models, or disclose User Content.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Output is provided "as is" without any warranties or guarantees of functionality, security, or fitness for a particular purpose. While efforts are made to ensure the accuracy and reliability, Output may include errors, vulnerabilities, and defects. You are responsible for reviewing, testing, and validating Output before use in any production or critical environment. Zed assumes no liability for any damages, losses, or liability arising from the use, modification, reliance on, or deployment of Output. Any such use is at Your own risk.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.3.5.4. Improvement Feedback\par} +{\pard \ql \f0 \sa180 \li0 \fi0 When using Zed AI Services to provide Edit Predictions in connection with certain open source software projects, You may elect to share requests, responses and feedback comments (collectively "Model Improvement Feedback") with Zed, and Zed may use the same to improve Zed Edit Predictions models. You may opt-out of sharing Model Improvement Feedback at any time.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 For more information on Zed Edit Predictions please see: {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/ai/ai-improvement"}}{\fldrslt{\ul +https://zed.dev/docs/ai/ai-improvement +}}} +\par} +{\pard \ql \f0 \sa180 \li0 \fi0 When using Zed AI Services in connection with the Agent Panel, You may elect to share with Zed requests, responses and feedback regarding the Agent Panel and related Output (the \u8220"Agent Improvement Feedback\u8221") with Zed, and Zed may use the same to improve the Agent Panel and related Output. 
Zed will only collect Agent Improvement Feedback when You elect to share the same.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 For more information regarding the Agent Panel please see: {\field{\*\fldinst{HYPERLINK "https://zed.dev/docs/ai/ai-improvement"}}{\fldrslt{\ul +https://zed.dev/docs/ai/ai-improvement +}}} +\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel3 \b \fs24 3.4. Privacy Policy\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You and Zed are bound by the terms and conditions contained in the Zed Privacy Policy which is incorporated by reference hereto. The Zed Privacy Policy is available at the following URL: {\field{\*\fldinst{HYPERLINK "https://zed.dev/privacy-policy"}}{\fldrslt{\ul +https://zed.dev/privacy-policy +}}} +.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 4. FEE BASED SERVICES, FEES AND PAYMENT TERMS\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 4.1. Fee Based Services\par} +{\pard \ql \f0 \sa180 \li0 \fi0 The Zed AI Services is made available with additional usage benefits (the \u8220"Enhanced Use \u8221") as described in the table published at {\field{\*\fldinst{HYPERLINK "https://zed.dev/pricing"}}{\fldrslt{\ul +zed.dev/pricing +}}} + (the \u8220"Pricing Table\u8221"), subject to the requirements and limitations set forth in the Pricing Table and these Terms. In order to make use of the Enhanced Use, Customer must access the Zed AI Services through a Zed registered account.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 4.2. Fees\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Customer shall pay to Zed the applicable fees set forth in Pricing Table, together with any applicable taxes and shipping and handling (collectively, the \u8220"Fees\u8221"). Customer shall have no right of return, and all Fees shall be non-refundable.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 4.3. Payment Terms\par} +{\pard \ql \f0 \sa180 \li0 \fi0 All amounts payable to Zed under this Agreement shall be paid in United States dollars and paid Zed according to the method of payment, frequency and calculated as set forth in the Pricing Table.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 4.4. Taxes; Set-offs\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Any and all payments made by Customer in accordance with this Agreement are exclusive of any taxes that might be assessed by any jurisdiction. Customer shall pay or reimburse Zed for all sales, use, property and similar taxes; all customs duties, import fees, stamp duties, license fees and similar charges; and all other mandatory payments to government agencies of whatever kind, except taxes imposed on the net or gross income of Zed. All amounts payable to Zed under this Agreement shall be without set-off and without deduction of any taxes, levies, imposts, charges, withholdings and/or duties of any nature which may be levied or imposed, including without limitation, value added tax, customs duty and withholding tax.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 5. TERM AND TERMINATION\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 5.1. Term\par} +{\pard \ql \f0 \sa180 \li0 \fi0 The term of this Agreement shall commence on the date You first download the Editor or use the Zed Service (the "Effective Date"), and unless terminated earlier according to this Section 3, will end pursuant to this Section 5 (the "Term").\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 5.2. 
Termination\par} +{\pard \ql \f0 \sa180 \li0 \fi0 This Agreement may be terminated: (a) by either party if the other has materially breached this Agreement; or (b) by Zed at any time and for any reason upon notice to Customer. You acknowledge that Zed is under no obligation to continue to operate the Zed Service or make the Editor available, and We may end any programs in connection with the same at any time.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 5.3. Effect of Termination and Survival\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Upon any expiration or termination of this Agreement, Customer shall (i) immediately cease use of the Zed Service, and (ii) return all Zed Confidential Information and other materials provided by Zed. The following provisions will survive termination of this Agreement: Sections 3.3 (Customer Data), Section 3.4 (Privacy Policy), Section 5.3 (Effect of Termination and Survival), Section 6 (Ownership), Section 7 (Indemnification), Section 9 (Limitation of Liability), Section 10 (Third Party Services), and Section 11 (Miscellaneous).\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 6. OWNERSHIP\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed retains all right, title, and interest in and to the Zed Service, Editor, and any software, products, works or other intellectual property created, used, provided, or made available by Zed under or in connection with the Zed Service or Editor. Customer may from time to time provide suggestions, comments, or other feedback to Zed with respect to the Zed Service or Editor ("Feedback"). Customer shall, and hereby does, grant to Zed a nonexclusive, worldwide, perpetual, irrevocable, transferable, sublicensable, royalty-free, fully paid-up license to use and exploit the Feedback for any purpose. You retain all right, title and interest in and to the Customer Data, including all intellectual property rights therein. No intellectual property rights with respect to any software code you develop or modify with the Editor or Zed Service (collectively, the \u8220"Output\u8221") are transferred or assigned to Zed hereunder.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 7. INDEMNIFICATION\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Customer will defend, indemnify, and hold Zed, its affiliates, suppliers and licensors harmless and each of their respective officers, directors, employees and representatives from and against any claims, damages, losses, liabilities, costs, and expenses (including reasonable attorneys' fees) arising out of or relating to any third party claim with respect to: (a) Customer Data; (b) breach of this Agreement or violation of applicable law by Customer; or (c) alleged infringement or misappropriation of third-party's intellectual property rights resulting from Customer Data.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 8. WARRANTY\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed does not represent or warrant that the operation of the Zed Service or Editor (or any portion thereof) will be uninterrupted or error free, or that the Zed Service or Editor (or any portion thereof) will operate in combination with other hardware, software, systems or data not provided by Zed. CUSTOMER ACKNOWLEDGES THAT, ZED MAKES NO EXPRESS OR IMPLIED REPRESENTATIONS OR WARRANTIES OF ANY KIND WITH RESPECT TO THE SERVICE OR SOFTWARE, OR THEIR CONDITION. 
ZED HEREBY EXPRESSLY EXCLUDES, ANY AND ALL OTHER EXPRESS OR IMPLIED REPRESENTATIONS OR WARRANTIES, WHETHER UNDER COMMON LAW, STATUTE OR OTHERWISE, INCLUDING WITHOUT LIMITATION ANY AND ALL WARRANTIES AS TO MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, SATISFACTORY QUALITY OR NON-INFRINGEMENT OF THIRD-PARTY RIGHTS.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 9. LIMITATIONS OF LIABILITY\par} +{\pard \ql \f0 \sa180 \li0 \fi0 IN NO EVENT SHALL ZED BE LIABLE FOR ANY LOST DATA, LOST PROFITS, BUSINESS INTERRUPTION, REPLACEMENT SERVICE OR OTHER SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR INDIRECT DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THEORY OF LIABILITY. ZED'S LIABILITY FOR ALL CLAIMS ARISING UNDER THIS AGREEMENT, WHETHER IN CONTRACT, TORT OR OTHERWISE, SHALL NOT EXCEED THE GREATER OF: THE FEES PAID TO ZED BY CUSTOMER DURING THE TWELVE (12) MONTH PERIOD PRECEDING THE DATE OF THE CLAIM, OR ONE THOUSAND US DOLLARS ($1,000).\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 10. Third Party Services\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed may make certain third party services available to You within the Editor or the Zed Service (each a "Third Party Service"). You acknowledge and agree that (a) use of each Third Party Service is subject to the corresponding terms and conditions available at the following URL: {\field{\*\fldinst{HYPERLINK "https://zed.dev/third-party-terms"}}{\fldrslt{\ul +https://zed.dev/third-party-terms +}}} + and/or presented in connection with Your use of such Third Party Service; (b) the terms and conditions of this Agreement do not apply with respect to Your use of any Third Party Service; and (c) Zed is not liable in any way regarding Your use of any Third Party Service.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel1 \b \fs32 11. MISCELLANEOUS\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.1. Export Control\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You hereby certify that You will comply with all current US Export Control laws. You agree to defend, indemnify and hold Zed harmless from any liability for Your violation of U.S. Export Control laws.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.2. Compliance with Laws\par} +{\pard \ql \f0 \sa180 \li0 \fi0 You shall comply with all applicable laws and regulations in its use of the Solution, including without limitation the unlawful gathering or collecting, or assisting in the gathering or collecting of information in violation of any privacy laws or regulations. You shall, at its own expense, defend, indemnify and hold harmless Zed from and against any and all claims, losses, liabilities, damages, judgments, government or federal sanctions, costs and expenses (including attorneys' fees) incurred by Zed arising from any claim or assertion by any third party of violation of privacy laws or regulations by You or any of its agents, officers, directors or employees.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.3. Assignment\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Neither party may transfer and assign its rights and obligations under this Agreement without the prior written consent of the other party. Notwithstanding the foregoing, Zed may transfer and assign its rights under this Agreement without consent from the other party in connection with a change in control, acquisition or sale of all or substantially all of its assets.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.4. 
Force Majeure\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Neither party shall be responsible for failure or delay in performance by events out of their reasonable control, including but not limited to, acts of God, Internet outage, terrorism, war, fires, earthquakes and other disasters (each a "Force Majeure"). Notwithstanding the foregoing: if a Force Majeure continues for more than thirty (30) days, either party may terminate this agreement by written notice to the other party.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.5. Notice\par} +{\pard \ql \f0 \sa180 \li0 \fi0 All notices between the parties shall be in writing and shall be deemed to have been given if personally delivered or sent by registered or certified mail (return receipt), or by recognized courier service.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.6. No Agency\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Both parties agree that no agency, partnership, joint venture, or employment is created as a result of this Agreement. You do not have any authority of any kind to bind Zed.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.7. Governing Law\par} +{\pard \ql \f0 \sa180 \li0 \fi0 This Agreement shall be governed exclusively by, and construed exclusively in accordance with, the laws of the United States and the State of California, without regard to its conflict of laws provisions. The federal courts of the United States in the Northern District of California and the state courts of the State of California shall have exclusive jurisdiction to adjudicate any dispute arising out of or relating to this Agreement. Each party hereby consents to the jurisdiction of such courts and waives any right it may otherwise have to challenge the appropriateness of such forums, whether on the basis of the doctrine of forum non conveniens or otherwise. The United Nations Convention on Contracts for the International Sale of Goods shall not apply to this Agreement or any Purchase Order issued under this Agreement.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.8. Updated Agreement\par} +{\pard \ql \f0 \sa180 \li0 \fi0 Zed reserves the right to update this Agreement at any time. The terms and conditions of the updated version of the Agreement shall apply to the Zed Service and Editor downloaded, or accessed following the date of publication of the updated version. If You do not agree with any terms of the updated Agreement, You may not use or access the Zed Service or Editor in any manner. Zed may from time-to-time provide release notes applicable to the Editor or Zed Service, and such release notes may contain additional use restrictions or terms applicable to Customer Data. Your use of the Editor or Zed Service after the applicable release notes are made available shall be subject to the additional use restrictions or terms applicable to Customer Data.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 \outlinelevel2 \b \fs28 11.9. Entire Agreement\par} +{\pard \ql \f0 \sa180 \li0 \fi0 This Agreement is the complete and exclusive statement of the mutual understanding of the parties and supersedes and cancels all previous written and oral agreements, communications, and other understandings relating to the subject matter of this Agreement, and all waivers and modifications must be in a writing signed by both parties, except as otherwise provided herein. 
Any term or provision of this Agreement held to be illegal or unenforceable shall be, to the fullest extent possible, interpreted so as to be construed as valid, but in any event the validity or enforceability of the remainder hereof shall not be affected.\par} +{\pard \ql \f0 \sa180 \li0 \fi0 {\b DATE: May 6, 2025}\par} +} From 27cb01f4af113abfcafe2812a796afd658a28c9e Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Wed, 19 Nov 2025 13:30:55 -0500 Subject: [PATCH 0225/1030] Fix Helix mode search & selection (#42928) This PR redoes the desired behavior changes of #41583 (reverted in #42892) but less invasively Closes #41125 Closes #41164 Release Notes: - N/A Co-authored-by: Conrad Irwin --- assets/keymaps/vim.json | 9 +++ crates/editor/src/editor.rs | 2 +- crates/vim/src/helix.rs | 137 ++++++++++++++++++++++++++++++++++-- crates/vim/src/motion.rs | 1 - crates/vim/src/vim.rs | 5 +- 5 files changed, 146 insertions(+), 8 deletions(-) diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index a3530140b39df88a0929df0a21cfb9379a9fc8bd..233c9fa7e4468142c3e5a31b730bb4d80b83a907 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -421,6 +421,12 @@ "ctrl-[": "editor::Cancel" } }, + { + "context": "vim_mode == helix_select && !menu", + "bindings": { + "escape": "vim::SwitchToHelixNormalMode" + } + }, { "context": "(vim_mode == helix_normal || vim_mode == helix_select) && !menu", "bindings": { @@ -470,6 +476,9 @@ "alt-p": "editor::SelectPreviousSyntaxNode", "alt-n": "editor::SelectNextSyntaxNode", + "n": "vim::HelixSelectNext", + "shift-n": "vim::HelixSelectPrevious", + // Goto mode "g e": "vim::EndOfDocument", "g h": "vim::StartOfLine", diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 6476002396c35bd25d419013833e37b96a6c0395..4b8b6a8c881d51f6c702c7d7cb1301e7a54b5318 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1099,7 +1099,7 @@ pub struct Editor { searchable: bool, cursor_shape: CursorShape, current_line_highlight: Option, - collapse_matches: bool, + pub collapse_matches: bool, autoindent_mode: Option, workspace: Option<(WeakEntity, Option)>, input_enabled: bool, diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index 6788a186fb45222f7b09fe756862e6cb337c6d90..e1cc58b89560e46a68e05fe6c8e75dbefb4e3e83 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -15,8 +15,8 @@ use language::{CharClassifier, CharKind, Point}; use search::{BufferSearchBar, SearchOptions}; use settings::Settings; use text::{Bias, SelectionGoal}; -use workspace::searchable; use workspace::searchable::FilteredSearchRange; +use workspace::searchable::{self, Direction}; use crate::motion::{self, MotionKind}; use crate::state::SearchState; @@ -52,6 +52,10 @@ actions!( HelixSubstitute, /// Delete the selection and enter edit mode, without yanking the selection. HelixSubstituteNoYank, + /// Delete the selection and enter edit mode. + HelixSelectNext, + /// Delete the selection and enter edit mode, without yanking the selection. 
+ HelixSelectPrevious, ] ); @@ -74,6 +78,8 @@ pub fn register(editor: &mut Editor, cx: &mut Context) { }); Vim::action(editor, cx, Vim::helix_substitute); Vim::action(editor, cx, Vim::helix_substitute_no_yank); + Vim::action(editor, cx, Vim::helix_select_next); + Vim::action(editor, cx, Vim::helix_select_previous); } impl Vim { @@ -97,6 +103,11 @@ impl Vim { self.update_editor(cx, |_, editor, cx| { let text_layout_details = editor.text_layout_details(window); editor.change_selections(Default::default(), window, cx, |s| { + if let Motion::ZedSearchResult { new_selections, .. } = &motion { + s.select_anchor_ranges(new_selections.clone()); + return; + }; + s.move_with(|map, selection| { let current_head = selection.head(); @@ -664,6 +675,68 @@ impl Vim { ) { self.do_helix_substitute(false, window, cx); } + + fn helix_select_next( + &mut self, + _: &HelixSelectNext, + window: &mut Window, + cx: &mut Context, + ) { + self.do_helix_select(Direction::Next, window, cx); + } + + fn helix_select_previous( + &mut self, + _: &HelixSelectPrevious, + window: &mut Window, + cx: &mut Context, + ) { + self.do_helix_select(Direction::Prev, window, cx); + } + + fn do_helix_select( + &mut self, + direction: searchable::Direction, + window: &mut Window, + cx: &mut Context, + ) { + let Some(pane) = self.pane(window, cx) else { + return; + }; + let count = Vim::take_count(cx).unwrap_or(1); + Vim::take_forced_motion(cx); + let prior_selections = self.editor_selections(window, cx); + + let success = pane.update(cx, |pane, cx| { + let Some(search_bar) = pane.toolbar().read(cx).item_of_type::() else { + return false; + }; + search_bar.update(cx, |search_bar, cx| { + if !search_bar.has_active_match() || !search_bar.show(window, cx) { + return false; + } + search_bar.select_match(direction, count, window, cx); + true + }) + }); + + if !success { + return; + } + if self.mode == Mode::HelixSelect { + self.update_editor(cx, |_vim, editor, cx| { + let snapshot = editor.snapshot(window, cx); + editor.change_selections(SelectionEffects::default(), window, cx, |s| { + s.select_anchor_ranges( + prior_selections + .iter() + .cloned() + .chain(s.all_anchors(&snapshot).iter().map(|s| s.range())), + ); + }) + }); + } + } } #[cfg(test)] @@ -1278,6 +1351,24 @@ mod test { cx.assert_state("«one ˇ»two", Mode::HelixSelect); } + #[gpui::test] + async fn test_exit_visual_mode(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + cx.set_state("ˇone two", Mode::Normal); + cx.simulate_keystrokes("v w"); + cx.assert_state("«one tˇ»wo", Mode::Visual); + cx.simulate_keystrokes("escape"); + cx.assert_state("one ˇtwo", Mode::Normal); + + cx.enable_helix(); + cx.set_state("ˇone two", Mode::HelixNormal); + cx.simulate_keystrokes("v w"); + cx.assert_state("«one ˇ»two", Mode::HelixSelect); + cx.simulate_keystrokes("escape"); + cx.assert_state("«one ˇ»two", Mode::HelixNormal); + } + #[gpui::test] async fn test_helix_select_regex(cx: &mut gpui::TestAppContext) { let mut cx = VimTestContext::new(cx, true).await; @@ -1297,9 +1388,47 @@ mod test { cx.simulate_keystrokes("enter"); cx.assert_state("«oneˇ» two «oneˇ»", Mode::HelixNormal); - cx.set_state("ˇone two one", Mode::HelixNormal); - cx.simulate_keystrokes("s o n e enter"); - cx.assert_state("ˇone two one", Mode::HelixNormal); + // TODO: change "search_in_selection" to not perform any search when in helix select mode with no selection + // cx.set_state("ˇstuff one two one", Mode::HelixNormal); + // cx.simulate_keystrokes("s o n e enter"); + // 
cx.assert_state("ˇstuff one two one", Mode::HelixNormal); + } + + #[gpui::test] + async fn test_helix_select_next_match(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + cx.set_state("ˇhello two one two one two one", Mode::Visual); + cx.simulate_keystrokes("/ o n e"); + cx.simulate_keystrokes("enter"); + cx.simulate_keystrokes("n n"); + cx.assert_state("«hello two one two one two oˇ»ne", Mode::Visual); + + cx.set_state("ˇhello two one two one two one", Mode::Normal); + cx.simulate_keystrokes("/ o n e"); + cx.simulate_keystrokes("enter"); + cx.simulate_keystrokes("n n"); + cx.assert_state("hello two one two one two ˇone", Mode::Normal); + + cx.set_state("ˇhello two one two one two one", Mode::Normal); + cx.simulate_keystrokes("/ o n e"); + cx.simulate_keystrokes("enter"); + cx.simulate_keystrokes("n g n g n"); + cx.assert_state("hello two one two «one two oneˇ»", Mode::Visual); + + cx.enable_helix(); + + cx.set_state("ˇhello two one two one two one", Mode::HelixNormal); + cx.simulate_keystrokes("/ o n e"); + cx.simulate_keystrokes("enter"); + cx.simulate_keystrokes("n n"); + cx.assert_state("hello two one two one two «oneˇ»", Mode::HelixNormal); + + cx.set_state("ˇhello two one two one two one", Mode::HelixSelect); + cx.simulate_keystrokes("/ o n e"); + cx.simulate_keystrokes("enter"); + cx.simulate_keystrokes("n n"); + cx.assert_state("hello two «oneˇ» two «oneˇ» two «oneˇ»", Mode::HelixSelect); } #[gpui::test] diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index c0be92b38e46e7d8c32c9da4a6980195ef71a91e..fd4171a36bc5baf0dc1cc60efe707fa275e4be81 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -691,7 +691,6 @@ impl Vim { return; } } - Mode::HelixNormal | Mode::HelixSelect => {} } } diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index ce359b6b1eea24d862f68813b97e23ea27829435..14ee4709a74ba68e92f07dd53182416ea93ed6d5 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -666,7 +666,7 @@ impl Vim { editor, cx, |vim, _: &SwitchToHelixNormalMode, window, cx| { - vim.switch_mode(Mode::HelixNormal, false, window, cx) + vim.switch_mode(Mode::HelixNormal, true, window, cx) }, ); Vim::action(editor, cx, |_, _: &PushForcedMotion, _, cx| { @@ -1928,7 +1928,8 @@ impl Vim { self.update_editor(cx, |vim, editor, cx| { editor.set_cursor_shape(vim.cursor_shape(cx), cx); editor.set_clip_at_line_ends(vim.clip_at_line_ends(), cx); - editor.set_collapse_matches(true); + let collapse_matches = !HelixModeSetting::get_global(cx).0; + editor.set_collapse_matches(collapse_matches); editor.set_input_enabled(vim.editor_input_enabled()); editor.set_autoindent(vim.should_autoindent()); editor From dccddf6f664f6f36110975fa39cb98c642ad41a4 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Thu, 20 Nov 2025 00:38:29 +0530 Subject: [PATCH 0226/1030] project_panel: Remove `cmd-opt-.` binding for hiding hidden files (#43091) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lots of folks were accidentally clicking this. Even though it’s the default in macOS Finder, it’s a good idea to not have it as the default for us. Release Notes: - Removed the default `cmd-opt-.` binding for toggling hidden files in the Project Panel so it’s harder to hide them by accident. 
--- assets/keymaps/default-linux.json | 3 +-- assets/keymaps/default-macos.json | 3 +-- assets/keymaps/default-windows.json | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/assets/keymaps/default-linux.json b/assets/keymaps/default-linux.json index 57c799d8fd4f74478f5fdf469ff142e0c26e4503..253c36f987a0995d51a91ab0eea75f422be83085 100644 --- a/assets/keymaps/default-linux.json +++ b/assets/keymaps/default-linux.json @@ -43,8 +43,7 @@ "f11": "zed::ToggleFullScreen", "ctrl-alt-z": "edit_prediction::RateCompletions", "ctrl-alt-shift-i": "edit_prediction::ToggleMenu", - "ctrl-alt-l": "lsp_tool::ToggleMenu", - "ctrl-alt-.": "project_panel::ToggleHideHidden" + "ctrl-alt-l": "lsp_tool::ToggleMenu" } }, { diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index fe65a53aa70522ff48728d3eaded16ac3312f2e0..16690a65b1d348a5393c55d4adc808be52e52c99 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -49,8 +49,7 @@ "ctrl-cmd-f": "zed::ToggleFullScreen", "ctrl-cmd-z": "edit_prediction::RateCompletions", "ctrl-cmd-i": "edit_prediction::ToggleMenu", - "ctrl-cmd-l": "lsp_tool::ToggleMenu", - "cmd-alt-.": "project_panel::ToggleHideHidden" + "ctrl-cmd-l": "lsp_tool::ToggleMenu" } }, { diff --git a/assets/keymaps/default-windows.json b/assets/keymaps/default-windows.json index 3fe5778e5c1219ee2b5fc9691ac876ec61debe06..216048d0634a039e3011b908ad20bcf522477885 100644 --- a/assets/keymaps/default-windows.json +++ b/assets/keymaps/default-windows.json @@ -41,8 +41,7 @@ "shift-f11": "debugger::StepOut", "f11": "zed::ToggleFullScreen", "ctrl-shift-i": "edit_prediction::ToggleMenu", - "shift-alt-l": "lsp_tool::ToggleMenu", - "ctrl-alt-.": "project_panel::ToggleHideHidden" + "shift-alt-l": "lsp_tool::ToggleMenu" } }, { From b6c8c3f3d948c8b694b9de69445ea55f5d57832d Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Wed, 19 Nov 2025 15:28:40 -0500 Subject: [PATCH 0227/1030] Remove migrated scripts (#43095) These scripts have been migrated to: https://github.com/zed-industries/release_notes Release Notes: - N/A --- script/get-preview-channel-changes | 127 ------------------------ script/get-stable-channel-release-notes | 101 ------------------- 2 files changed, 228 deletions(-) delete mode 100755 script/get-preview-channel-changes delete mode 100755 script/get-stable-channel-release-notes diff --git a/script/get-preview-channel-changes b/script/get-preview-channel-changes deleted file mode 100755 index 6ba274eabc1a9c850e53c86ddafb73a26c0c5d34..0000000000000000000000000000000000000000 --- a/script/get-preview-channel-changes +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/env node --redirect-warnings=/dev/null - -const { execFileSync } = require("child_process"); -let { GITHUB_ACCESS_TOKEN } = process.env; -const GITHUB_URL = "https://github.com"; -const SKIPPABLE_NOTE_REGEX = /^\s*-?\s*n\/?a\s*/ims; -const PULL_REQUEST_WEB_URL = "https://github.com/zed-industries/zed/pull"; -const PULL_REQUEST_API_URL = "https://api.github.com/repos/zed-industries/zed/pulls"; -const DIVIDER = "-".repeat(80); - -main(); - -async function main() { - if (!GITHUB_ACCESS_TOKEN) { - try { - GITHUB_ACCESS_TOKEN = execFileSync("gh", ["auth", "token"]).toString(); - } catch (error) { - console.log(error); - console.log("No GITHUB_ACCESS_TOKEN, and no `gh auth token`"); - process.exit(1); - } - } - - const STAFF_MEMBERS = new Set( - ( - await ( - await fetch("https://api.github.com/orgs/zed-industries/teams/staff/members?per_page=100", { - headers: { - Authorization: `token ${GITHUB_ACCESS_TOKEN}`, - Accept: "application/vnd.github+json", - }, - }) - ).json() - ).map(({ login }) => login.toLowerCase()), - ); - - const isStaffMember = (githubHandle) => { - githubHandle = githubHandle.toLowerCase(); - return STAFF_MEMBERS.has(githubHandle); - }; - - // Get the last two preview tags - const [newTag, oldTag] = execFileSync("git", ["tag", "--sort", "-committerdate"], { encoding: "utf8" }) - .split("\n") - .filter((t) => t.startsWith("v") && t.endsWith("-pre")); - - // Print the previous release - console.log(`Changes from ${oldTag} to ${newTag}\n`); - - // Get the PRs merged between those two tags. - const pullRequestNumbers = getPullRequestNumbers(oldTag, newTag); - - // Get the PRs that were cherry-picked between main and the old tag. - const existingPullRequestNumbers = new Set(getPullRequestNumbers("main", oldTag)); - - // Filter out those existing PRs from the set of new PRs. - const newPullRequestNumbers = pullRequestNumbers.filter((number) => !existingPullRequestNumbers.has(number)); - - // Fetch the pull requests from the GitHub API. - console.log("Merged Pull requests:"); - console.log(DIVIDER); - for (const pullRequestNumber of newPullRequestNumbers) { - const pullRequestApiURL = `${PULL_REQUEST_API_URL}/${pullRequestNumber}`; - - const response = await fetch(pullRequestApiURL, { - headers: { - Authorization: `token ${GITHUB_ACCESS_TOKEN}`, - }, - }); - - const pullRequest = await response.json(); - const releaseNotesHeader = /^\s*Release Notes:(.+)/ims; - - const releaseNotes = pullRequest.body || ""; - let contributor = pullRequest.user?.login ?? "Unable to identify contributor"; - const captures = releaseNotesHeader.exec(releaseNotes); - let notes = captures ? 
captures[1] : "MISSING"; - notes = notes.trim(); - const isStaff = isStaffMember(contributor); - - if (SKIPPABLE_NOTE_REGEX.exec(notes) != null) { - continue; - } - - const credit = getCreditString(pullRequestNumber, contributor, isStaff); - contributor = isStaff ? `${contributor} (staff)` : contributor; - - console.log(`PR Title: ${pullRequest.title}`); - console.log(`Contributor: ${contributor}`); - console.log(`Credit: (${credit})`); - - console.log("Release Notes:"); - console.log(); - console.log(notes); - - console.log(DIVIDER); - } -} - -function getCreditString(pullRequestNumber, contributor, isStaff) { - let credit = ""; - - if (pullRequestNumber) { - const pullRequestMarkdownLink = `[#${pullRequestNumber}](${PULL_REQUEST_WEB_URL}/${pullRequestNumber})`; - credit += pullRequestMarkdownLink; - } - - if (contributor && !isStaff) { - const contributorMarkdownLink = `[${contributor}](${GITHUB_URL}/${contributor})`; - credit += `; thanks ${contributorMarkdownLink}`; - } - - return credit; -} - -function getPullRequestNumbers(oldTag, newTag) { - const pullRequestNumbers = execFileSync("git", ["log", `${oldTag}..${newTag}`, "--oneline"], { encoding: "utf8" }) - .split("\n") - .filter((line) => line.length > 0) - .map((line) => { - const match = line.match(/#(\d+)/); - return match ? match[1] : null; - }) - .filter((line) => line); - - return pullRequestNumbers; -} diff --git a/script/get-stable-channel-release-notes b/script/get-stable-channel-release-notes deleted file mode 100755 index cbaf6497eeee7f6642c4b8b884cb42c3774047d5..0000000000000000000000000000000000000000 --- a/script/get-stable-channel-release-notes +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env node --redirect-warnings=/dev/null - -// This script should be ran before `bump-zed-minor-versions` - -// Prints the changelogs for all preview releases associated with the most -// recent preview minor version. - -// Future TODO: Have the script perform deduplication of lines that were -// included in both past stable and preview patches that shouldn't be mentioned -// again in this week's stable minor release. 
- -// Future TODO: Get changelogs for latest cherry-picked commits on preview and -// stable that didn't make it into a release, as they were cherry picked - -const { execFileSync } = require("child_process"); -let { GITHUB_ACCESS_TOKEN } = process.env; -const GITHUB_TAGS_API_URL = "https://api.github.com/repos/zed-industries/zed/releases/tags"; -const DIVIDER = "-".repeat(80); - -main(); - -async function main() { - if (!GITHUB_ACCESS_TOKEN) { - try { - GITHUB_ACCESS_TOKEN = execFileSync("gh", ["auth", "token"]).toString(); - } catch (error) { - console.log(error); - console.log("No GITHUB_ACCESS_TOKEN and no `gh auth token`"); - process.exit(1); - } - } - - const allTags = execFileSync("git", ["tag", "--sort", "-committerdate"], { encoding: "utf8" }) - .split("\n") - .filter((t) => t.length > 0); - const latestPreviewTag = allTags.filter((t) => t.startsWith("v") && t.endsWith("-pre"))[0]; - const latestPreviewMinorVersion = latestPreviewTag.split(".")[1]; - const latestPreviewTagRegex = new RegExp(`^v(\\d+)\\.(${latestPreviewMinorVersion})\\.(\\d+)-pre$`); - - const parsedPreviewTags = allTags - .map((tag) => { - const match = tag.match(latestPreviewTagRegex); - if (match) { - return { - tag, - version: { - major: parseInt(match[1]), - minor: parseInt(match[2]), - patch: parseInt(match[3]), - }, - }; - } - return null; - }) - .filter((item) => item !== null) - .sort((a, b) => a.version.patch - b.version.patch); - - const matchingPreviewTags = parsedPreviewTags.map((item) => item.tag); - - console.log("Fetching release information for preview tags:"); - console.log(DIVIDER); - - for (const tag of matchingPreviewTags) { - const releaseApiUrl = `${GITHUB_TAGS_API_URL}/${tag}`; - - try { - const response = await fetch(releaseApiUrl, { - headers: { - Authorization: `token ${GITHUB_ACCESS_TOKEN}`, - }, - }); - - if (!response.ok) { - console.log(`Failed to fetch release for ${tag}: ${response.status}`); - continue; - } - - const release = await response.json(); - - console.log(`\nRelease: ${release.name || tag}`); - console.log(`Tag: ${tag}`); - console.log(`Published: ${release.published_at}`); - console.log(`URL: ${release.html_url}`); - console.log("\nRelease Notes:"); - console.log(release.body || "No release notes"); - console.log(DIVIDER); - } catch (error) { - console.log(`Error fetching release for ${tag}:`, error.message); - } - } - - const patchUpdateTags = parsedPreviewTags.filter((tag) => tag.version.patch != 0).map((tag) => tag.tag); - - console.log(); - console.log("Please review the release notes associated with the following patch versions:"); - for (const tag of patchUpdateTags) { - console.log(`- ${tag}`); - } - console.log("Remove items that have already been mentioned in the current published stable versions."); - console.log("https://github.com/zed-industries/zed/releases?q=prerelease%3Afalse&expanded=true"); -} From ec220dcc052ff9ab4215c6cdbed262e7d6258b92 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 19 Nov 2025 17:32:14 -0300 Subject: [PATCH 0228/1030] sweep: Coalesce edits based on line distance rather than time (#43006) Release Notes: - N/A --------- Co-authored-by: Ben Kunkle --- crates/sweep_ai/src/sweep_ai.rs | 104 +++++++++++++++++--------------- 1 file changed, 54 insertions(+), 50 deletions(-) diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs index 41e8f1b2932c49c577ccfa8c4a420366a0c02cb9..75f6f123d5f2460fa7f2f078bec17fad0eb8acaf 100644 --- a/crates/sweep_ai/src/sweep_ai.rs +++ b/crates/sweep_ai/src/sweep_ai.rs @@ -8,7 +8,9 
@@ use feature_flags::FeatureFlag; use futures::AsyncReadExt as _; use gpui::{App, AppContext, Context, Entity, EntityId, Global, Task, WeakEntity}; use http_client::{AsyncBody, Method}; -use language::{Anchor, Buffer, BufferSnapshot, EditPreview, ToOffset as _, ToPoint, text_diff}; +use language::{ + Anchor, Buffer, BufferSnapshot, EditPreview, Point, ToOffset as _, ToPoint, text_diff, +}; use project::Project; use release_channel::{AppCommitSha, AppVersion}; use std::collections::{VecDeque, hash_map}; @@ -28,8 +30,8 @@ use workspace::Workspace; use crate::api::{AutocompleteRequest, AutocompleteResponse, FileChunk}; -const BUFFER_CHANGE_GROUPING_INTERVAL: Duration = Duration::from_secs(1); -const MAX_EVENT_COUNT: usize = 16; +const CHANGE_GROUPING_LINE_SPAN: u32 = 8; +const MAX_EVENT_COUNT: usize = 6; const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; @@ -143,40 +145,6 @@ impl SweepAi { } } - fn push_event(sweep_ai_project: &mut SweepAiProject, event: Event) { - let events = &mut sweep_ai_project.events; - - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - timestamp: last_timestamp, - .. - }) = events.back_mut() - { - // Coalesce edits for the same buffer when they happen one after the other. - let Event::BufferChange { - old_snapshot, - new_snapshot, - timestamp, - } = &event; - - if timestamp.duration_since(*last_timestamp) <= BUFFER_CHANGE_GROUPING_INTERVAL - && old_snapshot.remote_id() == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version - { - *last_new_snapshot = new_snapshot.clone(); - *last_timestamp = *timestamp; - return; - } - } - - if events.len() >= MAX_EVENT_COUNT { - // These are halved instead of popping to improve prompt caching. - events.drain(..MAX_EVENT_COUNT / 2); - } - - events.push_back(event); - } - pub fn register_buffer( &mut self, buffer: &Entity, @@ -314,6 +282,8 @@ impl SweepAi { }) .collect(); + eprintln!("{recent_changes}"); + let request_body = AutocompleteRequest { debug_info, repo_name, @@ -420,24 +390,58 @@ impl SweepAi { buffer: &Entity, project: &Entity, cx: &mut Context, - ) -> BufferSnapshot { + ) { let sweep_ai_project = self.get_or_init_sweep_ai_project(project, cx); let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version != registered_buffer.snapshot.version { - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - Self::push_event( - sweep_ai_project, - Event::BufferChange { - old_snapshot, - new_snapshot: new_snapshot.clone(), - timestamp: Instant::now(), - }, - ); + if new_snapshot.version == registered_buffer.snapshot.version { + return; } - new_snapshot + let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); + let end_edit_anchor = new_snapshot + .anchored_edits_since::(&old_snapshot.version) + .last() + .map(|(_, range)| range.end); + let events = &mut sweep_ai_project.events; + + if let Some(Event::BufferChange { + new_snapshot: last_new_snapshot, + end_edit_anchor: last_end_edit_anchor, + .. 
+ }) = events.back_mut() + { + let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() + == last_new_snapshot.remote_id() + && old_snapshot.version == last_new_snapshot.version; + + let should_coalesce = is_next_snapshot_of_same_buffer + && end_edit_anchor + .as_ref() + .zip(last_end_edit_anchor.as_ref()) + .is_some_and(|(a, b)| { + let a = a.to_point(&new_snapshot); + let b = b.to_point(&new_snapshot); + a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN + }); + + if should_coalesce { + *last_end_edit_anchor = end_edit_anchor; + *last_new_snapshot = new_snapshot; + return; + } + } + + if events.len() >= MAX_EVENT_COUNT { + events.pop_front(); + } + + events.push_back(Event::BufferChange { + old_snapshot, + new_snapshot, + end_edit_anchor, + }); } } @@ -451,7 +455,7 @@ pub enum Event { BufferChange { old_snapshot: BufferSnapshot, new_snapshot: BufferSnapshot, - timestamp: Instant, + end_edit_anchor: Option, }, } From 68b87fc308f17b0071311551d4e11badb4da7698 Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Wed, 19 Nov 2025 22:06:13 +0100 Subject: [PATCH 0229/1030] Use fixed calloop (#43081) Calloop (used by our linux executor) was running all futures regardless of how long they take. Unfortunately some of our futures are rather busy and take a while (>10ms). Running all of them froze the editor for multiple seconds or even minutes when opening a large project diff (git reset HEAD~2000 in chromium for example). Closes #ISSUE Release Notes: - N/A --------- Co-authored-by: Jakub Konka --- Cargo.lock | 16 +++++++--------- Cargo.toml | 1 + crates/gpui/Cargo.toml | 4 ++-- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f167ce3ff021521a2f829447e8f3fba5207b4259..0a3d358410784f5fd9057a30f9a70d49e2fd2d90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2617,26 +2617,24 @@ dependencies = [ [[package]] name = "calloop" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b99da2f8558ca23c71f4fd15dc57c906239752dd27ff3c00a1d56b685b7cbfec" +version = "0.14.3" +source = "git+https://github.com/zed-industries/calloop#eb6b4fd17b9af5ecc226546bdd04185391b3e265" dependencies = [ "bitflags 2.9.4", - "log", "polling", - "rustix 0.38.44", + "rustix 1.1.2", "slab", - "thiserror 1.0.69", + "tracing", ] [[package]] name = "calloop-wayland-source" -version = "0.3.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95a66a987056935f7efce4ab5668920b5d0dac4a7c99991a67395f13702ddd20" +checksum = "138efcf0940a02ebf0cc8d1eff41a1682a46b431630f4c52450d6265876021fa" dependencies = [ "calloop", - "rustix 0.38.44", + "rustix 1.1.2", "wayland-backend", "wayland-client", ] diff --git a/Cargo.toml b/Cargo.toml index 75ad1e34e07894fd0892ff836da758e68efdc824..03a86c9e25bd8f5a1bb8498b3cb0169055672ad4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -784,6 +784,7 @@ features = [ notify = { git = "https://github.com/zed-industries/notify.git", rev = "b4588b2e5aee68f4c0e100f140e808cbce7b1419" } notify-types = { git = "https://github.com/zed-industries/notify.git", rev = "b4588b2e5aee68f4c0e100f140e808cbce7b1419" } windows-capture = { git = "https://github.com/zed-industries/windows-capture.git", rev = "f0d6c1b6691db75461b732f6d5ff56eed002eeb9" } +calloop = { git = "https://github.com/zed-industries/calloop" } [profile.dev] split-debuginfo = "unpacked" diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml index a6649f011d42410671bc7e317a3492803418cc2a..f8c668cbe71ccce399987cb6887abd5e9ef5cb92 
100644 --- a/crates/gpui/Cargo.toml +++ b/crates/gpui/Cargo.toml @@ -187,12 +187,12 @@ font-kit = { git = "https://github.com/zed-industries/font-kit", rev = "11052312 "source-fontconfig-dlopen", ], optional = true } -calloop = { version = "0.13.0" } +calloop = { version = "0.14.3" } filedescriptor = { version = "0.8.2", optional = true } open = { version = "5.2.0", optional = true } # Wayland -calloop-wayland-source = { version = "0.3.0", optional = true } +calloop-wayland-source = { version = "0.4.1", optional = true } wayland-backend = { version = "0.3.3", features = [ "client_system", "dlopen", From 09e02a483afa85ad86a03a1fdfc047f63ee19805 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Wed, 19 Nov 2025 13:09:19 -0800 Subject: [PATCH 0230/1030] Allow running zeta evals against sweep (#43039) This PR restructures the subcommands in `zeta-cli`, so that the prediction engine (currently `zeta1` vs `zeta2`) is no longer the highest order subcommand. Instead, there is just one layer of subcommands: `eval`, `predict`, `context`, etc. Within these commands, there are flags for using `zeta1`, `zeta2`, and now `sweep`. Release Notes: - N/A --------- Co-authored-by: Ben Kunkle Co-authored-by: Agus --- Cargo.lock | 1 + crates/sweep_ai/src/sweep_ai.rs | 64 +++-- crates/workspace/src/workspace.rs | 2 +- crates/zeta2/src/zeta2.rs | 15 +- crates/zeta_cli/Cargo.toml | 1 + crates/zeta_cli/src/evaluate.rs | 102 +++---- crates/zeta_cli/src/example.rs | 39 +-- crates/zeta_cli/src/main.rs | 292 ++++++++++--------- crates/zeta_cli/src/predict.rs | 449 ++++++++++++++++-------------- 9 files changed, 517 insertions(+), 448 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0a3d358410784f5fd9057a30f9a70d49e2fd2d90..4f9a3f26e9a20df498bd3b735cfec54aa77c77cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21864,6 +21864,7 @@ dependencies = [ "shellexpand 2.1.2", "smol", "soa-rs", + "sweep_ai", "terminal_view", "toml 0.8.23", "util", diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs index 75f6f123d5f2460fa7f2f078bec17fad0eb8acaf..1b4c92120d866a218987f36161e9520a0f3f703a 100644 --- a/crates/sweep_ai/src/sweep_ai.rs +++ b/crates/sweep_ai/src/sweep_ai.rs @@ -11,7 +11,7 @@ use http_client::{AsyncBody, Method}; use language::{ Anchor, Buffer, BufferSnapshot, EditPreview, Point, ToOffset as _, ToPoint, text_diff, }; -use project::Project; +use project::{Project, ProjectPath}; use release_channel::{AppCommitSha, AppVersion}; use std::collections::{VecDeque, hash_map}; use std::fmt::{self, Display}; @@ -48,11 +48,11 @@ impl Global for SweepAiGlobal {} #[derive(Clone)] pub struct EditPrediction { - id: EditPredictionId, - path: Arc, - edits: Arc<[(Range, Arc)]>, - snapshot: BufferSnapshot, - edit_preview: EditPreview, + pub id: EditPredictionId, + pub path: Arc, + pub edits: Arc<[(Range, Arc)]>, + pub snapshot: BufferSnapshot, + pub edit_preview: EditPreview, } impl EditPrediction { @@ -110,7 +110,7 @@ impl SweepAi { } } - fn new(cx: &mut Context) -> Self { + pub fn new(cx: &mut Context) -> Self { Self { api_token: std::env::var("SWEEP_AI_TOKEN").ok(), projects: HashMap::default(), @@ -195,8 +195,8 @@ impl SweepAi { pub fn request_completion( &mut self, - workspace: &WeakEntity, project: &Entity, + recent_buffers: impl Iterator, active_buffer: &Entity, position: language::Anchor, cx: &mut Context, @@ -223,26 +223,17 @@ impl SweepAi { let events = project_state.events.clone(); let http_client = cx.http_client(); - let Some(recent_buffers) = workspace - .read_with(cx, |workspace, cx| { - 
workspace - .recent_navigation_history_iter(cx) - .filter_map(|(project_path, _)| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - .collect::>() + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } }) - .log_err() - else { - return Task::ready(Ok(None)); - }; + .take(3) + .collect::>(); let result = cx.background_spawn({ let full_path = full_path.clone(); @@ -255,7 +246,7 @@ impl SweepAi { writeln!(&mut recent_changes, "{event}")?; } - let file_chunks = recent_buffers + let file_chunks = recent_buffer_snapshots .into_iter() .map(|snapshot| { let end_point = language::Point::new(30, 0).min(snapshot.max_point()); @@ -623,8 +614,23 @@ impl edit_prediction::EditPredictionProvider for SweepAiEditPredictionProvider { let completion_request = this.update(cx, |this, cx| { this.last_request_timestamp = Instant::now(); + this.sweep_ai.update(cx, |sweep_ai, cx| { - sweep_ai.request_completion(&workspace, &project, &buffer, position, cx) + let Some(recent_buffers) = workspace + .read_with(cx, |workspace, cx| { + workspace.recent_navigation_history_iter(cx) + }) + .log_err() + else { + return Task::ready(Ok(None)); + }; + sweep_ai.request_completion( + &project, + recent_buffers.map(move |(project_path, _)| project_path), + &buffer, + position, + cx, + ) }) }); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 20139e3ae8104fc0d4c1bce98f265144ef344f0d..14b33af6cd1f8778a9bbafeb8e9854cc9fc11247 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1845,7 +1845,7 @@ impl Workspace { pub fn recent_navigation_history_iter( &self, cx: &App, - ) -> impl Iterator)> { + ) -> impl Iterator)> + use<> { let mut abs_paths_opened: HashMap> = HashMap::default(); let mut history: HashMap, usize)> = HashMap::default(); diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 881a7254f876e1b2df636513480115bf36489a24..099cd95134ec3d1fd59bbc33306bc439c0a8ee1a 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -50,7 +50,7 @@ pub mod udiff; mod xml_edits; use crate::assemble_excerpts::assemble_excerpts; -use crate::prediction::EditPrediction; +pub use crate::prediction::EditPrediction; pub use crate::prediction::EditPredictionId; pub use provider::ZetaEditPredictionProvider; @@ -327,6 +327,14 @@ impl Event { } } } + + pub fn project_path(&self, cx: &App) -> Option { + match self { + Event::BufferChange { new_snapshot, .. 
} => new_snapshot + .file() + .map(|f| project::ProjectPath::from_file(f.as_ref(), cx)), + } + } } impl Zeta { @@ -401,7 +409,10 @@ impl Zeta { } } - pub fn history_for_project(&self, project: &Entity) -> impl Iterator { + pub fn history_for_project( + &self, + project: &Entity, + ) -> impl DoubleEndedIterator { self.projects .get(&project.entity_id()) .map(|project| project.events.iter()) diff --git a/crates/zeta_cli/Cargo.toml b/crates/zeta_cli/Cargo.toml index e18cf54787ca98e2be60db4977dd2de18e9c09e2..35fbcb1c61097156d2f0e172d700ed12d3d3894e 100644 --- a/crates/zeta_cli/Cargo.toml +++ b/crates/zeta_cli/Cargo.toml @@ -49,6 +49,7 @@ settings.workspace = true shellexpand.workspace = true smol.workspace = true soa-rs = "0.8.1" +sweep_ai.workspace = true terminal_view.workspace = true toml.workspace = true util.workspace = true diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index d808e3d743d7009ca66a75b3a349914b0a4f5447..09fbbb29dd6cf58910a2b6e6ff7fb4a31fc4a10a 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,41 +1,25 @@ use std::{ collections::{BTreeSet, HashMap}, io::{IsTerminal, Write}, - path::PathBuf, sync::Arc, }; use anyhow::Result; -use clap::Args; use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; +use sweep_ai::SweepAi; use util::ResultExt as _; use zeta2::{Zeta, udiff::DiffLine}; use crate::{ - PromptFormat, + EvaluateArguments, PredictionOptions, PredictionProvider, example::{Example, NamedExample}, headless::ZetaCliAppState, paths::print_run_data_dir, - predict::{CacheMode, PredictionDetails, zeta2_predict}, + predict::{PredictionDetails, perform_predict, setup_sweep, setup_zeta}, }; -#[derive(Debug, Args)] -pub struct EvaluateArguments { - example_paths: Vec, - #[arg(long, value_enum, default_value_t = PromptFormat::default())] - prompt_format: PromptFormat, - #[arg(long)] - use_expected_context: bool, - #[clap(long, value_enum, default_value_t = CacheMode::default())] - cache: CacheMode, - #[clap(short, long, default_value_t = 1, alias = "repeat")] - repetitions: u16, - #[arg(long)] - skip_prediction: bool, -} - #[derive(Debug)] pub(crate) struct ExecutionData { execution_id: String, @@ -52,38 +36,56 @@ pub async fn run_evaluate( eprintln!("No examples provided"); return; } + let all_tasks = args.example_paths.into_iter().map(|path| { + let options = args.options.clone(); let app_state = app_state.clone(); let example = NamedExample::load(&path).expect("Failed to load example"); cx.spawn(async move |cx| { - let (project, zetas, _edited_buffers) = example - .setup_project(&app_state, args.repetitions, cx) - .await - .unwrap(); - - let tasks = zetas.into_iter().enumerate().map(|(repetition_ix, zeta)| { - let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); - let example = example.clone(); - let project = project.clone(); - - cx.spawn(async move |cx| { - let name = example.name.clone(); - run_evaluate_one( - example, - repetition_ix, - project, - zeta, - args.prompt_format, - args.use_expected_context, - !args.skip_prediction, - args.cache, - cx, + let project = example.setup_project(&app_state, cx).await.unwrap(); + + let providers = (0..args.repetitions) + .map(|_| { + ( + setup_zeta(&project, &app_state, cx).unwrap(), + if matches!(args.options.provider, PredictionProvider::Sweep) { + Some(setup_sweep(&project, cx).unwrap()) + } else { + None + }, ) - .await - .map_err(|err| (err, name, repetition_ix)) }) - }); + .collect::>(); + + let _edited_buffers = 
example.apply_edit_history(&project, cx).await.unwrap(); + + let tasks = + providers + .into_iter() + .enumerate() + .map(move |(repetition_ix, (zeta, sweep))| { + let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); + let example = example.clone(); + let project = project.clone(); + let options = options.clone(); + + cx.spawn(async move |cx| { + let name = example.name.clone(); + run_evaluate_one( + example, + repetition_ix, + project, + zeta, + sweep, + options, + !args.skip_prediction, + cx, + ) + .await + .map_err(|err| (err, name, repetition_ix)) + }) + }); futures::future::join_all(tasks).await }) }); @@ -175,20 +177,18 @@ pub async fn run_evaluate_one( repetition_ix: Option, project: Entity, zeta: Entity, - prompt_format: PromptFormat, - use_expected_context: bool, + sweep: Option>, + prediction_options: PredictionOptions, predict: bool, - cache_mode: CacheMode, cx: &mut AsyncApp, ) -> Result<(EvaluationResult, ExecutionData)> { - let predict_result = zeta2_predict( + let predict_result = perform_predict( example.clone(), project, zeta, + sweep, repetition_ix, - prompt_format, - use_expected_context, - cache_mode, + prediction_options, cx, ) .await?; diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 300e453af93bd3c69a47f5e155e274431aa01c92..67eed23f90dc1a5b48a53a2a7de07f500396ba9f 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -20,13 +20,13 @@ use futures::{ lock::{Mutex, OwnedMutexGuard}, }; use futures::{FutureExt as _, future::Shared}; -use gpui::{AppContext as _, AsyncApp, Entity, Task, http_client::Url}; +use gpui::{AsyncApp, Entity, Task, http_client::Url}; use language::{Anchor, Buffer}; use project::{Project, ProjectPath}; use pulldown_cmark::CowStr; use serde::{Deserialize, Serialize}; use util::{paths::PathStyle, rel_path::RelPath}; -use zeta2::{Zeta, udiff::OpenedBuffers}; +use zeta2::udiff::OpenedBuffers; use crate::paths::{REPOS_DIR, WORKTREES_DIR}; @@ -318,12 +318,11 @@ impl NamedExample { } } - pub async fn setup_project<'a>( - &'a self, + pub async fn setup_project( + &self, app_state: &Arc, - repetitions: u16, cx: &mut AsyncApp, - ) -> Result<(Entity, Vec>, OpenedBuffers<'a>)> { + ) -> Result> { let worktree_path = self.setup_worktree().await?; static AUTHENTICATED: OnceLock>> = OnceLock::new(); @@ -365,33 +364,7 @@ impl NamedExample { })? .await; - let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; - - let zetas = (0..repetitions) - .map(|_| { - let zeta = cx.new(|cx| { - zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) - })?; - - cx.subscribe(&buffer_store, { - let project = project.clone(); - let zeta = zeta.clone(); - move |_, event, cx| match event { - project::buffer_store::BufferStoreEvent::BufferAdded(buffer) => { - zeta.update(cx, |zeta, cx| zeta.register_buffer(&buffer, &project, cx)); - } - _ => {} - } - })? 
- .detach(); - - anyhow::Ok(zeta) - }) - .collect::>>()?; - - let edited_buffers = self.apply_edit_history(&project, cx).await?; - - anyhow::Ok((project, zetas, edited_buffers)) + anyhow::Ok(project) } pub async fn setup_worktree(&self) -> Result { diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 517deb6ec7482ca2712a347531b24eca5ed16796..803e02b10cfb7533341a3009e0325a7bcf13df1e 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -7,13 +7,18 @@ mod source_location; mod syntax_retrieval_stats; mod util; -use crate::evaluate::{EvaluateArguments, run_evaluate}; -use crate::example::{ExampleFormat, NamedExample}; -use crate::predict::{PredictArguments, run_zeta2_predict}; -use crate::syntax_retrieval_stats::retrieval_stats; +use crate::{ + evaluate::run_evaluate, + example::{ExampleFormat, NamedExample}, + headless::ZetaCliAppState, + predict::run_predict, + source_location::SourceLocation, + syntax_retrieval_stats::retrieval_stats, + util::{open_buffer, open_buffer_with_language_server}, +}; use ::util::paths::PathStyle; use anyhow::{Result, anyhow}; -use clap::{Args, Parser, Subcommand}; +use clap::{Args, Parser, Subcommand, ValueEnum}; use cloud_llm_client::predict_edits_v3; use edit_prediction_context::{ EditPredictionContextOptions, EditPredictionExcerptOptions, EditPredictionScoreOptions, @@ -28,10 +33,6 @@ use std::time::Duration; use std::{collections::HashSet, path::PathBuf, str::FromStr, sync::Arc}; use zeta2::ContextMode; -use crate::headless::ZetaCliAppState; -use crate::source_location::SourceLocation; -use crate::util::{open_buffer, open_buffer_with_language_server}; - #[derive(Parser, Debug)] #[command(name = "zeta")] struct ZetaCliArgs { @@ -43,14 +44,10 @@ struct ZetaCliArgs { #[derive(Subcommand, Debug)] enum Command { - Zeta1 { - #[command(subcommand)] - command: Zeta1Command, - }, - Zeta2 { - #[command(subcommand)] - command: Zeta2Command, - }, + Context(ContextArgs), + ContextStats(ContextStatsArgs), + Predict(PredictArguments), + Eval(EvaluateArguments), ConvertExample { path: PathBuf, #[arg(long, value_enum, default_value_t = ExampleFormat::Md)] @@ -59,49 +56,24 @@ enum Command { Clean, } -#[derive(Subcommand, Debug)] -enum Zeta1Command { - Context { - #[clap(flatten)] - context_args: ContextArgs, - }, -} - -#[derive(Subcommand, Debug)] -enum Zeta2Command { - Syntax { - #[clap(flatten)] - args: Zeta2Args, - #[clap(flatten)] - syntax_args: Zeta2SyntaxArgs, - #[command(subcommand)] - command: Zeta2SyntaxCommand, - }, - Predict(PredictArguments), - Eval(EvaluateArguments), -} - -#[derive(Subcommand, Debug)] -enum Zeta2SyntaxCommand { - Context { - #[clap(flatten)] - context_args: ContextArgs, - }, - Stats { - #[arg(long)] - worktree: PathBuf, - #[arg(long)] - extension: Option, - #[arg(long)] - limit: Option, - #[arg(long)] - skip: Option, - }, +#[derive(Debug, Args)] +struct ContextStatsArgs { + #[arg(long)] + worktree: PathBuf, + #[arg(long)] + extension: Option, + #[arg(long)] + limit: Option, + #[arg(long)] + skip: Option, + #[clap(flatten)] + zeta2_args: Zeta2Args, } #[derive(Debug, Args)] -#[group(requires = "worktree")] struct ContextArgs { + #[arg(long)] + provider: ContextProvider, #[arg(long)] worktree: PathBuf, #[arg(long)] @@ -110,9 +82,18 @@ struct ContextArgs { use_language_server: bool, #[arg(long)] edit_history: Option, + #[clap(flatten)] + zeta2_args: Zeta2Args, } -#[derive(Debug, Args)] +#[derive(clap::ValueEnum, Default, Debug, Clone, Copy)] +enum ContextProvider { + Zeta1, + #[default] + Syntax, +} + 
+#[derive(Clone, Debug, Args)] struct Zeta2Args { #[arg(long, default_value_t = 8192)] max_prompt_bytes: usize, @@ -130,39 +111,111 @@ struct Zeta2Args { output_format: OutputFormat, #[arg(long, default_value_t = 42)] file_indexing_parallelism: usize, -} - -#[derive(Debug, Args)] -struct Zeta2SyntaxArgs { #[arg(long, default_value_t = false)] disable_imports_gathering: bool, #[arg(long, default_value_t = u8::MAX)] max_retrieved_definitions: u8, } -fn syntax_args_to_options( - zeta2_args: &Zeta2Args, - syntax_args: &Zeta2SyntaxArgs, - omit_excerpt_overlaps: bool, -) -> zeta2::ZetaOptions { +#[derive(Debug, Args)] +pub struct PredictArguments { + #[clap(long, short, value_enum, default_value_t = PredictionsOutputFormat::Md)] + format: PredictionsOutputFormat, + example_path: PathBuf, + #[clap(flatten)] + options: PredictionOptions, +} + +#[derive(Clone, Debug, Args)] +pub struct PredictionOptions { + #[arg(long)] + use_expected_context: bool, + #[clap(flatten)] + zeta2: Zeta2Args, + #[clap(long)] + provider: PredictionProvider, + #[clap(long, value_enum, default_value_t = CacheMode::default())] + cache: CacheMode, +} + +#[derive(Debug, ValueEnum, Default, Clone, Copy, PartialEq)] +pub enum CacheMode { + /// Use cached LLM requests and responses, except when multiple repetitions are requested + #[default] + Auto, + /// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint. + #[value(alias = "request")] + Requests, + /// Ignore existing cache entries for both LLM and search. + Skip, + /// Use cached LLM responses AND search results for full determinism. Fails if they haven't been cached yet. + /// Useful for reproducing results and fixing bugs outside of search queries + Force, +} + +impl CacheMode { + fn use_cached_llm_responses(&self) -> bool { + self.assert_not_auto(); + matches!(self, CacheMode::Requests | CacheMode::Force) + } + + fn use_cached_search_results(&self) -> bool { + self.assert_not_auto(); + matches!(self, CacheMode::Force) + } + + fn assert_not_auto(&self) { + assert_ne!( + *self, + CacheMode::Auto, + "Cache mode should not be auto at this point!" 
+ ); + } +} + +#[derive(clap::ValueEnum, Debug, Clone)] +pub enum PredictionsOutputFormat { + Json, + Md, + Diff, +} + +#[derive(Debug, Args)] +pub struct EvaluateArguments { + example_paths: Vec, + #[clap(flatten)] + options: PredictionOptions, + #[clap(short, long, default_value_t = 1, alias = "repeat")] + repetitions: u16, + #[arg(long)] + skip_prediction: bool, +} + +#[derive(clap::ValueEnum, Default, Debug, Clone, Copy)] +enum PredictionProvider { + #[default] + Zeta2, + Sweep, +} + +fn zeta2_args_to_options(args: &Zeta2Args, omit_excerpt_overlaps: bool) -> zeta2::ZetaOptions { zeta2::ZetaOptions { context: ContextMode::Syntax(EditPredictionContextOptions { - max_retrieved_declarations: syntax_args.max_retrieved_definitions, - use_imports: !syntax_args.disable_imports_gathering, + max_retrieved_declarations: args.max_retrieved_definitions, + use_imports: !args.disable_imports_gathering, excerpt: EditPredictionExcerptOptions { - max_bytes: zeta2_args.max_excerpt_bytes, - min_bytes: zeta2_args.min_excerpt_bytes, - target_before_cursor_over_total_bytes: zeta2_args - .target_before_cursor_over_total_bytes, + max_bytes: args.max_excerpt_bytes, + min_bytes: args.min_excerpt_bytes, + target_before_cursor_over_total_bytes: args.target_before_cursor_over_total_bytes, }, score: EditPredictionScoreOptions { omit_excerpt_overlaps, }, }), - max_diagnostic_bytes: zeta2_args.max_diagnostic_bytes, - max_prompt_bytes: zeta2_args.max_prompt_bytes, - prompt_format: zeta2_args.prompt_format.into(), - file_indexing_parallelism: zeta2_args.file_indexing_parallelism, + max_diagnostic_bytes: args.max_diagnostic_bytes, + max_prompt_bytes: args.max_prompt_bytes, + prompt_format: args.prompt_format.into(), + file_indexing_parallelism: args.file_indexing_parallelism, buffer_change_grouping_interval: Duration::ZERO, } } @@ -320,8 +373,6 @@ async fn load_context( } async fn zeta2_syntax_context( - zeta2_args: Zeta2Args, - syntax_args: Zeta2SyntaxArgs, args: ContextArgs, app_state: &Arc, cx: &mut AsyncApp, @@ -347,7 +398,7 @@ async fn zeta2_syntax_context( zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) }); let indexing_done_task = zeta.update(cx, |zeta, cx| { - zeta.set_options(syntax_args_to_options(&zeta2_args, &syntax_args, true)); + zeta.set_options(zeta2_args_to_options(&args.zeta2_args, true)); zeta.register_buffer(&buffer, &project, cx); zeta.wait_for_initial_indexing(&project, cx) }); @@ -362,7 +413,7 @@ async fn zeta2_syntax_context( let (prompt_string, section_labels) = cloud_zeta2_prompt::build_prompt(&request)?; - match zeta2_args.output_format { + match args.zeta2_args.output_format { OutputFormat::Prompt => anyhow::Ok(prompt_string), OutputFormat::Request => anyhow::Ok(serde_json::to_string_pretty(&request)?), OutputFormat::Full => anyhow::Ok(serde_json::to_string_pretty(&json!({ @@ -427,57 +478,40 @@ fn main() { panic!("Expected a command"); } } - Some(Command::Zeta1 { - command: Zeta1Command::Context { context_args }, - }) => { - let context = zeta1_context(context_args, &app_state, cx).await.unwrap(); - let result = serde_json::to_string_pretty(&context.body).unwrap(); - println!("{}", result); + Some(Command::ContextStats(arguments)) => { + let result = retrieval_stats( + arguments.worktree, + app_state, + arguments.extension, + arguments.limit, + arguments.skip, + zeta2_args_to_options(&arguments.zeta2_args, false), + cx, + ) + .await; + println!("{}", result.unwrap()); } - Some(Command::Zeta2 { command }) => match command { - Zeta2Command::Predict(arguments) => { - 
run_zeta2_predict(arguments, &app_state, cx).await; - } - Zeta2Command::Eval(arguments) => { - run_evaluate(arguments, &app_state, cx).await; - } - Zeta2Command::Syntax { - args, - syntax_args, - command, - } => { - let result = match command { - Zeta2SyntaxCommand::Context { context_args } => { - zeta2_syntax_context( - args, - syntax_args, - context_args, - &app_state, - cx, - ) + Some(Command::Context(context_args)) => { + let result = match context_args.provider { + ContextProvider::Zeta1 => { + let context = + zeta1_context(context_args, &app_state, cx).await.unwrap(); + serde_json::to_string_pretty(&context.body).unwrap() + } + ContextProvider::Syntax => { + zeta2_syntax_context(context_args, &app_state, cx) .await - } - Zeta2SyntaxCommand::Stats { - worktree, - extension, - limit, - skip, - } => { - retrieval_stats( - worktree, - app_state, - extension, - limit, - skip, - syntax_args_to_options(&args, &syntax_args, false), - cx, - ) - .await - } - }; - println!("{}", result.unwrap()); - } - }, + .unwrap() + } + }; + println!("{}", result); + } + Some(Command::Predict(arguments)) => { + run_predict(arguments, &app_state, cx).await; + } + Some(Command::Eval(arguments)) => { + run_evaluate(arguments, &app_state, cx).await; + } Some(Command::ConvertExample { path, output_format, diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 28eb7e426c21126b1c91dc62132c1bf460a93661..4505035eaf992751e85216a314b731a12ffbd342 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -1,16 +1,18 @@ -use crate::PromptFormat; use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample}; use crate::headless::ZetaCliAppState; use crate::paths::{CACHE_DIR, LATEST_EXAMPLE_RUN_DIR, RUN_DIR, print_run_data_dir}; +use crate::{ + CacheMode, PredictArguments, PredictionOptions, PredictionProvider, PredictionsOutputFormat, +}; use ::serde::Serialize; use anyhow::{Context, Result, anyhow}; -use clap::{Args, ValueEnum}; use cloud_zeta2_prompt::{CURSOR_MARKER, write_codeblock}; use collections::HashMap; use futures::StreamExt as _; use gpui::{AppContext, AsyncApp, Entity}; use language::{Anchor, Buffer, Point}; use project::Project; +use project::buffer_store::BufferStoreEvent; use serde::Deserialize; use std::fs; use std::io::{IsTerminal, Write}; @@ -19,98 +21,86 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; +use sweep_ai::SweepAi; use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; -#[derive(Debug, Args)] -pub struct PredictArguments { - #[arg(long, value_enum, default_value_t = PromptFormat::default())] - prompt_format: PromptFormat, - #[arg(long)] - use_expected_context: bool, - #[clap(long, short, value_enum, default_value_t = PredictionsOutputFormat::Md)] - format: PredictionsOutputFormat, - example_path: PathBuf, - #[clap(long, value_enum, default_value_t = CacheMode::default())] - cache: CacheMode, -} - -#[derive(Debug, ValueEnum, Default, Clone, Copy, PartialEq)] -pub enum CacheMode { - /// Use cached LLM requests and responses, except when multiple repetitions are requested - #[default] - Auto, - /// Use cached LLM requests and responses, based on the hash of the prompt and the endpoint. - #[value(alias = "request")] - Requests, - /// Ignore existing cache entries for both LLM and search. - Skip, - /// Use cached LLM responses AND search results for full determinism. Fails if they haven't been cached yet. 
- /// Useful for reproducing results and fixing bugs outside of search queries - Force, -} - -impl CacheMode { - fn use_cached_llm_responses(&self) -> bool { - self.assert_not_auto(); - matches!(self, CacheMode::Requests | CacheMode::Force) - } - - fn use_cached_search_results(&self) -> bool { - self.assert_not_auto(); - matches!(self, CacheMode::Force) - } - - fn assert_not_auto(&self) { - assert_ne!( - *self, - CacheMode::Auto, - "Cache mode should not be auto at this point!" - ); - } -} - -#[derive(clap::ValueEnum, Debug, Clone)] -pub enum PredictionsOutputFormat { - Json, - Md, - Diff, -} - -pub async fn run_zeta2_predict( +pub async fn run_predict( args: PredictArguments, app_state: &Arc, cx: &mut AsyncApp, ) { let example = NamedExample::load(args.example_path).unwrap(); - let (project, mut zetas, _edited_buffers) = - example.setup_project(app_state, 1, cx).await.unwrap(); - let result = zeta2_predict( - example, - project, - zetas.remove(0), - None, - args.prompt_format, - args.use_expected_context, - args.cache, - cx, - ) - .await - .unwrap(); + let project = example.setup_project(app_state, cx).await.unwrap(); + let zeta = setup_zeta(&project, app_state, cx).unwrap(); + let sweep = if matches!(args.options.provider, PredictionProvider::Sweep) { + Some(setup_sweep(&project, cx).unwrap()) + } else { + None + }; + let _edited_buffers = example.apply_edit_history(&project, cx).await.unwrap(); + let result = perform_predict(example, project, zeta, sweep, None, args.options, cx) + .await + .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); print_run_data_dir(true, std::io::stdout().is_terminal()); } -pub async fn zeta2_predict( +pub fn setup_zeta( + project: &Entity, + app_state: &Arc, + cx: &mut AsyncApp, +) -> Result> { + let zeta = + cx.new(|cx| zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; + + let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; + + cx.subscribe(&buffer_store, { + let project = project.clone(); + let zeta = zeta.clone(); + move |_, event, cx| match event { + BufferStoreEvent::BufferAdded(buffer) => { + zeta.update(cx, |zeta, cx| zeta.register_buffer(&buffer, &project, cx)); + } + _ => {} + } + })? + .detach(); + + anyhow::Ok(zeta) +} + +pub fn setup_sweep(project: &Entity, cx: &mut AsyncApp) -> Result> { + let sweep = cx.new(|cx| SweepAi::new(cx))?; + + let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; + + cx.subscribe(&buffer_store, { + let project = project.clone(); + let sweep = sweep.clone(); + move |_, event, cx| match event { + BufferStoreEvent::BufferAdded(buffer) => { + sweep.update(cx, |sweep, cx| sweep.register_buffer(&buffer, &project, cx)); + } + _ => {} + } + })? 
+ .detach(); + + anyhow::Ok(sweep) +} + +pub async fn perform_predict( example: NamedExample, project: Entity, zeta: Entity, + sweep: Option>, repetition_ix: Option, - prompt_format: PromptFormat, - use_expected_context: bool, - mut cache_mode: CacheMode, + options: PredictionOptions, cx: &mut AsyncApp, ) -> Result { + let mut cache_mode = options.cache; if repetition_ix.is_some() { if cache_mode != CacheMode::Auto && cache_mode != CacheMode::Skip { panic!("Repetitions are not supported in Auto cache mode"); @@ -148,94 +138,8 @@ pub async fn zeta2_predict( let (cursor_buffer, cursor_anchor) = example.cursor_position(&project, cx).await?; let result = Arc::new(Mutex::new(PredictionDetails::new(example_run_dir.clone()))); - let mut debug_rx = zeta.update(cx, |zeta, _| zeta.debug_info())?; - - let debug_task = cx.background_spawn({ - let result = result.clone(); - async move { - let mut start_time = None; - let mut search_queries_generated_at = None; - let mut search_queries_executed_at = None; - while let Some(event) = debug_rx.next().await { - match event { - zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { - start_time = Some(info.timestamp); - fs::write( - example_run_dir.join("search_prompt.md"), - &info.search_prompt, - )?; - } - zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { - search_queries_generated_at = Some(info.timestamp); - fs::write( - example_run_dir.join("search_queries.json"), - serde_json::to_string_pretty(&info.search_queries).unwrap(), - )?; - } - zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { - search_queries_executed_at = Some(info.timestamp); - } - zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} - zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { - let prediction_started_at = Instant::now(); - start_time.get_or_insert(prediction_started_at); - let prompt = request.local_prompt.unwrap_or_default(); - fs::write(example_run_dir.join("prediction_prompt.md"), &prompt)?; - - { - let mut result = result.lock().unwrap(); - result.prompt_len = prompt.chars().count(); - - for included_file in request.request.included_files { - let insertions = - vec![(request.request.cursor_point, CURSOR_MARKER)]; - result.excerpts.extend(included_file.excerpts.iter().map( - |excerpt| ActualExcerpt { - path: included_file.path.components().skip(1).collect(), - text: String::from(excerpt.text.as_ref()), - }, - )); - write_codeblock( - &included_file.path, - included_file.excerpts.iter(), - if included_file.path == request.request.excerpt_path { - &insertions - } else { - &[] - }, - included_file.max_row, - false, - &mut result.excerpts_text, - ); - } - } - let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?; - let response = zeta2::text_from_response(response).unwrap_or_default(); - let prediction_finished_at = Instant::now(); - fs::write(example_run_dir.join("prediction_response.md"), &response)?; - - let mut result = result.lock().unwrap(); - result.generated_len = response.chars().count(); - - if !use_expected_context { - result.planning_search_time = - Some(search_queries_generated_at.unwrap() - start_time.unwrap()); - result.running_search_time = Some( - search_queries_executed_at.unwrap() - - search_queries_generated_at.unwrap(), - ); - } - result.prediction_time = prediction_finished_at - prediction_started_at; - result.total_time = prediction_finished_at - start_time.unwrap(); - - break; - } - } - } - anyhow::Ok(()) - } - }); + let prompt_format = options.zeta2.prompt_format; zeta.update(cx, |zeta, _cx| { let mut options = 
zeta.options().clone(); @@ -243,55 +147,194 @@ pub async fn zeta2_predict( zeta.set_options(options); })?; - if use_expected_context { - let context_excerpts_tasks = example - .example - .expected_context - .iter() - .flat_map(|section| { - section.alternatives[0].excerpts.iter().map(|excerpt| { - resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) - }) - }) - .collect::>(); - let context_excerpts_vec = futures::future::try_join_all(context_excerpts_tasks).await?; - - let mut context_excerpts = HashMap::default(); - for (buffer, mut excerpts) in context_excerpts_vec { - context_excerpts - .entry(buffer) - .or_insert(Vec::new()) - .append(&mut excerpts); - } + let prediction = match options.provider { + crate::PredictionProvider::Zeta2 => { + let mut debug_rx = zeta.update(cx, |zeta, _| zeta.debug_info())?; + + let debug_task = cx.background_spawn({ + let result = result.clone(); + async move { + let mut start_time = None; + let mut search_queries_generated_at = None; + let mut search_queries_executed_at = None; + while let Some(event) = debug_rx.next().await { + match event { + zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { + start_time = Some(info.timestamp); + fs::write( + example_run_dir.join("search_prompt.md"), + &info.search_prompt, + )?; + } + zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { + search_queries_generated_at = Some(info.timestamp); + fs::write( + example_run_dir.join("search_queries.json"), + serde_json::to_string_pretty(&info.search_queries).unwrap(), + )?; + } + zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { + search_queries_executed_at = Some(info.timestamp); + } + zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} + zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { + let prediction_started_at = Instant::now(); + start_time.get_or_insert(prediction_started_at); + let prompt = request.local_prompt.unwrap_or_default(); + fs::write(example_run_dir.join("prediction_prompt.md"), &prompt)?; + + { + let mut result = result.lock().unwrap(); + result.prompt_len = prompt.chars().count(); + + for included_file in request.request.included_files { + let insertions = + vec![(request.request.cursor_point, CURSOR_MARKER)]; + result.excerpts.extend(included_file.excerpts.iter().map( + |excerpt| { + ActualExcerpt { + path: included_file + .path + .components() + .skip(1) + .collect(), + text: String::from(excerpt.text.as_ref()), + } + }, + )); + write_codeblock( + &included_file.path, + included_file.excerpts.iter(), + if included_file.path == request.request.excerpt_path { + &insertions + } else { + &[] + }, + included_file.max_row, + false, + &mut result.excerpts_text, + ); + } + } + + let response = + request.response_rx.await?.0.map_err(|err| anyhow!(err))?; + let response = + zeta2::text_from_response(response).unwrap_or_default(); + let prediction_finished_at = Instant::now(); + fs::write( + example_run_dir.join("prediction_response.md"), + &response, + )?; + + let mut result = result.lock().unwrap(); + result.generated_len = response.chars().count(); + + if !options.use_expected_context { + result.planning_search_time = Some( + search_queries_generated_at.unwrap() - start_time.unwrap(), + ); + result.running_search_time = Some( + search_queries_executed_at.unwrap() + - search_queries_generated_at.unwrap(), + ); + } + result.prediction_time = + prediction_finished_at - prediction_started_at; + result.total_time = prediction_finished_at - start_time.unwrap(); + + break; + } + } + } + anyhow::Ok(()) + } + }); + + if 
options.use_expected_context { + let context_excerpts_tasks = example + .example + .expected_context + .iter() + .flat_map(|section| { + section.alternatives[0].excerpts.iter().map(|excerpt| { + resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) + }) + }) + .collect::>(); + let context_excerpts_vec = + futures::future::try_join_all(context_excerpts_tasks).await?; + + let mut context_excerpts = HashMap::default(); + for (buffer, mut excerpts) in context_excerpts_vec { + context_excerpts + .entry(buffer) + .or_insert(Vec::new()) + .append(&mut excerpts); + } - zeta.update(cx, |zeta, _cx| { - zeta.set_context(project.clone(), context_excerpts) - })?; - } else { - zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) - })? - .await?; - } + zeta.update(cx, |zeta, _cx| { + zeta.set_context(project.clone(), context_excerpts) + })?; + } else { + zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) + })? + .await?; + } - let prediction = zeta - .update(cx, |zeta, cx| { - zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) - })? - .await?; + let prediction = zeta + .update(cx, |zeta, cx| { + zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) + })? + .await? + .map(|prediction| (prediction.buffer, prediction.snapshot, prediction.edits)); + + debug_task.await?; + + prediction + } + crate::PredictionProvider::Sweep => sweep + .unwrap() + .update(cx, |sweep, cx| { + let mut recent_paths = Vec::new(); + for path in zeta + .read(cx) + .history_for_project(&project) + .rev() + .filter_map(|event| event.project_path(cx)) + { + if !recent_paths.contains(&path) { + recent_paths.push(path); + } + } - debug_task.await?; + sweep.request_completion( + &project, + recent_paths.into_iter(), + &cursor_buffer, + cursor_anchor, + cx, + ) + })? + .await? + .map( + |sweep_ai::EditPrediction { + edits, snapshot, .. 
+ }| { (cursor_buffer.clone(), snapshot, edits) }, + ), + }; let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); + result.diff = prediction - .map(|prediction| { - let old_text = prediction.snapshot.text(); - let new_text = prediction - .buffer + .map(|(buffer, snapshot, edits)| { + let old_text = snapshot.text(); + let new_text = buffer .update(cx, |buffer, cx| { let branch = buffer.branch(cx); branch.update(cx, |branch, cx| { - branch.edit(prediction.edits.iter().cloned(), None, cx); + branch.edit(edits.iter().cloned(), None, cx); branch.text() }) }) From 08692bb108a50abd23161ebed8e5712c0a5bd7e0 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 19 Nov 2025 22:47:51 +0100 Subject: [PATCH 0231/1030] git: Clear pending ops for remote repos (#43098) Release Notes: - N/A --- crates/project/src/git_store.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index f111ef17a0d17f0288ab739c4d02f51148b431f6..40ef0daa29390e229ab03eb840c39900163d4b6a 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -4999,6 +4999,7 @@ impl Repository { if update.is_last_update { self.snapshot.scan_id = update.scan_id; } + self.clear_pending_ops(cx); Ok(()) } From f312215e935b26722816d2f5c6967231098abcd8 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Wed, 19 Nov 2025 14:50:32 -0700 Subject: [PATCH 0232/1030] Potentially make zip test less flakey (#43099) Authored-By: Claude Release Notes: - N/A --- crates/util/src/archive.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/util/src/archive.rs b/crates/util/src/archive.rs index 9b58b16bedb2114503a3d87756ae4b2c4d460190..5a5dc777722c67d3e5bb96ed7115ccd2a71b8cbe 100644 --- a/crates/util/src/archive.rs +++ b/crates/util/src/archive.rs @@ -169,6 +169,7 @@ mod tests { writer.close().await?; out.flush().await?; + out.sync_all().await?; Ok(()) } From 5e21457f2162b23ee04c3587a5dd6164af978dac Mon Sep 17 00:00:00 2001 From: Cole Miller Date: Wed, 19 Nov 2025 16:53:40 -0500 Subject: [PATCH 0233/1030] Fix panic in the git panel when toggling `sort_by_path` (#43074) We call `entry_by_path` on the `bulk_staging` anchor entry at the beginning of `update_visible_entries`, but in the codepath where `sort_by_path` is toggled on or off, we clear entries without clearing `bulk_staging` or counts, causing that `entry_by_path` to do an out of bounds index. Fixed by clearing `bulk_staging` as well. Release Notes: - N/A --- crates/git_ui/src/git_panel.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 6889b880623da021bcac64b8cd96d70db817dd5b..cd56473dceb48d3c7da3629818f06d79d656ee03 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -373,6 +373,7 @@ impl GitPanel { let is_sort_by_path = GitPanelSettings::get_global(cx).sort_by_path; if is_sort_by_path != was_sort_by_path { this.entries.clear(); + this.bulk_staging.take(); this.update_visible_entries(window, cx); } was_sort_by_path = is_sort_by_path From c98b2d69448741fdf08828fc2814642ee6947c41 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 19 Nov 2025 23:00:58 +0100 Subject: [PATCH 0234/1030] multi_buffer: Typed `MultiBufferOffset` (#42707) This PR introduces a new `MultiBufferOffset` new type wrapping size. The goal of this is to make it clear at the type level when we are interacting with offsets of a multi buffer versus offsets of a language / text buffer. 
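For illustration, a minimal sketch of the pattern (not the actual definition in `crates/multi_buffer`; the derives and the `Add<usize>` impl are assumed from usages visible in this diff, such as `+ 1usize` and `.0`):

```
// Sketch only: a newtype over a plain numeric offset, so multibuffer offsets
// and text-buffer offsets can no longer be mixed up at compile time.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct MultiBufferOffset(pub usize);

impl std::ops::Add<usize> for MultiBufferOffset {
    type Output = Self;
    fn add(self, rhs: usize) -> Self {
        // Mirrors call sites like `MultiBufferOffset(...) + 1usize` below.
        Self(self.0 + rhs)
    }
}

fn main() {
    let cursor = MultiBufferOffset(13);
    let next = cursor + 1usize;
    // Typed ranges such as `MultiBufferOffset(0)..next` replace bare `usize`
    // ranges at call sites; `.0` recovers the raw value where still needed.
    let range = MultiBufferOffset(0)..next;
    assert_eq!(next.0, 14);
    assert!(range.contains(&cursor));
}
```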
This improves readability of things quite a bit by making it clear what kind of offsets one is working with while also reducing accidental bugs by using the wrong kin of offset for the wrong API. This PR also uncovered two minor bugs due to that. Does not yet introduce the MultiBufferPoint equivalent, that is for a follow up PR. Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/agent_ui/src/acp/message_editor.rs | 26 +- crates/agent_ui/src/acp/thread_view.rs | 5 +- crates/agent_ui/src/agent_configuration.rs | 16 +- crates/agent_ui/src/buffer_codegen.rs | 7 +- .../src/context_picker/completion_provider.rs | 4 +- crates/agent_ui/src/inline_assistant.rs | 17 +- crates/agent_ui/src/inline_prompt_editor.rs | 4 +- crates/agent_ui/src/text_thread_editor.rs | 58 +- .../assistant_text_thread/src/text_thread.rs | 2 +- .../collab/src/tests/channel_buffer_tests.rs | 14 +- crates/collab/src/tests/editor_tests.rs | 61 +- crates/collab/src/tests/following_tests.rs | 44 +- crates/collab_ui/src/collab_panel.rs | 2 +- .../src/copilot_completion_provider.rs | 5 +- crates/debugger_ui/src/debugger_panel.rs | 30 +- crates/debugger_ui/src/debugger_ui.rs | 13 +- .../src/session/running/console.rs | 20 +- crates/diagnostics/src/diagnostics_tests.rs | 5 +- crates/diagnostics/src/items.rs | 13 +- .../src/edit_prediction_button.rs | 12 +- crates/editor/benches/display_map.rs | 9 +- crates/editor/src/display_map.rs | 50 +- crates/editor/src/display_map/block_map.rs | 16 +- .../src/display_map/custom_highlights.rs | 38 +- crates/editor/src/display_map/fold_map.rs | 247 ++- crates/editor/src/display_map/inlay_map.rs | 223 ++- crates/editor/src/display_map/tab_map.rs | 5 +- crates/editor/src/editor.rs | 521 +++--- crates/editor/src/editor_tests.rs | 321 ++-- crates/editor/src/git/blame.rs | 2 +- .../editor/src/highlight_matching_bracket.rs | 3 +- crates/editor/src/hover_links.rs | 13 +- crates/editor/src/hover_popover.rs | 24 +- crates/editor/src/inlays/inlay_hints.rs | 18 +- crates/editor/src/items.rs | 8 +- crates/editor/src/jsx_tag_auto_close.rs | 17 +- crates/editor/src/linked_editing_ranges.rs | 5 +- crates/editor/src/movement.rs | 33 +- crates/editor/src/selections_collection.rs | 85 +- crates/editor/src/signature_help.rs | 16 +- crates/editor/src/test.rs | 17 +- .../src/test/editor_lsp_test_context.rs | 39 +- crates/editor/src/test/editor_test_context.rs | 31 +- crates/git_ui/src/commit_view.rs | 7 +- crates/git_ui/src/git_panel.rs | 8 +- crates/git_ui/src/project_diff.rs | 3 +- crates/git_ui/src/text_diff_view.rs | 8 +- crates/go_to_line/src/cursor_position.rs | 6 +- crates/journal/src/journal.rs | 2 +- crates/language/src/buffer.rs | 2 +- crates/language_tools/src/lsp_log_view.rs | 4 +- crates/language_tools/src/syntax_tree_view.rs | 10 +- .../src/markdown_preview_view.rs | 25 +- crates/multi_buffer/src/anchor.rs | 23 +- crates/multi_buffer/src/multi_buffer.rs | 1488 ++++++++++++----- crates/multi_buffer/src/multi_buffer_tests.rs | 334 ++-- crates/multi_buffer/src/transaction.rs | 34 +- crates/outline/src/outline.rs | 8 +- crates/project_panel/src/project_panel.rs | 6 +- .../project_panel/src/project_panel_tests.rs | 24 +- crates/repl/src/repl_editor.rs | 6 +- crates/rope/src/offset_utf16.rs | 1 - crates/rope/src/rope.rs | 52 +- crates/search/src/buffer_search.rs | 10 +- crates/tasks_ui/src/tasks_ui.rs | 4 +- crates/text/src/selection.rs | 12 +- crates/text/src/subscription.rs | 43 +- crates/text/src/text.rs | 4 +- crates/vim/src/helix.rs | 8 +- crates/vim/src/helix/boundary.rs | 81 +- 
crates/vim/src/helix/duplicate.rs | 4 +- crates/vim/src/helix/paste.rs | 2 +- crates/vim/src/motion.rs | 39 +- crates/vim/src/normal/increment.rs | 9 +- crates/vim/src/normal/paste.rs | 10 +- crates/vim/src/object.rs | 66 +- crates/vim/src/surrounds.rs | 12 +- crates/vim/src/test.rs | 7 +- crates/vim/src/vim.rs | 8 +- crates/vim/src/visual.rs | 12 +- crates/zed/src/zed.rs | 30 +- 81 files changed, 2952 insertions(+), 1559 deletions(-) diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 120fdb26b3937e92312a63056d67cc2f35809068..883a7424e47eaf412278995cb9f3d497fd4f5c67 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -13,7 +13,7 @@ use collections::{HashMap, HashSet}; use editor::{ Addon, Anchor, AnchorRangeExt, ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, EditorEvent, EditorMode, EditorSnapshot, EditorStyle, ExcerptId, FoldPlaceholder, Inlay, - MultiBuffer, ToOffset, + MultiBuffer, MultiBufferOffset, ToOffset, actions::Paste, code_context_menus::CodeContextMenu, display_map::{Crease, CreaseId, FoldId}, @@ -209,7 +209,7 @@ impl MessageEditor { let acp::AvailableCommandInput::Unstructured { mut hint } = available_command.input.clone()?; - let mut hint_pos = parsed_command.source_range.end + 1; + let mut hint_pos = MultiBufferOffset(parsed_command.source_range.end) + 1usize; if hint_pos > snapshot.len() { hint_pos = snapshot.len(); hint.insert(0, ' '); @@ -307,9 +307,9 @@ impl MessageEditor { return Task::ready(()); }; let excerpt_id = start_anchor.excerpt_id; - let end_anchor = snapshot - .buffer_snapshot() - .anchor_before(start_anchor.to_offset(&snapshot.buffer_snapshot()) + content_len + 1); + let end_anchor = snapshot.buffer_snapshot().anchor_before( + start_anchor.to_offset(&snapshot.buffer_snapshot()) + content_len + 1usize, + ); let crease = if let MentionUri::File { abs_path } = &mention_uri && let Some(extension) = abs_path.extension() @@ -739,8 +739,8 @@ impl MessageEditor { }; let crease_range = crease.range().to_offset(&snapshot.buffer_snapshot()); - if crease_range.start > ix { - let chunk = text[ix..crease_range.start].into(); + if crease_range.start.0 > ix { + let chunk = text[ix..crease_range.start.0].into(); chunks.push(chunk); } let chunk = match mention { @@ -808,7 +808,7 @@ impl MessageEditor { }), }; chunks.push(chunk); - ix = crease_range.end; + ix = crease_range.end.0; } if ix < text.len() { @@ -862,7 +862,7 @@ impl MessageEditor { let snapshot = editor.display_snapshot(cx); let cursor = editor.selections.newest::(&snapshot).head(); let offset = cursor.to_offset(&snapshot); - if offset > 0 { + if offset.0 > 0 { snapshot .buffer_snapshot() .reversed_chars_at(offset) @@ -1132,7 +1132,7 @@ impl MessageEditor { let cursor_anchor = editor.selections.newest_anchor().head(); let cursor_offset = cursor_anchor.to_offset(&editor_buffer.snapshot(cx)); let anchor = buffer.update(cx, |buffer, _cx| { - buffer.anchor_before(cursor_offset.min(buffer.len())) + buffer.anchor_before(cursor_offset.0.min(buffer.len())) }); let Some(workspace) = self.workspace.upgrade() else { return; @@ -1258,7 +1258,7 @@ impl MessageEditor { }); for (range, mention_uri, mention) in mentions { - let anchor = snapshot.anchor_before(range.start); + let anchor = snapshot.anchor_before(MultiBufferOffset(range.start)); let Some((crease_id, tx)) = insert_crease_for_mention( anchor.excerpt_id, anchor.text_anchor, @@ -1713,7 +1713,7 @@ mod tests { use agent::{HistoryStore, outline}; use 
agent_client_protocol as acp; use assistant_text_thread::TextThreadStore; - use editor::{AnchorRangeExt as _, Editor, EditorMode}; + use editor::{AnchorRangeExt as _, Editor, EditorMode, MultiBufferOffset}; use fs::FakeFs; use futures::StreamExt as _; use gpui::{ @@ -2682,7 +2682,7 @@ mod tests { editor.display_map.update(cx, |display_map, cx| { display_map .snapshot(cx) - .folds_in_range(0..snapshot.len()) + .folds_in_range(MultiBufferOffset(0)..snapshot.len()) .map(|fold| fold.range.to_point(&snapshot)) .collect() }) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 312ca136a2bf6f8b134a0dae0ab01bb71497a3b2..c2d3e5262354b57ae3c7e6dbd10189dedefebfe6 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -6066,6 +6066,7 @@ pub(crate) mod tests { use acp_thread::StubAgentConnection; use agent_client_protocol::SessionId; use assistant_text_thread::TextThreadStore; + use editor::MultiBufferOffset; use fs::FakeFs; use gpui::{EventEmitter, SemanticVersion, TestAppContext, VisualTestContext}; use project::Project; @@ -7234,7 +7235,7 @@ pub(crate) mod tests { Editor::for_buffer(buffer.clone(), Some(project.clone()), window, cx); editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges([8..15]); + selections.select_ranges([MultiBufferOffset(8)..MultiBufferOffset(15)]); }); editor @@ -7296,7 +7297,7 @@ pub(crate) mod tests { Editor::for_buffer(buffer.clone(), Some(project.clone()), window, cx); editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges([8..15]); + selections.select_ranges([MultiBufferOffset(8)..MultiBufferOffset(15)]); }); editor diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index 60f8606baf7bcbd55a7e4bd9ee6dc44f394319bc..8652f5cbd6c750da9260970ddc9ddcaef8337451 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -12,7 +12,7 @@ use client::zed_urls; use cloud_llm_client::{Plan, PlanV1, PlanV2}; use collections::HashMap; use context_server::ContextServerId; -use editor::{Editor, SelectionEffects, scroll::Autoscroll}; +use editor::{Editor, MultiBufferOffset, SelectionEffects, scroll::Autoscroll}; use extension::ExtensionManifest; use extension_host::ExtensionStore; use fs::Fs; @@ -1362,7 +1362,15 @@ async fn open_new_agent_servers_entry_in_settings_editor( .map(|(range, _)| range.clone()) .collect::>(); - item.edit(edits, cx); + item.edit( + edits.into_iter().map(|(range, s)| { + ( + MultiBufferOffset(range.start)..MultiBufferOffset(range.end), + s, + ) + }), + cx, + ); if let Some((unique_server_name, buffer)) = unique_server_name.zip(item.buffer().read(cx).as_singleton()) { @@ -1375,7 +1383,9 @@ async fn open_new_agent_servers_entry_in_settings_editor( window, cx, |selections| { - selections.select_ranges(vec![range]); + selections.select_ranges(vec![ + MultiBufferOffset(range.start)..MultiBufferOffset(range.end), + ]); }, ); } diff --git a/crates/agent_ui/src/buffer_codegen.rs b/crates/agent_ui/src/buffer_codegen.rs index 51c3c5ad3cb2b89c85e3a276ff34af5cc46115f9..0b7c1f61988979565aa9b55d4bbd245682b680df 100644 --- a/crates/agent_ui/src/buffer_codegen.rs +++ b/crates/agent_ui/src/buffer_codegen.rs @@ -429,7 +429,12 @@ impl CodegenAlternative { let prompt = self .builder - .generate_inline_transformation_prompt(user_prompt, language_name, buffer, range) + .generate_inline_transformation_prompt( + 
user_prompt, + language_name, + buffer, + range.start.0..range.end.0, + ) .context("generating content prompt")?; let context_task = self.context_store.as_ref().and_then(|context_store| { diff --git a/crates/agent_ui/src/context_picker/completion_provider.rs b/crates/agent_ui/src/context_picker/completion_provider.rs index 5dee769b4d0f0d2556b407721eac5dc70f647060..60e27b305437003b99326da29137727faaaf5c7c 100644 --- a/crates/agent_ui/src/context_picker/completion_provider.rs +++ b/crates/agent_ui/src/context_picker/completion_provider.rs @@ -1082,7 +1082,7 @@ impl MentionCompletion { #[cfg(test)] mod tests { use super::*; - use editor::AnchorRangeExt; + use editor::{AnchorRangeExt, MultiBufferOffset}; use gpui::{EventEmitter, FocusHandle, Focusable, TestAppContext, VisualTestContext}; use project::{Project, ProjectPath}; use serde_json::json; @@ -1677,7 +1677,7 @@ mod tests { editor.display_map.update(cx, |display_map, cx| { display_map .snapshot(cx) - .folds_in_range(0..snapshot.len()) + .folds_in_range(MultiBufferOffset(0)..snapshot.len()) .map(|fold| fold.range.to_point(&snapshot)) .collect() }) diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index b05dba59e6b19fa5091903882748de853cd9cb93..05cdd42544419969ce76ec168e671d5b2ac2402e 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -16,6 +16,7 @@ use agent_settings::AgentSettings; use anyhow::{Context as _, Result}; use client::telemetry::Telemetry; use collections::{HashMap, HashSet, VecDeque, hash_map}; +use editor::MultiBufferOffset; use editor::RowExt; use editor::SelectionEffects; use editor::scroll::ScrollOffset; @@ -803,7 +804,7 @@ impl InlineAssistant { ( editor .selections - .newest::(&editor.display_snapshot(cx)), + .newest::(&editor.display_snapshot(cx)), editor.buffer().read(cx).snapshot(cx), ) }); @@ -836,7 +837,7 @@ impl InlineAssistant { ( editor .selections - .newest::(&editor.display_snapshot(cx)), + .newest::(&editor.display_snapshot(cx)), editor.buffer().read(cx).snapshot(cx), ) }); @@ -853,12 +854,14 @@ impl InlineAssistant { } else { let distance_from_selection = assist_range .start - .abs_diff(selection.start) - .min(assist_range.start.abs_diff(selection.end)) + .0 + .abs_diff(selection.start.0) + .min(assist_range.start.0.abs_diff(selection.end.0)) + assist_range .end - .abs_diff(selection.start) - .min(assist_range.end.abs_diff(selection.end)); + .0 + .abs_diff(selection.start.0) + .min(assist_range.end.0.abs_diff(selection.end.0)); match closest_assist_fallback { Some((_, old_distance)) => { if distance_from_selection < old_distance { @@ -935,7 +938,7 @@ impl InlineAssistant { EditorEvent::Edited { transaction_id } => { let buffer = editor.read(cx).buffer().read(cx); let edited_ranges = - buffer.edited_ranges_for_transaction::(*transaction_id, cx); + buffer.edited_ranges_for_transaction::(*transaction_id, cx); let snapshot = buffer.snapshot(cx); for assist_id in editor_assists.assist_ids.clone() { diff --git a/crates/agent_ui/src/inline_prompt_editor.rs b/crates/agent_ui/src/inline_prompt_editor.rs index 2d0538a9172ae69e50b2e4208e540662e7d838b2..3a0866f47063a6dc5f68df1c36c3fdb0e07d2b74 100644 --- a/crates/agent_ui/src/inline_prompt_editor.rs +++ b/crates/agent_ui/src/inline_prompt_editor.rs @@ -2,7 +2,7 @@ use agent::HistoryStore; use collections::{HashMap, VecDeque}; use editor::actions::Paste; use editor::display_map::{CreaseId, EditorMargins}; -use editor::{Addon, AnchorRangeExt as _}; +use editor::{Addon, 
AnchorRangeExt as _, MultiBufferOffset}; use editor::{ ContextMenuOptions, Editor, EditorElement, EditorEvent, EditorMode, EditorStyle, MultiBuffer, actions::{MoveDown, MoveUp}, @@ -1165,7 +1165,7 @@ impl GenerationMode { /// Stored information that can be used to resurrect a context crease when creating an editor for a past message. #[derive(Clone, Debug)] pub struct MessageCrease { - pub range: Range, + pub range: Range, pub icon_path: SharedString, pub label: SharedString, /// None for a deserialized message, Some otherwise. diff --git a/crates/agent_ui/src/text_thread_editor.rs b/crates/agent_ui/src/text_thread_editor.rs index 84f04f8821b2dd540e54f41f567a0b7735116875..daf5cc2c3d770f4e9bdad4ea882b1ad2afc93a4e 100644 --- a/crates/agent_ui/src/text_thread_editor.rs +++ b/crates/agent_ui/src/text_thread_editor.rs @@ -9,8 +9,8 @@ use assistant_slash_commands::{DefaultSlashCommand, FileSlashCommand, selections use client::{proto, zed_urls}; use collections::{BTreeSet, HashMap, HashSet, hash_map}; use editor::{ - Anchor, Editor, EditorEvent, MenuEditPredictionsPolicy, MultiBuffer, MultiBufferSnapshot, - RowExt, ToOffset as _, ToPoint, + Anchor, Editor, EditorEvent, MenuEditPredictionsPolicy, MultiBuffer, MultiBufferOffset, + MultiBufferSnapshot, RowExt, ToOffset as _, ToPoint, actions::{MoveToEndOfLine, Newline, ShowCompletions}, display_map::{ BlockPlacement, BlockProperties, BlockStyle, Crease, CreaseMetadata, CustomBlockId, FoldId, @@ -390,7 +390,7 @@ impl TextThreadEditor { let cursor = user_message .start .to_offset(self.text_thread.read(cx).buffer().read(cx)); - cursor..cursor + MultiBufferOffset(cursor)..MultiBufferOffset(cursor) }; self.editor.update(cx, |editor, cx| { editor.change_selections(Default::default(), window, cx, |selections| { @@ -431,7 +431,7 @@ impl TextThreadEditor { let cursors = self.cursors(cx); self.text_thread.update(cx, |text_thread, cx| { let messages = text_thread - .messages_for_offsets(cursors, cx) + .messages_for_offsets(cursors.into_iter().map(|cursor| cursor.0), cx) .into_iter() .map(|message| message.id) .collect(); @@ -439,9 +439,11 @@ impl TextThreadEditor { }); } - fn cursors(&self, cx: &mut App) -> Vec { + fn cursors(&self, cx: &mut App) -> Vec { let selections = self.editor.update(cx, |editor, cx| { - editor.selections.all::(&editor.display_snapshot(cx)) + editor + .selections + .all::(&editor.display_snapshot(cx)) }); selections .into_iter() @@ -1580,7 +1582,11 @@ impl TextThreadEditor { fn get_clipboard_contents( &mut self, cx: &mut Context, - ) -> (String, CopyMetadata, Vec>) { + ) -> ( + String, + CopyMetadata, + Vec>, + ) { let (mut selection, creases) = self.editor.update(cx, |editor, cx| { let mut selection = editor .selections @@ -1638,30 +1644,26 @@ impl TextThreadEditor { // If selection is empty, we want to copy the entire line if selection.range().is_empty() { - let snapshot = text_thread.buffer().read(cx).snapshot(); + let snapshot = self.editor.read(cx).buffer().read(cx).snapshot(cx); let point = snapshot.offset_to_point(selection.range().start); selection.start = snapshot.point_to_offset(Point::new(point.row, 0)); selection.end = snapshot .point_to_offset(cmp::min(Point::new(point.row + 1, 0), snapshot.max_point())); - for chunk in text_thread - .buffer() - .read(cx) - .text_for_range(selection.range()) - { + for chunk in snapshot.text_for_range(selection.range()) { text.push_str(chunk); } } else { for message in text_thread.messages(cx) { - if message.offset_range.start >= selection.range().end { + if message.offset_range.start >= 
selection.range().end.0 { break; - } else if message.offset_range.end >= selection.range().start { - let range = cmp::max(message.offset_range.start, selection.range().start) - ..cmp::min(message.offset_range.end, selection.range().end); + } else if message.offset_range.end >= selection.range().start.0 { + let range = cmp::max(message.offset_range.start, selection.range().start.0) + ..cmp::min(message.offset_range.end, selection.range().end.0); if !range.is_empty() { for chunk in text_thread.buffer().read(cx).text_for_range(range) { text.push_str(chunk); } - if message.offset_range.end < selection.range().end { + if message.offset_range.end < selection.range().end.0 { text.push('\n'); } } @@ -1743,7 +1745,7 @@ impl TextThreadEditor { self.editor.update(cx, |editor, cx| { let paste_position = editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .head(); editor.paste(action, window, cx); @@ -1791,13 +1793,16 @@ impl TextThreadEditor { editor.transact(window, cx, |editor, _window, cx| { let edits = editor .selections - .all::(&editor.display_snapshot(cx)) + .all::(&editor.display_snapshot(cx)) .into_iter() .map(|selection| (selection.start..selection.end, "\n")); editor.edit(edits, cx); let snapshot = editor.buffer().read(cx).snapshot(cx); - for selection in editor.selections.all::(&editor.display_snapshot(cx)) { + for selection in editor + .selections + .all::(&editor.display_snapshot(cx)) + { image_positions.push(snapshot.anchor_before(selection.end)); } }); @@ -1889,7 +1894,7 @@ impl TextThreadEditor { let range = selection .map(|endpoint| endpoint.to_offset(&buffer)) .range(); - text_thread.split_message(range, cx); + text_thread.split_message(range.start.0..range.end.0, cx); } }); } @@ -2963,7 +2968,7 @@ pub fn make_lsp_adapter_delegate( #[cfg(test)] mod tests { use super::*; - use editor::SelectionEffects; + use editor::{MultiBufferOffset, SelectionEffects}; use fs::FakeFs; use gpui::{App, TestAppContext, VisualTestContext}; use indoc::indoc; @@ -3169,15 +3174,16 @@ mod tests { text_thread: &Entity, message_ix: usize, cx: &mut TestAppContext, - ) -> Range { - text_thread.update(cx, |text_thread, cx| { + ) -> Range { + let range = text_thread.update(cx, |text_thread, cx| { text_thread .messages(cx) .nth(message_ix) .unwrap() .anchor_range .to_offset(&text_thread.buffer().read(cx).snapshot()) - }) + }); + MultiBufferOffset(range.start)..MultiBufferOffset(range.end) } fn assert_copy_paste_text_thread_editor( diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 9ad383cdfd43eed236268349e2ff97c34a0178c0..ae5fe25d430e80b7be68000162b6f0b21807e2a2 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -667,7 +667,7 @@ pub struct TextThread { buffer: Entity, pub(crate) parsed_slash_commands: Vec, invoked_slash_commands: HashMap, - edits_since_last_parse: language::Subscription, + edits_since_last_parse: language::Subscription, slash_commands: Arc, pub(crate) slash_command_output_sections: Vec>, thought_process_output_sections: Vec>, diff --git a/crates/collab/src/tests/channel_buffer_tests.rs b/crates/collab/src/tests/channel_buffer_tests.rs index 8e857f4f02505998f226b543b843e68222016aeb..62c61d3cf0b22e7adad5ada7ec46598fbadf673c 100644 --- a/crates/collab/src/tests/channel_buffer_tests.rs +++ b/crates/collab/src/tests/channel_buffer_tests.rs @@ -7,7 +7,7 @@ use channel::ACKNOWLEDGE_DEBOUNCE_INTERVAL; use 
client::{Collaborator, ParticipantIndex, UserId}; use collab_ui::channel_view::ChannelView; use collections::HashMap; -use editor::{Anchor, Editor, ToOffset}; +use editor::{Anchor, Editor, MultiBufferOffset, ToOffset}; use futures::future; use gpui::{BackgroundExecutor, Context, Entity, TestAppContext, Window}; use rpc::{RECEIVE_TIMEOUT, proto::PeerId}; @@ -180,7 +180,7 @@ async fn test_channel_notes_participant_indices( notes.editor.update(cx, |editor, cx| { editor.insert("a", window, cx); editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges(vec![0..1]); + selections.select_ranges(vec![MultiBufferOffset(0)..MultiBufferOffset(1)]); }); }); }); @@ -190,7 +190,7 @@ async fn test_channel_notes_participant_indices( editor.move_down(&Default::default(), window, cx); editor.insert("b", window, cx); editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges(vec![1..2]); + selections.select_ranges(vec![MultiBufferOffset(1)..MultiBufferOffset(2)]); }); }); }); @@ -200,7 +200,7 @@ async fn test_channel_notes_participant_indices( editor.move_down(&Default::default(), window, cx); editor.insert("c", window, cx); editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges(vec![2..3]); + selections.select_ranges(vec![MultiBufferOffset(2)..MultiBufferOffset(3)]); }); }); }); @@ -287,12 +287,12 @@ async fn test_channel_notes_participant_indices( editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges(vec![0..1]); + selections.select_ranges(vec![MultiBufferOffset(0)..MultiBufferOffset(1)]); }); }); editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(Default::default(), window, cx, |selections| { - selections.select_ranges(vec![2..3]); + selections.select_ranges(vec![MultiBufferOffset(2)..MultiBufferOffset(3)]); }); }); executor.run_until_parked(); @@ -327,7 +327,7 @@ fn assert_remote_selections( let end = s.selection.end.to_offset(snapshot.buffer_snapshot()); let user_id = collaborators.get(&peer_id).unwrap().user_id; let participant_index = hub.user_participant_indices(cx).get(&user_id).copied(); - (participant_index, start..end) + (participant_index, start.0..end.0) }) .collect::>(); assert_eq!( diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index e015550df9482c5850396b8bcf10e9cee24d5b76..33f07bfb388763875565bc9e37bda363f02600f0 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -4,7 +4,8 @@ use crate::{ }; use call::ActiveCall; use editor::{ - DocumentColorsRenderMode, Editor, FETCH_COLORS_DEBOUNCE_TIMEOUT, RowInfo, SelectionEffects, + DocumentColorsRenderMode, Editor, FETCH_COLORS_DEBOUNCE_TIMEOUT, MultiBufferOffset, RowInfo, + SelectionEffects, actions::{ ConfirmCodeAction, ConfirmCompletion, ConfirmRename, ContextMenuFirst, ExpandMacroRecursively, MoveToEnd, Redo, Rename, SelectAll, ToggleCodeActions, Undo, @@ -381,7 +382,7 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu // Type a completion trigger character as the guest. 
editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input(".", window, cx); }); @@ -503,7 +504,7 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu // resolved editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([46..46]) + s.select_ranges([MultiBufferOffset(46)..MultiBufferOffset(46)]) }); editor.handle_input("; a", window, cx); editor.handle_input(".", window, cx); @@ -601,7 +602,7 @@ async fn test_collaborating_with_completion(cx_a: &mut TestAppContext, cx_b: &mu // Add another completion trigger to test the second language server editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([68..68]) + s.select_ranges([MultiBufferOffset(68)..MultiBufferOffset(68)]) }); editor.handle_input("; b", window, cx); editor.handle_input(".", window, cx); @@ -950,7 +951,7 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T // Move cursor to a location that can be renamed. let prepare_rename = editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([7..7]) + s.select_ranges([MultiBufferOffset(7)..MultiBufferOffset(7)]) }); editor.rename(&Rename, window, cx).unwrap() }); @@ -977,17 +978,17 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T let buffer = editor.buffer().read(cx).snapshot(cx); assert_eq!( rename.range.start.to_offset(&buffer)..rename.range.end.to_offset(&buffer), - 6..9 + MultiBufferOffset(6)..MultiBufferOffset(9) ); rename.editor.update(cx, |rename_editor, cx| { - let rename_selection = rename_editor.selections.newest::(&rename_editor.display_snapshot(cx)); + let rename_selection = rename_editor.selections.newest::(&rename_editor.display_snapshot(cx)); assert_eq!( rename_selection.range(), - 0..3, + MultiBufferOffset(0)..MultiBufferOffset(3), "Rename that was triggered from zero selection caret, should propose the whole word." 
); rename_editor.buffer().update(cx, |rename_buffer, cx| { - rename_buffer.edit([(0..3, "THREE")], None, cx); + rename_buffer.edit([(MultiBufferOffset(0)..MultiBufferOffset(3), "THREE")], None, cx); }); }); }); @@ -998,7 +999,7 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T }); let prepare_rename = editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([7..8]) + s.select_ranges([MultiBufferOffset(7)..MultiBufferOffset(8)]) }); editor.rename(&Rename, window, cx).unwrap() }); @@ -1025,16 +1026,16 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T let buffer = editor.buffer().read(cx).snapshot(cx); let lsp_rename_start = rename.range.start.to_offset(&buffer); let lsp_rename_end = rename.range.end.to_offset(&buffer); - assert_eq!(lsp_rename_start..lsp_rename_end, 6..9); + assert_eq!(lsp_rename_start..lsp_rename_end, MultiBufferOffset(6)..MultiBufferOffset(9)); rename.editor.update(cx, |rename_editor, cx| { - let rename_selection = rename_editor.selections.newest::(&rename_editor.display_snapshot(cx)); + let rename_selection = rename_editor.selections.newest::(&rename_editor.display_snapshot(cx)); assert_eq!( rename_selection.range(), - 1..2, + MultiBufferOffset(1)..MultiBufferOffset(2), "Rename that was triggered from a selection, should have the same selection range in the rename proposal" ); rename_editor.buffer().update(cx, |rename_buffer, cx| { - rename_buffer.edit([(0..lsp_rename_end - lsp_rename_start, "THREE")], None, cx); + rename_buffer.edit([(MultiBufferOffset(0)..MultiBufferOffset(lsp_rename_end - lsp_rename_start), "THREE")], None, cx); }); }); }); @@ -1237,7 +1238,7 @@ async fn test_slow_lsp_server(cx_a: &mut TestAppContext, cx_b: &mut TestAppConte // Move cursor to a location, this should trigger the code lens call. 
editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([7..7]) + s.select_ranges([MultiBufferOffset(7)..MultiBufferOffset(7)]) }); }); let () = request_started_rx.next().await.unwrap(); @@ -1259,7 +1260,7 @@ async fn test_slow_lsp_server(cx_a: &mut TestAppContext, cx_b: &mut TestAppConte editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1]) + s.select_ranges([MultiBufferOffset(1)..MultiBufferOffset(1)]) }); }); let () = request_started_rx.next().await.unwrap(); @@ -1281,7 +1282,7 @@ async fn test_slow_lsp_server(cx_a: &mut TestAppContext, cx_b: &mut TestAppConte editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([2..2]) + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(2)]) }); }); let () = request_started_rx.next().await.unwrap(); @@ -1719,7 +1720,7 @@ async fn test_on_input_format_from_host_to_guest( cx_a.focus(&editor_a); editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input(">", window, cx); }); @@ -1828,7 +1829,7 @@ async fn test_on_input_format_from_guest_to_host( cx_b.focus(&editor_b); editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input(":", window, cx); }); @@ -2056,7 +2057,7 @@ async fn test_mutual_editor_inlay_hint_cache_update( let after_client_edit = edits_made.fetch_add(1, atomic::Ordering::Release) + 1; editor_b.update_in(cx_b, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13].clone()) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)].clone()) }); editor.handle_input(":", window, cx); }); @@ -2080,7 +2081,7 @@ async fn test_mutual_editor_inlay_hint_cache_update( let after_host_edit = edits_made.fetch_add(1, atomic::Ordering::Release) + 1; editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input("a change to increment both buffers' versions", window, cx); }); @@ -2520,7 +2521,7 @@ async fn test_lsp_document_color(cx_a: &mut TestAppContext, cx_b: &mut TestAppCo editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13].clone()) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)].clone()) }); editor.handle_input(":", window, cx); }); @@ -2957,7 +2958,7 @@ async fn test_lsp_pull_diagnostics( editor_a_main.update(cx_a, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); assert_eq!( all_diagnostics.len(), @@ -3086,7 +3087,7 @@ async fn test_lsp_pull_diagnostics( editor_a_main.update(cx_a, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - 
.diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); assert_eq!( all_diagnostics.len(), @@ -3133,7 +3134,7 @@ async fn test_lsp_pull_diagnostics( editor_b_main.update(cx_b, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); assert_eq!( all_diagnostics.len(), @@ -3180,7 +3181,7 @@ async fn test_lsp_pull_diagnostics( editor_b_lib.update(cx_b, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); let expected_messages = [ expected_pull_diagnostic_lib_message, @@ -3247,7 +3248,7 @@ async fn test_lsp_pull_diagnostics( editor_b_lib.update(cx_b, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); let expected_messages = [ expected_workspace_pull_diagnostics_lib_message, @@ -3382,7 +3383,7 @@ async fn test_lsp_pull_diagnostics( editor_b_lib.update(cx_b, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); let expected_messages = [ expected_workspace_pull_diagnostics_lib_message, @@ -3400,7 +3401,7 @@ async fn test_lsp_pull_diagnostics( editor_b_main.update(cx_b, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); assert_eq!(all_diagnostics.len(), 2); @@ -3419,7 +3420,7 @@ async fn test_lsp_pull_diagnostics( editor_a_main.update(cx_a, |editor, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); let all_diagnostics = snapshot - .diagnostics_in_range(0..snapshot.len()) + .diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) .collect::>(); assert_eq!(all_diagnostics.len(), 2); let expected_messages = [ diff --git a/crates/collab/src/tests/following_tests.rs b/crates/collab/src/tests/following_tests.rs index 07cf866a3513d27894307216e904b130eb023e22..f3827b6f1195392ddedcab4f45854a8e9790dc28 100644 --- a/crates/collab/src/tests/following_tests.rs +++ b/crates/collab/src/tests/following_tests.rs @@ -6,7 +6,7 @@ use collab_ui::{ channel_view::ChannelView, notifications::project_shared_notification::ProjectSharedNotification, }; -use editor::{Editor, MultiBuffer, PathKey, SelectionEffects}; +use editor::{Editor, MultiBuffer, MultiBufferOffset, PathKey, SelectionEffects}; use gpui::{ AppContext as _, BackgroundExecutor, BorrowAppContext, Entity, SharedString, TestAppContext, VisualContext, VisualTestContext, point, @@ -124,7 +124,7 @@ async fn test_basic_following( editor.select_left(&Default::default(), window, cx); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![3..2] + vec![MultiBufferOffset(3)..MultiBufferOffset(2)] ); }); editor_a2.update_in(cx_a, |editor, window, cx| { @@ -133,7 +133,7 @@ async fn test_basic_following( editor.select_left(&Default::default(), window, cx); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![2..1] + 
vec![MultiBufferOffset(2)..MultiBufferOffset(1)] ); }); @@ -158,13 +158,13 @@ async fn test_basic_following( editor_b2.update(cx_b, |editor, cx| editor .selections .ranges(&editor.display_snapshot(cx))), - vec![2..1] + vec![MultiBufferOffset(2)..MultiBufferOffset(1)] ); assert_eq!( editor_b1.update(cx_b, |editor, cx| editor .selections .ranges(&editor.display_snapshot(cx))), - vec![3..3] + vec![MultiBufferOffset(3)..MultiBufferOffset(3)] ); executor.run_until_parked(); @@ -386,7 +386,10 @@ async fn test_basic_following( // Changes to client A's editor are reflected on client B. editor_a1.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1, 2..2]) + s.select_ranges([ + MultiBufferOffset(1)..MultiBufferOffset(1), + MultiBufferOffset(2)..MultiBufferOffset(2), + ]) }); }); executor.advance_clock(workspace::item::LEADER_UPDATE_THROTTLE); @@ -396,7 +399,10 @@ async fn test_basic_following( editor_b1.update(cx_b, |editor, cx| { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - &[1..1, 2..2] + &[ + MultiBufferOffset(1)..MultiBufferOffset(1), + MultiBufferOffset(2)..MultiBufferOffset(2) + ] ); }); @@ -408,7 +414,7 @@ async fn test_basic_following( editor_a1.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([3..3]) + s.select_ranges([MultiBufferOffset(3)..MultiBufferOffset(3)]) }); editor.set_scroll_position(point(0., 100.), window, cx); }); @@ -417,7 +423,7 @@ async fn test_basic_following( editor_b1.update(cx_b, |editor, cx| { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - &[3..3] + &[MultiBufferOffset(3)..MultiBufferOffset(3)] ); }); @@ -1694,7 +1700,7 @@ async fn test_following_stops_on_unshare(cx_a: &mut TestAppContext, cx_b: &mut T // b should follow a to position 1 editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1]) + s.select_ranges([MultiBufferOffset(1)..MultiBufferOffset(1)]) }) }); cx_a.executor() @@ -1703,7 +1709,7 @@ async fn test_following_stops_on_unshare(cx_a: &mut TestAppContext, cx_b: &mut T editor_b.update(cx_b, |editor, cx| { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![1..1] + vec![MultiBufferOffset(1)..MultiBufferOffset(1)] ) }); @@ -1719,7 +1725,7 @@ async fn test_following_stops_on_unshare(cx_a: &mut TestAppContext, cx_b: &mut T // b should not follow a to position 2 editor_a.update_in(cx_a, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([2..2]) + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(2)]) }) }); cx_a.executor() @@ -1728,7 +1734,7 @@ async fn test_following_stops_on_unshare(cx_a: &mut TestAppContext, cx_b: &mut T editor_b.update(cx_b, |editor, cx| { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![1..1] + vec![MultiBufferOffset(1)..MultiBufferOffset(1)] ) }); cx_b.update(|_, cx| { @@ -1829,7 +1835,7 @@ async fn test_following_into_excluded_file( editor.select_left(&Default::default(), window, cx); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![3..2] + vec![MultiBufferOffset(3)..MultiBufferOffset(2)] ); }); editor_for_excluded_a.update_in(cx_a, |editor, window, cx| { @@ -1838,7 +1844,7 @@ async fn test_following_into_excluded_file( editor.select_left(&Default::default(), window, cx); assert_eq!( 
editor.selections.ranges(&editor.display_snapshot(cx)), - vec![18..17] + vec![MultiBufferOffset(18)..MultiBufferOffset(17)] ); }); @@ -1864,7 +1870,7 @@ async fn test_following_into_excluded_file( editor_for_excluded_b.update(cx_b, |editor, cx| editor .selections .ranges(&editor.display_snapshot(cx))), - vec![18..17] + vec![MultiBufferOffset(18)..MultiBufferOffset(17)] ); editor_for_excluded_a.update_in(cx_a, |editor, window, cx| { @@ -2040,7 +2046,7 @@ async fn test_following_to_channel_notes_without_a_shared_project( notes.editor.update(cx, |editor, cx| { editor.insert("Hello from A.", window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| { - selections.select_ranges(vec![3..4]); + selections.select_ranges(vec![MultiBufferOffset(3)..MultiBufferOffset(4)]); }); }); }); @@ -2076,8 +2082,8 @@ async fn test_following_to_channel_notes_without_a_shared_project( assert_eq!( editor .selections - .ranges::(&editor.display_snapshot(cx)), - &[3..4] + .ranges::(&editor.display_snapshot(cx)), + &[MultiBufferOffset(3)..MultiBufferOffset(4)] ); }) }); diff --git a/crates/collab_ui/src/collab_panel.rs b/crates/collab_ui/src/collab_panel.rs index b57d0279545aed8f896179968c877efb72e7c772..8f2e959b17293af66279793e73fefceea413ca49 100644 --- a/crates/collab_ui/src/collab_panel.rs +++ b/crates/collab_ui/src/collab_panel.rs @@ -1496,7 +1496,7 @@ impl CollabPanel { fn reset_filter_editor_text(&mut self, window: &mut Window, cx: &mut Context) -> bool { self.filter_editor.update(cx, |editor, cx| { - if editor.buffer().read(cx).len(cx) > 0 { + if editor.buffer().read(cx).len(cx).0 > 0 { editor.set_text("", window, cx); true } else { diff --git a/crates/copilot/src/copilot_completion_provider.rs b/crates/copilot/src/copilot_completion_provider.rs index ba8ae82508896884f1b6e9741e7ccd6fd78dce76..30ef6de07ec92f66cc888b52a540cf9c7e673bb4 100644 --- a/crates/copilot/src/copilot_completion_provider.rs +++ b/crates/copilot/src/copilot_completion_provider.rs @@ -270,7 +270,7 @@ fn common_prefix, T2: Iterator>(a: T1, b: mod tests { use super::*; use editor::{ - Editor, ExcerptRange, MultiBuffer, SelectionEffects, + Editor, ExcerptRange, MultiBuffer, MultiBufferOffset, SelectionEffects, test::editor_lsp_test_context::EditorLspTestContext, }; use fs::FakeFs; @@ -1081,8 +1081,9 @@ mod tests { vec![complete_from_marker, replace_range_marker.clone()], ); + let range = marked_ranges.remove(&replace_range_marker).unwrap()[0].clone(); let replace_range = - cx.to_lsp_range(marked_ranges.remove(&replace_range_marker).unwrap()[0].clone()); + cx.to_lsp_range(MultiBufferOffset(range.start)..MultiBufferOffset(range.end)); let mut request = cx.set_request_handler::(move |url, params, _| { diff --git a/crates/debugger_ui/src/debugger_panel.rs b/crates/debugger_ui/src/debugger_panel.rs index 5379591f8ed256d2703a8e61b09925e9743ed341..f2f8b5be32c696c61a50c71090f130fcf34ef271 100644 --- a/crates/debugger_ui/src/debugger_panel.rs +++ b/crates/debugger_ui/src/debugger_panel.rs @@ -14,13 +14,12 @@ use collections::IndexMap; use dap::adapters::DebugAdapterName; use dap::{DapRegistry, StartDebuggingRequestArguments}; use dap::{client::SessionId, debugger_settings::DebuggerSettings}; -use editor::Editor; +use editor::{Editor, MultiBufferOffset, ToPoint}; use gpui::{ Action, App, AsyncWindowContext, ClipboardItem, Context, DismissEvent, Entity, EntityId, EventEmitter, FocusHandle, Focusable, MouseButton, MouseDownEvent, Point, Subscription, Task, WeakEntity, anchored, deferred, }; -use text::ToPoint as _; use 
itertools::Itertools as _; use language::Buffer; @@ -1216,11 +1215,11 @@ impl DebugPanel { let mut last_offset = None; while let Some(mat) = matches.next() { if let Some(pos) = mat.captures.first().map(|m| m.node.byte_range().end) { - last_offset = Some(pos) + last_offset = Some(MultiBufferOffset(pos)) } } let mut edits = Vec::new(); - let mut cursor_position = 0; + let mut cursor_position = MultiBufferOffset(0); if let Some(pos) = last_offset { edits.push((pos..pos, format!(",\n{new_scenario}"))); @@ -1234,24 +1233,25 @@ impl DebugPanel { if let Some(mat) = matches.next() { if let Some(pos) = mat.captures.first().map(|m| m.node.byte_range().end - 1) { - edits.push((pos..pos, format!("\n{new_scenario}\n"))); - cursor_position = pos + "\n ".len(); + edits.push(( + MultiBufferOffset(pos)..MultiBufferOffset(pos), + format!("\n{new_scenario}\n"), + )); + cursor_position = MultiBufferOffset(pos) + "\n ".len(); } } else { - edits.push((0..0, format!("[\n{}\n]", new_scenario))); - cursor_position = "[\n ".len(); + edits.push(( + MultiBufferOffset(0)..MultiBufferOffset(0), + format!("[\n{}\n]", new_scenario), + )); + cursor_position = MultiBufferOffset("[\n ".len()); } } editor.transact(window, cx, |editor, window, cx| { editor.edit(edits, cx); - let snapshot = editor - .buffer() - .read(cx) - .as_singleton() - .unwrap() - .read(cx) - .snapshot(); + let snapshot = editor.buffer().read(cx).read(cx); let point = cursor_position.to_point(&snapshot); + drop(snapshot); editor.go_to_singleton_buffer_point(point, window, cx); }); Ok(editor.save(SaveOptions::default(), project, window, cx)) diff --git a/crates/debugger_ui/src/debugger_ui.rs b/crates/debugger_ui/src/debugger_ui.rs index 0e38719c19b636918b441440568e8588e29b039e..a9abb50bb68851334285b05064176e0347474014 100644 --- a/crates/debugger_ui/src/debugger_ui.rs +++ b/crates/debugger_ui/src/debugger_ui.rs @@ -1,7 +1,7 @@ use std::any::TypeId; use debugger_panel::DebugPanel; -use editor::Editor; +use editor::{Editor, MultiBufferOffsetUtf16}; use gpui::{Action, App, DispatchPhase, EntityInputHandler, actions}; use new_process_modal::{NewProcessModal, NewProcessMode}; use onboarding_modal::DebuggerOnboardingModal; @@ -390,11 +390,14 @@ pub fn init(cx: &mut App) { maybe!({ let text = editor .update(cx, |editor, cx| { + let range = editor + .selections + .newest::( + &editor.display_snapshot(cx), + ) + .range(); editor.text_for_range( - editor - .selections - .newest(&editor.display_snapshot(cx)) - .range(), + range.start.0.0..range.end.0.0, &mut None, window, cx, diff --git a/crates/debugger_ui/src/session/running/console.rs b/crates/debugger_ui/src/session/running/console.rs index 23b3ca481722c7869caf43958754889f92dc2fe5..f72d92e038ce234327e29776f923c27d6592cf16 100644 --- a/crates/debugger_ui/src/session/running/console.rs +++ b/crates/debugger_ui/src/session/running/console.rs @@ -8,7 +8,7 @@ use collections::HashMap; use dap::{CompletionItem, CompletionItemType, OutputEvent}; use editor::{ Bias, CompletionProvider, Editor, EditorElement, EditorMode, EditorStyle, ExcerptId, - SizingBehavior, + MultiBufferOffset, SizingBehavior, }; use fuzzy::StringMatchCandidate; use gpui::{ @@ -161,7 +161,9 @@ impl Console { ) -> Task> { self.console.update(cx, |_, cx| { cx.spawn_in(window, async move |console, cx| { - let mut len = console.update(cx, |this, cx| this.buffer().read(cx).len(cx))?; + let mut len = console + .update(cx, |this, cx| this.buffer().read(cx).len(cx))? 
+ .0; let (output, spans, background_spans) = cx .background_spawn(async move { let mut all_spans = Vec::new(); @@ -227,8 +229,8 @@ impl Console { for (range, color) in spans { let Some(color) = color else { continue }; let start_offset = range.start; - let range = - buffer.anchor_after(range.start)..buffer.anchor_before(range.end); + let range = buffer.anchor_after(MultiBufferOffset(range.start)) + ..buffer.anchor_before(MultiBufferOffset(range.end)); let style = HighlightStyle { color: Some(terminal_view::terminal_element::convert_color( &color, @@ -247,8 +249,8 @@ impl Console { for (range, color) in background_spans { let Some(color) = color else { continue }; let start_offset = range.start; - let range = - buffer.anchor_after(range.start)..buffer.anchor_before(range.end); + let range = buffer.anchor_after(MultiBufferOffset(range.start)) + ..buffer.anchor_before(MultiBufferOffset(range.end)); console.highlight_background_key::( start_offset, &[range], @@ -961,7 +963,7 @@ fn color_fetcher(color: ansi::Color) -> fn(&Theme) -> Hsla { mod tests { use super::*; use crate::tests::init_test; - use editor::test::editor_test_context::EditorTestContext; + use editor::{MultiBufferOffset, test::editor_test_context::EditorTestContext}; use gpui::TestAppContext; use language::Point; @@ -993,8 +995,8 @@ mod tests { cx.update_editor(|editor, _, cx| { editor.edit( vec![( - snapshot.offset_for_anchor(&replace_range.start) - ..snapshot.offset_for_anchor(&replace_range.end), + MultiBufferOffset(snapshot.offset_for_anchor(&replace_range.start)) + ..MultiBufferOffset(snapshot.offset_for_anchor(&replace_range.end)), replacement, )], cx, diff --git a/crates/diagnostics/src/diagnostics_tests.rs b/crates/diagnostics/src/diagnostics_tests.rs index 3d1d3840108c6842d57190bd56fd9db3984af7c6..d2504fde4a6bcb828db75f85f01aea2f296bd9dd 100644 --- a/crates/diagnostics/src/diagnostics_tests.rs +++ b/crates/diagnostics/src/diagnostics_tests.rs @@ -1,7 +1,7 @@ use super::*; use collections::{HashMap, HashSet}; use editor::{ - DisplayPoint, EditorSettings, Inlay, + DisplayPoint, EditorSettings, Inlay, MultiBufferOffset, actions::{GoToDiagnostic, GoToPreviousDiagnostic, Hover, MoveToBeginning}, display_map::DisplayRow, test::{ @@ -878,7 +878,8 @@ async fn test_random_diagnostics_with_inlays(cx: &mut TestAppContext, mut rng: S diagnostics.editor.update(cx, |editor, cx| { let snapshot = editor.snapshot(window, cx); if !snapshot.buffer_snapshot().is_empty() { - let position = rng.random_range(0..snapshot.buffer_snapshot().len()); + let position = rng + .random_range(MultiBufferOffset(0)..snapshot.buffer_snapshot().len()); let position = snapshot.buffer_snapshot().clip_offset(position, Bias::Left); log::info!( "adding inlay at {position}/{}: {:?}", diff --git a/crates/diagnostics/src/items.rs b/crates/diagnostics/src/items.rs index 413bad5c0d696bfcba92a1127789c9e7c31edc30..b4ca52ea7239b6e4e76160a475d703ddd2933f44 100644 --- a/crates/diagnostics/src/items.rs +++ b/crates/diagnostics/src/items.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use editor::Editor; +use editor::{Editor, MultiBufferOffset}; use gpui::{ Context, Entity, EventEmitter, IntoElement, ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, @@ -171,14 +171,19 @@ impl DiagnosticIndicator { let buffer = editor.buffer().read(cx).snapshot(cx); let cursor_position = editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .head(); (buffer, cursor_position) }); let new_diagnostic = buffer - 
.diagnostics_in_range::(cursor_position..cursor_position) + .diagnostics_in_range::(cursor_position..cursor_position) .filter(|entry| !entry.range.is_empty()) - .min_by_key(|entry| (entry.diagnostic.severity, entry.range.len())) + .min_by_key(|entry| { + ( + entry.diagnostic.severity, + entry.range.end - entry.range.start, + ) + }) .map(|entry| entry.diagnostic); if new_diagnostic != self.current_diagnostic.as_ref() { let new_diagnostic = new_diagnostic.cloned(); diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 51f228db76aaee5e286dd950c17dd01b303d29b8..4f5f60d5a2328e5e56d65e87add7338b7e572346 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -3,7 +3,9 @@ use client::{Client, UserStore, zed_urls}; use cloud_llm_client::UsageLimit; use codestral::CodestralCompletionProvider; use copilot::{Copilot, Status}; -use editor::{Editor, SelectionEffects, actions::ShowEditPrediction, scroll::Autoscroll}; +use editor::{ + Editor, MultiBufferOffset, SelectionEffects, actions::ShowEditPrediction, scroll::Autoscroll, +}; use feature_flags::{FeatureFlagAppExt, PredictEditsRateCompletionsFeatureFlag}; use fs::Fs; use gpui::{ @@ -1107,7 +1109,12 @@ async fn open_disabled_globs_setting_in_editor( }); if !edits.is_empty() { - item.edit(edits, cx); + item.edit( + edits + .into_iter() + .map(|(r, s)| (MultiBufferOffset(r.start)..MultiBufferOffset(r.end), s)), + cx, + ); } let text = item.buffer().read(cx).snapshot(cx).text(); @@ -1122,6 +1129,7 @@ async fn open_disabled_globs_setting_in_editor( .map(|inner_match| inner_match.start()..inner_match.end()) }); if let Some(range) = range { + let range = MultiBufferOffset(range.start)..MultiBufferOffset(range.end); item.change_selections( SelectionEffects::scroll(Autoscroll::newest()), window, diff --git a/crates/editor/benches/display_map.rs b/crates/editor/benches/display_map.rs index 919249ad01b87fe5fbabe1b5fe6e563179b41d10..2459e7466f8054189c9f644e404158b9612e2b9e 100644 --- a/crates/editor/benches/display_map.rs +++ b/crates/editor/benches/display_map.rs @@ -2,6 +2,7 @@ use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; use editor::MultiBuffer; use gpui::TestDispatcher; use itertools::Itertools; +use multi_buffer::MultiBufferOffset; use rand::{Rng, SeedableRng, rngs::StdRng}; use std::num::NonZeroU32; use text::Bias; @@ -24,7 +25,9 @@ fn to_tab_point_benchmark(c: &mut Criterion) { let (_, inlay_snapshot) = InlayMap::new(buffer_snapshot); let (_, fold_snapshot) = FoldMap::new(inlay_snapshot.clone()); let fold_point = fold_snapshot.to_fold_point( - inlay_snapshot.to_point(InlayOffset(rng.random_range(0..length))), + inlay_snapshot.to_point(InlayOffset( + rng.random_range(MultiBufferOffset(0)..MultiBufferOffset(length)), + )), Bias::Left, ); let (_, snapshot) = TabMap::new(fold_snapshot, NonZeroU32::new(4).unwrap()); @@ -69,7 +72,9 @@ fn to_fold_point_benchmark(c: &mut Criterion) { let (_, fold_snapshot) = FoldMap::new(inlay_snapshot.clone()); let fold_point = fold_snapshot.to_fold_point( - inlay_snapshot.to_point(InlayOffset(rng.random_range(0..length))), + inlay_snapshot.to_point(InlayOffset( + rng.random_range(MultiBufferOffset(0)..MultiBufferOffset(length)), + )), Bias::Left, ); diff --git a/crates/editor/src/display_map.rs b/crates/editor/src/display_map.rs index c4c49eb7911e0d7c5ed375d83697584fbb493b81..86aad01a2ea946057fe08876937853f5b84f00bf 100644 --- 
a/crates/editor/src/display_map.rs +++ b/crates/editor/src/display_map.rs @@ -44,12 +44,10 @@ pub use invisibles::{is_invisible, replacement}; use collections::{HashMap, HashSet}; use gpui::{App, Context, Entity, Font, HighlightStyle, LineLayout, Pixels, UnderlineStyle}; -use language::{ - OffsetUtf16, Point, Subscription as BufferSubscription, language_settings::language_settings, -}; +use language::{Point, Subscription as BufferSubscription, language_settings::language_settings}; use multi_buffer::{ - Anchor, AnchorRangeExt, MultiBuffer, MultiBufferPoint, MultiBufferRow, MultiBufferSnapshot, - RowInfo, ToOffset, ToPoint, + Anchor, AnchorRangeExt, MultiBuffer, MultiBufferOffset, MultiBufferOffsetUtf16, + MultiBufferPoint, MultiBufferRow, MultiBufferSnapshot, RowInfo, ToOffset, ToPoint, }; use project::InlayId; use project::project_settings::DiagnosticSeverity; @@ -104,7 +102,7 @@ type InlayHighlights = TreeMap, - buffer_subscription: BufferSubscription, + buffer_subscription: BufferSubscription, /// Decides where the [`Inlay`]s should be displayed. inlay_map: InlayMap, /// Decides where the fold indicators should be and tracks parts of a source file that are currently folded. @@ -198,7 +196,7 @@ impl DisplayMap { pub fn set_state(&mut self, other: &DisplaySnapshot, cx: &mut Context) { self.fold( other - .folds_in_range(0..other.buffer_snapshot().len()) + .folds_in_range(MultiBufferOffset(0)..other.buffer_snapshot().len()) .map(|fold| { Crease::simple( fold.range.to_offset(other.buffer_snapshot()), @@ -794,7 +792,7 @@ impl DisplaySnapshot { } pub fn is_empty(&self) -> bool { - self.buffer_snapshot().len() == 0 + self.buffer_snapshot().len() == MultiBufferOffset(0) } pub fn row_infos(&self, start_row: DisplayRow) -> impl Iterator + '_ { @@ -1133,7 +1131,10 @@ impl DisplaySnapshot { }) } - pub fn buffer_chars_at(&self, mut offset: usize) -> impl Iterator + '_ { + pub fn buffer_chars_at( + &self, + mut offset: MultiBufferOffset, + ) -> impl Iterator + '_ { self.buffer_snapshot().chars_at(offset).map(move |ch| { let ret = (ch, offset); offset += ch.len_utf8(); @@ -1143,8 +1144,8 @@ impl DisplaySnapshot { pub fn reverse_buffer_chars_at( &self, - mut offset: usize, - ) -> impl Iterator + '_ { + mut offset: MultiBufferOffset, + ) -> impl Iterator + '_ { self.buffer_snapshot() .reversed_chars_at(offset) .map(move |ch| { @@ -1526,7 +1527,7 @@ impl DisplayPoint { map.display_point_to_point(self, Bias::Left) } - pub fn to_offset(self, map: &DisplaySnapshot, bias: Bias) -> usize { + pub fn to_offset(self, map: &DisplaySnapshot, bias: Bias) -> MultiBufferOffset { let wrap_point = map.block_snapshot.to_wrap_point(self.0, bias); let tab_point = map.wrap_snapshot().to_tab_point(wrap_point); let fold_point = map.tab_snapshot().to_fold_point(tab_point, bias).0; @@ -1536,13 +1537,13 @@ impl DisplayPoint { } } -impl ToDisplayPoint for usize { +impl ToDisplayPoint for MultiBufferOffset { fn to_display_point(&self, map: &DisplaySnapshot) -> DisplayPoint { map.point_to_display_point(self.to_point(map.buffer_snapshot()), Bias::Left) } } -impl ToDisplayPoint for OffsetUtf16 { +impl ToDisplayPoint for MultiBufferOffsetUtf16 { fn to_display_point(&self, map: &DisplaySnapshot) -> DisplayPoint { self.to_offset(map.buffer_snapshot()).to_display_point(map) } @@ -1685,7 +1686,7 @@ pub mod tests { let block_properties = (0..rng.random_range(1..=1)) .map(|_| { let position = buffer.anchor_after(buffer.clip_offset( - rng.random_range(0..=buffer.len()), + rng.random_range(MultiBufferOffset(0)..=buffer.len()), Bias::Left, )); 
@@ -1727,8 +1728,12 @@ pub mod tests { for _ in 0..rng.random_range(1..=3) { buffer.read_with(cx, |buffer, cx| { let buffer = buffer.read(cx); - let end = buffer.clip_offset(rng.random_range(0..=buffer.len()), Right); - let start = buffer.clip_offset(rng.random_range(0..=end), Left); + let end = buffer.clip_offset( + rng.random_range(MultiBufferOffset(0)..=buffer.len()), + Right, + ); + let start = buffer + .clip_offset(rng.random_range(MultiBufferOffset(0)..=end), Left); ranges.push(start..end); }); } @@ -1954,7 +1959,7 @@ pub mod tests { ) ); - let ix = snapshot.buffer_snapshot().text().find("seven").unwrap(); + let ix = MultiBufferOffset(snapshot.buffer_snapshot().text().find("seven").unwrap()); buffer.update(cx, |buffer, cx| { buffer.edit([(ix..ix, "and ")], None, cx); }); @@ -2083,7 +2088,7 @@ pub mod tests { &[], vec![Inlay::edit_prediction( 0, - buffer_snapshot.anchor_after(0), + buffer_snapshot.anchor_after(MultiBufferOffset(0)), "\n", )], cx, @@ -2094,7 +2099,11 @@ pub mod tests { // Regression test: updating the display map does not crash when a // block is immediately followed by a multi-line inlay. buffer.update(cx, |buffer, cx| { - buffer.edit([(1..1, "b")], None, cx); + buffer.edit( + [(MultiBufferOffset(1)..MultiBufferOffset(1), "b")], + None, + cx, + ); }); map.update(cx, |m, cx| assert_eq!(m.snapshot(cx).text(), "\n\n\nab")); } @@ -2694,6 +2703,7 @@ pub mod tests { HighlightKey::Type(TypeId::of::()), highlighted_ranges .into_iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) .map(|range| { buffer_snapshot.anchor_before(range.start) ..buffer_snapshot.anchor_before(range.end) diff --git a/crates/editor/src/display_map/block_map.rs b/crates/editor/src/display_map/block_map.rs index 639d2a06579ca16eb938f3d23908e48b702254ef..b55c5330dd398428c549ae1932c1f0a25c8e1436 100644 --- a/crates/editor/src/display_map/block_map.rs +++ b/crates/editor/src/display_map/block_map.rs @@ -11,8 +11,8 @@ use collections::{Bound, HashMap, HashSet}; use gpui::{AnyElement, App, EntityId, Pixels, Window}; use language::{Patch, Point}; use multi_buffer::{ - Anchor, ExcerptId, ExcerptInfo, MultiBuffer, MultiBufferRow, MultiBufferSnapshot, RowInfo, - ToOffset, ToPoint as _, + Anchor, ExcerptId, ExcerptInfo, MultiBuffer, MultiBufferOffset, MultiBufferRow, + MultiBufferSnapshot, RowInfo, ToOffset, ToPoint as _, }; use parking_lot::Mutex; use std::{ @@ -1208,7 +1208,7 @@ impl BlockMapWriter<'_> { pub fn remove_intersecting_replace_blocks( &mut self, - ranges: impl IntoIterator>, + ranges: impl IntoIterator>, inclusive: bool, ) { let wrap_snapshot = self.0.wrap_snapshot.borrow(); @@ -1283,7 +1283,7 @@ impl BlockMapWriter<'_> { fn blocks_intersecting_buffer_range( &self, - range: Range, + range: Range, inclusive: bool, ) -> &[Arc] { if range.is_empty() && !inclusive { @@ -3043,8 +3043,10 @@ mod tests { let block_properties = (0..block_count) .map(|_| { let buffer = cx.update(|cx| buffer.read(cx).read(cx).clone()); - let offset = - buffer.clip_offset(rng.random_range(0..=buffer.len()), Bias::Left); + let offset = buffer.clip_offset( + rng.random_range(MultiBufferOffset(0)..=buffer.len()), + Bias::Left, + ); let mut min_height = 0; let placement = match rng.random_range(0..3) { 0 => { @@ -3244,7 +3246,7 @@ mod tests { // Note that this needs to be synced with the related section in BlockMap::sync expected_blocks.extend(block_map.header_and_footer_blocks( &buffer_snapshot, - 0.., + MultiBufferOffset(0).., &wraps_snapshot, )); diff --git 
a/crates/editor/src/display_map/custom_highlights.rs b/crates/editor/src/display_map/custom_highlights.rs index c6b22bb0b8247420200c2bb8d9e22f55d638386d..e3ae7c99208cb4549a7538ac7f2abcc601c6e6d0 100644 --- a/crates/editor/src/display_map/custom_highlights.rs +++ b/crates/editor/src/display_map/custom_highlights.rs @@ -1,7 +1,7 @@ use collections::BTreeMap; use gpui::HighlightStyle; use language::Chunk; -use multi_buffer::{MultiBufferChunks, MultiBufferSnapshot, ToOffset as _}; +use multi_buffer::{MultiBufferChunks, MultiBufferOffset, MultiBufferSnapshot, ToOffset as _}; use std::{ cmp, iter::{self, Peekable}, @@ -14,7 +14,7 @@ use crate::display_map::{HighlightKey, TextHighlights}; pub struct CustomHighlightsChunks<'a> { buffer_chunks: MultiBufferChunks<'a>, buffer_chunk: Option>, - offset: usize, + offset: MultiBufferOffset, multibuffer_snapshot: &'a MultiBufferSnapshot, highlight_endpoints: Peekable>, @@ -24,14 +24,14 @@ pub struct CustomHighlightsChunks<'a> { #[derive(Debug, Copy, Clone, Eq, PartialEq)] struct HighlightEndpoint { - offset: usize, + offset: MultiBufferOffset, tag: HighlightKey, style: Option, } impl<'a> CustomHighlightsChunks<'a> { pub fn new( - range: Range, + range: Range, language_aware: bool, text_highlights: Option<&'a TextHighlights>, multibuffer_snapshot: &'a MultiBufferSnapshot, @@ -52,7 +52,7 @@ impl<'a> CustomHighlightsChunks<'a> { } } - pub fn seek(&mut self, new_range: Range) { + pub fn seek(&mut self, new_range: Range) { self.highlight_endpoints = create_highlight_endpoints(&new_range, self.text_highlights, self.multibuffer_snapshot); self.offset = new_range.start; @@ -63,7 +63,7 @@ impl<'a> CustomHighlightsChunks<'a> { } fn create_highlight_endpoints( - range: &Range, + range: &Range, text_highlights: Option<&TextHighlights>, buffer: &MultiBufferSnapshot, ) -> iter::Peekable> { @@ -117,7 +117,7 @@ impl<'a> Iterator for CustomHighlightsChunks<'a> { type Item = Chunk<'a>; fn next(&mut self) -> Option { - let mut next_highlight_endpoint = usize::MAX; + let mut next_highlight_endpoint = MultiBufferOffset(usize::MAX); while let Some(endpoint) = self.highlight_endpoints.peek().copied() { if endpoint.offset <= self.offset { if let Some(style) = endpoint.style { @@ -224,20 +224,22 @@ mod tests { let range_count = rng.random_range(1..10); let text = buffer_snapshot.text(); for _ in 0..range_count { - if buffer_snapshot.len() == 0 { + if buffer_snapshot.len() == MultiBufferOffset(0) { continue; } - let mut start = rng.random_range(0..=buffer_snapshot.len().saturating_sub(10)); + let mut start = rng.random_range( + MultiBufferOffset(0)..=buffer_snapshot.len().saturating_sub_usize(10), + ); - while !text.is_char_boundary(start) { - start = start.saturating_sub(1); + while !text.is_char_boundary(start.0) { + start = start.saturating_sub_usize(1); } - let end_end = buffer_snapshot.len().min(start + 100); + let end_end = buffer_snapshot.len().min(start + 100usize); let mut end = rng.random_range(start..=end_end); - while !text.is_char_boundary(end) { - end = end.saturating_sub(1); + while !text.is_char_boundary(end.0) { + end = end.saturating_sub_usize(1); } if start < end { @@ -253,8 +255,12 @@ mod tests { } // Get all chunks and verify their bitmaps - let chunks = - CustomHighlightsChunks::new(0..buffer_snapshot.len(), false, None, &buffer_snapshot); + let chunks = CustomHighlightsChunks::new( + MultiBufferOffset(0)..buffer_snapshot.len(), + false, + None, + &buffer_snapshot, + ); for chunk in chunks { let chunk_text = chunk.text; diff --git 
a/crates/editor/src/display_map/fold_map.rs b/crates/editor/src/display_map/fold_map.rs index 4a628f866807aa9b1a1edd45fb9714a5fcc3d5d3..1fe68939adc5aebcb502a5bb28e83ecd668ff88b 100644 --- a/crates/editor/src/display_map/fold_map.rs +++ b/crates/editor/src/display_map/fold_map.rs @@ -5,16 +5,17 @@ use super::{ inlay_map::{InlayBufferRows, InlayChunks, InlayEdit, InlayOffset, InlayPoint, InlaySnapshot}, }; use gpui::{AnyElement, App, ElementId, HighlightStyle, Pixels, Window}; -use language::{Edit, HighlightId, Point, TextSummary}; +use language::{Edit, HighlightId, Point}; use multi_buffer::{ - Anchor, AnchorRangeExt, MultiBufferRow, MultiBufferSnapshot, RowInfo, ToOffset, + Anchor, AnchorRangeExt, MBTextSummary, MultiBufferOffset, MultiBufferRow, MultiBufferSnapshot, + RowInfo, ToOffset, }; use project::InlayId; use std::{ any::TypeId, cmp::{self, Ordering}, fmt, iter, - ops::{Add, AddAssign, Deref, DerefMut, Range, Sub}, + ops::{Add, AddAssign, Deref, DerefMut, Range, Sub, SubAssign}, sync::Arc, usize, }; @@ -261,7 +262,7 @@ impl FoldMapWriter<'_> { fold_ixs_to_delete.dedup(); self.0.snapshot.folds = { - let mut cursor = self.0.snapshot.folds.cursor::(buffer); + let mut cursor = self.0.snapshot.folds.cursor::(buffer); let mut folds = SumTree::new(buffer); for fold_ix in fold_ixs_to_delete { folds.append(cursor.slice(&fold_ix, Bias::Right), buffer); @@ -413,7 +414,7 @@ impl FoldMap { let mut new_transforms = SumTree::::default(); let mut cursor = self.snapshot.transforms.cursor::(()); - cursor.seek(&InlayOffset(0), Bias::Right); + cursor.seek(&InlayOffset(MultiBufferOffset(0)), Bias::Right); while let Some(mut edit) = inlay_edits_iter.next() { if let Some(item) = cursor.item() @@ -436,7 +437,7 @@ impl FoldMap { cursor.seek(&edit.old.end, Bias::Right); cursor.next(); - let mut delta = edit.new_len().0 as isize - edit.old_len().0 as isize; + let mut delta = edit.new_len() as isize - edit.old_len() as isize; loop { edit.old.end = *cursor.start(); @@ -446,7 +447,7 @@ impl FoldMap { } let next_edit = inlay_edits_iter.next().unwrap(); - delta += next_edit.new_len().0 as isize - next_edit.old_len().0 as isize; + delta += next_edit.new_len() as isize - next_edit.old_len() as isize; if next_edit.old.end >= edit.old.end { edit.old.end = next_edit.old.end; @@ -458,8 +459,9 @@ impl FoldMap { } } - edit.new.end = - InlayOffset(((edit.new.start + edit.old_len()).0 as isize + delta) as usize); + edit.new.end = InlayOffset(MultiBufferOffset( + ((edit.new.start + edit.old_len()).0.0 as isize + delta) as usize, + )); let anchor = inlay_snapshot .buffer @@ -522,7 +524,7 @@ impl FoldMap { new_transforms.push( Transform { summary: TransformSummary { - output: TextSummary::from(ELLIPSIS), + output: MBTextSummary::from(ELLIPSIS), input: inlay_snapshot .text_summary_for_range(fold_range.start..fold_range.end), }, @@ -579,7 +581,7 @@ impl FoldMap { edit.old.start = old_transforms.start().0; } let old_start = - old_transforms.start().1.0 + (edit.old.start - old_transforms.start().0).0; + old_transforms.start().1.0 + (edit.old.start - old_transforms.start().0); old_transforms.seek_forward(&edit.old.end, Bias::Right); if old_transforms.item().is_some_and(|t| t.is_fold()) { @@ -587,14 +589,14 @@ impl FoldMap { edit.old.end = old_transforms.start().0; } let old_end = - old_transforms.start().1.0 + (edit.old.end - old_transforms.start().0).0; + old_transforms.start().1.0 + (edit.old.end - old_transforms.start().0); new_transforms.seek(&edit.new.start, Bias::Left); if new_transforms.item().is_some_and(|t| t.is_fold()) { 
edit.new.start = new_transforms.start().0; } let new_start = - new_transforms.start().1.0 + (edit.new.start - new_transforms.start().0).0; + new_transforms.start().1.0 + (edit.new.start - new_transforms.start().0); new_transforms.seek_forward(&edit.new.end, Bias::Right); if new_transforms.item().is_some_and(|t| t.is_fold()) { @@ -602,7 +604,7 @@ impl FoldMap { edit.new.end = new_transforms.start().0; } let new_end = - new_transforms.start().1.0 + (edit.new.end - new_transforms.start().0).0; + new_transforms.start().1.0 + (edit.new.end - new_transforms.start().0); fold_edits.push(FoldEdit { old: FoldOffset(old_start)..FoldOffset(old_end), @@ -649,9 +651,13 @@ impl FoldSnapshot { #[cfg(test)] pub fn text(&self) -> String { - self.chunks(FoldOffset(0)..self.len(), false, Highlights::default()) - .map(|c| c.text) - .collect() + self.chunks( + FoldOffset(MultiBufferOffset(0))..self.len(), + false, + Highlights::default(), + ) + .map(|c| c.text) + .collect() } #[cfg(test)] @@ -659,8 +665,8 @@ impl FoldSnapshot { self.folds.items(&self.inlay_snapshot.buffer).len() } - pub fn text_summary_for_range(&self, range: Range) -> TextSummary { - let mut summary = TextSummary::default(); + pub fn text_summary_for_range(&self, range: Range) -> MBTextSummary { + let mut summary = MBTextSummary::default(); let mut cursor = self .transforms @@ -670,7 +676,7 @@ impl FoldSnapshot { let start_in_transform = range.start.0 - cursor.start().0.0; let end_in_transform = cmp::min(range.end, cursor.end().0).0 - cursor.start().0.0; if let Some(placeholder) = transform.placeholder.as_ref() { - summary = TextSummary::from( + summary = MBTextSummary::from( &placeholder.text [start_in_transform.column as usize..end_in_transform.column as usize], ); @@ -689,14 +695,14 @@ impl FoldSnapshot { if range.end > cursor.end().0 { cursor.next(); - summary += &cursor + summary += cursor .summary::<_, TransformSummary>(&range.end, Bias::Right) .output; if let Some(transform) = cursor.item() { let end_in_transform = range.end.0 - cursor.start().0.0; if let Some(placeholder) = transform.placeholder.as_ref() { summary += - TextSummary::from(&placeholder.text[..end_in_transform.column as usize]); + MBTextSummary::from(&placeholder.text[..end_in_transform.column as usize]); } else { let inlay_start = self.inlay_snapshot.to_offset(cursor.start().1); let inlay_end = self @@ -839,8 +845,8 @@ impl FoldSnapshot { transform_cursor.seek(&range.start, Bias::Right); let inlay_start = { - let overshoot = range.start.0 - transform_cursor.start().0.0; - transform_cursor.start().1 + InlayOffset(overshoot) + let overshoot = range.start - transform_cursor.start().0; + transform_cursor.start().1 + overshoot }; let transform_end = transform_cursor.end(); @@ -851,8 +857,8 @@ impl FoldSnapshot { { inlay_start } else if range.end < transform_end.0 { - let overshoot = range.end.0 - transform_cursor.start().0.0; - transform_cursor.start().1 + InlayOffset(overshoot) + let overshoot = range.end - transform_cursor.start().0; + transform_cursor.start().1 + overshoot } else { transform_end.1 }; @@ -921,7 +927,7 @@ impl FoldSnapshot { } } -fn push_isomorphic(transforms: &mut SumTree, summary: TextSummary) { +fn push_isomorphic(transforms: &mut SumTree, summary: MBTextSummary) { let mut did_merge = false; transforms.update_last( |last| { @@ -950,13 +956,13 @@ fn push_isomorphic(transforms: &mut SumTree, summary: TextSummary) { fn intersecting_folds<'a>( inlay_snapshot: &'a InlaySnapshot, folds: &'a SumTree, - range: Range, + range: Range, inclusive: bool, -) -> 
FilterCursor<'a, 'a, impl 'a + FnMut(&FoldSummary) -> bool, Fold, usize> { +) -> FilterCursor<'a, 'a, impl 'a + FnMut(&FoldSummary) -> bool, Fold, MultiBufferOffset> { let buffer = &inlay_snapshot.buffer; let start = buffer.anchor_before(range.start.to_offset(buffer)); let end = buffer.anchor_after(range.end.to_offset(buffer)); - let mut cursor = folds.filter::<_, usize>(buffer, move |summary| { + let mut cursor = folds.filter::<_, MultiBufferOffset>(buffer, move |summary| { let start_cmp = start.cmp(&summary.max_end, buffer); let end_cmp = end.cmp(&summary.min_start, buffer); @@ -1061,8 +1067,8 @@ impl Transform { #[derive(Clone, Debug, Default, Eq, PartialEq)] struct TransformSummary { - output: TextSummary, - input: TextSummary, + output: MBTextSummary, + input: MBTextSummary, } impl sum_tree::Item for Transform { @@ -1079,8 +1085,8 @@ impl sum_tree::ContextLessSummary for TransformSummary { } fn add_summary(&mut self, other: &Self) { - self.input += &other.input; - self.output += &other.output; + self.input += other.input; + self.output += other.output; } } @@ -1211,7 +1217,7 @@ impl sum_tree::SeekTarget<'_, FoldSummary, FoldRange> for FoldRange { } } -impl<'a> sum_tree::Dimension<'a, FoldSummary> for usize { +impl<'a> sum_tree::Dimension<'a, FoldSummary> for MultiBufferOffset { fn zero(_cx: &MultiBufferSnapshot) -> Self { Default::default() } @@ -1357,8 +1363,8 @@ impl FoldChunks<'_> { self.transform_cursor.seek(&range.start, Bias::Right); let inlay_start = { - let overshoot = range.start.0 - self.transform_cursor.start().0.0; - self.transform_cursor.start().1 + InlayOffset(overshoot) + let overshoot = range.start - self.transform_cursor.start().0; + self.transform_cursor.start().1 + overshoot }; let transform_end = self.transform_cursor.end(); @@ -1370,8 +1376,8 @@ impl FoldChunks<'_> { { inlay_start } else if range.end < transform_end.0 { - let overshoot = range.end.0 - self.transform_cursor.start().0.0; - self.transform_cursor.start().1 + InlayOffset(overshoot) + let overshoot = range.end - self.transform_cursor.start().0; + self.transform_cursor.start().1 + overshoot } else { transform_end.1 }; @@ -1423,8 +1429,8 @@ impl<'a> Iterator for FoldChunks<'a> { let transform_start = self.transform_cursor.start(); let transform_end = self.transform_cursor.end(); let inlay_end = if self.max_output_offset < transform_end.0 { - let overshoot = self.max_output_offset.0 - transform_start.0.0; - transform_start.1 + InlayOffset(overshoot) + let overshoot = self.max_output_offset - transform_start.0; + transform_start.1 + overshoot } else { transform_end.1 }; @@ -1441,15 +1447,15 @@ impl<'a> Iterator for FoldChunks<'a> { // Otherwise, take a chunk from the buffer's text. 
if let Some((buffer_chunk_start, mut inlay_chunk)) = self.inlay_chunk.clone() { let chunk = &mut inlay_chunk.chunk; - let buffer_chunk_end = buffer_chunk_start + InlayOffset(chunk.text.len()); + let buffer_chunk_end = buffer_chunk_start + chunk.text.len(); let transform_end = self.transform_cursor.end().1; let chunk_end = buffer_chunk_end.min(transform_end); - let bit_start = (self.inlay_offset - buffer_chunk_start).0; - let bit_end = (chunk_end - buffer_chunk_start).0; + let bit_start = self.inlay_offset - buffer_chunk_start; + let bit_end = chunk_end - buffer_chunk_start; chunk.text = &chunk.text[bit_start..bit_end]; - let bit_end = (chunk_end - buffer_chunk_start).0; + let bit_end = chunk_end - buffer_chunk_start; let mask = 1u128.unbounded_shl(bit_end as u32).wrapping_sub(1); chunk.tabs = (chunk.tabs >> bit_start) & mask; @@ -1483,7 +1489,7 @@ impl<'a> Iterator for FoldChunks<'a> { } #[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq)] -pub struct FoldOffset(pub usize); +pub struct FoldOffset(pub MultiBufferOffset); impl FoldOffset { pub fn to_point(self, snapshot: &FoldSnapshot) -> FoldPoint { @@ -1493,7 +1499,7 @@ impl FoldOffset { let overshoot = if item.is_none_or(|t| t.is_fold()) { Point::new(0, (self.0 - start.0.0) as u32) } else { - let inlay_offset = start.1.input.len + self.0 - start.0.0; + let inlay_offset = start.1.input.len + (self - start.0); let inlay_point = snapshot.inlay_snapshot.to_point(InlayOffset(inlay_offset)); inlay_point.0 - start.1.input.lines }; @@ -1505,7 +1511,7 @@ impl FoldOffset { let (start, _, _) = snapshot .transforms .find::, _>((), &self, Bias::Right); - let overshoot = self.0 - start.0.0; + let overshoot = self - start.0; InlayOffset(start.1.0 + overshoot) } } @@ -1518,17 +1524,46 @@ impl Add for FoldOffset { } } +impl Sub for FoldOffset { + type Output = ::Output; + + fn sub(self, rhs: Self) -> Self::Output { + self.0 - rhs.0 + } +} + +impl SubAssign for FoldOffset +where + MultiBufferOffset: SubAssign, +{ + fn sub_assign(&mut self, rhs: T) { + self.0 -= rhs; + } +} + +impl Add for FoldOffset +where + MultiBufferOffset: Add, +{ + type Output = Self; + + fn add(self, rhs: T) -> Self::Output { + Self(self.0 + rhs) + } +} + impl AddAssign for FoldOffset { fn add_assign(&mut self, rhs: Self) { self.0 += rhs.0; } } -impl Sub for FoldOffset { - type Output = Self; - - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) +impl AddAssign for FoldOffset +where + MultiBufferOffset: AddAssign, +{ + fn add_assign(&mut self, rhs: T) { + self.0 += rhs; } } @@ -1538,7 +1573,7 @@ impl<'a> sum_tree::Dimension<'a, TransformSummary> for FoldOffset { } fn add_summary(&mut self, summary: &'a TransformSummary, _: ()) { - self.0 += &summary.output.len; + self.0 += summary.output.len; } } @@ -1558,7 +1593,7 @@ impl<'a> sum_tree::Dimension<'a, TransformSummary> for InlayOffset { } fn add_summary(&mut self, summary: &'a TransformSummary, _: ()) { - self.0 += &summary.input.len; + self.0 += summary.input.len; } } @@ -1596,12 +1631,12 @@ mod tests { edits, &[ FoldEdit { - old: FoldOffset(2)..FoldOffset(16), - new: FoldOffset(2)..FoldOffset(5), + old: FoldOffset(MultiBufferOffset(2))..FoldOffset(MultiBufferOffset(16)), + new: FoldOffset(MultiBufferOffset(2))..FoldOffset(MultiBufferOffset(5)), }, FoldEdit { - old: FoldOffset(18)..FoldOffset(29), - new: FoldOffset(7)..FoldOffset(10) + old: FoldOffset(MultiBufferOffset(18))..FoldOffset(MultiBufferOffset(29)), + new: FoldOffset(MultiBufferOffset(7))..FoldOffset(MultiBufferOffset(10)), }, ] ); @@ 
-1626,12 +1661,12 @@ mod tests { edits, &[ FoldEdit { - old: FoldOffset(0)..FoldOffset(1), - new: FoldOffset(0)..FoldOffset(3), + old: FoldOffset(MultiBufferOffset(0))..FoldOffset(MultiBufferOffset(1)), + new: FoldOffset(MultiBufferOffset(0))..FoldOffset(MultiBufferOffset(3)), }, FoldEdit { - old: FoldOffset(6)..FoldOffset(6), - new: FoldOffset(8)..FoldOffset(11), + old: FoldOffset(MultiBufferOffset(6))..FoldOffset(MultiBufferOffset(6)), + new: FoldOffset(MultiBufferOffset(8))..FoldOffset(MultiBufferOffset(11)), }, ] ); @@ -1668,15 +1703,24 @@ mod tests { let mut map = FoldMap::new(inlay_snapshot.clone()).0; let (mut writer, _, _) = map.write(inlay_snapshot.clone(), vec![]); - writer.fold(vec![(5..8, FoldPlaceholder::test())]); + writer.fold(vec![( + MultiBufferOffset(5)..MultiBufferOffset(8), + FoldPlaceholder::test(), + )]); let (snapshot, _) = map.read(inlay_snapshot.clone(), vec![]); assert_eq!(snapshot.text(), "abcde⋯ijkl"); // Create an fold adjacent to the start of the first fold. let (mut writer, _, _) = map.write(inlay_snapshot.clone(), vec![]); writer.fold(vec![ - (0..1, FoldPlaceholder::test()), - (2..5, FoldPlaceholder::test()), + ( + MultiBufferOffset(0)..MultiBufferOffset(1), + FoldPlaceholder::test(), + ), + ( + MultiBufferOffset(2)..MultiBufferOffset(5), + FoldPlaceholder::test(), + ), ]); let (snapshot, _) = map.read(inlay_snapshot.clone(), vec![]); assert_eq!(snapshot.text(), "⋯b⋯ijkl"); @@ -1684,8 +1728,14 @@ mod tests { // Create an fold adjacent to the end of the first fold. let (mut writer, _, _) = map.write(inlay_snapshot.clone(), vec![]); writer.fold(vec![ - (11..11, FoldPlaceholder::test()), - (8..10, FoldPlaceholder::test()), + ( + MultiBufferOffset(11)..MultiBufferOffset(11), + FoldPlaceholder::test(), + ), + ( + MultiBufferOffset(8)..MultiBufferOffset(10), + FoldPlaceholder::test(), + ), ]); let (snapshot, _) = map.read(inlay_snapshot.clone(), vec![]); assert_eq!(snapshot.text(), "⋯b⋯kl"); @@ -1697,15 +1747,25 @@ mod tests { // Create two adjacent folds. let (mut writer, _, _) = map.write(inlay_snapshot.clone(), vec![]); writer.fold(vec![ - (0..2, FoldPlaceholder::test()), - (2..5, FoldPlaceholder::test()), + ( + MultiBufferOffset(0)..MultiBufferOffset(2), + FoldPlaceholder::test(), + ), + ( + MultiBufferOffset(2)..MultiBufferOffset(5), + FoldPlaceholder::test(), + ), ]); let (snapshot, _) = map.read(inlay_snapshot, vec![]); assert_eq!(snapshot.text(), "⋯fghijkl"); // Edit within one of the folds. 
let buffer_snapshot = buffer.update(cx, |buffer, cx| { - buffer.edit([(0..1, "12345")], None, cx); + buffer.edit( + [(MultiBufferOffset(0)..MultiBufferOffset(1), "12345")], + None, + cx, + ); buffer.snapshot(cx) }); let (inlay_snapshot, inlay_edits) = @@ -1849,7 +1909,7 @@ mod tests { for fold_range in map.merged_folds().into_iter().rev() { let fold_inlay_start = inlay_snapshot.to_inlay_offset(fold_range.start); let fold_inlay_end = inlay_snapshot.to_inlay_offset(fold_range.end); - expected_text.replace_range(fold_inlay_start.0..fold_inlay_end.0, "⋯"); + expected_text.replace_range(fold_inlay_start.0.0..fold_inlay_end.0.0, "⋯"); } assert_eq!(snapshot.text(), expected_text); @@ -1898,7 +1958,7 @@ mod tests { .chars() .count(); let mut fold_point = FoldPoint::new(0, 0); - let mut fold_offset = FoldOffset(0); + let mut fold_offset = FoldOffset(MultiBufferOffset(0)); let mut char_column = 0; for c in expected_text.chars() { let inlay_point = fold_point.to_inlay_point(&snapshot); @@ -1944,18 +2004,18 @@ mod tests { for _ in 0..5 { let mut start = snapshot.clip_offset( - FoldOffset(rng.random_range(0..=snapshot.len().0)), + FoldOffset(rng.random_range(MultiBufferOffset(0)..=snapshot.len().0)), Bias::Left, ); let mut end = snapshot.clip_offset( - FoldOffset(rng.random_range(0..=snapshot.len().0)), + FoldOffset(rng.random_range(MultiBufferOffset(0)..=snapshot.len().0)), Bias::Right, ); if start > end { mem::swap(&mut start, &mut end); } - let text = &expected_text[start.0..end.0]; + let text = &expected_text[start.0.0..end.0.0]; assert_eq!( snapshot .chunks(start..end, false, Highlights::default()) @@ -2004,9 +2064,12 @@ mod tests { } for _ in 0..5 { - let end = - buffer_snapshot.clip_offset(rng.random_range(0..=buffer_snapshot.len()), Right); - let start = buffer_snapshot.clip_offset(rng.random_range(0..=end), Left); + let end = buffer_snapshot.clip_offset( + rng.random_range(MultiBufferOffset(0)..=buffer_snapshot.len()), + Right, + ); + let start = + buffer_snapshot.clip_offset(rng.random_range(MultiBufferOffset(0)..=end), Left); let expected_folds = map .snapshot .folds @@ -2046,7 +2109,7 @@ mod tests { let bytes = start.to_offset(&snapshot)..end.to_offset(&snapshot); assert_eq!( snapshot.text_summary_for_range(lines), - TextSummary::from(&text[bytes.start.0..bytes.end.0]) + MBTextSummary::from(&text[bytes.start.0.0..bytes.end.0.0]) ) } @@ -2054,8 +2117,8 @@ mod tests { for (snapshot, edits) in snapshot_edits.drain(..) 
{ let new_text = snapshot.text(); for edit in edits { - let old_bytes = edit.new.start.0..edit.new.start.0 + edit.old_len().0; - let new_bytes = edit.new.start.0..edit.new.end.0; + let old_bytes = edit.new.start.0.0..edit.new.start.0.0 + edit.old_len(); + let new_bytes = edit.new.start.0.0..edit.new.end.0.0; text.replace_range(old_bytes, &new_text[new_bytes]); } @@ -2126,7 +2189,7 @@ mod tests { // Get all chunks and verify their bitmaps let chunks = snapshot.chunks( - FoldOffset(0)..FoldOffset(snapshot.len().0), + FoldOffset(MultiBufferOffset(0))..FoldOffset(snapshot.len().0), false, Highlights::default(), ); @@ -2195,7 +2258,7 @@ mod tests { } impl FoldMap { - fn merged_folds(&self) -> Vec> { + fn merged_folds(&self) -> Vec> { let inlay_snapshot = self.snapshot.inlay_snapshot.clone(); let buffer = &inlay_snapshot.buffer; let mut folds = self.snapshot.folds.items(buffer); @@ -2236,8 +2299,12 @@ mod tests { let buffer = &inlay_snapshot.buffer; let mut to_unfold = Vec::new(); for _ in 0..rng.random_range(1..=3) { - let end = buffer.clip_offset(rng.random_range(0..=buffer.len()), Right); - let start = buffer.clip_offset(rng.random_range(0..=end), Left); + let end = buffer.clip_offset( + rng.random_range(MultiBufferOffset(0)..=buffer.len()), + Right, + ); + let start = + buffer.clip_offset(rng.random_range(MultiBufferOffset(0)..=end), Left); to_unfold.push(start..end); } let inclusive = rng.random(); @@ -2252,8 +2319,12 @@ mod tests { let buffer = &inlay_snapshot.buffer; let mut to_fold = Vec::new(); for _ in 0..rng.random_range(1..=2) { - let end = buffer.clip_offset(rng.random_range(0..=buffer.len()), Right); - let start = buffer.clip_offset(rng.random_range(0..=end), Left); + let end = buffer.clip_offset( + rng.random_range(MultiBufferOffset(0)..=buffer.len()), + Right, + ); + let start = + buffer.clip_offset(rng.random_range(MultiBufferOffset(0)..=end), Left); to_fold.push((start..end, FoldPlaceholder::test())); } log::info!("folding {:?}", to_fold); diff --git a/crates/editor/src/display_map/inlay_map.rs b/crates/editor/src/display_map/inlay_map.rs index f3f3a3eee8ea6d1f95261ae4d313afb6f4d497e3..979c398a23efd34ec223ff6136f023a33dc4a81f 100644 --- a/crates/editor/src/display_map/inlay_map.rs +++ b/crates/editor/src/display_map/inlay_map.rs @@ -4,7 +4,10 @@ use crate::{ }; use collections::BTreeSet; use language::{Chunk, Edit, Point, TextSummary}; -use multi_buffer::{MultiBufferRow, MultiBufferRows, MultiBufferSnapshot, RowInfo, ToOffset}; +use multi_buffer::{ + MBTextSummary, MultiBufferOffset, MultiBufferRow, MultiBufferRows, MultiBufferSnapshot, + RowInfo, ToOffset, +}; use project::InlayId; use std::{ cmp, @@ -42,7 +45,7 @@ impl std::ops::Deref for InlaySnapshot { #[derive(Clone, Debug)] enum Transform { - Isomorphic(TextSummary), + Isomorphic(MBTextSummary), Inlay(Inlay), } @@ -56,8 +59,8 @@ impl sum_tree::Item for Transform { output: *summary, }, Transform::Inlay(inlay) => TransformSummary { - input: TextSummary::default(), - output: inlay.text().summary(), + input: MBTextSummary::default(), + output: MBTextSummary::from(inlay.text().summary()), }, } } @@ -65,8 +68,8 @@ impl sum_tree::Item for Transform { #[derive(Clone, Debug, Default)] struct TransformSummary { - input: TextSummary, - output: TextSummary, + input: MBTextSummary, + output: MBTextSummary, } impl sum_tree::ContextLessSummary for TransformSummary { @@ -75,15 +78,15 @@ impl sum_tree::ContextLessSummary for TransformSummary { } fn add_summary(&mut self, other: &Self) { - self.input += &other.input; - self.output += 
&other.output; + self.input += other.input; + self.output += other.output; } } pub type InlayEdit = Edit; #[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq)] -pub struct InlayOffset(pub usize); +pub struct InlayOffset(pub MultiBufferOffset); impl Add for InlayOffset { type Output = Self; @@ -94,10 +97,30 @@ impl Add for InlayOffset { } impl Sub for InlayOffset { - type Output = Self; + type Output = ::Output; fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) + self.0 - rhs.0 + } +} + +impl SubAssign for InlayOffset +where + MultiBufferOffset: SubAssign, +{ + fn sub_assign(&mut self, rhs: T) { + self.0 -= rhs; + } +} + +impl Add for InlayOffset +where + MultiBufferOffset: Add, +{ + type Output = Self; + + fn add(self, rhs: T) -> Self::Output { + Self(self.0 + rhs) } } @@ -107,9 +130,12 @@ impl AddAssign for InlayOffset { } } -impl SubAssign for InlayOffset { - fn sub_assign(&mut self, rhs: Self) { - self.0 -= rhs.0; +impl AddAssign for InlayOffset +where + MultiBufferOffset: AddAssign, +{ + fn add_assign(&mut self, rhs: T) { + self.0 += rhs; } } @@ -119,7 +145,7 @@ impl<'a> sum_tree::Dimension<'a, TransformSummary> for InlayOffset { } fn add_summary(&mut self, summary: &'a TransformSummary, _: ()) { - self.0 += &summary.output.len; + self.0 += summary.output.len; } } @@ -152,13 +178,13 @@ impl<'a> sum_tree::Dimension<'a, TransformSummary> for InlayPoint { } } -impl<'a> sum_tree::Dimension<'a, TransformSummary> for usize { +impl<'a> sum_tree::Dimension<'a, TransformSummary> for MultiBufferOffset { fn zero(_cx: ()) -> Self { Default::default() } fn add_summary(&mut self, summary: &'a TransformSummary, _: ()) { - *self += &summary.input.len; + *self += summary.input.len; } } @@ -181,7 +207,7 @@ pub struct InlayBufferRows<'a> { } pub struct InlayChunks<'a> { - transforms: Cursor<'a, 'static, Transform, Dimensions>, + transforms: Cursor<'a, 'static, Transform, Dimensions>, buffer_chunks: CustomHighlightsChunks<'a>, buffer_chunk: Option>, inlay_chunks: Option>, @@ -332,12 +358,12 @@ impl<'a> Iterator for InlayChunks<'a> { let offset_in_inlay = self.output_offset - self.transforms.start().0; if let Some((style, highlight)) = inlay_style_and_highlight { let range = &highlight.range; - if offset_in_inlay.0 < range.start { - next_inlay_highlight_endpoint = range.start - offset_in_inlay.0; - } else if offset_in_inlay.0 >= range.end { + if offset_in_inlay < range.start { + next_inlay_highlight_endpoint = range.start - offset_in_inlay; + } else if offset_in_inlay >= range.end { next_inlay_highlight_endpoint = usize::MAX; } else { - next_inlay_highlight_endpoint = range.end - offset_in_inlay.0; + next_inlay_highlight_endpoint = range.end - offset_in_inlay; highlight_style = highlight_style .map(|highlight| highlight.highlight(*style)) .or_else(|| Some(*style)); @@ -350,7 +376,7 @@ impl<'a> Iterator for InlayChunks<'a> { let start = offset_in_inlay; let end = cmp::min(self.max_output_offset, self.transforms.end().0) - self.transforms.start().0; - let chunks = inlay.text().chunks_in_range(start.0..end.0); + let chunks = inlay.text().chunks_in_range(start..end); text::ChunkWithBitmaps(chunks) }); let ChunkBitmaps { @@ -488,7 +514,7 @@ impl InlayMap { pub fn sync( &mut self, buffer_snapshot: MultiBufferSnapshot, - mut buffer_edits: Vec>, + mut buffer_edits: Vec>, ) -> (InlaySnapshot, Vec) { let snapshot = &mut self.snapshot; @@ -519,7 +545,7 @@ impl InlayMap { let mut new_transforms = SumTree::default(); let mut cursor = snapshot .transforms - .cursor::>(()); + 
.cursor::>(()); let mut buffer_edits_iter = buffer_edits.iter().peekable(); while let Some(buffer_edit) = buffer_edits_iter.next() { new_transforms.append(cursor.slice(&buffer_edit.old.start, Bias::Left), ()); @@ -531,11 +557,9 @@ impl InlayMap { } // Remove all the inlays and transforms contained by the edit. - let old_start = - cursor.start().1 + InlayOffset(buffer_edit.old.start - cursor.start().0); + let old_start = cursor.start().1 + (buffer_edit.old.start - cursor.start().0); cursor.seek(&buffer_edit.old.end, Bias::Right); - let old_end = - cursor.start().1 + InlayOffset(buffer_edit.old.end - cursor.start().0); + let old_end = cursor.start().1 + (buffer_edit.old.end - cursor.start().0); // Push the unchanged prefix. let prefix_start = new_transforms.summary().input.len; @@ -687,7 +711,10 @@ impl InlayMap { let snapshot = &mut self.snapshot; for i in 0..rng.random_range(1..=5) { if self.inlays.is_empty() || rng.random() { - let position = snapshot.buffer.random_byte_range(0, rng).start; + let position = snapshot + .buffer + .random_byte_range(MultiBufferOffset(0), rng) + .start; let bias = if rng.random() { Bias::Left } else { @@ -740,9 +767,11 @@ impl InlayMap { impl InlaySnapshot { pub fn to_point(&self, offset: InlayOffset) -> InlayPoint { - let (start, _, item) = self - .transforms - .find::, _>((), &offset, Bias::Right); + let (start, _, item) = self.transforms.find::, _>((), &offset, Bias::Right); let overshoot = offset.0 - start.0.0; match item { Some(Transform::Isomorphic(_)) => { @@ -801,22 +830,24 @@ impl InlaySnapshot { None => self.buffer.max_point(), } } - pub fn to_buffer_offset(&self, offset: InlayOffset) -> usize { - let (start, _, item) = - self.transforms - .find::, _>((), &offset, Bias::Right); + pub fn to_buffer_offset(&self, offset: InlayOffset) -> MultiBufferOffset { + let (start, _, item) = self + .transforms + .find::, _>((), &offset, Bias::Right); match item { Some(Transform::Isomorphic(_)) => { let overshoot = offset - start.0; - start.1 + overshoot.0 + start.1 + overshoot } Some(Transform::Inlay(_)) => start.1, None => self.buffer.len(), } } - pub fn to_inlay_offset(&self, offset: usize) -> InlayOffset { - let mut cursor = self.transforms.cursor::>(()); + pub fn to_inlay_offset(&self, offset: MultiBufferOffset) -> InlayOffset { + let mut cursor = self + .transforms + .cursor::>(()); cursor.seek(&offset, Bias::Left); loop { match cursor.item() { @@ -973,14 +1004,16 @@ impl InlaySnapshot { } } - pub fn text_summary(&self) -> TextSummary { + pub fn text_summary(&self) -> MBTextSummary { self.transforms.summary().output } - pub fn text_summary_for_range(&self, range: Range) -> TextSummary { - let mut summary = TextSummary::default(); + pub fn text_summary_for_range(&self, range: Range) -> MBTextSummary { + let mut summary = MBTextSummary::default(); - let mut cursor = self.transforms.cursor::>(()); + let mut cursor = self + .transforms + .cursor::>(()); cursor.seek(&range.start, Bias::Right); let overshoot = range.start.0 - cursor.start().0.0; @@ -996,7 +1029,12 @@ impl InlaySnapshot { Some(Transform::Inlay(inlay)) => { let suffix_start = overshoot; let suffix_end = cmp::min(cursor.end().0, range.end).0 - cursor.start().0.0; - summary = inlay.text().cursor(suffix_start).summary(suffix_end); + summary = MBTextSummary::from( + inlay + .text() + .cursor(suffix_start) + .summary::(suffix_end), + ); cursor.next(); } None => {} @@ -1014,7 +1052,7 @@ impl InlaySnapshot { let prefix_end = prefix_start + overshoot; summary += self .buffer - 
.text_summary_for_range::(prefix_start..prefix_end); + .text_summary_for_range::(prefix_start..prefix_end); } Some(Transform::Inlay(inlay)) => { let prefix_end = overshoot; @@ -1070,7 +1108,9 @@ impl InlaySnapshot { language_aware: bool, highlights: Highlights<'a>, ) -> InlayChunks<'a> { - let mut cursor = self.transforms.cursor::>(()); + let mut cursor = self + .transforms + .cursor::>(()); cursor.seek(&range.start, Bias::Right); let buffer_range = self.to_buffer_offset(range.start)..self.to_buffer_offset(range.end); @@ -1122,8 +1162,8 @@ impl InlaySnapshot { } } -fn push_isomorphic(sum_tree: &mut SumTree, summary: TextSummary) { - if summary.len == 0 { +fn push_isomorphic(sum_tree: &mut SumTree, summary: MBTextSummary) { + if summary.len == MultiBufferOffset(0) { return; } @@ -1279,7 +1319,10 @@ mod tests { &[], vec![Inlay::mock_hint( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_after(3), + buffer + .read(cx) + .snapshot(cx) + .anchor_after(MultiBufferOffset(3)), "|123|", )], ); @@ -1335,7 +1378,15 @@ mod tests { // Edits before or after the inlay should not affect it. buffer.update(cx, |buffer, cx| { - buffer.edit([(2..3, "x"), (3..3, "y"), (4..4, "z")], None, cx) + buffer.edit( + [ + (MultiBufferOffset(2)..MultiBufferOffset(3), "x"), + (MultiBufferOffset(3)..MultiBufferOffset(3), "y"), + (MultiBufferOffset(4)..MultiBufferOffset(4), "z"), + ], + None, + cx, + ) }); let (inlay_snapshot, _) = inlay_map.sync( buffer.read(cx).snapshot(cx), @@ -1344,7 +1395,13 @@ mod tests { assert_eq!(inlay_snapshot.text(), "abxy|123|dzefghi"); // An edit surrounding the inlay should invalidate it. - buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "D")], None, cx)); + buffer.update(cx, |buffer, cx| { + buffer.edit( + [(MultiBufferOffset(4)..MultiBufferOffset(5), "D")], + None, + cx, + ) + }); let (inlay_snapshot, _) = inlay_map.sync( buffer.read(cx).snapshot(cx), buffer_edits.consume().into_inner(), @@ -1356,12 +1413,18 @@ mod tests { vec![ Inlay::mock_hint( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_before(3), + buffer + .read(cx) + .snapshot(cx) + .anchor_before(MultiBufferOffset(3)), "|123|", ), Inlay::edit_prediction( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_after(3), + buffer + .read(cx) + .snapshot(cx) + .anchor_after(MultiBufferOffset(3)), "|456|", ), ], @@ -1369,7 +1432,13 @@ mod tests { assert_eq!(inlay_snapshot.text(), "abx|123||456|yDzefghi"); // Edits ending where the inlay starts should not move it if it has a left bias. 
- buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "JKL")], None, cx)); + buffer.update(cx, |buffer, cx| { + buffer.edit( + [(MultiBufferOffset(3)..MultiBufferOffset(3), "JKL")], + None, + cx, + ) + }); let (inlay_snapshot, _) = inlay_map.sync( buffer.read(cx).snapshot(cx), buffer_edits.consume().into_inner(), @@ -1571,17 +1640,26 @@ mod tests { vec![ Inlay::mock_hint( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_before(0), + buffer + .read(cx) + .snapshot(cx) + .anchor_before(MultiBufferOffset(0)), "|123|\n", ), Inlay::mock_hint( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_before(4), + buffer + .read(cx) + .snapshot(cx) + .anchor_before(MultiBufferOffset(4)), "|456|", ), Inlay::edit_prediction( post_inc(&mut next_inlay_id), - buffer.read(cx).snapshot(cx).anchor_before(7), + buffer + .read(cx) + .snapshot(cx) + .anchor_before(MultiBufferOffset(7)), "\n|567|\n", ), ], @@ -1658,7 +1736,7 @@ mod tests { .collect::>(); let mut expected_text = Rope::from(&buffer_snapshot.text()); for (offset, inlay) in inlays.iter().rev() { - expected_text.replace(*offset..*offset, &inlay.text().to_string()); + expected_text.replace(offset.0..offset.0, &inlay.text().to_string()); } assert_eq!(inlay_snapshot.text(), expected_text.to_string()); @@ -1681,7 +1759,7 @@ mod tests { let mut text_highlights = TextHighlights::default(); let text_highlight_count = rng.random_range(0_usize..10); let mut text_highlight_ranges = (0..text_highlight_count) - .map(|_| buffer_snapshot.random_byte_range(0, &mut rng)) + .map(|_| buffer_snapshot.random_byte_range(MultiBufferOffset(0), &mut rng)) .collect::>(); text_highlight_ranges.sort_by_key(|range| (range.start, Reverse(range.end))); log::info!("highlighting text ranges {text_highlight_ranges:?}"); @@ -1744,12 +1822,13 @@ mod tests { } for _ in 0..5 { - let mut end = rng.random_range(0..=inlay_snapshot.len().0); + let mut end = rng.random_range(0..=inlay_snapshot.len().0.0); end = expected_text.clip_offset(end, Bias::Right); let mut start = rng.random_range(0..=end); start = expected_text.clip_offset(start, Bias::Right); - let range = InlayOffset(start)..InlayOffset(end); + let range = + InlayOffset(MultiBufferOffset(start))..InlayOffset(MultiBufferOffset(end)); log::info!("calling inlay_snapshot.chunks({range:?})"); let actual_text = inlay_snapshot .chunks( @@ -1771,25 +1850,27 @@ mod tests { ); assert_eq!( - inlay_snapshot.text_summary_for_range(InlayOffset(start)..InlayOffset(end)), - expected_text.slice(start..end).summary() + inlay_snapshot.text_summary_for_range( + InlayOffset(MultiBufferOffset(start))..InlayOffset(MultiBufferOffset(end)) + ), + MBTextSummary::from(expected_text.slice(start..end).summary()) ); } for edit in inlay_edits { prev_inlay_text.replace_range( - edit.new.start.0..edit.new.start.0 + edit.old_len().0, - &inlay_snapshot.text()[edit.new.start.0..edit.new.end.0], + edit.new.start.0.0..edit.new.start.0.0 + edit.old_len(), + &inlay_snapshot.text()[edit.new.start.0.0..edit.new.end.0.0], ); } assert_eq!(prev_inlay_text, inlay_snapshot.text()); assert_eq!(expected_text.max_point(), inlay_snapshot.max_point().0); - assert_eq!(expected_text.len(), inlay_snapshot.len().0); + assert_eq!(expected_text.len(), inlay_snapshot.len().0.0); let mut buffer_point = Point::default(); let mut inlay_point = inlay_snapshot.to_inlay_point(buffer_point); - let mut buffer_chars = buffer_snapshot.chars_at(0); + let mut buffer_chars = buffer_snapshot.chars_at(MultiBufferOffset(0)); loop { // Ensure conversion from buffer 
coordinates to inlay coordinates // is consistent. @@ -1930,7 +2011,7 @@ mod tests { // Get all chunks and verify their bitmaps let chunks = snapshot.chunks( - InlayOffset(0)..InlayOffset(snapshot.len().0), + InlayOffset(MultiBufferOffset(0))..snapshot.len(), false, Highlights::default(), ); @@ -2064,7 +2145,7 @@ mod tests { // Collect chunks - this previously would panic let chunks: Vec<_> = inlay_snapshot .chunks( - InlayOffset(0)..InlayOffset(inlay_snapshot.len().0), + InlayOffset(MultiBufferOffset(0))..inlay_snapshot.len(), false, highlights, ) @@ -2178,7 +2259,7 @@ mod tests { let chunks: Vec<_> = inlay_snapshot .chunks( - InlayOffset(0)..InlayOffset(inlay_snapshot.len().0), + InlayOffset(MultiBufferOffset(0))..inlay_snapshot.len(), false, highlights, ) diff --git a/crates/editor/src/display_map/tab_map.rs b/crates/editor/src/display_map/tab_map.rs index ab3bddf7278605e89b816831059de73873853b32..a8ffbbb177f8806fdeec95fc73f4a1a217b9dc39 100644 --- a/crates/editor/src/display_map/tab_map.rs +++ b/crates/editor/src/display_map/tab_map.rs @@ -648,6 +648,7 @@ mod tests { inlay_map::InlayMap, }, }; + use multi_buffer::MultiBufferOffset; use rand::{Rng, prelude::StdRng}; use util; @@ -1156,7 +1157,7 @@ mod tests { let (_, inlay_snapshot) = InlayMap::new(buffer_snapshot); let (_, fold_snapshot) = FoldMap::new(inlay_snapshot); let chunks = fold_snapshot.chunks( - FoldOffset(0)..fold_snapshot.len(), + FoldOffset(MultiBufferOffset(0))..fold_snapshot.len(), false, Default::default(), ); @@ -1318,7 +1319,7 @@ mod tests { let (_, inlay_snapshot) = InlayMap::new(buffer_snapshot); let (_, fold_snapshot) = FoldMap::new(inlay_snapshot); let chunks = fold_snapshot.chunks( - FoldOffset(0)..fold_snapshot.len(), + FoldOffset(MultiBufferOffset(0))..fold_snapshot.len(), false, Default::default(), ); diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 4b8b6a8c881d51f6c702c7d7cb1301e7a54b5318..9401d5a1e2f36f7b0c00c4c3dec3fc90597290e5 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -64,8 +64,9 @@ pub use items::MAX_TAB_TITLE_LEN; pub use lsp::CompletionContext; pub use lsp_ext::lsp_tasks; pub use multi_buffer::{ - Anchor, AnchorRangeExt, ExcerptId, ExcerptRange, MultiBuffer, MultiBufferSnapshot, PathKey, - RowInfo, ToOffset, ToPoint, + Anchor, AnchorRangeExt, BufferOffset, ExcerptId, ExcerptRange, MBTextSummary, MultiBuffer, + MultiBufferOffset, MultiBufferOffsetUtf16, MultiBufferSnapshot, PathKey, RowInfo, ToOffset, + ToPoint, }; pub use text::Bias; @@ -118,8 +119,7 @@ use language::{ BufferSnapshot, Capability, CharClassifier, CharKind, CharScopeContext, CodeLabel, CursorShape, DiagnosticEntryRef, DiffOptions, EditPredictionsMode, EditPreview, HighlightedText, IndentKind, IndentSize, Language, LanguageRegistry, OffsetRangeExt, OutlineItem, Point, Runnable, - RunnableRange, Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions, - WordsQuery, + Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions, WordsQuery, language_settings::{ self, LspInsertMode, RewrapBehavior, WordsCompletionMode, all_language_settings, language_settings, @@ -856,9 +856,6 @@ pub struct ResolvedTasks { position: Anchor, } -#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)] -struct BufferOffset(usize); - /// Addons allow storing per-editor state in other crates (e.g. 
Vim) pub trait Addon: 'static { fn extend_key_context(&self, _: &mut KeyContext, _: &App) {} @@ -1575,7 +1572,7 @@ pub struct ClipboardSelection { // selections, scroll behavior, was newest selection reversed type SelectSyntaxNodeHistoryState = ( - Box<[Selection]>, + Box<[Selection]>, SelectSyntaxNodeScrollBehavior, bool, ); @@ -1930,16 +1927,18 @@ impl Editor { } } project::Event::SnippetEdit(id, snippet_edits) => { - if let Some(buffer) = editor.buffer.read(cx).buffer(*id) { + // todo(lw): Non singletons + if let Some(buffer) = editor.buffer.read(cx).as_singleton() { + let snapshot = buffer.read(cx).snapshot(); let focus_handle = editor.focus_handle(cx); - if focus_handle.is_focused(window) { - let snapshot = buffer.read(cx).snapshot(); + if snapshot.remote_id() == *id && focus_handle.is_focused(window) { for (range, snippet) in snippet_edits { - let editor_range = + let buffer_range = language::range_from_lsp(*range).to_offset(&snapshot); editor .insert_snippet( - &[editor_range], + &[MultiBufferOffset(buffer_range.start) + ..MultiBufferOffset(buffer_range.end)], snippet.clone(), window, cx, @@ -2516,7 +2515,7 @@ impl Editor { } self.selections - .disjoint_in_range::(range.clone(), &self.display_snapshot(cx)) + .disjoint_in_range::(range.clone(), &self.display_snapshot(cx)) .into_iter() .any(|selection| { // This is needed to cover a corner case, if we just check for an existing @@ -3177,7 +3176,9 @@ impl Editor { // Copy selections to primary selection buffer #[cfg(any(target_os = "linux", target_os = "freebsd"))] if local { - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); let buffer_handle = self.buffer.read(cx).read(cx); let mut text = String::new(); @@ -3333,8 +3334,8 @@ impl Editor { .iter() .map(|selection| { ( - selection.start.to_offset(&snapshot), - selection.end.to_offset(&snapshot), + selection.start.to_offset(&snapshot).0, + selection.end.to_offset(&snapshot).0, ) }) .collect(); @@ -3376,7 +3377,7 @@ impl Editor { return; }; let inmemory_folds = display_snapshot - .folds_in_range(0..display_snapshot.buffer_snapshot().len()) + .folds_in_range(MultiBufferOffset(0)..display_snapshot.buffer_snapshot().len()) .map(|fold| { fold.range.start.text_anchor.to_point(&snapshot) ..fold.range.end.text_anchor.to_point(&snapshot) @@ -3392,7 +3393,7 @@ impl Editor { let background_executor = cx.background_executor().clone(); let editor_id = cx.entity().entity_id().as_u64() as ItemId; let db_folds = display_snapshot - .folds_in_range(0..display_snapshot.buffer_snapshot().len()) + .folds_in_range(MultiBufferOffset(0)..display_snapshot.buffer_snapshot().len()) .map(|fold| { ( fold.range.start.text_anchor.to_offset(&snapshot), @@ -3649,7 +3650,10 @@ impl Editor { cx: &mut Context, ) { let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - let tail = self.selections.newest::(&display_map).tail(); + let tail = self + .selections + .newest::(&display_map) + .tail(); let click_count = click_count.max(match self.selections.select_mode() { SelectMode::Character => 1, SelectMode::Word(_) => 2, @@ -3758,7 +3762,7 @@ impl Editor { auto_scroll = true; } _ => { - start = buffer.anchor_before(0); + start = buffer.anchor_before(MultiBufferOffset(0)); end = buffer.anchor_before(buffer.len()); mode = SelectMode::All; auto_scroll = false; @@ -3971,7 +3975,9 @@ impl Editor { fn end_selection(&mut self, window: &mut Window, cx: &mut Context) { self.columnar_selection_state.take(); if let 
Some(pending_mode) = self.selections.pending_mode() { - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); self.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { s.select(selections); s.clear_pending(); @@ -4505,17 +4511,19 @@ impl Editor { let new_anchor_selections = new_selections.iter().map(|e| &e.0); let new_selection_deltas = new_selections.iter().map(|e| e.1); let map = this.display_map.update(cx, |map, cx| map.snapshot(cx)); - let new_selections = - resolve_selections_wrapping_blocks::(new_anchor_selections, &map) - .zip(new_selection_deltas) - .map(|(selection, delta)| Selection { - id: selection.id, - start: selection.start + delta, - end: selection.end + delta, - reversed: selection.reversed, - goal: SelectionGoal::None, - }) - .collect::>(); + let new_selections = resolve_selections_wrapping_blocks::( + new_anchor_selections, + &map, + ) + .zip(new_selection_deltas) + .map(|(selection, delta)| Selection { + id: selection.id, + start: selection.start + delta, + end: selection.end + delta, + reversed: selection.reversed, + goal: SelectionGoal::None, + }) + .collect::>(); let mut i = 0; for (position, delta, selection_id, pair) in new_autoclose_regions { @@ -4651,7 +4659,9 @@ impl Editor { self.hide_mouse_cursor(HideMouseCursorOrigin::TypingAction, cx); self.transact(window, cx, |this, window, cx| { let (edits_with_flags, selection_info): (Vec<_>, Vec<_>) = { - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + .all::(&this.display_snapshot(cx)); let multi_buffer = this.buffer.read(cx); let buffer = multi_buffer.snapshot(cx); selections @@ -5151,7 +5161,9 @@ impl Editor { /// If any empty selections is touching the start of its innermost containing autoclose /// region, expand it to select the brackets. 
fn select_autoclose_pair(&mut self, window: &mut Window, cx: &mut Context) { - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); let buffer = self.buffer.read(cx).read(cx); let new_selections = self .selections_with_autoclose_regions(selections, &buffer) @@ -5162,7 +5174,7 @@ impl Editor { if let Some(region) = region { let mut range = region.range.to_offset(&buffer); - if selection.start == range.start && range.start >= region.pair.start.len() { + if selection.start == range.start && range.start.0 >= region.pair.start.len() { range.start -= region.pair.start.len(); if buffer.contains_str_at(range.start, ®ion.pair.start) && buffer.contains_str_at(range.end, ®ion.pair.end) @@ -5193,7 +5205,7 @@ impl Editor { if buffer.contains_str_at(selection.start, &pair.end) { let pair_start_len = pair.start.len(); if buffer.contains_str_at( - selection.start.saturating_sub(pair_start_len), + selection.start.saturating_sub_usize(pair_start_len), &pair.start, ) { selection.start -= pair_start_len; @@ -5330,7 +5342,7 @@ impl Editor { ( multi_buffer.buffer(buffer.remote_id()).unwrap(), buffer.version().clone(), - excerpt_visible_range, + excerpt_visible_range.start.0..excerpt_visible_range.end.0, ), )) } @@ -6013,14 +6025,17 @@ impl Editor { .start .text_anchor .to_offset(buffer) - .saturating_sub(replace_range.start); + .saturating_sub(replace_range.start.0); let lookahead = replace_range .end + .0 .saturating_sub(newest_anchor.end.text_anchor.to_offset(buffer)); let prefix = &old_text[..old_text.len().saturating_sub(lookahead)]; let suffix = &old_text[lookbehind.min(old_text.len())..]; - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); let mut ranges = Vec::new(); let mut linked_edits = HashMap::<_, Vec<_>>::default(); @@ -6031,8 +6046,8 @@ impl Editor { let mut range = selection.range(); // if prefix is present, don't duplicate it - if snapshot.contains_str_at(range.start.saturating_sub(lookbehind), prefix) { - range.start = range.start.saturating_sub(lookbehind); + if snapshot.contains_str_at(range.start.saturating_sub_usize(lookbehind), prefix) { + range.start = range.start.saturating_sub_usize(lookbehind); // if suffix is also present, mimic the newest cursor and replace it if selection.id != newest_anchor.id @@ -7008,7 +7023,10 @@ impl Editor { for (buffer_snapshot, search_range, excerpt_id) in buffer_ranges { match_ranges.extend( regex - .search(buffer_snapshot, Some(search_range.clone())) + .search( + buffer_snapshot, + Some(search_range.start.0..search_range.end.0), + ) .await .into_iter() .filter_map(|match_range| { @@ -7055,12 +7073,12 @@ impl Editor { } let task = cx.background_spawn(async move { let new_newlines = snapshot - .buffer_chars_at(0) + .buffer_chars_at(MultiBufferOffset(0)) .filter_map(|(c, i)| { if c == '\n' { Some( snapshot.buffer_snapshot().anchor_after(i) - ..snapshot.buffer_snapshot().anchor_before(i + 1), + ..snapshot.buffer_snapshot().anchor_before(i + 1usize), ) } else { None @@ -7068,7 +7086,7 @@ impl Editor { }) .collect::>(); let existing_newlines = snapshot - .folds_in_range(0..snapshot.buffer_snapshot().len()) + .folds_in_range(MultiBufferOffset(0)..snapshot.buffer_snapshot().len()) .filter_map(|fold| { if fold.placeholder.type_tag == Some(type_id) { Some(fold.range.start..fold.range.end) @@ -7165,7 +7183,7 @@ impl Editor { .is_none_or(|(prev_anchor_range, _)| prev_anchor_range != 
&query_range) { let multi_buffer_start = multi_buffer_snapshot - .anchor_before(0) + .anchor_before(MultiBufferOffset(0)) .to_point(&multi_buffer_snapshot); let multi_buffer_end = multi_buffer_snapshot .anchor_after(multi_buffer_snapshot.len()) @@ -7597,7 +7615,7 @@ impl Editor { let snapshot = self.buffer.read(cx).snapshot(cx); let cursor_offset = self .selections - .newest::(&self.display_snapshot(cx)) + .newest::(&self.display_snapshot(cx)) .head(); let insertion = edits.iter().find_map(|(range, text)| { let range = range.to_offset(&snapshot); @@ -8589,16 +8607,17 @@ impl Editor { let snapshot = self.buffer.read(cx).snapshot(cx); let offset = self .selections - .newest::(&self.display_snapshot(cx)) + .newest::(&self.display_snapshot(cx)) .head(); - let excerpt = snapshot.excerpt_containing(offset..offset)?; + let mut excerpt = snapshot.excerpt_containing(offset..offset)?; + let offset = excerpt.map_offset_to_buffer(offset); let buffer_id = excerpt.buffer().remote_id(); let layer = excerpt.buffer().syntax_layer_at(offset)?; let mut cursor = layer.node().walk(); - while cursor.goto_first_child_for_byte(offset).is_some() { - if cursor.node().end_byte() == offset { + while cursor.goto_first_child_for_byte(offset.0).is_some() { + if cursor.node().end_byte() == offset.0 { cursor.goto_next_sibling(); } } @@ -8610,7 +8629,7 @@ impl Editor { let symbol_start_row = excerpt.buffer().offset_to_point(node.start_byte()).row; // Check if this node contains our offset - if node_range.start <= offset && node_range.end >= offset { + if node_range.start <= offset.0 && node_range.end >= offset.0 { // If it contains offset, check for task if let Some(tasks) = self.tasks.get(&(buffer_id, symbol_start_row)) { let buffer = self.buffer.read(cx).buffer(buffer_id)?; @@ -9812,7 +9831,7 @@ impl Editor { pub fn insert_snippet( &mut self, - insertion_ranges: &[Range], + insertion_ranges: &[Range], snippet: Snippet, window: &mut Window, cx: &mut Context, @@ -9849,14 +9868,13 @@ impl Editor { .flat_map(|tabstop_range| { let mut delta = 0_isize; insertion_ranges.iter().map(move |insertion_range| { - let insertion_start = insertion_range.start as isize + delta; - delta += - snippet.text.len() as isize - insertion_range.len() as isize; - - let start = ((insertion_start + tabstop_range.start) as usize) - .min(snapshot.len()); - let end = ((insertion_start + tabstop_range.end) as usize) - .min(snapshot.len()); + let insertion_start = insertion_range.start + delta; + delta += snippet.text.len() as isize + - (insertion_range.end - insertion_range.start) as isize; + + let start = + (insertion_start + tabstop_range.start).min(snapshot.len()); + let end = (insertion_start + tabstop_range.end).min(snapshot.len()); snapshot.anchor_before(start)..snapshot.anchor_after(end) }) }) @@ -10506,7 +10524,9 @@ impl Editor { cx, ); }); - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + .all::(&this.display_snapshot(cx)); this.change_selections(Default::default(), window, cx, |s| s.select(selections)); }); } @@ -10523,7 +10543,7 @@ impl Editor { self.hide_mouse_cursor(HideMouseCursorOrigin::TypingAction, cx); let selections = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into_iter() .map(|s| s.range()); @@ -10531,7 +10551,9 @@ impl Editor { this.buffer.update(cx, |buffer, cx| { buffer.autoindent_ranges(selections, cx); }); - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + 
.all::(&this.display_snapshot(cx)); this.change_selections(Default::default(), window, cx, |s| s.select(selections)); }); } @@ -10569,7 +10591,7 @@ impl Editor { } else { // If there isn't a line after the range, delete the \n from the line before the // start of the row range - edit_start = edit_start.saturating_sub(1); + edit_start = edit_start.saturating_sub_usize(1); (buffer.len(), rows.start.previous_row()) }; @@ -10820,7 +10842,9 @@ impl Editor { boundaries.into_iter() { let open_offset = start_before.to_offset(&buffer) + start_prefix_len; - let close_offset = end_after.to_offset(&buffer).saturating_sub(end_suffix_len); + let close_offset = end_after + .to_offset(&buffer) + .saturating_sub_usize(end_suffix_len); new_selections.push(open_offset..open_offset); new_selections.push(close_offset..close_offset); } @@ -10850,7 +10874,10 @@ impl Editor { self.hide_mouse_cursor(HideMouseCursorOrigin::TypingAction, cx); let mut buffer_ids = HashSet::default(); let snapshot = self.buffer().read(cx).snapshot(cx); - for selection in self.selections.all::(&self.display_snapshot(cx)) { + for selection in self + .selections + .all::(&self.display_snapshot(cx)) + { buffer_ids.extend(snapshot.buffer_ids_for_range(selection.range())) } @@ -11265,7 +11292,7 @@ impl Editor { .read(cx) .base_text() .as_rope() - .slice(hunk.diff_base_byte_range.clone()); + .slice(hunk.diff_base_byte_range.start.0..hunk.diff_base_byte_range.end.0); let buffer_snapshot = buffer.snapshot(); let buffer_revert_changes = revert_changes.entry(buffer.remote_id()).or_default(); if let Err(i) = buffer_revert_changes.binary_search_by(|probe| { @@ -11705,7 +11732,7 @@ impl Editor { let mut new_selections = Vec::new(); let mut edits = Vec::new(); - let mut selection_adjustment = 0i32; + let mut selection_adjustment = 0isize; for selection in self.selections.all_adjusted(&self.display_snapshot(cx)) { let selection_is_empty = selection.is_empty(); @@ -11721,18 +11748,20 @@ impl Editor { }; let text = buffer.text_for_range(start..end).collect::(); - let old_length = text.len() as i32; + let old_length = text.len() as isize; let text = callback(&text); new_selections.push(Selection { - start: (start as i32 - selection_adjustment) as usize, - end: ((start + text.len()) as i32 - selection_adjustment) as usize, + start: MultiBufferOffset((start.0 as isize - selection_adjustment) as usize), + end: MultiBufferOffset( + ((start.0 + text.len()) as isize - selection_adjustment) as usize, + ), goal: SelectionGoal::None, id: selection.id, reversed: selection.reversed, }); - selection_adjustment += old_length - text.len() as i32; + selection_adjustment += old_length - text.len() as isize; edits.push((start..end, text)); } @@ -12149,7 +12178,7 @@ impl Editor { let text_layout_details = &self.text_layout_details(window); self.transact(window, cx, |this, window, cx| { let edits = this.change_selections(Default::default(), window, cx, |s| { - let mut edits: Vec<(Range, String)> = Default::default(); + let mut edits: Vec<(Range, String)> = Default::default(); s.move_with(|display_map, selection| { if !selection.is_empty() { return; @@ -12160,10 +12189,10 @@ impl Editor { if head.column() == display_map.line_len(head.row()) { transpose_offset = display_map .buffer_snapshot() - .clip_offset(transpose_offset.saturating_sub(1), Bias::Left); + .clip_offset(transpose_offset.saturating_sub_usize(1), Bias::Left); } - if transpose_offset == 0 { + if transpose_offset == MultiBufferOffset(0) { return; } @@ -12178,11 +12207,11 @@ impl Editor { let transpose_start = 
display_map .buffer_snapshot() - .clip_offset(transpose_offset.saturating_sub(1), Bias::Left); + .clip_offset(transpose_offset.saturating_sub_usize(1), Bias::Left); if edits.last().is_none_or(|e| e.0.end <= transpose_start) { let transpose_end = display_map .buffer_snapshot() - .clip_offset(transpose_offset + 1, Bias::Right); + .clip_offset(transpose_offset + 1usize, Bias::Right); if let Some(ch) = display_map .buffer_snapshot() .chars_at(transpose_start) @@ -12197,7 +12226,9 @@ impl Editor { }); this.buffer .update(cx, |buffer, cx| buffer.edit(edits, None, cx)); - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + .all::(&this.display_snapshot(cx)); this.change_selections(Default::default(), window, cx, |s| { s.select(selections); }); @@ -12804,8 +12835,11 @@ impl Editor { self.transact(window, cx, |this, window, cx| { let had_active_edit_prediction = this.has_active_edit_prediction(); let display_map = this.display_snapshot(cx); - let old_selections = this.selections.all::(&display_map); - let cursor_offset = this.selections.last::(&display_map).head(); + let old_selections = this.selections.all::(&display_map); + let cursor_offset = this + .selections + .last::(&display_map) + .head(); if let Some(mut clipboard_selections) = clipboard_selections { let all_selections_were_entire_line = @@ -12890,7 +12924,9 @@ impl Editor { ); }); - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + .all::(&this.display_snapshot(cx)); this.change_selections(Default::default(), window, cx, |s| s.select(selections)); } else { let url = url::Url::parse(&clipboard_text).ok(); @@ -12959,7 +12995,9 @@ impl Editor { window: &mut Window, cx: &mut Context, ) { - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); if selections.is_empty() { log::warn!("There should always be at least one selection in Zed. 
This is a bug."); @@ -14212,7 +14250,7 @@ impl Editor { } self.hide_mouse_cursor(HideMouseCursorOrigin::MovementAction, cx); self.change_selections(Default::default(), window, cx, |s| { - s.select_ranges(vec![0..0]); + s.select_ranges(vec![Anchor::min()..Anchor::min()]); }); } @@ -14301,7 +14339,9 @@ impl Editor { pub fn select_to_end(&mut self, _: &SelectToEnd, window: &mut Window, cx: &mut Context) { self.hide_mouse_cursor(HideMouseCursorOrigin::MovementAction, cx); let buffer = self.buffer.read(cx).snapshot(cx); - let mut selection = self.selections.first::(&self.display_snapshot(cx)); + let mut selection = self + .selections + .first::(&self.display_snapshot(cx)); selection.set_head(buffer.len(), SelectionGoal::None); self.change_selections(Default::default(), window, cx, |s| { s.select(vec![selection]); @@ -14310,9 +14350,8 @@ impl Editor { pub fn select_all(&mut self, _: &SelectAll, window: &mut Window, cx: &mut Context) { self.hide_mouse_cursor(HideMouseCursorOrigin::MovementAction, cx); - let end = self.buffer.read(cx).read(cx).len(); self.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges(vec![0..end]); + s.select_ranges(vec![Anchor::min()..Anchor::max()]); }); } @@ -14572,7 +14611,7 @@ impl Editor { fn select_match_ranges( &mut self, - range: Range, + range: Range, reversed: bool, replace_newest: bool, auto_scroll: Option, @@ -14611,7 +14650,7 @@ impl Editor { cx: &mut Context, ) -> Result<()> { let buffer = display_map.buffer_snapshot(); - let mut selections = self.selections.all::(&display_map); + let mut selections = self.selections.all::(&display_map); if let Some(mut select_next_state) = self.select_next_state.take() { let query = &select_next_state.query; if !select_next_state.done { @@ -14621,14 +14660,15 @@ impl Editor { let bytes_after_last_selection = buffer.bytes_in_range(last_selection.end..buffer.len()); - let bytes_before_first_selection = buffer.bytes_in_range(0..first_selection.start); + let bytes_before_first_selection = + buffer.bytes_in_range(MultiBufferOffset(0)..first_selection.start); let query_matches = query .stream_find_iter(bytes_after_last_selection) .map(|result| (last_selection.end, result)) .chain( query .stream_find_iter(bytes_before_first_selection) - .map(|result| (0, result)), + .map(|result| (MultiBufferOffset(0), result)), ); for (start_offset, query_match) in query_matches { @@ -14686,7 +14726,7 @@ impl Editor { } if let Some(next_selection) = selections_iter.peek() { - if next_selection.range().len() == selection.range().len() { + if next_selection.len() == selection.len() { let next_selected_text = buffer .text_for_range(next_selection.range()) .collect::(); @@ -14774,18 +14814,21 @@ impl Editor { let mut new_selections = Vec::new(); - let reversed = self.selections.oldest::(&display_map).reversed; + let reversed = self + .selections + .oldest::(&display_map) + .reversed; let buffer = display_map.buffer_snapshot(); let query_matches = select_next_state .query - .stream_find_iter(buffer.bytes_in_range(0..buffer.len())); + .stream_find_iter(buffer.bytes_in_range(MultiBufferOffset(0)..buffer.len())); for query_match in query_matches.into_iter() { let query_match = query_match.context("query match for select all action")?; // can only fail due to I/O let offset_range = if reversed { - query_match.end()..query_match.start() + MultiBufferOffset(query_match.end())..MultiBufferOffset(query_match.start()) } else { - query_match.start()..query_match.end() + 
MultiBufferOffset(query_match.start())..MultiBufferOffset(query_match.end()) }; if !select_next_state.wordwise @@ -14838,7 +14881,7 @@ impl Editor { self.hide_mouse_cursor(HideMouseCursorOrigin::MovementAction, cx); let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); let buffer = display_map.buffer_snapshot(); - let mut selections = self.selections.all::(&display_map); + let mut selections = self.selections.all::(&display_map); if let Some(mut select_prev_state) = self.select_prev_state.take() { let query = &select_prev_state.query; if !select_prev_state.done { @@ -14847,7 +14890,7 @@ impl Editor { let mut next_selected_range = None; // When we're iterating matches backwards, the oldest match will actually be the furthest one in the buffer. let bytes_before_last_selection = - buffer.reversed_bytes_in_range(0..last_selection.start); + buffer.reversed_bytes_in_range(MultiBufferOffset(0)..last_selection.start); let bytes_after_first_selection = buffer.reversed_bytes_in_range(first_selection.end..buffer.len()); let query_matches = query @@ -14905,7 +14948,7 @@ impl Editor { } if let Some(next_selection) = selections_iter.peek() { - if next_selection.range().len() == selection.range().len() { + if next_selection.len() == selection.len() { let next_selected_text = buffer .text_for_range(next_selection.range()) .collect::(); @@ -15351,13 +15394,13 @@ impl Editor { let buffer = self.buffer.read(cx).snapshot(cx); let old_selections = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into_boxed_slice(); fn update_selection( - selection: &Selection, + selection: &Selection, buffer_snap: &MultiBufferSnapshot, - ) -> Option> { + ) -> Option> { let cursor = selection.head(); let (_buffer_id, symbols) = buffer_snap.symbols_containing(cursor, None)?; for symbol in symbols.iter().rev() { @@ -15409,7 +15452,7 @@ impl Editor { }; let old_selections: Box<[_]> = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into(); if old_selections.is_empty() { return; @@ -15568,7 +15611,7 @@ impl Editor { let buffer = self.buffer.read(cx).snapshot(cx); let selections = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into_iter() // subtracting the offset requires sorting .sorted_by_key(|i| i.start); @@ -15620,7 +15663,7 @@ impl Editor { let mut selections = vec![]; for (id, parent, text) in full_edits { let start = parent.start - offset; - offset += parent.len() - text.len(); + offset += (parent.end - parent.start) - text.len(); selections.push(Selection { id, start, @@ -15642,7 +15685,7 @@ impl Editor { ) { let old_selections: Box<[_]> = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into(); if old_selections.is_empty() { return; @@ -15658,8 +15701,18 @@ impl Editor { .map(|selection| { let old_range = selection.start..selection.end; - if let Some(node) = buffer.syntax_next_sibling(old_range) { - let new_range = node.byte_range(); + let old_range = + old_range.start.to_offset(&buffer)..old_range.end.to_offset(&buffer); + let excerpt = buffer.excerpt_containing(old_range.clone()); + + if let Some(mut excerpt) = excerpt + && let Some(node) = excerpt + .buffer() + .syntax_next_sibling(excerpt.map_range_to_buffer(old_range)) + { + let new_range = excerpt.map_range_from_buffer( + BufferOffset(node.byte_range().start)..BufferOffset(node.byte_range().end), + ); selected_sibling = true; Selection { id: selection.id, @@ -15694,7 +15747,7 
@@ impl Editor { ) { let old_selections: Box<[_]> = self .selections - .all::(&self.display_snapshot(cx)) + .all::(&self.display_snapshot(cx)) .into(); if old_selections.is_empty() { return; @@ -15709,9 +15762,18 @@ impl Editor { .iter() .map(|selection| { let old_range = selection.start..selection.end; + let old_range = + old_range.start.to_offset(&buffer)..old_range.end.to_offset(&buffer); + let excerpt = buffer.excerpt_containing(old_range.clone()); - if let Some(node) = buffer.syntax_prev_sibling(old_range) { - let new_range = node.byte_range(); + if let Some(mut excerpt) = excerpt + && let Some(node) = excerpt + .buffer() + .syntax_prev_sibling(excerpt.map_range_to_buffer(old_range)) + { + let new_range = excerpt.map_range_from_buffer( + BufferOffset(node.byte_range().start)..BufferOffset(node.byte_range().end), + ); selected_sibling = true; Selection { id: selection.id, @@ -15860,7 +15922,7 @@ impl Editor { fn fetch_runnable_ranges( snapshot: &DisplaySnapshot, range: Range, - ) -> Vec { + ) -> Vec<(Range, language::RunnableRange)> { snapshot.buffer_snapshot().runnable_ranges(range).collect() } @@ -15868,12 +15930,12 @@ impl Editor { project: Entity, snapshot: DisplaySnapshot, prefer_lsp: bool, - runnable_ranges: Vec, + runnable_ranges: Vec<(Range, language::RunnableRange)>, cx: AsyncWindowContext, ) -> Task> { cx.spawn(async move |cx| { let mut runnable_rows = Vec::with_capacity(runnable_ranges.len()); - for mut runnable in runnable_ranges { + for (run_range, mut runnable) in runnable_ranges { let Some(tasks) = cx .update(|_, cx| Self::templates_with_tags(&project, &mut runnable.runnable, cx)) .ok() @@ -15891,10 +15953,7 @@ impl Editor { continue; } - let point = runnable - .run_range - .start - .to_point(&snapshot.buffer_snapshot()); + let point = run_range.start.to_point(&snapshot.buffer_snapshot()); let Some(row) = snapshot .buffer_snapshot() .buffer_line_for_row(MultiBufferRow(point.row)) @@ -15909,9 +15968,7 @@ impl Editor { (runnable.buffer_id, row), RunnableTasks { templates: tasks, - offset: snapshot - .buffer_snapshot() - .anchor_before(runnable.run_range.start), + offset: snapshot.buffer_snapshot().anchor_before(run_range.start), context_range, column: point.column, extra_variables: runnable.extra_captures, @@ -15998,7 +16055,7 @@ impl Editor { let mut best_destination = None; for (open, close) in enclosing_bracket_ranges { let close = close.to_inclusive(); - let length = close.end() - open.start; + let length = *close.end() - open.start; let inside = selection.start >= open.end && selection.end <= *close.start(); let in_bracket_range = open.to_inclusive().contains(&selection.head()) || close.contains(&selection.head()); @@ -16257,7 +16314,9 @@ impl Editor { cx: &mut Context, ) { let buffer = self.buffer.read(cx).snapshot(cx); - let selection = self.selections.newest::(&self.display_snapshot(cx)); + let selection = self + .selections + .newest::(&self.display_snapshot(cx)); let mut active_group_id = None; if let ActiveDiagnostic::Group(active_group) = &self.active_diagnostics @@ -16268,8 +16327,8 @@ impl Editor { fn filtered<'a>( severity: GoToDiagnosticSeverityFilter, - diagnostics: impl Iterator>, - ) -> impl Iterator> { + diagnostics: impl Iterator>, + ) -> impl Iterator> { diagnostics .filter(move |entry| severity.matches(entry.diagnostic.severity)) .filter(|entry| entry.range.start != entry.range.end) @@ -16279,7 +16338,7 @@ impl Editor { let before = filtered( severity, buffer - .diagnostics_in_range(0..selection.start) + 
.diagnostics_in_range(MultiBufferOffset(0)..selection.start) .filter(|entry| entry.range.start <= selection.start), ); let after = filtered( @@ -16289,7 +16348,7 @@ impl Editor { .filter(|entry| entry.range.start >= selection.start), ); - let mut found: Option> = None; + let mut found: Option> = None; if direction == Direction::Prev { 'outer: for prev_diagnostics in [before.collect::>(), after.collect::>()] { @@ -16681,7 +16740,7 @@ impl Editor { }; let head = self .selections - .newest::(&self.display_snapshot(cx)) + .newest::(&self.display_snapshot(cx)) .head(); let buffer = self.buffer.read(cx); let Some((buffer, head)) = buffer.text_anchor_for_position(head, cx) else { @@ -17146,7 +17205,9 @@ impl Editor { window: &mut Window, cx: &mut Context, ) -> Option>> { - let selection = self.selections.newest::(&self.display_snapshot(cx)); + let selection = self + .selections + .newest::(&self.display_snapshot(cx)); let multi_buffer = self.buffer.read(cx); let head = selection.head(); @@ -17420,7 +17481,8 @@ impl Editor { this.take_rename(false, window, cx); let buffer = this.buffer.read(cx).read(cx); let cursor_offset = selection.head().to_offset(&buffer); - let rename_start = cursor_offset.saturating_sub(cursor_offset_in_rename_range); + let rename_start = + cursor_offset.saturating_sub_usize(cursor_offset_in_rename_range); let rename_end = rename_start + rename_buffer_range.len(); let range = buffer.anchor_before(rename_start)..buffer.anchor_after(rename_end); let mut old_highlight_id = None; @@ -17442,8 +17504,16 @@ impl Editor { let rename_editor = cx.new(|cx| { let mut editor = Editor::single_line(window, cx); editor.buffer.update(cx, |buffer, cx| { - buffer.edit([(0..0, old_name.clone())], None, cx) + buffer.edit( + [(MultiBufferOffset(0)..MultiBufferOffset(0), old_name.clone())], + None, + cx, + ) }); + let cursor_offset_in_rename_range = + MultiBufferOffset(cursor_offset_in_rename_range); + let cursor_offset_in_rename_range_end = + MultiBufferOffset(cursor_offset_in_rename_range_end); let rename_selection_range = match cursor_offset_in_rename_range .cmp(&cursor_offset_in_rename_range_end) { @@ -17458,7 +17528,7 @@ impl Editor { cursor_offset_in_rename_range_end..cursor_offset_in_rename_range } }; - if rename_selection_range.end > old_name.len() { + if rename_selection_range.end.0 > old_name.len() { editor.select_all(&SelectAll, window, cx); } else { editor.change_selections(Default::default(), window, cx, |s| { @@ -17623,7 +17693,7 @@ impl Editor { let cursor_in_rename_editor = rename.editor.update(cx, |editor, cx| { editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .head() }); @@ -17922,7 +17992,7 @@ impl Editor { let primary_range_start = active_diagnostics.active_range.start.to_offset(&buffer); let primary_range_end = active_diagnostics.active_range.end.to_offset(&buffer); let is_valid = buffer - .diagnostics_in_range::(primary_range_start..primary_range_end) + .diagnostics_in_range::(primary_range_start..primary_range_end) .any(|entry| { entry.diagnostic.is_primary && !entry.range.is_empty() @@ -17954,7 +18024,7 @@ impl Editor { fn activate_diagnostics( &mut self, buffer_id: BufferId, - diagnostic: DiagnosticEntryRef<'_, usize>, + diagnostic: DiagnosticEntryRef<'_, MultiBufferOffset>, window: &mut Window, cx: &mut Context, ) { @@ -18146,7 +18216,9 @@ impl Editor { let new_inline_diagnostics = cx .background_spawn(async move { let mut inline_diagnostics = Vec::<(Anchor, InlineDiagnostic)>::new(); - for diagnostic_entry in 
snapshot.diagnostics_in_range(0..snapshot.len()) { + for diagnostic_entry in + snapshot.diagnostics_in_range(MultiBufferOffset(0)..snapshot.len()) + { let message = diagnostic_entry .diagnostic .message @@ -18525,7 +18597,7 @@ impl Editor { if self.buffer.read(cx).is_singleton() { let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); let has_folds = display_map - .folds_in_range(0..display_map.buffer_snapshot().len()) + .folds_in_range(MultiBufferOffset(0)..display_map.buffer_snapshot().len()) .next() .is_some(); @@ -18728,7 +18800,10 @@ impl Editor { let snapshot = self.buffer.read(cx).snapshot(cx); let ranges = snapshot - .text_object_ranges(0..snapshot.len(), TreeSitterOptions::default()) + .text_object_ranges( + MultiBufferOffset(0)..snapshot.len(), + TreeSitterOptions::default(), + ) .filter_map(|(range, obj)| (obj == TextObject::InsideFunction).then_some(range)) .collect::>(); @@ -18885,7 +18960,12 @@ impl Editor { ) { if self.buffer.read(cx).is_singleton() { let display_map = self.display_map.update(cx, |map, cx| map.snapshot(cx)); - self.unfold_ranges(&[0..display_map.buffer_snapshot().len()], true, true, cx); + self.unfold_ranges( + &[MultiBufferOffset(0)..display_map.buffer_snapshot().len()], + true, + true, + cx, + ); } else { self.toggle_fold_multiple_buffers = cx.spawn(async move |editor, cx| { editor @@ -19310,7 +19390,8 @@ impl Editor { &hunks .map(|hunk| buffer_diff::DiffHunk { buffer_range: hunk.buffer_range, - diff_base_byte_range: hunk.diff_base_byte_range, + diff_base_byte_range: hunk.diff_base_byte_range.start.0 + ..hunk.diff_base_byte_range.end.0, secondary_status: hunk.secondary_status, range: Point::zero()..Point::zero(), // unused }) @@ -20882,7 +20963,7 @@ impl Editor { ) -> Vec<(Range, Hsla)> { let snapshot = self.snapshot(window, cx); let buffer = &snapshot.buffer_snapshot(); - let start = buffer.anchor_before(0); + let start = buffer.anchor_before(MultiBufferOffset(0)); let end = buffer.anchor_after(buffer.len()); self.sorted_background_highlights_in_range(start..end, &snapshot, cx.theme()) } @@ -21562,7 +21643,7 @@ impl Editor { new_selections_by_buffer.insert( buffer, ( - vec![jump_to_offset..jump_to_offset], + vec![BufferOffset(jump_to_offset)..BufferOffset(jump_to_offset)], Some(*line_offset_from_top), ), ); @@ -21581,11 +21662,13 @@ impl Editor { .entry(buffer) .or_insert((Vec::new(), Some(*line_offset_from_top))) .0 - .push(buffer_offset..buffer_offset) + .push(BufferOffset(buffer_offset)..BufferOffset(buffer_offset)) } } None => { - let selections = self.selections.all::(&self.display_snapshot(cx)); + let selections = self + .selections + .all::(&self.display_snapshot(cx)); let multi_buffer = self.buffer.read(cx); for selection in selections { for (snapshot, range, _, anchor) in multi_buffer @@ -21601,7 +21684,7 @@ impl Editor { &anchor.text_anchor, &buffer_handle.read(cx).snapshot(), ); - let range = offset..offset; + let range = BufferOffset(offset)..BufferOffset(offset); new_selections_by_buffer .entry(buffer_handle) .or_insert((Vec::new(), None)) @@ -21689,7 +21772,10 @@ impl Editor { window, cx, |s| { - s.select_ranges(ranges); + s.select_ranges(ranges.into_iter().map(|range| { + // we checked that the editor is a singleton editor so the offsets are valid + MultiBufferOffset(range.start.0)..MultiBufferOffset(range.end.0) + })); }, ); editor.nav_history = nav_history; @@ -21705,7 +21791,7 @@ impl Editor { file.is_none_or(|file| project::File::from_dyn(Some(file)).is_some()) } - fn marked_text_ranges(&self, cx: &App) -> Option>> { 
+ fn marked_text_ranges(&self, cx: &App) -> Option>> { let snapshot = self.buffer.read(cx).read(cx); let (_, ranges) = self.text_highlights::(cx)?; Some( @@ -21720,25 +21806,25 @@ impl Editor { fn selection_replacement_ranges( &self, - range: Range, + range: Range, cx: &mut App, - ) -> Vec> { + ) -> Vec> { let selections = self .selections - .all::(&self.display_snapshot(cx)); + .all::(&self.display_snapshot(cx)); let newest_selection = selections .iter() .max_by_key(|selection| selection.id) .unwrap(); - let start_delta = range.start.0 as isize - newest_selection.start.0 as isize; - let end_delta = range.end.0 as isize - newest_selection.end.0 as isize; + let start_delta = range.start.0.0 as isize - newest_selection.start.0.0 as isize; + let end_delta = range.end.0.0 as isize - newest_selection.end.0.0 as isize; let snapshot = self.buffer.read(cx).read(cx); selections .into_iter() .map(|mut selection| { - selection.start.0 = - (selection.start.0 as isize).saturating_add(start_delta) as usize; - selection.end.0 = (selection.end.0 as isize).saturating_add(end_delta) as usize; + selection.start.0.0 = + (selection.start.0.0 as isize).saturating_add(start_delta) as usize; + selection.end.0.0 = (selection.end.0.0 as isize).saturating_add(end_delta) as usize; snapshot.clip_offset_utf16(selection.start, Bias::Left) ..snapshot.clip_offset_utf16(selection.end, Bias::Right) }) @@ -21831,12 +21917,16 @@ impl Editor { None } else { Some( - snapshot.offset_utf16_to_offset(OffsetUtf16(selection.range.start)) - ..snapshot.offset_utf16_to_offset(OffsetUtf16(selection.range.end)), + snapshot.offset_utf16_to_offset(MultiBufferOffsetUtf16(OffsetUtf16( + selection.range.start, + ))) + ..snapshot.offset_utf16_to_offset(MultiBufferOffsetUtf16(OffsetUtf16( + selection.range.end, + ))), ) } }) - .unwrap_or_else(|| 0..snapshot.len()); + .unwrap_or_else(|| MultiBufferOffset(0)..snapshot.len()); let chunks = snapshot.chunks(range, true); let mut lines = Vec::new(); @@ -21914,21 +22004,23 @@ impl Editor { if let Some(relative_utf16_range) = relative_utf16_range { let selections = self .selections - .all::(&self.display_snapshot(cx)); + .all::(&self.display_snapshot(cx)); self.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { let new_ranges = selections.into_iter().map(|range| { - let start = OffsetUtf16( + let start = MultiBufferOffsetUtf16(OffsetUtf16( range .head() .0 + .0 .saturating_add_signed(relative_utf16_range.start), - ); - let end = OffsetUtf16( + )); + let end = MultiBufferOffsetUtf16(OffsetUtf16( range .head() .0 + .0 .saturating_add_signed(relative_utf16_range.end), - ); + )); start..end }); s.select_ranges(new_ranges); @@ -22039,7 +22131,9 @@ impl Editor { } let transaction = self.transact(window, cx, |this, window, cx| { - let selections = this.selections.all::(&this.display_snapshot(cx)); + let selections = this + .selections + .all::(&this.display_snapshot(cx)); let edits = selections .iter() .map(|selection| (selection.end..selection.end, pending.clone())); @@ -22058,7 +22152,7 @@ impl Editor { let snapshot = self.snapshot(window, cx); let ranges = self .selections - .all::(&snapshot.display_snapshot) + .all::(&snapshot.display_snapshot) .into_iter() .map(|selection| { snapshot.buffer_snapshot().anchor_after(selection.end) @@ -22310,8 +22404,8 @@ impl Editor { folds .into_iter() .map(|(start, end)| { - snapshot.clip_offset(start, Bias::Left) - ..snapshot.clip_offset(end, Bias::Right) + snapshot.clip_offset(MultiBufferOffset(start), Bias::Left) + 
..snapshot.clip_offset(MultiBufferOffset(end), Bias::Right) }) .collect(), false, @@ -22328,8 +22422,8 @@ impl Editor { self.selection_history.mode = SelectionHistoryMode::Skipping; self.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { s.select_ranges(selections.into_iter().map(|(start, end)| { - snapshot.clip_offset(start, Bias::Left) - ..snapshot.clip_offset(end, Bias::Right) + snapshot.clip_offset(MultiBufferOffset(start), Bias::Left) + ..snapshot.clip_offset(MultiBufferOffset(end), Bias::Right) })); }); self.selection_history.mode = SelectionHistoryMode::Normal; @@ -22388,10 +22482,10 @@ impl Editor { fn edit_for_markdown_paste<'a>( buffer: &MultiBufferSnapshot, - range: Range, + range: Range, to_insert: &'a str, url: Option, -) -> (Range, Cow<'a, str>) { +) -> (Range, Cow<'a, str>) { if url.is_none() { return (range, Cow::Borrowed(to_insert)); }; @@ -22541,22 +22635,23 @@ fn process_completion_for_edit( range_to_replace.end = *cursor_position; } + let replace_range = range_to_replace.to_offset(buffer); CompletionEdit { new_text, - replace_range: range_to_replace.to_offset(buffer), + replace_range: BufferOffset(replace_range.start)..BufferOffset(replace_range.end), snippet, } } struct CompletionEdit { new_text: String, - replace_range: Range, + replace_range: Range, snippet: Option, } fn insert_extra_newline_brackets( buffer: &MultiBufferSnapshot, - range: Range, + range: Range, language: &language::LanguageScope, ) -> bool { let leading_whitespace_len = buffer @@ -22578,11 +22673,17 @@ fn insert_extra_newline_brackets( enabled && pair.newline && buffer.contains_str_at(range.end, pair_end) - && buffer.contains_str_at(range.start.saturating_sub(pair_start.len()), pair_start) + && buffer.contains_str_at( + range.start.saturating_sub_usize(pair_start.len()), + pair_start, + ) }) } -fn insert_extra_newline_tree_sitter(buffer: &MultiBufferSnapshot, range: Range) -> bool { +fn insert_extra_newline_tree_sitter( + buffer: &MultiBufferSnapshot, + range: Range, +) -> bool { let (buffer, range) = match buffer.range_to_buffer_ranges(range).as_slice() { [(buffer, range, _)] => (*buffer, range.clone()), _ => return false, @@ -22591,9 +22692,9 @@ fn insert_extra_newline_tree_sitter(buffer: &MultiBufferSnapshot, range: Range = None; for pair in buffer - .all_bracket_ranges(range.clone()) + .all_bracket_ranges(range.start.0..range.end.0) .filter(move |pair| { - pair.open_range.start <= range.start && pair.close_range.end >= range.end + pair.open_range.start <= range.start.0 && pair.close_range.end >= range.end.0 }) { let len = pair.close_range.end - pair.open_range.start; @@ -22615,8 +22716,8 @@ fn insert_extra_newline_tree_sitter(buffer: &MultiBufferSnapshot, range: Range, ) -> Option { let snapshot = self.buffer.read(cx).read(cx); - let start = snapshot.clip_offset_utf16(OffsetUtf16(range_utf16.start), Bias::Left); - let end = snapshot.clip_offset_utf16(OffsetUtf16(range_utf16.end), Bias::Right); - if (start.0..end.0) != range_utf16 { - adjusted_range.replace(start.0..end.0); + let start = snapshot.clip_offset_utf16( + MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.start)), + Bias::Left, + ); + let end = snapshot.clip_offset_utf16( + MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.end)), + Bias::Right, + ); + if (start.0.0..end.0.0) != range_utf16 { + adjusted_range.replace(start.0.0..end.0.0); } Some(snapshot.text_for_range(start..end).collect()) } @@ -24242,11 +24350,11 @@ impl EntityInputHandler for Editor { let selection = self .selections - .newest::(&self.display_snapshot(cx)); 
+ .newest::(&self.display_snapshot(cx)); let range = selection.range(); Some(UTF16Selection { - range: range.start.0..range.end.0, + range: range.start.0.0..range.end.0.0, reversed: selection.reversed, }) } @@ -24254,7 +24362,7 @@ impl EntityInputHandler for Editor { fn marked_text_range(&self, _: &mut Window, cx: &mut Context) -> Option> { let snapshot = self.buffer.read(cx).read(cx); let range = self.text_highlights::(cx)?.1.first()?; - Some(range.start.to_offset_utf16(&snapshot).0..range.end.to_offset_utf16(&snapshot).0) + Some(range.start.to_offset_utf16(&snapshot).0.0..range.end.to_offset_utf16(&snapshot).0.0) } fn unmark_text(&mut self, _: &mut Window, cx: &mut Context) { @@ -24276,7 +24384,8 @@ impl EntityInputHandler for Editor { self.transact(window, cx, |this, window, cx| { let new_selected_ranges = if let Some(range_utf16) = range_utf16 { - let range_utf16 = OffsetUtf16(range_utf16.start)..OffsetUtf16(range_utf16.end); + let range_utf16 = MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.start)) + ..MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.end)); Some(this.selection_replacement_ranges(range_utf16, cx)) } else { this.marked_text_ranges(cx) @@ -24285,14 +24394,14 @@ impl EntityInputHandler for Editor { let range_to_replace = new_selected_ranges.as_ref().and_then(|ranges_to_replace| { let newest_selection_id = this.selections.newest_anchor().id; this.selections - .all::(&this.display_snapshot(cx)) + .all::(&this.display_snapshot(cx)) .iter() .zip(ranges_to_replace.iter()) .find_map(|(selection, range)| { if selection.id == newest_selection_id { Some( - (range.start.0 as isize - selection.head().0 as isize) - ..(range.end.0 as isize - selection.head().0 as isize), + (range.start.0.0 as isize - selection.head().0.0 as isize) + ..(range.end.0.0 as isize - selection.head().0.0 as isize), ) } else { None @@ -24341,8 +24450,8 @@ impl EntityInputHandler for Editor { let snapshot = this.buffer.read(cx).read(cx); if let Some(relative_range_utf16) = range_utf16.as_ref() { for marked_range in &mut marked_ranges { - marked_range.end.0 = marked_range.start.0 + relative_range_utf16.end; - marked_range.start.0 += relative_range_utf16.start; + marked_range.end = marked_range.start + relative_range_utf16.end; + marked_range.start += relative_range_utf16.start; marked_range.start = snapshot.clip_offset_utf16(marked_range.start, Bias::Left); marked_range.end = @@ -24351,7 +24460,8 @@ impl EntityInputHandler for Editor { } Some(marked_ranges) } else if let Some(range_utf16) = range_utf16 { - let range_utf16 = OffsetUtf16(range_utf16.start)..OffsetUtf16(range_utf16.end); + let range_utf16 = MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.start)) + ..MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.end)); Some(this.selection_replacement_ranges(range_utf16, cx)) } else { None @@ -24360,14 +24470,14 @@ impl EntityInputHandler for Editor { let range_to_replace = ranges_to_replace.as_ref().and_then(|ranges_to_replace| { let newest_selection_id = this.selections.newest_anchor().id; this.selections - .all::(&this.display_snapshot(cx)) + .all::(&this.display_snapshot(cx)) .iter() .zip(ranges_to_replace.iter()) .find_map(|(selection, range)| { if selection.id == newest_selection_id { Some( - (range.start.0 as isize - selection.head().0 as isize) - ..(range.end.0 as isize - selection.head().0 as isize), + (range.start.0.0 as isize - selection.head().0.0 as isize) + ..(range.end.0.0 as isize - selection.head().0.0 as isize), ) } else { None @@ -24429,8 +24539,12 @@ impl EntityInputHandler for Editor { 
.into_iter() .map(|marked_range| { let insertion_start = marked_range.start.to_offset_utf16(&snapshot).0; - let new_start = OffsetUtf16(new_selected_range.start + insertion_start); - let new_end = OffsetUtf16(new_selected_range.end + insertion_start); + let new_start = MultiBufferOffsetUtf16(OffsetUtf16( + insertion_start.0 + new_selected_range.start, + )); + let new_end = MultiBufferOffsetUtf16(OffsetUtf16( + insertion_start.0 + new_selected_range.end, + )); snapshot.clip_offset_utf16(new_start, Bias::Left) ..snapshot.clip_offset_utf16(new_end, Bias::Right) }) @@ -24473,7 +24587,8 @@ impl EntityInputHandler for Editor { let scroll_position = snapshot.scroll_position(); let scroll_left = scroll_position.x * ScrollOffset::from(em_advance); - let start = OffsetUtf16(range_utf16.start).to_display_point(&snapshot); + let start = + MultiBufferOffsetUtf16(OffsetUtf16(range_utf16.start)).to_display_point(&snapshot); let x = Pixels::from( ScrollOffset::from( snapshot.x_for_display_point(start, &text_layout_details) @@ -24503,7 +24618,7 @@ impl EntityInputHandler for Editor { .snapshot .display_point_to_anchor(display_point, Bias::Left); let utf16_offset = anchor.to_offset_utf16(&position_map.snapshot.buffer_snapshot()); - Some(utf16_offset.0) + Some(utf16_offset.0.0) } fn accepts_text_input(&self, _window: &mut Window, _cx: &mut Context) -> bool { diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 20ad9ca076ed4ee68679bd351386ddc49f18491a..d46293157ddb1bd6500c1b423279401c8195ea1f 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -35,7 +35,7 @@ use language_settings::Formatter; use languages::markdown_lang; use languages::rust_lang; use lsp::CompletionParams; -use multi_buffer::{IndentGuide, PathKey}; +use multi_buffer::{IndentGuide, MultiBufferOffset, MultiBufferOffsetUtf16, PathKey}; use parking_lot::Mutex; use pretty_assertions::{assert_eq, assert_ne}; use project::{ @@ -197,7 +197,7 @@ fn test_edit_events(cx: &mut TestAppContext) { // No event is emitted when the mutation is a no-op. 
_ = editor2.update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([0..0]) + s.select_ranges([MultiBufferOffset(0)..MultiBufferOffset(0)]) }); editor.backspace(&Backspace, window, cx); @@ -222,7 +222,7 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { _ = editor.update(cx, |editor, window, cx| { editor.start_transaction_at(now, window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([2..4]) + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(4)]) }); editor.insert("cd", window, cx); @@ -230,38 +230,46 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "12cd56"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![4..4] + vec![MultiBufferOffset(4)..MultiBufferOffset(4)] ); editor.start_transaction_at(now, window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([4..5]) + s.select_ranges([MultiBufferOffset(4)..MultiBufferOffset(5)]) }); editor.insert("e", window, cx); editor.end_transaction_at(now, cx); assert_eq!(editor.text(cx), "12cde6"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![5..5] + vec![MultiBufferOffset(5)..MultiBufferOffset(5)] ); now += group_interval + Duration::from_millis(1); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([2..2]) + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(2)]) }); // Simulate an edit in another editor buffer.update(cx, |buffer, cx| { buffer.start_transaction_at(now, cx); - buffer.edit([(0..1, "a")], None, cx); - buffer.edit([(1..1, "b")], None, cx); + buffer.edit( + [(MultiBufferOffset(0)..MultiBufferOffset(1), "a")], + None, + cx, + ); + buffer.edit( + [(MultiBufferOffset(1)..MultiBufferOffset(1), "b")], + None, + cx, + ); buffer.end_transaction_at(now, cx); }); assert_eq!(editor.text(cx), "ab2cde6"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![3..3] + vec![MultiBufferOffset(3)..MultiBufferOffset(3)] ); // Last transaction happened past the group interval in a different editor. @@ -270,7 +278,7 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "12cde6"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![2..2] + vec![MultiBufferOffset(2)..MultiBufferOffset(2)] ); // First two transactions happened within the group interval in this editor. @@ -280,7 +288,7 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "123456"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![0..0] + vec![MultiBufferOffset(0)..MultiBufferOffset(0)] ); // Redo the first two transactions together. @@ -288,7 +296,7 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "12cde6"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![5..5] + vec![MultiBufferOffset(5)..MultiBufferOffset(5)] ); // Redo the last transaction on its own. @@ -296,7 +304,7 @@ fn test_undo_redo_with_selection_restoration(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "ab2cde6"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - vec![6..6] + vec![MultiBufferOffset(6)..MultiBufferOffset(6)] ); // Test empty transactions. 
@@ -329,7 +337,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "äbcde"); assert_eq!( editor.marked_text_ranges(cx), - Some(vec![OffsetUtf16(0)..OffsetUtf16(1)]) + Some(vec![ + MultiBufferOffsetUtf16(OffsetUtf16(0))..MultiBufferOffsetUtf16(OffsetUtf16(1)) + ]) ); // Finalize IME composition. @@ -349,7 +359,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { editor.replace_and_mark_text_in_range(Some(0..1), "à", None, window, cx); assert_eq!( editor.marked_text_ranges(cx), - Some(vec![OffsetUtf16(0)..OffsetUtf16(1)]) + Some(vec![ + MultiBufferOffsetUtf16(OffsetUtf16(0))..MultiBufferOffsetUtf16(OffsetUtf16(1)) + ]) ); // Undoing during an IME composition cancels it. @@ -362,7 +374,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { assert_eq!(editor.text(cx), "ābcdè"); assert_eq!( editor.marked_text_ranges(cx), - Some(vec![OffsetUtf16(4)..OffsetUtf16(5)]) + Some(vec![ + MultiBufferOffsetUtf16(OffsetUtf16(4))..MultiBufferOffsetUtf16(OffsetUtf16(5)) + ]) ); // Finalize IME composition with an invalid replacement range, ensuring it gets clipped. @@ -373,9 +387,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { // Start a new IME composition with multiple cursors. editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { s.select_ranges([ - OffsetUtf16(1)..OffsetUtf16(1), - OffsetUtf16(3)..OffsetUtf16(3), - OffsetUtf16(5)..OffsetUtf16(5), + MultiBufferOffsetUtf16(OffsetUtf16(1))..MultiBufferOffsetUtf16(OffsetUtf16(1)), + MultiBufferOffsetUtf16(OffsetUtf16(3))..MultiBufferOffsetUtf16(OffsetUtf16(3)), + MultiBufferOffsetUtf16(OffsetUtf16(5))..MultiBufferOffsetUtf16(OffsetUtf16(5)), ]) }); editor.replace_and_mark_text_in_range(Some(4..5), "XYZ", None, window, cx); @@ -383,9 +397,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { assert_eq!( editor.marked_text_ranges(cx), Some(vec![ - OffsetUtf16(0)..OffsetUtf16(3), - OffsetUtf16(4)..OffsetUtf16(7), - OffsetUtf16(8)..OffsetUtf16(11) + MultiBufferOffsetUtf16(OffsetUtf16(0))..MultiBufferOffsetUtf16(OffsetUtf16(3)), + MultiBufferOffsetUtf16(OffsetUtf16(4))..MultiBufferOffsetUtf16(OffsetUtf16(7)), + MultiBufferOffsetUtf16(OffsetUtf16(8))..MultiBufferOffsetUtf16(OffsetUtf16(11)) ]) ); @@ -395,9 +409,9 @@ fn test_ime_composition(cx: &mut TestAppContext) { assert_eq!( editor.marked_text_ranges(cx), Some(vec![ - OffsetUtf16(1)..OffsetUtf16(2), - OffsetUtf16(5)..OffsetUtf16(6), - OffsetUtf16(9)..OffsetUtf16(10) + MultiBufferOffsetUtf16(OffsetUtf16(1))..MultiBufferOffsetUtf16(OffsetUtf16(2)), + MultiBufferOffsetUtf16(OffsetUtf16(5))..MultiBufferOffsetUtf16(OffsetUtf16(6)), + MultiBufferOffsetUtf16(OffsetUtf16(9))..MultiBufferOffsetUtf16(OffsetUtf16(10)) ]) ); @@ -757,7 +771,11 @@ fn test_clone(cx: &mut TestAppContext) { _ = editor.update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges(selection_ranges.clone()) + s.select_ranges( + selection_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }); editor.fold_creases( vec![ @@ -794,9 +812,11 @@ fn test_clone(cx: &mut TestAppContext) { ); assert_eq!( cloned_snapshot - .folds_in_range(0..text.len()) + .folds_in_range(MultiBufferOffset(0)..MultiBufferOffset(text.len())) + .collect::>(), + snapshot + .folds_in_range(MultiBufferOffset(0)..MultiBufferOffset(text.len())) .collect::>(), - snapshot.folds_in_range(0..text.len()).collect::>(), ); assert_set_eq!( cloned_editor @@ -1418,7 +1438,11 @@ fn test_fold_at_level(cx: &mut 
TestAppContext) { ); editor.change_selections(SelectionEffects::default(), window, cx, |s| { - s.select_ranges(positions) + s.select_ranges( + positions + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }); editor.fold_at_level(&FoldAtLevel(2), window, cx); @@ -3700,7 +3724,11 @@ fn test_insert_with_old_selections(cx: &mut TestAppContext) { let buffer = MultiBuffer::build_simple("a( X ), b( Y ), c( Z )", cx); let mut editor = build_editor(buffer, window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([3..4, 11..12, 19..20]) + s.select_ranges([ + MultiBufferOffset(3)..MultiBufferOffset(4), + MultiBufferOffset(11)..MultiBufferOffset(12), + MultiBufferOffset(19)..MultiBufferOffset(20), + ]) }); editor }); @@ -3708,12 +3736,24 @@ fn test_insert_with_old_selections(cx: &mut TestAppContext) { _ = editor.update(cx, |editor, window, cx| { // Edit the buffer directly, deleting ranges surrounding the editor's selections editor.buffer.update(cx, |buffer, cx| { - buffer.edit([(2..5, ""), (10..13, ""), (18..21, "")], None, cx); + buffer.edit( + [ + (MultiBufferOffset(2)..MultiBufferOffset(5), ""), + (MultiBufferOffset(10)..MultiBufferOffset(13), ""), + (MultiBufferOffset(18)..MultiBufferOffset(21), ""), + ], + None, + cx, + ); assert_eq!(buffer.read(cx).text(), "a(), b(), c()".unindent()); }); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - &[2..2, 7..7, 12..12], + &[ + MultiBufferOffset(2)..MultiBufferOffset(2), + MultiBufferOffset(7)..MultiBufferOffset(7), + MultiBufferOffset(12)..MultiBufferOffset(12) + ], ); editor.insert("Z", window, cx); @@ -3722,7 +3762,11 @@ fn test_insert_with_old_selections(cx: &mut TestAppContext) { // The selections are moved after the inserted characters assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - &[3..3, 9..9, 15..15], + &[ + MultiBufferOffset(3)..MultiBufferOffset(3), + MultiBufferOffset(9)..MultiBufferOffset(9), + MultiBufferOffset(15)..MultiBufferOffset(15) + ], ); }); } @@ -4692,7 +4736,7 @@ async fn test_custom_newlines_cause_no_false_positive_diffs( assert_eq!( snapshot .buffer_snapshot() - .diff_hunks_in_range(0..snapshot.buffer_snapshot().len()) + .diff_hunks_in_range(MultiBufferOffset(0)..snapshot.buffer_snapshot().len()) .collect::>(), Vec::new(), "Should not have any diffs for files with custom newlines" @@ -5964,27 +6008,27 @@ fn test_transpose(cx: &mut TestAppContext) { let mut editor = build_editor(MultiBuffer::build_simple("abc", cx), window, cx); editor.set_style(EditorStyle::default(), window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1]) + s.select_ranges([MultiBufferOffset(1)..MultiBufferOffset(1)]) }); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bac"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [2..2] + [MultiBufferOffset(2)..MultiBufferOffset(2)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bca"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [3..3] + [MultiBufferOffset(3)..MultiBufferOffset(3)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bac"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [3..3] + [MultiBufferOffset(3)..MultiBufferOffset(3)] ); editor @@ -5994,37 +6038,37 @@ fn test_transpose(cx: &mut TestAppContext) { let mut editor = 
build_editor(MultiBuffer::build_simple("abc\nde", cx), window, cx); editor.set_style(EditorStyle::default(), window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([3..3]) + s.select_ranges([MultiBufferOffset(3)..MultiBufferOffset(3)]) }); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "acb\nde"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [3..3] + [MultiBufferOffset(3)..MultiBufferOffset(3)] ); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([4..4]) + s.select_ranges([MultiBufferOffset(4)..MultiBufferOffset(4)]) }); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "acbd\ne"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [5..5] + [MultiBufferOffset(5)..MultiBufferOffset(5)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "acbde\n"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [6..6] + [MultiBufferOffset(6)..MultiBufferOffset(6)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "acbd\ne"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [6..6] + [MultiBufferOffset(6)..MultiBufferOffset(6)] ); editor @@ -6034,41 +6078,62 @@ fn test_transpose(cx: &mut TestAppContext) { let mut editor = build_editor(MultiBuffer::build_simple("abc\nde", cx), window, cx); editor.set_style(EditorStyle::default(), window, cx); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1, 2..2, 4..4]) + s.select_ranges([ + MultiBufferOffset(1)..MultiBufferOffset(1), + MultiBufferOffset(2)..MultiBufferOffset(2), + MultiBufferOffset(4)..MultiBufferOffset(4), + ]) }); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bacd\ne"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [2..2, 3..3, 5..5] + [ + MultiBufferOffset(2)..MultiBufferOffset(2), + MultiBufferOffset(3)..MultiBufferOffset(3), + MultiBufferOffset(5)..MultiBufferOffset(5) + ] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bcade\n"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [3..3, 4..4, 6..6] + [ + MultiBufferOffset(3)..MultiBufferOffset(3), + MultiBufferOffset(4)..MultiBufferOffset(4), + MultiBufferOffset(6)..MultiBufferOffset(6) + ] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bcda\ne"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [4..4, 6..6] + [ + MultiBufferOffset(4)..MultiBufferOffset(4), + MultiBufferOffset(6)..MultiBufferOffset(6) + ] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bcade\n"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [4..4, 6..6] + [ + MultiBufferOffset(4)..MultiBufferOffset(4), + MultiBufferOffset(6)..MultiBufferOffset(6) + ] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "bcaed\n"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [5..5, 6..6] + [ + MultiBufferOffset(5)..MultiBufferOffset(5), + MultiBufferOffset(6)..MultiBufferOffset(6) + ] ); editor @@ -6078,27 +6143,27 @@ fn test_transpose(cx: &mut TestAppContext) { let mut editor = build_editor(MultiBuffer::build_simple("🍐🏀✋", cx), window, cx); editor.set_style(EditorStyle::default(), window, cx); 
editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([4..4]) + s.select_ranges([MultiBufferOffset(4)..MultiBufferOffset(4)]) }); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "🏀🍐✋"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [8..8] + [MultiBufferOffset(8)..MultiBufferOffset(8)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "🏀✋🍐"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [11..11] + [MultiBufferOffset(11)..MultiBufferOffset(11)] ); editor.transpose(&Default::default(), window, cx); assert_eq!(editor.text(cx), "🏀🍐✋"); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - [11..11] + [MultiBufferOffset(11)..MultiBufferOffset(11)] ); editor @@ -9731,7 +9796,11 @@ async fn test_autoindent(cx: &mut TestAppContext) { editor.update_in(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([5..5, 8..8, 9..9]) + s.select_ranges([ + MultiBufferOffset(5)..MultiBufferOffset(5), + MultiBufferOffset(8)..MultiBufferOffset(8), + MultiBufferOffset(9)..MultiBufferOffset(9), + ]) }); editor.newline(&Newline, window, cx); assert_eq!(editor.text(cx), "fn a(\n \n) {\n \n}\n"); @@ -9796,7 +9865,11 @@ async fn test_autoindent_disabled(cx: &mut TestAppContext) { editor.update_in(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([5..5, 8..8, 9..9]) + s.select_ranges([ + MultiBufferOffset(5)..MultiBufferOffset(5), + MultiBufferOffset(8)..MultiBufferOffset(8), + MultiBufferOffset(9)..MultiBufferOffset(9), + ]) }); editor.newline(&Newline, window, cx); assert_eq!( @@ -10453,7 +10526,7 @@ async fn test_autoclose_with_embedded_language(cx: &mut TestAppContext) { let snapshot = editor.snapshot(window, cx); let cursors = editor .selections - .ranges::(&editor.display_snapshot(cx)); + .ranges::(&editor.display_snapshot(cx)); let languages = cursors .iter() .map(|c| snapshot.language_at(c.start).unwrap().name()) @@ -11143,17 +11216,26 @@ async fn test_snippet_placeholder_choices(cx: &mut TestAppContext) { let snippet = Snippet::parse("type ${1|,i32,u32|} = $2").unwrap(); editor - .insert_snippet(&insertion_ranges, snippet, window, cx) + .insert_snippet( + &insertion_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>(), + snippet, + window, + cx, + ) .unwrap(); fn assert(editor: &mut Editor, cx: &mut Context, marked_text: &str) { let (expected_text, selection_ranges) = marked_text_ranges(marked_text, false); assert_eq!(editor.text(cx), expected_text); assert_eq!( - editor - .selections - .ranges::(&editor.display_snapshot(cx)), + editor.selections.ranges(&editor.display_snapshot(cx)), selection_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>() ); } @@ -11177,10 +11259,11 @@ async fn test_snippet_tabstop_navigation_with_placeholders(cx: &mut TestAppConte let (expected_text, selection_ranges) = marked_text_ranges(marked_text, false); assert_eq!(editor.text(cx), expected_text); assert_eq!( - editor - .selections - .ranges::(&editor.display_snapshot(cx)), + editor.selections.ranges(&editor.display_snapshot(cx)), selection_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>() ); } @@ -11198,7 +11281,15 @@ async fn 
test_snippet_tabstop_navigation_with_placeholders(cx: &mut TestAppConte let snippet = Snippet::parse("type ${1|,i32,u32|} = $2; $3").unwrap(); editor - .insert_snippet(&insertion_ranges, snippet, window, cx) + .insert_snippet( + &insertion_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>(), + snippet, + window, + cx, + ) .unwrap(); assert_state( @@ -11646,7 +11737,7 @@ async fn test_redo_after_noop_format(cx: &mut TestAppContext) { }); editor.update_in(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::default(), window, cx, |s| { - s.select_ranges([0..0]) + s.select_ranges([MultiBufferOffset(0)..MultiBufferOffset(0)]) }); }); assert!(!cx.read(|cx| editor.is_dirty(cx))); @@ -11812,7 +11903,7 @@ async fn test_multibuffer_format_during_save(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(1..2)), + |s| s.select_ranges(Some(MultiBufferOffset(1)..MultiBufferOffset(2))), ); editor.insert("|one|two|three|", window, cx); }); @@ -11822,7 +11913,7 @@ async fn test_multibuffer_format_during_save(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(60..70)), + |s| s.select_ranges(Some(MultiBufferOffset(60)..MultiBufferOffset(70))), ); editor.insert("|four|five|six|", window, cx); }); @@ -11990,7 +12081,7 @@ async fn test_autosave_with_dirty_buffers(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(10..10)), + |s| s.select_ranges(Some(MultiBufferOffset(10)..MultiBufferOffset(10))), ); editor.insert("// edited", window, cx); }); @@ -13428,7 +13519,7 @@ async fn test_signature_help(cx: &mut TestAppContext) { cx.update_editor(|editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([0..0]) + s.select_ranges([MultiBufferOffset(0)..MultiBufferOffset(0)]) }); }); @@ -16413,7 +16504,11 @@ fn test_editing_overlapping_excerpts(cx: &mut TestAppContext) { ); assert_eq!(editor.text(cx), expected_text); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges(selection_ranges) + s.select_ranges( + selection_ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }); editor.handle_input("X", window, cx); @@ -16431,6 +16526,9 @@ fn test_editing_overlapping_excerpts(cx: &mut TestAppContext) { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), expected_selections + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>() ); editor.newline(&Newline, window, cx); @@ -16451,6 +16549,9 @@ fn test_editing_overlapping_excerpts(cx: &mut TestAppContext) { assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), expected_selections + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>() ); }); } @@ -16826,7 +16927,7 @@ async fn test_following(cx: &mut TestAppContext) { // Update the selections only _ = leader.update(cx, |leader, window, cx| { leader.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1]) + s.select_ranges([MultiBufferOffset(1)..MultiBufferOffset(1)]) }); }); follower @@ -16844,7 +16945,7 @@ async fn test_following(cx: &mut TestAppContext) { _ = follower.update(cx, |follower, _, cx| { assert_eq!( follower.selections.ranges(&follower.display_snapshot(cx)), - vec![1..1] + 
vec![MultiBufferOffset(1)..MultiBufferOffset(1)] ); }); assert!(*is_still_following.borrow()); @@ -16879,7 +16980,7 @@ async fn test_following(cx: &mut TestAppContext) { // via autoscroll, not via the leader's exact scroll position. _ = leader.update(cx, |leader, window, cx| { leader.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([0..0]) + s.select_ranges([MultiBufferOffset(0)..MultiBufferOffset(0)]) }); leader.request_autoscroll(Autoscroll::newest(), cx); leader.set_scroll_position(gpui::Point::new(1.5, 3.5), window, cx); @@ -16900,7 +17001,7 @@ async fn test_following(cx: &mut TestAppContext) { assert_eq!(follower.scroll_position(cx), gpui::Point::new(1.5, 0.0)); assert_eq!( follower.selections.ranges(&follower.display_snapshot(cx)), - vec![0..0] + vec![MultiBufferOffset(0)..MultiBufferOffset(0)] ); }); assert!(*is_still_following.borrow()); @@ -16908,7 +17009,7 @@ async fn test_following(cx: &mut TestAppContext) { // Creating a pending selection that precedes another selection _ = leader.update(cx, |leader, window, cx| { leader.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([1..1]) + s.select_ranges([MultiBufferOffset(1)..MultiBufferOffset(1)]) }); leader.begin_selection(DisplayPoint::new(DisplayRow(0), 0), true, 1, window, cx); }); @@ -16927,7 +17028,10 @@ async fn test_following(cx: &mut TestAppContext) { _ = follower.update(cx, |follower, _, cx| { assert_eq!( follower.selections.ranges(&follower.display_snapshot(cx)), - vec![0..0, 1..1] + vec![ + MultiBufferOffset(0)..MultiBufferOffset(0), + MultiBufferOffset(1)..MultiBufferOffset(1) + ] ); }); assert!(*is_still_following.borrow()); @@ -16951,13 +17055,17 @@ async fn test_following(cx: &mut TestAppContext) { _ = follower.update(cx, |follower, _, cx| { assert_eq!( follower.selections.ranges(&follower.display_snapshot(cx)), - vec![0..2] + vec![MultiBufferOffset(0)..MultiBufferOffset(2)] ); }); // Scrolling locally breaks the follow _ = follower.update(cx, |follower, window, cx| { - let top_anchor = follower.buffer().read(cx).read(cx).anchor_after(0); + let top_anchor = follower + .buffer() + .read(cx) + .read(cx) + .anchor_after(MultiBufferOffset(0)); follower.set_scroll_anchor( ScrollAnchor { anchor: top_anchor, @@ -19451,7 +19559,7 @@ async fn test_multibuffer_in_navigation_history(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(1..2)), + |s| s.select_ranges(Some(MultiBufferOffset(1)..MultiBufferOffset(2))), ); editor.open_excerpts(&OpenExcerpts, window, cx); }); @@ -19507,7 +19615,7 @@ async fn test_multibuffer_in_navigation_history(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(39..40)), + |s| s.select_ranges(Some(MultiBufferOffset(39)..MultiBufferOffset(40))), ); editor.open_excerpts(&OpenExcerpts, window, cx); }); @@ -19567,7 +19675,7 @@ async fn test_multibuffer_in_navigation_history(cx: &mut TestAppContext) { SelectionEffects::scroll(Autoscroll::Next), window, cx, - |s| s.select_ranges(Some(70..70)), + |s| s.select_ranges(Some(MultiBufferOffset(70)..MultiBufferOffset(70))), ); editor.open_excerpts(&OpenExcerpts, window, cx); }); @@ -22357,7 +22465,7 @@ async fn test_find_enclosing_node_with_task(cx: &mut TestAppContext) { (buffer.read(cx).remote_id(), 3), RunnableTasks { templates: vec![], - offset: snapshot.anchor_before(43), + offset: snapshot.anchor_before(MultiBufferOffset(43)), column: 0, extra_variables: HashMap::default(), 
context_range: BufferOffset(43)..BufferOffset(85), @@ -22367,7 +22475,7 @@ async fn test_find_enclosing_node_with_task(cx: &mut TestAppContext) { (buffer.read(cx).remote_id(), 8), RunnableTasks { templates: vec![], - offset: snapshot.anchor_before(86), + offset: snapshot.anchor_before(MultiBufferOffset(86)), column: 0, extra_variables: HashMap::default(), context_range: BufferOffset(86)..BufferOffset(191), @@ -25749,7 +25857,10 @@ fn assert_selection_ranges(marked_text: &str, editor: &mut Editor, cx: &mut Cont assert_eq!(editor.text(cx), text); assert_eq!( editor.selections.ranges(&editor.display_snapshot(cx)), - ranges, + ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>(), "Assert selections are {}", marked_text ); @@ -25995,10 +26106,12 @@ pub fn handle_completion_request( vec![complete_from_marker.clone(), replace_range_marker.clone()], ); - let complete_from_position = - cx.to_lsp(marked_ranges.remove(&complete_from_marker).unwrap()[0].start); + let complete_from_position = cx.to_lsp(MultiBufferOffset( + marked_ranges.remove(&complete_from_marker).unwrap()[0].start, + )); + let range = marked_ranges.remove(&replace_range_marker).unwrap()[0].clone(); let replace_range = - cx.to_lsp_range(marked_ranges.remove(&replace_range_marker).unwrap()[0].clone()); + cx.to_lsp_range(MultiBufferOffset(range.start)..MultiBufferOffset(range.end)); let mut request = cx.set_request_handler::(move |url, params, _| { @@ -26059,13 +26172,18 @@ pub fn handle_completion_request_with_insert_and_replace( ], ); - let complete_from_position = - cx.to_lsp(marked_ranges.remove(&complete_from_marker).unwrap()[0].start); + let complete_from_position = cx.to_lsp(MultiBufferOffset( + marked_ranges.remove(&complete_from_marker).unwrap()[0].start, + )); + let range = marked_ranges.remove(&replace_range_marker).unwrap()[0].clone(); let replace_range = - cx.to_lsp_range(marked_ranges.remove(&replace_range_marker).unwrap()[0].clone()); + cx.to_lsp_range(MultiBufferOffset(range.start)..MultiBufferOffset(range.end)); let insert_range = match marked_ranges.remove(&insert_range_marker) { - Some(ranges) if !ranges.is_empty() => cx.to_lsp_range(ranges[0].clone()), + Some(ranges) if !ranges.is_empty() => { + let range1 = ranges[0].clone(); + cx.to_lsp_range(MultiBufferOffset(range1.start)..MultiBufferOffset(range1.end)) + } _ => lsp::Range { start: replace_range.start, end: complete_from_position, @@ -26115,7 +26233,10 @@ fn handle_resolve_completion_request( .iter() .map(|(marked_string, new_text)| { let (_, marked_ranges) = marked_text_ranges(marked_string, false); - let replace_range = cx.to_lsp_range(marked_ranges[0].clone()); + let replace_range = cx.to_lsp_range( + MultiBufferOffset(marked_ranges[0].start) + ..MultiBufferOffset(marked_ranges[0].end), + ); lsp::TextEdit::new(replace_range, new_text.to_string()) }) .collect::>() @@ -26199,7 +26320,7 @@ fn assert_hunk_revert( let snapshot = editor.snapshot(window, cx); let reverted_hunk_statuses = snapshot .buffer_snapshot() - .diff_hunks_in_range(0..snapshot.buffer_snapshot().len()) + .diff_hunks_in_range(MultiBufferOffset(0)..snapshot.buffer_snapshot().len()) .map(|hunk| hunk.status().kind) .collect::>(); @@ -26823,7 +26944,9 @@ async fn test_newline_replacement_in_single_line(cx: &mut TestAppContext) { editor.update(cx, |editor, cx| { assert_eq!(editor.display_text(cx), "oops⋯⋯wow⋯"); }); - editor.update(cx, |editor, cx| editor.edit([(3..5, "")], cx)); + editor.update(cx, |editor, cx| { + 
editor.edit([(MultiBufferOffset(3)..MultiBufferOffset(5), "")], cx) + }); cx.run_until_parked(); editor.update(cx, |editor, cx| { assert_eq!(editor.display_text(cx), "oop⋯wow⋯"); @@ -27895,7 +28018,7 @@ async fn test_multibuffer_selections_with_folding(cx: &mut TestAppContext) { // Scenario 1: Unfolded buffers, position cursor on "2", select all matches, then insert cx.update_editor(|editor, window, cx| { editor.change_selections(None.into(), window, cx, |s| { - s.select_ranges([2..3]); + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(3)]); }); }); cx.assert_excerpts_with_selections(indoc! {" @@ -27952,7 +28075,7 @@ async fn test_multibuffer_selections_with_folding(cx: &mut TestAppContext) { // Select "2" and select all matches cx.update_editor(|editor, window, cx| { editor.change_selections(None.into(), window, cx, |s| { - s.select_ranges([2..3]); + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(3)]); }); editor .select_all_matches(&SelectAllMatches, window, cx) @@ -28003,7 +28126,7 @@ async fn test_multibuffer_selections_with_folding(cx: &mut TestAppContext) { // Select "2" and select all matches cx.update_editor(|editor, window, cx| { editor.change_selections(None.into(), window, cx, |s| { - s.select_ranges([2..3]); + s.select_ranges([MultiBufferOffset(2)..MultiBufferOffset(3)]); }); editor .select_all_matches(&SelectAllMatches, window, cx) diff --git a/crates/editor/src/git/blame.rs b/crates/editor/src/git/blame.rs index 52eab4f817acad25deebbfa6d807020f9ce1ac80..008630faef7cc1ccb3b9703e4b11c0b88b7cf17c 100644 --- a/crates/editor/src/git/blame.rs +++ b/crates/editor/src/git/blame.rs @@ -67,7 +67,7 @@ impl<'a> sum_tree::Dimension<'a, GitBlameEntrySummary> for u32 { struct GitBlameBuffer { entries: SumTree, buffer_snapshot: BufferSnapshot, - buffer_edits: text::Subscription, + buffer_edits: text::Subscription, commit_details: HashMap, } diff --git a/crates/editor/src/highlight_matching_bracket.rs b/crates/editor/src/highlight_matching_bracket.rs index 286260e3b0f42da0c3416a07357128ac5e3d0c57..eaef28bed21bf480a32c3abd3440a6c41e42d5f1 100644 --- a/crates/editor/src/highlight_matching_bracket.rs +++ b/crates/editor/src/highlight_matching_bracket.rs @@ -1,6 +1,7 @@ use crate::{Editor, RangeToAnchorExt}; use gpui::{Context, HighlightStyle, Window}; use language::CursorShape; +use multi_buffer::MultiBufferOffset; use theme::ActiveTheme; enum MatchingBracketHighlight {} @@ -15,7 +16,7 @@ impl Editor { let snapshot = self.snapshot(window, cx); let buffer_snapshot = snapshot.buffer_snapshot(); - let newest_selection = self.selections.newest::(&snapshot); + let newest_selection = self.selections.newest::(&snapshot); // Don't highlight brackets if the selection isn't empty if !newest_selection.is_empty() { return; diff --git a/crates/editor/src/hover_links.rs b/crates/editor/src/hover_links.rs index 03fce48f146fbfa3bdab93937038c4101a04a484..5ef52f36dfd609a49d93eb77b74b2df3287df30a 100644 --- a/crates/editor/src/hover_links.rs +++ b/crates/editor/src/hover_links.rs @@ -738,6 +738,7 @@ mod tests { use gpui::Modifiers; use indoc::indoc; use lsp::request::{GotoDefinition, GotoTypeDefinition}; + use multi_buffer::MultiBufferOffset; use settings::InlayHintSettingsContent; use util::{assert_set_eq, path}; use workspace::item::Item; @@ -1067,8 +1068,8 @@ mod tests { .clone(); cx.update_editor(|editor, window, cx| { let snapshot = editor.buffer().read(cx).snapshot(cx); - let anchor_range = snapshot.anchor_before(selection_range.start) - ..snapshot.anchor_after(selection_range.end); + let 
anchor_range = snapshot.anchor_before(MultiBufferOffset(selection_range.start)) + ..snapshot.anchor_after(MultiBufferOffset(selection_range.end)); editor.change_selections(Default::default(), window, cx, |s| { s.set_pending_anchor_range(anchor_range, crate::SelectMode::Character) }); @@ -1122,7 +1123,7 @@ mod tests { } "})[0] .start; - let hint_position = cx.to_lsp(hint_start_offset); + let hint_position = cx.to_lsp(MultiBufferOffset(hint_start_offset)); let target_range = cx.lsp_range(indoc! {" struct «TestStruct»; @@ -1179,8 +1180,8 @@ mod tests { .unwrap(); let midpoint = cx.update_editor(|editor, window, cx| { let snapshot = editor.snapshot(window, cx); - let previous_valid = inlay_range.start.to_display_point(&snapshot); - let next_valid = inlay_range.end.to_display_point(&snapshot); + let previous_valid = MultiBufferOffset(inlay_range.start).to_display_point(&snapshot); + let next_valid = MultiBufferOffset(inlay_range.end).to_display_point(&snapshot); assert_eq!(previous_valid.row(), next_valid.row()); assert!(previous_valid.column() < next_valid.column()); DisplayPoint::new( @@ -1203,7 +1204,7 @@ mod tests { let buffer_snapshot = editor.buffer().update(cx, |buffer, cx| buffer.snapshot(cx)); let expected_highlight = InlayHighlight { inlay: InlayId::Hint(0), - inlay_position: buffer_snapshot.anchor_after(inlay_range.start), + inlay_position: buffer_snapshot.anchor_after(MultiBufferOffset(inlay_range.start)), range: 0..hint_label.len(), }; assert_set_eq!(actual_highlights, vec![&expected_highlight]); diff --git a/crates/editor/src/hover_popover.rs b/crates/editor/src/hover_popover.rs index 721fce34c8c030322207cd74a69a266119596086..ef16fc92d847763ecbc764c3913266fd84a26006 100644 --- a/crates/editor/src/hover_popover.rs +++ b/crates/editor/src/hover_popover.rs @@ -17,7 +17,7 @@ use itertools::Itertools; use language::{DiagnosticEntry, Language, LanguageRegistry}; use lsp::DiagnosticSeverity; use markdown::{Markdown, MarkdownElement, MarkdownStyle}; -use multi_buffer::{ToOffset, ToPoint}; +use multi_buffer::{MultiBufferOffset, ToOffset, ToPoint}; use project::{HoverBlock, HoverBlockKind, InlayHintLabelPart}; use settings::Settings; use std::{borrow::Cow, cell::RefCell}; @@ -106,7 +106,7 @@ pub fn find_hovered_hint_part( hovered_offset: InlayOffset, ) -> Option<(InlayHintLabelPart, Range)> { if hovered_offset >= hint_start { - let mut hovered_character = (hovered_offset - hint_start).0; + let mut hovered_character = hovered_offset - hint_start; let mut part_start = hint_start; for part in label_parts { let part_len = part.value.chars().count(); @@ -316,12 +316,12 @@ fn show_hover( } else { snapshot .buffer_snapshot() - .diagnostics_with_buffer_ids_in_range::(offset..offset) + .diagnostics_with_buffer_ids_in_range::(offset..offset) .filter(|(_, diagnostic)| { Some(diagnostic.diagnostic.group_id) != active_group_id }) // Find the entry with the most specific range - .min_by_key(|(_, entry)| entry.range.len()) + .min_by_key(|(_, entry)| entry.range.end - entry.range.start) }; let diagnostic_popover = if let Some((buffer_id, local_diagnostic)) = local_diagnostic { @@ -1633,7 +1633,7 @@ mod tests { } "})[0] .start; - let hint_position = cx.to_lsp(hint_start_offset); + let hint_position = cx.to_lsp(MultiBufferOffset(hint_start_offset)); let new_type_target_range = cx.lsp_range(indoc! 
{" struct TestStruct; @@ -1708,8 +1708,8 @@ mod tests { .unwrap(); let new_type_hint_part_hover_position = cx.update_editor(|editor, window, cx| { let snapshot = editor.snapshot(window, cx); - let previous_valid = inlay_range.start.to_display_point(&snapshot); - let next_valid = inlay_range.end.to_display_point(&snapshot); + let previous_valid = MultiBufferOffset(inlay_range.start).to_display_point(&snapshot); + let next_valid = MultiBufferOffset(inlay_range.end).to_display_point(&snapshot); assert_eq!(previous_valid.row(), next_valid.row()); assert!(previous_valid.column() < next_valid.column()); let exact_unclipped = DisplayPoint::new( @@ -1819,7 +1819,8 @@ mod tests { popover.symbol_range, RangeInEditor::Inlay(InlayHighlight { inlay: InlayId::Hint(0), - inlay_position: buffer_snapshot.anchor_after(inlay_range.start), + inlay_position: buffer_snapshot + .anchor_after(MultiBufferOffset(inlay_range.start)), range: ": ".len()..": ".len() + new_type_label.len(), }), "Popover range should match the new type label part" @@ -1832,8 +1833,8 @@ mod tests { let struct_hint_part_hover_position = cx.update_editor(|editor, window, cx| { let snapshot = editor.snapshot(window, cx); - let previous_valid = inlay_range.start.to_display_point(&snapshot); - let next_valid = inlay_range.end.to_display_point(&snapshot); + let previous_valid = MultiBufferOffset(inlay_range.start).to_display_point(&snapshot); + let next_valid = MultiBufferOffset(inlay_range.end).to_display_point(&snapshot); assert_eq!(previous_valid.row(), next_valid.row()); assert!(previous_valid.column() < next_valid.column()); let exact_unclipped = DisplayPoint::new( @@ -1873,7 +1874,8 @@ mod tests { popover.symbol_range, RangeInEditor::Inlay(InlayHighlight { inlay: InlayId::Hint(0), - inlay_position: buffer_snapshot.anchor_after(inlay_range.start), + inlay_position: buffer_snapshot + .anchor_after(MultiBufferOffset(inlay_range.start)), range: ": ".len() + new_type_label.len() + "<".len() ..": ".len() + new_type_label.len() + "<".len() + struct_label.len(), }), diff --git a/crates/editor/src/inlays/inlay_hints.rs b/crates/editor/src/inlays/inlay_hints.rs index 7aacd1c86e6ec9a2034493d26df6d2271d33724e..b30137cf7796d6c916623d100420ac34eb80224a 100644 --- a/crates/editor/src/inlays/inlay_hints.rs +++ b/crates/editor/src/inlays/inlay_hints.rs @@ -645,9 +645,9 @@ impl Editor { ) { let highlight_start = - (part_range.start - hint_start).0 + extra_shift_left; + (part_range.start - hint_start) + extra_shift_left; let highlight_end = - (part_range.end - hint_start).0 + extra_shift_right; + (part_range.end - hint_start) + extra_shift_right; let highlight = InlayHighlight { inlay: hovered_hint.id, inlay_position: hovered_hint.position, @@ -948,7 +948,7 @@ pub mod tests { use language::{Language, LanguageConfig, LanguageMatcher}; use languages::rust_lang; use lsp::FakeLanguageServer; - use multi_buffer::MultiBuffer; + use multi_buffer::{MultiBuffer, MultiBufferOffset}; use parking_lot::Mutex; use pretty_assertions::assert_eq; use project::{FakeFs, Project}; @@ -1029,7 +1029,7 @@ pub mod tests { editor .update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input("some change", window, cx); }) @@ -1429,7 +1429,7 @@ pub mod tests { rs_editor .update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + 
s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input("some rs change", window, cx); }) @@ -1461,7 +1461,7 @@ pub mod tests { md_editor .update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input("some md change", window, cx); }) @@ -1909,7 +1909,7 @@ pub mod tests { editor .update(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input(change_after_opening, window, cx); }) @@ -1955,7 +1955,7 @@ pub mod tests { task_editor .update(&mut cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges([13..13]) + s.select_ranges([MultiBufferOffset(13)..MultiBufferOffset(13)]) }); editor.handle_input(async_later_change, window, cx); }) @@ -2706,7 +2706,7 @@ let c = 3;"# let mut editor = Editor::for_multibuffer(multi_buffer, Some(project.clone()), window, cx); editor.change_selections(SelectionEffects::default(), window, cx, |s| { - s.select_ranges([0..0]) + s.select_ranges([MultiBufferOffset(0)..MultiBufferOffset(0)]) }); editor }); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index a860e137a856a2e7982f1177c205391b80625944..0e97b95bb6d2818364ec0fa161f6360c850a6dcc 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -21,6 +21,7 @@ use language::{ SelectionGoal, proto::serialize_anchor as serialize_text_anchor, }; use lsp::DiagnosticSeverity; +use multi_buffer::MultiBufferOffset; use project::{ Project, ProjectItem as _, ProjectPath, lsp_store::FormatTrigger, project_settings::ProjectSettings, search::SearchQuery, @@ -1735,7 +1736,7 @@ impl SearchableItem for Editor { let mut ranges = Vec::new(); let search_within_ranges = if search_within_ranges.is_empty() { - vec![buffer.anchor_before(0)..buffer.anchor_after(buffer.len())] + vec![buffer.anchor_before(MultiBufferOffset(0))..buffer.anchor_after(buffer.len())] } else { search_within_ranges }; @@ -1746,7 +1747,10 @@ impl SearchableItem for Editor { { ranges.extend( query - .search(search_buffer, Some(search_range.clone())) + .search( + search_buffer, + Some(search_range.start.0..search_range.end.0), + ) .await .into_iter() .map(|match_range| { diff --git a/crates/editor/src/jsx_tag_auto_close.rs b/crates/editor/src/jsx_tag_auto_close.rs index 0e32bc686ad98a45b83712841c13fffc07421acb..e22fde313df4b99b7b650775ad7e7397e3c4f813 100644 --- a/crates/editor/src/jsx_tag_auto_close.rs +++ b/crates/editor/src/jsx_tag_auto_close.rs @@ -1,7 +1,7 @@ use anyhow::{Context as _, Result, anyhow}; use collections::HashMap; use gpui::{Context, Entity, Window}; -use multi_buffer::{MultiBuffer, ToOffset}; +use multi_buffer::{BufferOffset, MultiBuffer, ToOffset}; use std::ops::Range; use util::ResultExt as _; @@ -546,9 +546,10 @@ pub(crate) fn handle_from( if edit_range_offset.start != edit_range_offset.end { continue; } - if let Some(selection) = - buffer_selection_map.get_mut(&(edit_range_offset.start, edit_range_offset.end)) - { + if let Some(selection) = buffer_selection_map.get_mut(&( + BufferOffset(edit_range_offset.start), + BufferOffset(edit_range_offset.end), + )) { if selection.0.head().bias() != text::Bias::Right || selection.0.tail().bias() != text::Bias::Right { @@ -621,7 +622,7 @@ mod 
jsx_tag_autoclose_tests { use super::*; use gpui::{AppContext as _, TestAppContext}; use languages::language; - use multi_buffer::ExcerptRange; + use multi_buffer::{ExcerptRange, MultiBufferOffset}; use text::Selection; async fn test_setup(cx: &mut TestAppContext) -> EditorTestContext { @@ -842,9 +843,9 @@ mod jsx_tag_autoclose_tests { cx.update_editor(|editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| { selections.select(vec![ - Selection::from_offset(4), - Selection::from_offset(9), - Selection::from_offset(15), + Selection::from_offset(MultiBufferOffset(4)), + Selection::from_offset(MultiBufferOffset(9)), + Selection::from_offset(MultiBufferOffset(15)), ]) }) }); diff --git a/crates/editor/src/linked_editing_ranges.rs b/crates/editor/src/linked_editing_ranges.rs index ab16fe7eb4bce28ef6bfee2c2bde1d52fda86561..33635a2ae2009031220ab0a58e99f8b07957de94 100644 --- a/crates/editor/src/linked_editing_ranges.rs +++ b/crates/editor/src/linked_editing_ranges.rs @@ -1,6 +1,7 @@ use collections::HashMap; use gpui::{AppContext, Context, Window}; use itertools::Itertools; +use multi_buffer::MultiBufferOffset; use std::{ops::Range, time::Duration}; use text::{AnchorRangeExt, BufferId, ToPoint}; use util::ResultExt; @@ -60,7 +61,9 @@ pub(super) fn refresh_linked_ranges( editor .update(cx, |editor, cx| { let display_snapshot = editor.display_snapshot(cx); - let selections = editor.selections.all::(&display_snapshot); + let selections = editor + .selections + .all::(&display_snapshot); let snapshot = display_snapshot.buffer_snapshot(); let buffer = editor.buffer.read(cx); for selection in selections { diff --git a/crates/editor/src/movement.rs b/crates/editor/src/movement.rs index a83545aaf26b0e148345c185f4f39910e97a727e..8635d89ed13e77d260307667740bf79ab4022e6f 100644 --- a/crates/editor/src/movement.rs +++ b/crates/editor/src/movement.rs @@ -8,7 +8,7 @@ use crate::{ }; use gpui::{Pixels, WindowTextSystem}; use language::{CharClassifier, Point}; -use multi_buffer::{MultiBufferRow, MultiBufferSnapshot}; +use multi_buffer::{MultiBufferOffset, MultiBufferRow, MultiBufferSnapshot}; use serde::Deserialize; use workspace::searchable::Direction; @@ -358,28 +358,28 @@ pub fn adjust_greedy_deletion( let mut whitespace_sequences = Vec::new(); let mut current_offset = trimmed_delete_range.start; - let mut whitespace_sequence_length = 0; - let mut whitespace_sequence_start = 0; + let mut whitespace_sequence_length = MultiBufferOffset(0); + let mut whitespace_sequence_start = MultiBufferOffset(0); for ch in map .buffer_snapshot() .text_for_range(trimmed_delete_range.clone()) .flat_map(str::chars) { if ch.is_whitespace() { - if whitespace_sequence_length == 0 { + if whitespace_sequence_length == MultiBufferOffset(0) { whitespace_sequence_start = current_offset; } whitespace_sequence_length += 1; } else { - if whitespace_sequence_length >= 2 { + if whitespace_sequence_length >= MultiBufferOffset(2) { whitespace_sequences.push((whitespace_sequence_start, current_offset)); } - whitespace_sequence_start = 0; - whitespace_sequence_length = 0; + whitespace_sequence_start = MultiBufferOffset(0); + whitespace_sequence_length = MultiBufferOffset(0); } current_offset += ch.len_utf8(); } - if whitespace_sequence_length >= 2 { + if whitespace_sequence_length >= MultiBufferOffset(2) { whitespace_sequences.push((whitespace_sequence_start, current_offset)); } @@ -731,7 +731,7 @@ pub fn find_preceding_boundary_trail( } let trail = trail_offset - .map(|trail_offset: usize| 
map.clip_point(trail_offset.to_display_point(map), Bias::Left)); + .map(|trail_offset| map.clip_point(trail_offset.to_display_point(map), Bias::Left)); ( trail, @@ -779,7 +779,7 @@ pub fn find_boundary_trail( } let trail = trail_offset - .map(|trail_offset: usize| map.clip_point(trail_offset.to_display_point(map), Bias::Right)); + .map(|trail_offset| map.clip_point(trail_offset.to_display_point(map), Bias::Right)); ( trail, @@ -810,8 +810,8 @@ pub fn find_boundary_exclusive( /// the [`DisplaySnapshot`]. The offsets are relative to the start of a buffer. pub fn chars_after( map: &DisplaySnapshot, - mut offset: usize, -) -> impl Iterator)> + '_ { + mut offset: MultiBufferOffset, +) -> impl Iterator)> + '_ { map.buffer_snapshot().chars_at(offset).map(move |ch| { let before = offset; offset += ch.len_utf8(); @@ -824,8 +824,8 @@ pub fn chars_after( /// the [`DisplaySnapshot`]. The offsets are relative to the start of a buffer. pub fn chars_before( map: &DisplaySnapshot, - mut offset: usize, -) -> impl Iterator)> + '_ { + mut offset: MultiBufferOffset, +) -> impl Iterator)> + '_ { map.buffer_snapshot() .reversed_chars_at(offset) .map(move |ch| { @@ -1018,8 +1018,9 @@ mod tests { // add all kinds of inlays between two word boundaries: we should be able to cross them all, when looking for another boundary let mut id = 0; - let inlays = (0..buffer_snapshot.len()) + let inlays = (0..buffer_snapshot.len().0) .flat_map(|offset| { + let offset = MultiBufferOffset(offset); [ Inlay::edit_prediction( post_inc(&mut id), @@ -1058,7 +1059,7 @@ mod tests { ), snapshot .buffer_snapshot() - .offset_to_point(5) + .offset_to_point(MultiBufferOffset(5)) .to_display_point(&snapshot), "Should not stop at inlays when looking for boundaries" ); diff --git a/crates/editor/src/selections_collection.rs b/crates/editor/src/selections_collection.rs index 7bb90deda0da84fa8719b9530dffef567c467c36..c1b8d11db94de7394b36ec706f42622993b63785 100644 --- a/crates/editor/src/selections_collection.rs +++ b/crates/editor/src/selections_collection.rs @@ -1,18 +1,18 @@ use std::{ cmp, fmt, iter, mem, - ops::{Deref, DerefMut, Range, Sub}, + ops::{AddAssign, Deref, DerefMut, Range, Sub}, sync::Arc, }; use collections::HashMap; use gpui::Pixels; use itertools::Itertools as _; -use language::{Bias, Point, Selection, SelectionGoal, TextDimension}; +use language::{Bias, Point, Selection, SelectionGoal}; +use multi_buffer::{MultiBufferDimension, MultiBufferOffset}; use util::post_inc; use crate::{ Anchor, DisplayPoint, DisplayRow, ExcerptId, MultiBufferSnapshot, SelectMode, ToOffset, - ToPoint, display_map::{DisplaySnapshot, ToDisplayPoint}, movement::TextLayoutDetails, }; @@ -97,7 +97,7 @@ impl SelectionsCollection { if self.pending.is_none() { self.disjoint_anchors_arc() } else { - let all_offset_selections = self.all::(snapshot); + let all_offset_selections = self.all::(snapshot); all_offset_selections .into_iter() .map(|selection| selection_to_anchor_selection(selection, snapshot)) @@ -113,10 +113,10 @@ impl SelectionsCollection { self.pending.as_mut().map(|pending| &mut pending.selection) } - pub fn pending>( - &self, - snapshot: &DisplaySnapshot, - ) -> Option> { + pub fn pending(&self, snapshot: &DisplaySnapshot) -> Option> + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { resolve_selections_wrapping_blocks(self.pending_anchor(), &snapshot).next() } @@ -124,9 +124,9 @@ impl SelectionsCollection { self.pending.as_ref().map(|pending| pending.mode.clone()) } - pub fn all<'a, D>(&self, snapshot: &DisplaySnapshot) -> 
Vec> + pub fn all(&self, snapshot: &DisplaySnapshot) -> Vec> where - D: 'a + TextDimension + Ord + Sub, + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, { let disjoint_anchors = &self.disjoint; let mut disjoint = @@ -204,13 +204,13 @@ impl SelectionsCollection { } } - pub fn disjoint_in_range<'a, D>( + pub fn disjoint_in_range( &self, range: Range, snapshot: &DisplaySnapshot, ) -> Vec> where - D: 'a + TextDimension + Ord + Sub + std::fmt::Debug, + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord + std::fmt::Debug, { let start_ix = match self .disjoint @@ -267,10 +267,10 @@ impl SelectionsCollection { .unwrap() } - pub fn newest>( - &self, - snapshot: &DisplaySnapshot, - ) -> Selection { + pub fn newest(&self, snapshot: &DisplaySnapshot) -> Selection + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { resolve_selections_wrapping_blocks([self.newest_anchor()], &snapshot) .next() .unwrap() @@ -290,10 +290,10 @@ impl SelectionsCollection { .unwrap() } - pub fn oldest>( - &self, - snapshot: &DisplaySnapshot, - ) -> Selection { + pub fn oldest(&self, snapshot: &DisplaySnapshot) -> Selection + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { resolve_selections_wrapping_blocks([self.oldest_anchor()], &snapshot) .next() .unwrap() @@ -306,27 +306,27 @@ impl SelectionsCollection { .unwrap_or_else(|| self.disjoint.first().cloned().unwrap()) } - pub fn first>( - &self, - snapshot: &DisplaySnapshot, - ) -> Selection { + pub fn first(&self, snapshot: &DisplaySnapshot) -> Selection + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { self.all(snapshot).first().unwrap().clone() } - pub fn last>( - &self, - snapshot: &DisplaySnapshot, - ) -> Selection { + pub fn last(&self, snapshot: &DisplaySnapshot) -> Selection + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { self.all(snapshot).last().unwrap().clone() } /// Returns a list of (potentially backwards!) ranges representing the selections. /// Useful for test assertions, but prefer `.all()` instead. 
#[cfg(any(test, feature = "test-support"))] - pub fn ranges>( - &self, - snapshot: &DisplaySnapshot, - ) -> Vec> { + pub fn ranges(&self, snapshot: &DisplaySnapshot) -> Vec> + where + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, + { self.all::(snapshot) .iter() .map(|s| { @@ -509,7 +509,7 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { }; if filtered_selections.is_empty() { - let default_anchor = self.snapshot.anchor_before(0); + let default_anchor = self.snapshot.anchor_before(MultiBufferOffset(0)); self.collection.disjoint = Arc::from([Selection { id: post_inc(&mut self.collection.next_selection_id), start: default_anchor, @@ -590,7 +590,7 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { pub fn insert_range(&mut self, range: Range) where - T: 'a + ToOffset + ToPoint + TextDimension + Ord + Sub + std::marker::Copy, + T: ToOffset, { let display_map = self.display_snapshot(); let mut selections = self.collection.all(&display_map); @@ -656,7 +656,8 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { pub fn select_anchors(&mut self, selections: Vec>) { let map = self.display_snapshot(); let resolved_selections = - resolve_selections_wrapping_blocks::(&selections, &map).collect::>(); + resolve_selections_wrapping_blocks::(&selections, &map) + .collect::>(); self.select(resolved_selections); } @@ -673,7 +674,7 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { fn select_offset_ranges(&mut self, ranges: I) where - I: IntoIterator>, + I: IntoIterator>, { let selections = ranges .into_iter() @@ -808,13 +809,13 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { pub fn move_offsets_with( &mut self, - mut move_selection: impl FnMut(&MultiBufferSnapshot, &mut Selection), + mut move_selection: impl FnMut(&MultiBufferSnapshot, &mut Selection), ) { let mut changed = false; let display_map = self.display_snapshot(); let selections = self .collection - .all::(&display_map) + .all::(&display_map) .into_iter() .map(|selection| { let mut moved_selection = selection.clone(); @@ -938,7 +939,7 @@ impl<'snap, 'a> MutableSelectionsCollection<'snap, 'a> { let map = self.display_snapshot(); let resolved_selections = resolve_selections_wrapping_blocks(adjusted_disjoint.iter(), &map).collect(); - self.select::(resolved_selections); + self.select::(resolved_selections); } if let Some(pending) = pending.as_mut() { @@ -981,7 +982,7 @@ impl DerefMut for MutableSelectionsCollection<'_, '_> { } fn selection_to_anchor_selection( - selection: Selection, + selection: Selection, buffer: &MultiBufferSnapshot, ) -> Selection { let end_bias = if selection.start == selection.end { @@ -1054,7 +1055,7 @@ fn resolve_selections_display<'a>( coalesce_selections(selections) } -/// Resolves the passed in anchors to [`TextDimension`]s `D` +/// Resolves the passed in anchors to [`MultiBufferDimension`]s `D` /// wrapping around blocks inbetween. 
/// /// # Panics @@ -1065,7 +1066,7 @@ pub(crate) fn resolve_selections_wrapping_blocks<'a, D, I>( map: &'a DisplaySnapshot, ) -> impl 'a + Iterator> where - D: TextDimension + Ord + Sub, + D: MultiBufferDimension + Sub + AddAssign<::Output> + Ord, I: 'a + IntoIterator>, { // Transforms `Anchor -> DisplayPoint -> Point -> DisplayPoint -> D` diff --git a/crates/editor/src/signature_help.rs b/crates/editor/src/signature_help.rs index 8d74638e4c2aaf356ffabdeef717b9b105487ee3..b394364e01cbd647a0e17afc0ddc13afdb12ced3 100644 --- a/crates/editor/src/signature_help.rs +++ b/crates/editor/src/signature_help.rs @@ -1,13 +1,13 @@ use crate::actions::ShowSignatureHelp; use crate::hover_popover::open_markdown_url; -use crate::{Editor, EditorSettings, ToggleAutoSignatureHelp, hover_markdown_style}; +use crate::{BufferOffset, Editor, EditorSettings, ToggleAutoSignatureHelp, hover_markdown_style}; use gpui::{ App, Context, Entity, HighlightStyle, MouseButton, ScrollHandle, Size, StyledText, Task, TextStyle, Window, combine_highlights, }; use language::BufferSnapshot; use markdown::{Markdown, MarkdownElement}; -use multi_buffer::{Anchor, ToOffset}; +use multi_buffer::{Anchor, MultiBufferOffset, ToOffset}; use settings::Settings; use std::ops::Range; use text::Rope; @@ -82,7 +82,9 @@ impl Editor { if !(self.signature_help_state.is_shown() || self.auto_signature_help_enabled(cx)) { return false; } - let newest_selection = self.selections.newest::(&self.display_snapshot(cx)); + let newest_selection = self + .selections + .newest::(&self.display_snapshot(cx)); let head = newest_selection.head(); if !newest_selection.is_empty() && head != newest_selection.tail() { @@ -92,14 +94,14 @@ impl Editor { } let buffer_snapshot = self.buffer().read(cx).snapshot(cx); - let bracket_range = |position: usize| match (position, position + 1) { - (0, b) if b <= buffer_snapshot.len() => 0..b, - (0, b) => 0..b - 1, + let bracket_range = |position: MultiBufferOffset| match (position, position + 1usize) { + (MultiBufferOffset(0), b) if b <= buffer_snapshot.len() => MultiBufferOffset(0)..b, + (MultiBufferOffset(0), b) => MultiBufferOffset(0)..b - 1, (a, b) if b <= buffer_snapshot.len() => a - 1..b, (a, b) => a - 1..b - 1, }; let not_quote_like_brackets = - |buffer: &BufferSnapshot, start: Range, end: Range| { + |buffer: &BufferSnapshot, start: Range, end: Range| { let text_start = buffer.text_for_range(start).collect::(); let text_end = buffer.text_for_range(end).collect::(); QUOTE_PAIRS diff --git a/crates/editor/src/test.rs b/crates/editor/src/test.rs index 9d1003e8c08b3d725ffa13b90eb0ee405520d8cd..5a0652bdd199a638f92234b1d50232071db18e07 100644 --- a/crates/editor/src/test.rs +++ b/crates/editor/src/test.rs @@ -16,7 +16,7 @@ use gpui::{ AppContext as _, Context, Entity, EntityId, Font, FontFeatures, FontStyle, FontWeight, Pixels, VisualTestContext, Window, font, size, }; -use multi_buffer::ToPoint; +use multi_buffer::{MultiBufferOffset, ToPoint}; use pretty_assertions::assert_eq; use project::{Project, project_settings::DiagnosticSeverity}; use ui::{App, BorrowAppContext, px}; @@ -78,7 +78,7 @@ pub fn marked_display_snapshot( let snapshot = display_map.update(cx, |map, cx| map.snapshot(cx)); let markers = markers .into_iter() - .map(|offset| offset.to_display_point(&snapshot)) + .map(|offset| MultiBufferOffset(offset).to_display_point(&snapshot)) .collect(); (snapshot, markers) @@ -94,7 +94,11 @@ pub fn select_ranges( let (unmarked_text, text_ranges) = marked_text_ranges(marked_text, true); assert_eq!(editor.text(cx), 
unmarked_text); editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| { - s.select_ranges(text_ranges) + s.select_ranges( + text_ranges + .into_iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }); } @@ -108,7 +112,12 @@ pub fn assert_text_with_selections( assert_eq!(editor.text(cx), unmarked_text, "text doesn't match"); let actual = generate_marked_text( &editor.text(cx), - &editor.selections.ranges(&editor.display_snapshot(cx)), + &editor + .selections + .ranges::(&editor.display_snapshot(cx)) + .into_iter() + .map(|range| range.start.0..range.end.0) + .collect::>(), marked_text.contains("«"), ); assert_eq!(actual, marked_text, "Selections don't match"); diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 87cc3357783ef4503b584f9624d14a35a8487dd7..3afe0e6134221fc69837abd30618f2b74ae069f5 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -7,6 +7,7 @@ use std::{ use anyhow::Result; use language::{markdown_lang, rust_lang}; +use multi_buffer::MultiBufferOffset; use serde_json::json; use crate::{Editor, ToPoint}; @@ -333,50 +334,38 @@ impl EditorLspTestContext { #[track_caller] pub fn lsp_range(&mut self, marked_text: &str) -> lsp::Range { let ranges = self.ranges(marked_text); - self.to_lsp_range(ranges[0].clone()) + self.to_lsp_range(MultiBufferOffset(ranges[0].start)..MultiBufferOffset(ranges[0].end)) } #[expect(clippy::wrong_self_convention, reason = "This is test code")] - pub fn to_lsp_range(&mut self, range: Range) -> lsp::Range { + pub fn to_lsp_range(&mut self, range: Range) -> lsp::Range { + use language::ToPointUtf16; let snapshot = self.update_editor(|editor, window, cx| editor.snapshot(window, cx)); let start_point = range.start.to_point(&snapshot.buffer_snapshot()); let end_point = range.end.to_point(&snapshot.buffer_snapshot()); self.editor(|editor, _, cx| { let buffer = editor.buffer().read(cx); - let start = point_to_lsp( - buffer - .point_to_buffer_offset(start_point, cx) - .unwrap() - .1 - .to_point_utf16(&buffer.read(cx)), - ); - let end = point_to_lsp( - buffer - .point_to_buffer_offset(end_point, cx) - .unwrap() - .1 - .to_point_utf16(&buffer.read(cx)), - ); - + let (start_buffer, start_offset) = + buffer.point_to_buffer_offset(start_point, cx).unwrap(); + let start = point_to_lsp(start_offset.to_point_utf16(&start_buffer.read(cx))); + let (end_buffer, end_offset) = buffer.point_to_buffer_offset(end_point, cx).unwrap(); + let end = point_to_lsp(end_offset.to_point_utf16(&end_buffer.read(cx))); lsp::Range { start, end } }) } #[expect(clippy::wrong_self_convention, reason = "This is test code")] - pub fn to_lsp(&mut self, offset: usize) -> lsp::Position { + pub fn to_lsp(&mut self, offset: MultiBufferOffset) -> lsp::Position { + use language::ToPointUtf16; + let snapshot = self.update_editor(|editor, window, cx| editor.snapshot(window, cx)); let point = offset.to_point(&snapshot.buffer_snapshot()); self.editor(|editor, _, cx| { let buffer = editor.buffer().read(cx); - point_to_lsp( - buffer - .point_to_buffer_offset(point, cx) - .unwrap() - .1 - .to_point_utf16(&buffer.read(cx)), - ) + let (buffer, offset) = buffer.point_to_buffer_offset(point, cx).unwrap(); + point_to_lsp(offset.to_point_utf16(&buffer.read(cx))) }) } diff --git a/crates/editor/src/test/editor_test_context.rs b/crates/editor/src/test/editor_test_context.rs index 
200c1f08cfb87dec47d66760c385aa357e45ce95..5793bcf576c7ed0e1604c30aada0fb362f65bb9f 100644 --- a/crates/editor/src/test/editor_test_context.rs +++ b/crates/editor/src/test/editor_test_context.rs @@ -13,7 +13,7 @@ use gpui::{ }; use itertools::Itertools; use language::{Buffer, BufferSnapshot, LanguageRegistry}; -use multi_buffer::{Anchor, ExcerptRange, MultiBufferRow}; +use multi_buffer::{Anchor, ExcerptRange, MultiBufferOffset, MultiBufferRow}; use parking_lot::RwLock; use project::{FakeFs, Project}; use std::{ @@ -267,7 +267,7 @@ impl EditorTestContext { let snapshot = self.editor.update_in(&mut self.cx, |editor, window, cx| { editor.snapshot(window, cx) }); - ranges[0].start.to_display_point(&snapshot) + MultiBufferOffset(ranges[0].start).to_display_point(&snapshot) } pub fn pixel_position(&mut self, marked_text: &str) -> Point { @@ -373,7 +373,11 @@ impl EditorTestContext { self.editor.update_in(&mut self.cx, |editor, window, cx| { editor.set_text(unmarked_text, window, cx); editor.change_selections(Default::default(), window, cx, |s| { - s.select_ranges(selection_ranges) + s.select_ranges( + selection_ranges + .into_iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }) }); state_context @@ -390,7 +394,11 @@ impl EditorTestContext { self.editor.update_in(&mut self.cx, |editor, window, cx| { assert_eq!(editor.text(cx), unmarked_text); editor.change_selections(Default::default(), window, cx, |s| { - s.select_ranges(selection_ranges) + s.select_ranges( + selection_ranges + .into_iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)), + ) }) }); state_context @@ -576,6 +584,7 @@ impl EditorTestContext { .unwrap_or_default() .iter() .map(|range| range.to_offset(&snapshot.buffer_snapshot())) + .map(|range| range.start.0..range.end.0) .collect() }); assert_set_eq!(actual_ranges, expected_ranges); @@ -591,6 +600,7 @@ impl EditorTestContext { .unwrap_or_default() .into_iter() .map(|range| range.to_offset(&snapshot.buffer_snapshot())) + .map(|range| range.start.0..range.end.0) .collect(); assert_set_eq!(actual_ranges, expected_ranges); } @@ -608,14 +618,16 @@ impl EditorTestContext { fn editor_selections(&mut self) -> Vec> { self.editor .update(&mut self.cx, |editor, cx| { - editor.selections.all::(&editor.display_snapshot(cx)) + editor + .selections + .all::(&editor.display_snapshot(cx)) }) .into_iter() .map(|s| { if s.reversed { - s.end..s.start + s.end.0..s.start.0 } else { - s.start..s.end + s.start.0..s.end.0 } }) .collect::>() @@ -711,7 +723,10 @@ pub fn assert_state_with_diff( snapshot.buffer_snapshot().clone(), editor .selections - .ranges::(&snapshot.display_snapshot), + .ranges::(&snapshot.display_snapshot) + .into_iter() + .map(|range| range.start.0..range.end.0) + .collect::>(), ) }); diff --git a/crates/git_ui/src/commit_view.rs b/crates/git_ui/src/commit_view.rs index 765e1f84a4a3a5b7e257e51df9a9542d0abff067..b0fa80fa7afef96fa48aa80883fb252beeed9629 100644 --- a/crates/git_ui/src/commit_view.rs +++ b/crates/git_ui/src/commit_view.rs @@ -1,6 +1,9 @@ use anyhow::{Context as _, Result}; use buffer_diff::{BufferDiff, BufferDiffSnapshot}; -use editor::{Editor, EditorEvent, MultiBuffer, SelectionEffects, multibuffer_context_lines}; +use editor::{ + Editor, EditorEvent, MultiBuffer, MultiBufferOffset, SelectionEffects, + multibuffer_context_lines, +}; use git::repository::{CommitDetails, CommitDiff, RepoPath}; use gpui::{ Action, AnyElement, AnyView, App, AppContext as _, AsyncApp, AsyncWindowContext, Context, @@ -187,7 
+190,7 @@ impl CommitView {
         editor.update(cx, |editor, cx| {
             editor.disable_header_for_buffer(metadata_buffer_id.unwrap(), cx);
             editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| {
-                selections.select_ranges(vec![0..0]);
+                selections.select_ranges(vec![MultiBufferOffset(0)..MultiBufferOffset(0)]);
             });
         });
     }
diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs
index cd56473dceb48d3c7da3629818f06d79d656ee03..ad77820078d43bc72be12ff358f96b5f4edaea0e 100644
--- a/crates/git_ui/src/git_panel.rs
+++ b/crates/git_ui/src/git_panel.rs
@@ -13,7 +13,8 @@ use anyhow::Context as _;
 use askpass::AskPassDelegate;
 use db::kvp::KEY_VALUE_STORE;
 use editor::{
-    Direction, Editor, EditorElement, EditorMode, MultiBuffer, actions::ExpandAllDiffHunks,
+    Direction, Editor, EditorElement, EditorMode, MultiBuffer, MultiBufferOffset,
+    actions::ExpandAllDiffHunks,
 };
 use futures::StreamExt as _;
 use git::blame::ParsedCommitMessage;
@@ -1551,7 +1552,10 @@ impl GitPanel {
         window: &mut Window,
         cx: &mut Context,
     ) -> Option {
-        let git_commit_language = self.commit_editor.read(cx).language_at(0, cx);
+        let git_commit_language = self
+            .commit_editor
+            .read(cx)
+            .language_at(MultiBufferOffset(0), cx);
         let message = self.commit_editor.read(cx).text(cx);
         if message.is_empty() {
             return self
diff --git a/crates/git_ui/src/project_diff.rs b/crates/git_ui/src/project_diff.rs
index 6f8195c8b718640de4fed421253d5f1bd2f8f14e..041d8381e92d59d9ef572f26cbc380abdf2d30e5 100644
--- a/crates/git_ui/src/project_diff.rs
+++ b/crates/git_ui/src/project_diff.rs
@@ -520,7 +520,8 @@ impl ProjectDiff {
         if was_empty {
             editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| {
                 // TODO select the very beginning (possibly inside a deletion)
-                selections.select_ranges([0..0])
+                selections
+                    .select_ranges([multi_buffer::Anchor::min()..multi_buffer::Anchor::min()])
             });
         }
         if is_excerpt_newly_added
diff --git a/crates/git_ui/src/text_diff_view.rs b/crates/git_ui/src/text_diff_view.rs
index 28eafaf4992667966832eaadd77a2babced7d66c..0975df9402f5b8e8db8bcfa83f2d31272d9983eb 100644
--- a/crates/git_ui/src/text_diff_view.rs
+++ b/crates/git_ui/src/text_diff_view.rs
@@ -446,7 +446,7 @@ impl Render for TextDiffView {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use editor::test::editor_test_context::assert_state_with_diff;
+    use editor::{MultiBufferOffset, test::editor_test_context::assert_state_with_diff};
     use gpui::{TestAppContext, VisualContext};
     use project::{FakeFs, Project};
     use serde_json::json;
@@ -691,7 +691,11 @@ mod tests {
         let (unmarked_text, selection_ranges) = marked_text_ranges(editor_text, false);
         editor.set_text(unmarked_text, window, cx);
         editor.change_selections(Default::default(), window, cx, |s| {
-            s.select_ranges(selection_ranges)
+            s.select_ranges(
+                selection_ranges
+                    .into_iter()
+                    .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)),
+            )
         });
         editor
diff --git a/crates/go_to_line/src/cursor_position.rs b/crates/go_to_line/src/cursor_position.rs
index 286faa0b8d0185bbdb9b488fd8502cb7566dc388..042d9a46b6c76a461e60d9002a2362190e253cd4 100644
--- a/crates/go_to_line/src/cursor_position.rs
+++ b/crates/go_to_line/src/cursor_position.rs
@@ -1,4 +1,4 @@
-use editor::{Editor, EditorEvent, MultiBufferSnapshot};
+use editor::{Editor, EditorEvent, MBTextSummary, MultiBufferSnapshot};
 use gpui::{App, Entity, FocusHandle, Focusable, Styled, Subscription, Task, WeakEntity};
 use settings::{RegisterSetting, Settings};
 use std::{fmt::Write,
num::NonZeroU32, time::Duration}; @@ -55,7 +55,7 @@ impl UserCaretPosition { let line_start = Point::new(selection_end.row, 0); let chars_to_last_position = snapshot - .text_summary_for_range::(line_start..selection_end) + .text_summary_for_range::(line_start..selection_end) .chars as u32; (selection_end.row, chars_to_last_position) }; @@ -116,7 +116,7 @@ impl CursorPosition { for selection in editor.selections.all_adjusted(&snapshot) { let selection_summary = snapshot .buffer_snapshot() - .text_summary_for_range::( + .text_summary_for_range::( selection.start..selection.end, ); cursor_position.selected_count.characters += diff --git a/crates/journal/src/journal.rs b/crates/journal/src/journal.rs index 2e30b91dab833d18f5fc9c35ad7ea4934d197fa8..f43949c0051f56559388203e387a540b8c593467 100644 --- a/crates/journal/src/journal.rs +++ b/crates/journal/src/journal.rs @@ -159,7 +159,7 @@ pub fn new_journal_entry(workspace: &Workspace, window: &mut Window, cx: &mut Ap cx, |s| s.select_ranges([len..len]), ); - if len > 0 { + if len.0 > 0 { editor.insert("\n\n", window, cx); } editor.insert(&entry_heading, window, cx); diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index f46fc0db0a171349456b2c29a1c9fff556daee2d..95db651350a2e1c703ce0ab52c77f075a83a0500 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -2083,7 +2083,7 @@ impl Buffer { } /// Gets a [`Subscription`] that tracks all of the changes to the buffer's text. - pub fn subscribe(&mut self) -> Subscription { + pub fn subscribe(&mut self) -> Subscription { self.text.subscribe() } diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index d480eadc73b9546e5a59b204b036a3ff88a018c7..9b52912b8ed09ce2dd7a4f2ea26f7106bfd11c31 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -1,6 +1,6 @@ use collections::VecDeque; use copilot::Copilot; -use editor::{Editor, EditorEvent, actions::MoveToEnd, scroll::Autoscroll}; +use editor::{Editor, EditorEvent, MultiBufferOffset, actions::MoveToEnd, scroll::Autoscroll}; use gpui::{ AnyView, App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, @@ -231,7 +231,7 @@ impl LspLogView { let last_offset = editor.buffer().read(cx).len(cx); let newest_cursor_is_at_end = editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .start >= last_offset; editor.edit( diff --git a/crates/language_tools/src/syntax_tree_view.rs b/crates/language_tools/src/syntax_tree_view.rs index e2a0cd4c33a93b7806710e68abca6404290808ce..885f6bed327c765019ae166e21eab112f884e7dd 100644 --- a/crates/language_tools/src/syntax_tree_view.rs +++ b/crates/language_tools/src/syntax_tree_view.rs @@ -1,5 +1,5 @@ use command_palette_hooks::CommandPaletteFilter; -use editor::{Anchor, Editor, ExcerptId, SelectionEffects, scroll::Autoscroll}; +use editor::{Anchor, Editor, ExcerptId, MultiBufferOffset, SelectionEffects, scroll::Autoscroll}; use gpui::{ App, AppContext as _, Context, Div, Entity, EntityId, EventEmitter, FocusHandle, Focusable, Hsla, InteractiveElement, IntoElement, MouseButton, MouseDownEvent, MouseMoveEvent, @@ -254,7 +254,7 @@ impl SyntaxTreeView { let (buffer, range, excerpt_id) = editor_state.editor.update(cx, |editor, cx| { let selection_range = editor .selections - .last::(&editor.display_snapshot(cx)) + 
.last::(&editor.display_snapshot(cx)) .range(); let multi_buffer = editor.buffer().read(cx); let (buffer, range, excerpt_id) = snapshot @@ -308,8 +308,8 @@ impl SyntaxTreeView { // Within the active layer, find the syntax node under the cursor, // and scroll to it. let mut cursor = layer.node().walk(); - while cursor.goto_first_child_for_byte(range.start).is_some() { - if !range.is_empty() && cursor.node().end_byte() == range.start { + while cursor.goto_first_child_for_byte(range.start.0).is_some() { + if !range.is_empty() && cursor.node().end_byte() == range.start.0 { cursor.goto_next_sibling(); } } @@ -317,7 +317,7 @@ impl SyntaxTreeView { // Ascend to the smallest ancestor that contains the range. loop { let node_range = cursor.node().byte_range(); - if node_range.start <= range.start && node_range.end >= range.end { + if node_range.start <= range.start.0 && node_range.end >= range.end.0 { break; } if !cursor.goto_parent() { diff --git a/crates/markdown_preview/src/markdown_preview_view.rs b/crates/markdown_preview/src/markdown_preview_view.rs index f62ff0874df8079f44868dfeaa1ad2fd0348e474..c4d3c033df6395235603837bf0944eeb59d3dfbc 100644 --- a/crates/markdown_preview/src/markdown_preview_view.rs +++ b/crates/markdown_preview/src/markdown_preview_view.rs @@ -4,7 +4,7 @@ use std::{ops::Range, path::PathBuf}; use anyhow::Result; use editor::scroll::Autoscroll; -use editor::{Editor, EditorEvent, SelectionEffects}; +use editor::{Editor, EditorEvent, MultiBufferOffset, SelectionEffects}; use gpui::{ App, ClickEvent, Context, Entity, EventEmitter, FocusHandle, Focusable, InteractiveElement, IntoElement, IsZero, ListState, ParentElement, Render, RetainAllImageCache, Styled, @@ -281,7 +281,7 @@ impl MarkdownPreviewView { let selection_range = editor.update(cx, |editor, cx| { editor .selections - .last::(&editor.display_snapshot(cx)) + .last::(&editor.display_snapshot(cx)) .range() }); this.selected_block = this.get_block_index_under_cursor(selection_range); @@ -358,7 +358,7 @@ impl MarkdownPreviewView { &self, window: &mut Window, cx: &mut Context, - selection: Range, + selection: Range, ) { if let Some(state) = &self.active_editor { state.editor.update(cx, |editor, cx| { @@ -375,7 +375,7 @@ impl MarkdownPreviewView { /// The absolute path of the file that is currently being previewed. 
    fn get_folder_for_active_editor(editor: &Editor, cx: &App) -> Option<PathBuf> {
-        if let Some(file) = editor.file_at(0, cx) {
+        if let Some(file) = editor.file_at(MultiBufferOffset(0), cx) {
             if let Some(file) = file.as_local() {
                 file.abs_path(cx).parent().map(|p| p.to_path_buf())
             } else {
@@ -386,9 +386,9 @@ impl MarkdownPreviewView {
         }
     }
 
-    fn get_block_index_under_cursor(&self, selection_range: Range<usize>) -> usize {
+    fn get_block_index_under_cursor(&self, selection_range: Range<MultiBufferOffset>) -> usize {
         let mut block_index = None;
-        let cursor = selection_range.start;
+        let cursor = selection_range.start.0;
         let mut last_end = 0;
 
         if let Some(content) = &self.contents {
@@ -524,7 +524,15 @@ impl Render for MarkdownPreviewView {
                                     if e.checked() { "[x]" } else { "[ ]" };
                                 editor.edit(
-                                    vec![(e.source_range(), task_marker)],
+                                    vec![(
+                                        MultiBufferOffset(
+                                            e.source_range().start,
+                                        )
+                                            ..MultiBufferOffset(
+                                                e.source_range().end,
+                                            ),
+                                        task_marker,
+                                    )],
                                     cx,
                                 );
                             });
@@ -564,7 +572,8 @@ impl Render for MarkdownPreviewView {
                         this.move_cursor_to_block(
                             window,
                             cx,
-                            source_range.start..source_range.start,
+                            MultiBufferOffset(source_range.start)
+                                ..MultiBufferOffset(source_range.start),
                         );
                     }
                 },
diff --git a/crates/multi_buffer/src/anchor.rs b/crates/multi_buffer/src/anchor.rs
index d5009172084d6d683f722a8ad2aa5b8b21ae0493..57b5244b3f276265c31f1431701a2bd7d8e59aef 100644
--- a/crates/multi_buffer/src/anchor.rs
+++ b/crates/multi_buffer/src/anchor.rs
@@ -1,8 +1,10 @@
+use crate::{MultiBufferDimension, MultiBufferOffset, MultiBufferOffsetUtf16};
+
 use super::{ExcerptId, MultiBufferSnapshot, ToOffset, ToPoint};
-use language::{OffsetUtf16, Point, TextDimension};
+use language::Point;
 use std::{
     cmp::Ordering,
-    ops::{Range, Sub},
+    ops::{AddAssign, Range, Sub},
 };
 use sum_tree::Bias;
 use text::BufferId;
@@ -162,7 +164,11 @@ impl Anchor {
     pub fn summary(&self, snapshot: &MultiBufferSnapshot) -> D
     where
-        D: TextDimension + Ord + Sub,
+        D: MultiBufferDimension
+            + Ord
+            + Sub
+            + AddAssign,
+        D::TextDimension: Sub + Ord,
     {
         snapshot.summary_for_anchor(self)
     }
@@ -182,10 +188,10 @@ impl Anchor {
 }
 
 impl ToOffset for Anchor {
-    fn to_offset(&self, snapshot: &MultiBufferSnapshot) -> usize {
+    fn to_offset(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset {
         self.summary(snapshot)
     }
-    fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> OffsetUtf16 {
+    fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16 {
        self.summary(snapshot)
    }
 }
@@ -203,7 +209,7 @@ pub trait AnchorRangeExt {
     fn cmp(&self, other: &Range, buffer: &MultiBufferSnapshot) -> Ordering;
     fn includes(&self, other: &Range, buffer: &MultiBufferSnapshot) -> bool;
     fn overlaps(&self, other: &Range, buffer: &MultiBufferSnapshot) -> bool;
-    fn to_offset(&self, content: &MultiBufferSnapshot) -> Range<usize>;
+    fn to_offset(&self, content: &MultiBufferSnapshot) -> Range<MultiBufferOffset>;
     fn to_point(&self, content: &MultiBufferSnapshot) -> Range;
 }
@@ -223,7 +229,7 @@ impl AnchorRangeExt for Range {
         self.end.cmp(&other.start, buffer).is_ge() && self.start.cmp(&other.end, buffer).is_le()
     }
 
-    fn to_offset(&self, content: &MultiBufferSnapshot) -> Range<usize> {
+    fn to_offset(&self, content: &MultiBufferSnapshot) -> Range<MultiBufferOffset> {
         self.start.to_offset(content)..self.end.to_offset(content)
     }
 
@@ -231,6 +237,3 @@ impl AnchorRangeExt for Range {
         self.start.to_point(content)..self.end.to_point(content)
     }
 }
-
-#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash, Ord, PartialOrd)]
-pub struct Offset(pub usize);
diff --git a/crates/multi_buffer/src/multi_buffer.rs
b/crates/multi_buffer/src/multi_buffer.rs index 43def73ae257e29f007ef56fb181e03432023edb..c1a2fed7e2a253d3469944a7f2c4fa2275c8abd4 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -7,7 +7,7 @@ mod transaction; use self::transaction::History; -pub use anchor::{Anchor, AnchorRangeExt, Offset}; +pub use anchor::{Anchor, AnchorRangeExt}; pub use position::{TypedOffset, TypedPoint, TypedRow}; use anyhow::{Result, anyhow}; @@ -43,13 +43,13 @@ use std::{ io, iter::{self, FromIterator}, mem, - ops::{Range, RangeBounds, Sub}, + ops::{self, AddAssign, Range, RangeBounds, Sub}, rc::Rc, str, sync::Arc, time::Duration, }; -use sum_tree::{Bias, Cursor, Dimension, Dimensions, SumTree, Summary, TreeMap}; +use sum_tree::{Bias, Cursor, Dimension, Dimensions, SumTree, TreeMap}; use text::{ BufferId, Edit, LineIndent, TextSummary, locator::Locator, @@ -78,7 +78,7 @@ pub struct MultiBuffer { paths_by_excerpt: HashMap, /// Mapping from buffer IDs to their diff states diffs: HashMap, - subscriptions: Topic, + subscriptions: Topic, /// If true, the multi-buffer only contains a single [`Buffer`] and a single [`Excerpt`] singleton: bool, /// The history of the multi-buffer. @@ -138,7 +138,7 @@ pub struct MultiBufferDiffHunk { /// The excerpt that contains the diff hunk. pub excerpt_id: ExcerptId, /// The range within the buffer's diff base that this hunk corresponds to. - pub diff_base_byte_range: Range, + pub diff_base_byte_range: Range, /// Whether or not this hunk also appears in the 'secondary diff'. pub secondary_status: DiffHunkSecondaryStatus, } @@ -159,7 +159,7 @@ impl MultiBufferDiffHunk { } pub fn is_created_file(&self) -> bool { - self.diff_base_byte_range == (0..0) + self.diff_base_byte_range == (BufferOffset(0)..BufferOffset(0)) && self.buffer_range == (text::Anchor::MIN..text::Anchor::MAX) } @@ -183,7 +183,7 @@ impl MultiBufferRow { pub const MAX: Self = Self(u32::MAX); } -impl std::ops::Add for MultiBufferRow { +impl ops::Add for MultiBufferRow { type Output = Self; fn add(self, rhs: usize) -> Self::Output { @@ -191,9 +191,305 @@ impl std::ops::Add for MultiBufferRow { } } +pub trait MultiBufferDimension: 'static + Copy + Default + std::fmt::Debug { + type TextDimension: TextDimension; + fn from_summary(summary: &MBTextSummary) -> Self; + + fn add_text_dim(&mut self, summary: &Self::TextDimension); + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary); +} + +// todo(lw): MultiBufferPoint +impl MultiBufferDimension for Point { + type TextDimension = Point; + fn from_summary(summary: &MBTextSummary) -> Self { + summary.lines + } + + fn add_text_dim(&mut self, other: &Self::TextDimension) { + *self += *other; + } + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary) { + *self += summary.lines; + } +} + +// todo(lw): MultiBufferPointUtf16 +impl MultiBufferDimension for PointUtf16 { + type TextDimension = PointUtf16; + fn from_summary(summary: &MBTextSummary) -> Self { + summary.lines_utf16() + } + + fn add_text_dim(&mut self, other: &Self::TextDimension) { + *self += *other; + } + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary) { + *self += summary.lines_utf16(); + } +} + +#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq, Hash, serde::Deserialize)] +pub struct MultiBufferOffset(pub usize); + +impl fmt::Display for MultiBufferOffset { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + +impl rand::distr::uniform::SampleUniform for MultiBufferOffset { + type 
Sampler = MultiBufferOffsetUniformSampler; +} + +pub struct MultiBufferOffsetUniformSampler { + sampler: rand::distr::uniform::UniformUsize, +} + +impl rand::distr::uniform::UniformSampler for MultiBufferOffsetUniformSampler { + type X = MultiBufferOffset; + + fn new(low_b: B1, high_b: B2) -> Result + where + B1: rand::distr::uniform::SampleBorrow + Sized, + B2: rand::distr::uniform::SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + let sampler = rand::distr::uniform::UniformUsize::new(low.0, high.0); + sampler.map(|sampler| MultiBufferOffsetUniformSampler { sampler }) + } + + #[inline] // if the range is constant, this helps LLVM to do the + // calculations at compile-time. + fn new_inclusive(low_b: B1, high_b: B2) -> Result + where + B1: rand::distr::uniform::SampleBorrow + Sized, + B2: rand::distr::uniform::SampleBorrow + Sized, + { + let low = *low_b.borrow(); + let high = *high_b.borrow(); + let sampler = rand::distr::uniform::UniformUsize::new_inclusive(low.0, high.0); + sampler.map(|sampler| MultiBufferOffsetUniformSampler { sampler }) + } + + fn sample(&self, rng: &mut R) -> Self::X { + MultiBufferOffset(self.sampler.sample(rng)) + } +} +impl MultiBufferDimension for MultiBufferOffset { + type TextDimension = usize; + fn from_summary(summary: &MBTextSummary) -> Self { + summary.len + } + + fn add_text_dim(&mut self, other: &Self::TextDimension) { + self.0 += *other; + } + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary) { + *self += summary.len; + } +} +impl MultiBufferDimension for MultiBufferOffsetUtf16 { + type TextDimension = OffsetUtf16; + fn from_summary(summary: &MBTextSummary) -> Self { + MultiBufferOffsetUtf16(summary.len_utf16) + } + + fn add_text_dim(&mut self, other: &Self::TextDimension) { + self.0 += *other; + } + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary) { + self.0 += summary.len_utf16; + } +} + +#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq, Hash, serde::Deserialize)] +pub struct BufferOffset(pub usize); + +impl TextDimension for BufferOffset { + fn from_text_summary(summary: &TextSummary) -> Self { + BufferOffset(usize::from_text_summary(summary)) + } + fn from_chunk(chunk: rope::ChunkSlice) -> Self { + BufferOffset(usize::from_chunk(chunk)) + } + fn add_assign(&mut self, other: &Self) { + TextDimension::add_assign(&mut self.0, &other.0); + } +} +impl<'a> sum_tree::Dimension<'a, rope::ChunkSummary> for BufferOffset { + fn zero(cx: ()) -> Self { + BufferOffset(>::zero(cx)) + } + + fn add_summary(&mut self, summary: &'a rope::ChunkSummary, cx: ()) { + usize::add_summary(&mut self.0, summary, cx); + } +} + +impl Sub for BufferOffset { + type Output = usize; + + fn sub(self, other: BufferOffset) -> Self::Output { + self.0 - other.0 + } +} + +impl AddAssign> for BufferOffset { + fn add_assign(&mut self, other: DimensionPair) { + self.0 += other.key; + } +} + +impl language::ToPoint for BufferOffset { + fn to_point(&self, snapshot: &text::BufferSnapshot) -> Point { + self.0.to_point(snapshot) + } +} + +impl language::ToPointUtf16 for BufferOffset { + fn to_point_utf16(&self, snapshot: &text::BufferSnapshot) -> PointUtf16 { + self.0.to_point_utf16(snapshot) + } +} + +impl language::ToOffset for BufferOffset { + fn to_offset(&self, snapshot: &text::BufferSnapshot) -> usize { + self.0.to_offset(snapshot) + } +} + +impl language::ToOffsetUtf16 for BufferOffset { + fn to_offset_utf16(&self, snapshot: &text::BufferSnapshot) -> OffsetUtf16 { + self.0.to_offset_utf16(snapshot) + } +} + 
+#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq)] +pub struct MultiBufferOffsetUtf16(pub OffsetUtf16); + +impl ops::Add for MultiBufferOffsetUtf16 { + type Output = MultiBufferOffsetUtf16; + + fn add(self, rhs: usize) -> Self::Output { + MultiBufferOffsetUtf16(OffsetUtf16(self.0.0 + rhs)) + } +} + +impl AddAssign for MultiBufferOffsetUtf16 { + fn add_assign(&mut self, rhs: OffsetUtf16) { + self.0 += rhs; + } +} + +impl AddAssign for MultiBufferOffsetUtf16 { + fn add_assign(&mut self, rhs: usize) { + self.0.0 += rhs; + } +} + +impl Sub for MultiBufferOffsetUtf16 { + type Output = OffsetUtf16; + + fn sub(self, other: MultiBufferOffsetUtf16) -> Self::Output { + self.0 - other.0 + } +} + +#[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq)] +pub struct BufferOffsetUtf16(pub OffsetUtf16); + +impl MultiBufferOffset { + const ZERO: Self = Self(0); + pub fn saturating_sub(self, other: MultiBufferOffset) -> usize { + self.0.saturating_sub(other.0) + } + pub fn saturating_sub_usize(self, other: usize) -> MultiBufferOffset { + MultiBufferOffset(self.0.saturating_sub(other)) + } +} + +impl ops::Sub for MultiBufferOffset { + type Output = usize; + + fn sub(self, other: MultiBufferOffset) -> Self::Output { + self.0 - other.0 + } +} + +impl ops::Sub for MultiBufferOffset { + type Output = Self; + + fn sub(self, other: usize) -> Self::Output { + MultiBufferOffset(self.0 - other) + } +} + +impl ops::SubAssign for MultiBufferOffset { + fn sub_assign(&mut self, other: usize) { + self.0 -= other; + } +} + +impl ops::Add for BufferOffset { + type Output = Self; + + fn add(self, rhs: usize) -> Self::Output { + BufferOffset(self.0 + rhs) + } +} + +impl ops::AddAssign for BufferOffset { + fn add_assign(&mut self, other: usize) { + self.0 += other; + } +} + +impl ops::Add for MultiBufferOffset { + type Output = Self; + + fn add(self, rhs: usize) -> Self::Output { + MultiBufferOffset(self.0 + rhs) + } +} + +impl ops::AddAssign for MultiBufferOffset { + fn add_assign(&mut self, other: usize) { + self.0 += other; + } +} + +impl ops::Add for MultiBufferOffset { + type Output = Self; + + fn add(self, rhs: isize) -> Self::Output { + MultiBufferOffset((self.0 as isize + rhs) as usize) + } +} + +impl ops::Add for MultiBufferOffset { + type Output = Self; + + fn add(self, rhs: MultiBufferOffset) -> Self::Output { + MultiBufferOffset(self.0 + rhs.0) + } +} + +impl ops::AddAssign for MultiBufferOffset { + fn add_assign(&mut self, other: MultiBufferOffset) { + self.0 += other.0; + } +} + pub trait ToOffset: 'static + fmt::Debug { - fn to_offset(&self, snapshot: &MultiBufferSnapshot) -> usize; - fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> OffsetUtf16; + fn to_offset(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset; + fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16; } pub trait ToPoint: 'static + fmt::Debug { @@ -255,7 +551,7 @@ pub struct MultiBufferSnapshot { #[derive(Debug, Clone)] enum DiffTransform { BufferContent { - summary: TextSummary, + summary: MBTextSummary, inserted_hunk_info: Option, }, DeletedHunk { @@ -370,10 +666,12 @@ struct Excerpt { #[derive(Clone)] pub struct MultiBufferExcerpt<'a> { excerpt: &'a Excerpt, - diff_transforms: sum_tree::Cursor<'a, 'static, DiffTransform, DiffTransforms>, - offset: usize, - excerpt_offset: ExcerptDimension, - buffer_offset: usize, + diff_transforms: + sum_tree::Cursor<'a, 'static, DiffTransform, DiffTransforms>, + offset: MultiBufferOffset, + // todo unsure about this type + 
excerpt_offset: MultiBufferOffset,
+    buffer_offset: BufferOffset,
 }
 
 #[derive(Clone, Debug)]
@@ -408,13 +706,153 @@ pub struct ExcerptSummary {
     /// The location of the last [`Excerpt`] being summarized
     excerpt_locator: Locator,
     widest_line_number: u32,
-    text: TextSummary,
+    text: MBTextSummary,
 }
 
 #[derive(Debug, Clone)]
 pub struct DiffTransformSummary {
-    input: TextSummary,
-    output: TextSummary,
+    input: MBTextSummary,
+    output: MBTextSummary,
+}
+
+/// Summary of a string of text.
+#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
+pub struct MBTextSummary {
+    /// Length in bytes.
+    pub len: MultiBufferOffset,
+    /// Number of `char`s in the text.
+    pub chars: usize,
+    /// Length in UTF-16 code units
+    pub len_utf16: OffsetUtf16,
+    /// A point representing the number of lines and the length of the last line.
+    ///
+    /// In other words, it marks the point after the last byte in the text (if
+    /// EOF was a character, this would be its position).
+    pub lines: Point,
+    /// How many `char`s are in the first line
+    pub first_line_chars: u32,
+    /// How many `char`s are in the last line
+    pub last_line_chars: u32,
+    /// How many UTF-16 code units are in the last line
+    pub last_line_len_utf16: u32,
+    /// The row idx of the longest row
+    pub longest_row: u32,
+    /// How many `char`s are in the longest row
+    pub longest_row_chars: u32,
+}
+
+impl From<TextSummary> for MBTextSummary {
+    fn from(summary: TextSummary) -> Self {
+        MBTextSummary {
+            len: MultiBufferOffset(summary.len),
+            chars: summary.chars,
+            len_utf16: summary.len_utf16,
+            lines: summary.lines,
+            first_line_chars: summary.first_line_chars,
+            last_line_chars: summary.last_line_chars,
+            last_line_len_utf16: summary.last_line_len_utf16,
+            longest_row: summary.longest_row,
+            longest_row_chars: summary.longest_row_chars,
+        }
+    }
+}
+impl From<&str> for MBTextSummary {
+    fn from(text: &str) -> Self {
+        MBTextSummary::from(TextSummary::from(text))
+    }
+}
+
+impl MultiBufferDimension for MBTextSummary {
+    type TextDimension = TextSummary;
+
+    fn from_summary(summary: &MBTextSummary) -> Self {
+        *summary
+    }
+
+    fn add_text_dim(&mut self, summary: &Self::TextDimension) {
+        *self += *summary;
+    }
+
+    fn add_mb_text_summary(&mut self, summary: &MBTextSummary) {
+        *self += *summary;
+    }
+}
+
+impl AddAssign for MBTextSummary {
+    fn add_assign(&mut self, other: MBTextSummary) {
+        let joined_chars = self.last_line_chars + other.first_line_chars;
+        if joined_chars > self.longest_row_chars {
+            self.longest_row = self.lines.row;
+            self.longest_row_chars = joined_chars;
+        }
+        if other.longest_row_chars > self.longest_row_chars {
+            self.longest_row = self.lines.row + other.longest_row;
+            self.longest_row_chars = other.longest_row_chars;
+        }
+
+        if self.lines.row == 0 {
+            self.first_line_chars += other.first_line_chars;
+        }
+
+        if other.lines.row == 0 {
+            self.last_line_chars += other.first_line_chars;
+            self.last_line_len_utf16 += other.last_line_len_utf16;
+        } else {
+            self.last_line_chars = other.last_line_chars;
+            self.last_line_len_utf16 = other.last_line_len_utf16;
+        }
+
+        self.chars += other.chars;
+        self.len += other.len;
+        self.len_utf16 += other.len_utf16;
+        self.lines += other.lines;
+    }
+}
+
+impl AddAssign<TextSummary> for MBTextSummary {
+    fn add_assign(&mut self, other: TextSummary) {
+        *self += MBTextSummary::from(other);
+    }
+}
+
+impl MBTextSummary {
+    pub fn lines_utf16(&self) -> PointUtf16 {
+        PointUtf16 {
+            row: self.lines.row,
+            column: self.last_line_len_utf16,
+        }
+    }
+}
+
+impl<K, V> MultiBufferDimension for DimensionPair<K, V> where
+    K: MultiBufferDimension,
+    V:
MultiBufferDimension, +{ + type TextDimension = DimensionPair; + + fn from_summary(summary: &MBTextSummary) -> Self { + Self { + key: K::from_summary(summary), + value: Some(V::from_summary(summary)), + } + } + + fn add_text_dim(&mut self, summary: &Self::TextDimension) { + self.key.add_text_dim(&summary.key); + if let Some(value) = &mut self.value { + if let Some(other_value) = summary.value.as_ref() { + value.add_text_dim(other_value); + } + } + } + + fn add_mb_text_summary(&mut self, summary: &MBTextSummary) { + self.key.add_mb_text_summary(summary); + if let Some(value) = &mut self.value { + value.add_mb_text_summary(summary); + } + } } #[derive(Clone)] @@ -422,53 +860,54 @@ pub struct MultiBufferRows<'a> { point: Point, is_empty: bool, is_singleton: bool, - cursor: MultiBufferCursor<'a, Point>, + cursor: MultiBufferCursor<'a, Point, Point>, } pub struct MultiBufferChunks<'a> { excerpts: Cursor<'a, 'static, Excerpt, ExcerptOffset>, - diff_transforms: Cursor<'a, 'static, DiffTransform, Dimensions>, + diff_transforms: + Cursor<'a, 'static, DiffTransform, Dimensions>, diffs: &'a TreeMap, diff_base_chunks: Option<(BufferId, BufferChunks<'a>)>, buffer_chunk: Option>, - range: Range, + range: Range, excerpt_offset_range: Range, excerpt_chunks: Option>, language_aware: bool, } pub struct ReversedMultiBufferChunks<'a> { - cursor: MultiBufferCursor<'a, usize>, + cursor: MultiBufferCursor<'a, MultiBufferOffset, BufferOffset>, current_chunks: Option>, - start: usize, - offset: usize, + start: MultiBufferOffset, + offset: MultiBufferOffset, } pub struct MultiBufferBytes<'a> { - range: Range, - cursor: MultiBufferCursor<'a, usize>, + range: Range, + cursor: MultiBufferCursor<'a, MultiBufferOffset, BufferOffset>, excerpt_bytes: Option>, has_trailing_newline: bool, chunk: &'a [u8], } pub struct ReversedMultiBufferBytes<'a> { - range: Range, + range: Range, chunks: ReversedMultiBufferChunks<'a>, chunk: &'a [u8], } #[derive(Clone)] -struct DiffTransforms { - output_dimension: OutputDimension, - excerpt_dimension: ExcerptDimension, +struct DiffTransforms { + output_dimension: OutputDimension, + excerpt_dimension: ExcerptDimension, } -impl<'a, D: TextDimension> Dimension<'a, DiffTransformSummary> for DiffTransforms { +impl<'a, MBD: MultiBufferDimension> Dimension<'a, DiffTransformSummary> for DiffTransforms { fn zero(cx: ::Context<'_>) -> Self { Self { output_dimension: OutputDimension::zero(cx), - excerpt_dimension: as Dimension<'a, DiffTransformSummary>>::zero( + excerpt_dimension: as Dimension<'a, DiffTransformSummary>>::zero( cx, ), } @@ -485,21 +924,21 @@ impl<'a, D: TextDimension> Dimension<'a, DiffTransformSummary> for DiffTransform } #[derive(Clone)] -struct MultiBufferCursor<'a, D: TextDimension> { - excerpts: Cursor<'a, 'static, Excerpt, ExcerptDimension>, - diff_transforms: Cursor<'a, 'static, DiffTransform, DiffTransforms>, +struct MultiBufferCursor<'a, MBD, BD> { + excerpts: Cursor<'a, 'static, Excerpt, ExcerptDimension>, + diff_transforms: Cursor<'a, 'static, DiffTransform, DiffTransforms>, diffs: &'a TreeMap, - cached_region: Option>, + cached_region: Option>, } #[derive(Clone)] -struct MultiBufferRegion<'a, D: TextDimension> { +struct MultiBufferRegion<'a, MBD, BD> { buffer: &'a BufferSnapshot, is_main_buffer: bool, diff_hunk_status: Option, excerpt: &'a Excerpt, - buffer_range: Range, - range: Range, + buffer_range: Range, + range: Range, has_trailing_newline: bool, } @@ -511,7 +950,7 @@ struct ExcerptChunks<'a> { #[derive(Debug)] struct BufferEdit { - range: Range, + range: Range, 
new_text: Arc, is_insertion: bool, original_indent_column: Option, @@ -693,7 +1132,7 @@ impl MultiBuffer { self.singleton } - pub fn subscribe(&mut self) -> Subscription { + pub fn subscribe(&mut self) -> Subscription { self.subscriptions.subscribe() } @@ -711,7 +1150,7 @@ impl MultiBuffer { // The `is_empty` signature doesn't match what clippy expects #[allow(clippy::len_without_is_empty)] - pub fn len(&self, cx: &App) -> usize { + pub fn len(&self, cx: &App) -> MultiBufferOffset { self.read(cx).len() } @@ -750,7 +1189,7 @@ impl MultiBuffer { // Non-generic part of edit, hoisted out to avoid blowing up LLVM IR. fn edit_internal( this: &mut MultiBuffer, - edits: Vec<(Range, Arc)>, + edits: Vec<(Range, Arc)>, mut autoindent_mode: Option, cx: &mut Context, ) { @@ -849,13 +1288,13 @@ impl MultiBuffer { } fn convert_edits_to_buffer_edits( - edits: Vec<(Range, Arc)>, + edits: Vec<(Range, Arc)>, snapshot: &MultiBufferSnapshot, original_indent_columns: &[Option], ) -> (HashMap>, Vec) { let mut buffer_edits: HashMap> = Default::default(); let mut edited_excerpt_ids = Vec::new(); - let mut cursor = snapshot.cursor::(); + let mut cursor = snapshot.cursor::(); for (ix, (range, new_text)) in edits.into_iter().enumerate() { let original_indent_column = original_indent_columns.get(ix).copied().flatten(); @@ -994,7 +1433,7 @@ impl MultiBuffer { fn autoindent_ranges_internal( this: &mut MultiBuffer, - edits: Vec<(Range, Arc)>, + edits: Vec<(Range, Arc)>, cx: &mut Context, ) { let (buffer_edits, edited_excerpt_ids) = @@ -1005,7 +1444,7 @@ impl MultiBuffer { buffer_ids.push(buffer_id); edits.sort_unstable_by_key(|edit| edit.range.start); - let mut ranges: Vec> = Vec::new(); + let mut ranges: Vec> = Vec::new(); for edit in edits { if let Some(last_range) = ranges.last_mut() && edit.range.start <= last_range.end @@ -1243,7 +1682,7 @@ impl MultiBuffer { let mut new_excerpts = cursor.slice(&prev_locator, Bias::Right); prev_locator = cursor.start().unwrap_or(Locator::min_ref()).clone(); - let edit_start = ExcerptOffset::new(new_excerpts.summary().text.len); + let edit_start = ExcerptOffset::new(new_excerpts.summary().text.len.0); new_excerpts.update_last( |excerpt| { excerpt.has_trailing_newline = true; @@ -1287,7 +1726,7 @@ impl MultiBuffer { new_excerpt_ids.push(ExcerptIdMapping { id, locator }, ()); } - let edit_end = ExcerptOffset::new(new_excerpts.summary().text.len); + let edit_end = ExcerptOffset::new(new_excerpts.summary().text.len.0); let suffix = cursor.suffix(); let changed_trailing_excerpt = suffix.is_empty(); @@ -1345,7 +1784,7 @@ impl MultiBuffer { show_headers: _, } = self.snapshot.get_mut(); let start = ExcerptOffset::new(0); - let prev_len = ExcerptOffset::new(excerpts.summary().text.len); + let prev_len = ExcerptOffset::new(excerpts.summary().text.len.0); *excerpts = Default::default(); *trailing_excerpt_update_count += 1; *is_dirty = false; @@ -1416,7 +1855,7 @@ impl MultiBuffer { if let Some(excerpt) = excerpts.item() && excerpt.locator == *locator { - let excerpt_start = excerpts.start().1.clone(); + let excerpt_start = excerpts.start().1; let excerpt_end = ExcerptDimension(excerpt_start.0 + excerpt.text_summary.lines); diff_transforms.seek_forward(&excerpt_start, Bias::Left); @@ -1459,7 +1898,7 @@ impl MultiBuffer { let snapshot = self.read(cx); let offset = position.to_offset(&snapshot); - let mut cursor = snapshot.cursor::(); + let mut cursor = snapshot.cursor::(); cursor.seek(&offset); cursor .excerpt() @@ -1487,7 +1926,7 @@ impl MultiBuffer { &self, point: T, cx: &App, - ) -> 
Option<(Entity, usize)> { + ) -> Option<(Entity, BufferOffset)> { let snapshot = self.read(cx); let (buffer, offset) = snapshot.point_to_buffer_offset(point)?; Some(( @@ -1631,7 +2070,7 @@ impl MultiBuffer { // Push an edit for the removal of this run of excerpts. let old_end = cursor.start().1; - let new_start = ExcerptOffset::new(new_excerpts.summary().text.len); + let new_start = ExcerptOffset::new(new_excerpts.summary().text.len.0); edits.push(Edit { old: old_start..old_end, new: new_start..new_start, @@ -1855,7 +2294,7 @@ impl MultiBuffer { let buffer = buffer.read(cx); language_settings(buffer.language().map(|l| l.name()), buffer.file(), cx) }) - .unwrap_or_else(move || self.language_settings_at(0, cx)) + .unwrap_or_else(move || self.language_settings_at(MultiBufferOffset::default(), cx)) } pub fn language_settings_at<'a, T: ToOffset>( @@ -2007,7 +2446,7 @@ impl MultiBuffer { pub fn single_hunk_is_expanded(&self, range: Range, cx: &App) -> bool { let snapshot = self.read(cx); - let mut cursor = snapshot.diff_transforms.cursor::(()); + let mut cursor = snapshot.diff_transforms.cursor::(()); let offset_range = range.to_offset(&snapshot); cursor.seek(&offset_range.start, Bias::Left); while let Some(item) = cursor.item() { @@ -2024,13 +2463,13 @@ impl MultiBuffer { pub fn has_expanded_diff_hunks_in_ranges(&self, ranges: &[Range], cx: &App) -> bool { let snapshot = self.read(cx); - let mut cursor = snapshot.diff_transforms.cursor::(()); + let mut cursor = snapshot.diff_transforms.cursor::(()); for range in ranges { let range = range.to_point(&snapshot); let start = snapshot.point_to_offset(Point::new(range.start.row, 0)); let end = snapshot.point_to_offset(Point::new(range.end.row + 1, 0)); - let start = start.saturating_sub(1); - let end = snapshot.len().min(end + 1); + let start = start.saturating_sub_usize(1); + let end = snapshot.len().min(end + 1usize); cursor.seek(&start, Bias::Right); while let Some(item) = cursor.item() { if *cursor.start() >= end { @@ -2149,7 +2588,7 @@ impl MultiBuffer { .buffer .text_summary_for_range(excerpt.range.context.clone()); - let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len); + let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len.0); let old_start_offset = cursor.start().1; let new_text_len = ExcerptOffset::new(excerpt.text_summary.len); let edit = Edit { @@ -2255,7 +2694,7 @@ impl MultiBuffer { .buffer .text_summary_for_range(excerpt.range.context.clone()); - let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len); + let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len.0); let old_start_offset = cursor.start().1; let new_text_len = ExcerptOffset::new(excerpt.text_summary.len); let edit = Edit { @@ -2328,7 +2767,7 @@ impl MultiBuffer { buffers: &HashMap, diffs: &HashMap, cx: &App, - ) -> Vec> { + ) -> Vec> { let MultiBufferSnapshot { excerpts, diffs: buffer_diff, @@ -2417,7 +2856,7 @@ impl MultiBuffer { .map(|edit| { let excerpt_old_start = cursor.start().1; let excerpt_new_start = - ExcerptOffset::new(new_excerpts.summary().text.len); + ExcerptOffset::new(new_excerpts.summary().text.len.0); let old_start = excerpt_old_start + ExcerptOffset::new(edit.old.start); let old_end = excerpt_old_start + ExcerptOffset::new(edit.old.end); let new_start = excerpt_new_start + ExcerptOffset::new(edit.new.start); @@ -2456,7 +2895,7 @@ impl MultiBuffer { snapshot: &mut MultiBufferSnapshot, excerpt_edits: Vec>, change_kind: DiffChangeKind, - ) -> Vec> { + ) -> Vec> { if 
excerpt_edits.is_empty() { return vec![]; } @@ -2464,7 +2903,7 @@ impl MultiBuffer { let mut excerpts = snapshot.excerpts.cursor::(()); let mut old_diff_transforms = snapshot .diff_transforms - .cursor::>(()); + .cursor::>(()); let mut new_diff_transforms = SumTree::default(); let mut old_expanded_hunks = HashSet::default(); let mut output_edits = Vec::new(); @@ -2496,7 +2935,8 @@ impl MultiBuffer { // Compute the start of the edit in output coordinates. let edit_start_overshoot = (edit.old.start - old_diff_transforms.start().0).value; let edit_old_start = old_diff_transforms.start().1 + edit_start_overshoot; - let edit_new_start = (edit_old_start as isize + output_delta) as usize; + let edit_new_start = + MultiBufferOffset((edit_old_start.0 as isize + output_delta) as usize); let changed_diff_hunks = Self::recompute_diff_transforms_for_edit( &edit, @@ -2588,7 +3028,10 @@ impl MultiBuffer { fn recompute_diff_transforms_for_edit( edit: &Edit>, excerpts: &mut Cursor>, - old_diff_transforms: &mut Cursor, usize>>, + old_diff_transforms: &mut Cursor< + DiffTransform, + Dimensions, MultiBufferOffset>, + >, new_diff_transforms: &mut SumTree, end_of_current_insert: &mut Option<(TypedOffset, DiffTransformHunkInfo)>, old_expanded_hunks: &mut HashSet, @@ -2807,7 +3250,7 @@ impl MultiBuffer { continue; } let summary_to_add = old_snapshot - .text_summary_for_excerpt_offset_range::(start_offset..end_offset); + .text_summary_for_excerpt_offset_range::(start_offset..end_offset); if !Self::extend_last_buffer_content_transform( new_transforms, @@ -2828,7 +3271,7 @@ impl MultiBuffer { fn extend_last_buffer_content_transform( new_transforms: &mut SumTree, new_inserted_hunk_info: Option, - summary_to_add: TextSummary, + summary_to_add: MBTextSummary, ) -> bool { let mut did_extend = false; new_transforms.update_last( @@ -2914,14 +3357,14 @@ impl MultiBuffer { use util::RandomCharIter; let snapshot = self.read(cx); - let mut edits: Vec<(Range, Arc)> = Vec::new(); + let mut edits: Vec<(Range, Arc)> = Vec::new(); let mut last_end = None; for _ in 0..edit_count { if last_end.is_some_and(|last_end| last_end >= snapshot.len()) { break; } - let new_start = last_end.map_or(0, |last_end| last_end + 1); + let new_start = last_end.map_or(MultiBufferOffset::ZERO, |last_end| last_end + 1usize); let end = snapshot.clip_offset(rng.random_range(new_start..=snapshot.len()), Bias::Right); let start = snapshot.clip_offset(rng.random_range(new_start..=end), Bias::Right); @@ -3078,18 +3521,21 @@ impl EventEmitter for MultiBuffer {} impl MultiBufferSnapshot { pub fn text(&self) -> String { - self.chunks(0..self.len(), false) + self.chunks(MultiBufferOffset::ZERO..self.len(), false) .map(|chunk| chunk.text) .collect() } pub fn reversed_chars_at(&self, position: T) -> impl Iterator + '_ { - self.reversed_chunks_in_range(0..position.to_offset(self)) + self.reversed_chunks_in_range(MultiBufferOffset::ZERO..position.to_offset(self)) .flat_map(|c| c.chars().rev()) } - fn reversed_chunks_in_range(&self, range: Range) -> ReversedMultiBufferChunks<'_> { - let mut cursor = self.cursor::(); + fn reversed_chunks_in_range( + &self, + range: Range, + ) -> ReversedMultiBufferChunks<'_> { + let mut cursor = self.cursor::(); cursor.seek(&range.end); let current_chunks = cursor.region().as_ref().map(|region| { let start_overshoot = range.start.saturating_sub(region.range.start); @@ -3173,7 +3619,8 @@ impl MultiBufferSnapshot { buffer_id: excerpt.buffer_id, excerpt_id: excerpt.id, buffer_range: hunk.buffer_range.clone(), - diff_base_byte_range: 
hunk.diff_base_byte_range.clone(), + diff_base_byte_range: BufferOffset(hunk.diff_base_byte_range.start) + ..BufferOffset(hunk.diff_base_byte_range.end), secondary_status: hunk.secondary_status, }) }) @@ -3184,7 +3631,7 @@ impl MultiBufferSnapshot { range: Range, ) -> impl Iterator + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&range.start); std::iter::from_fn(move || { let region = cursor.region()?; @@ -3201,7 +3648,7 @@ impl MultiBufferSnapshot { range: Range, ) -> impl Iterator + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&range.start); std::iter::from_fn(move || { let region = cursor.region()?; @@ -3218,21 +3665,21 @@ impl MultiBufferSnapshot { pub fn ranges_to_buffer_ranges( &self, ranges: impl Iterator>, - ) -> impl Iterator, ExcerptId)> { + ) -> impl Iterator, ExcerptId)> { ranges.flat_map(|range| self.range_to_buffer_ranges(range).into_iter()) } pub fn range_to_buffer_ranges( &self, range: Range, - ) -> Vec<(&BufferSnapshot, Range, ExcerptId)> { + ) -> Vec<(&BufferSnapshot, Range, ExcerptId)> { let start = range.start.to_offset(self); let end = range.end.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&start); - let mut result: Vec<(&BufferSnapshot, Range, ExcerptId)> = Vec::new(); + let mut result: Vec<(&BufferSnapshot, Range, ExcerptId)> = Vec::new(); while let Some(region) = cursor.region() { if region.range.start > end { break; @@ -3264,11 +3711,18 @@ impl MultiBufferSnapshot { pub fn range_to_buffer_ranges_with_deleted_hunks( &self, range: Range, - ) -> impl Iterator, ExcerptId, Option)> + '_ { + ) -> impl Iterator< + Item = ( + &BufferSnapshot, + Range, + ExcerptId, + Option, + ), + > + '_ { let start = range.start.to_offset(self); let end = range.end.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&start); std::iter::from_fn(move || { @@ -3314,18 +3768,25 @@ impl MultiBufferSnapshot { /// /// The returned iterator yields each of these metadata items, paired with its range in /// multi-buffer coordinates. - fn lift_buffer_metadata<'a, D, M, I>( + fn lift_buffer_metadata<'a, MBD, M, I>( &'a self, - query_range: Range, - get_buffer_metadata: impl 'a + Fn(&'a BufferSnapshot, Range) -> Option, - ) -> impl Iterator, M, &'a Excerpt)> + 'a + query_range: Range, + get_buffer_metadata: impl 'a + Fn(&'a BufferSnapshot, Range) -> Option, + ) -> impl Iterator, M, &'a Excerpt)> + 'a where - I: Iterator, M)> + 'a, - D: TextDimension + Ord + Sub, + I: Iterator, M)> + 'a, + MBD: MultiBufferDimension + + Ord + + Sub + + ops::Add + + ops::AddAssign, + MBD::TextDimension: Sub + + ops::Add + + AddAssign + + Ord, { - let max_position = D::from_text_summary(&self.text_summary()); let mut current_excerpt_metadata: Option<(ExcerptId, I)> = None; - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); // Find the excerpt and buffer offset where the given range ends. 
cursor.seek(&query_range.end); @@ -3336,9 +3797,9 @@ impl MultiBufferSnapshot { let overshoot = if query_range.end > region.range.start { query_range.end - region.range.start } else { - D::default() + ::default() }; - buffer_end.add_assign(&overshoot); + buffer_end = buffer_end + overshoot; range_end = Some((region.excerpt.id, buffer_end)); break; } @@ -3348,7 +3809,7 @@ impl MultiBufferSnapshot { cursor.seek(&query_range.start); if let Some(region) = cursor.region().filter(|region| !region.is_main_buffer) - && region.range.start > D::zero(()) + && region.range.start > MBD::default() { cursor.prev() } @@ -3373,13 +3834,17 @@ impl MultiBufferSnapshot { buffer_start = region.buffer_range.start; if query_range.start > region.range.start { let overshoot = query_range.start - region.range.start; - buffer_start.add_assign(&overshoot); + buffer_start = buffer_start + overshoot; } buffer_start = buffer_start.min(region.buffer_range.end); } else { buffer_start = cursor.main_buffer_position()?; }; - let mut buffer_end = excerpt.range.context.end.summary::(&excerpt.buffer); + let mut buffer_end = excerpt + .range + .context + .end + .summary::(&excerpt.buffer); if let Some((end_excerpt_id, end_buffer_offset)) = range_end && excerpt.id == end_excerpt_id { @@ -3397,7 +3862,7 @@ impl MultiBufferSnapshot { { // Find the multibuffer regions that contain the start and end of // the metadata item's range. - if metadata_buffer_range.start > D::default() { + if metadata_buffer_range.start > ::default() { while let Some(region) = cursor.region() { if region.is_main_buffer && (region.buffer_range.end >= metadata_buffer_range.start @@ -3426,19 +3891,19 @@ impl MultiBufferSnapshot { if start_region.is_main_buffer && metadata_buffer_range.start > region_buffer_start { - start_position - .add_assign(&(metadata_buffer_range.start - region_buffer_start)); + start_position = + start_position + (metadata_buffer_range.start - region_buffer_start); start_position = start_position.min(start_region.range.end); } - let mut end_position = max_position; + let mut end_position = self.max_position(); if let Some(end_region) = &end_region { end_position = end_region.range.start; debug_assert!(end_region.is_main_buffer); let region_buffer_start = end_region.buffer_range.start; if metadata_buffer_range.end > region_buffer_start { - end_position - .add_assign(&(metadata_buffer_range.end - region_buffer_start)); + end_position = + end_position + (metadata_buffer_range.end - region_buffer_start); } end_position = end_position.min(end_region.range.end); } @@ -3464,7 +3929,9 @@ impl MultiBufferSnapshot { pub fn diff_hunk_before(&self, position: T) -> Option { let offset = position.to_offset(self); - let mut cursor = self.cursor::>(); + let mut cursor = self + .cursor::, DimensionPair>( + ); cursor.seek(&DimensionPair { key: offset, value: None, @@ -3540,7 +4007,7 @@ impl MultiBufferSnapshot { &self, start: T, scope_context: Option, - ) -> (Range, Option) { + ) -> (Range, Option) { let mut start = start.to_offset(self); let mut end = start; let mut next_chars = self.chars_at(start).peekable(); @@ -3599,12 +4066,16 @@ impl MultiBufferSnapshot { } } - pub fn len(&self) -> usize { + pub fn len(&self) -> MultiBufferOffset { self.diff_transforms.summary().output.len } + pub fn max_position(&self) -> MBD { + MBD::from_summary(&self.text_summary()) + } + pub fn is_empty(&self) -> bool { - self.excerpts.summary().text.len == 0 + self.diff_transforms.summary().output.len == MultiBufferOffset(0) } pub fn widest_line_number(&self) -> u32 { @@ 
-3614,7 +4085,7 @@ impl MultiBufferSnapshot { pub fn bytes_in_range(&self, range: Range) -> MultiBufferBytes<'_> { let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut excerpts = self.cursor::(); + let mut excerpts = self.cursor::(); excerpts.seek(&range.start); let mut chunk; @@ -3622,8 +4093,8 @@ impl MultiBufferSnapshot { let excerpt_bytes; if let Some(region) = excerpts.region() { let mut bytes = region.buffer.bytes_in_range( - region.buffer_range.start + range.start - region.range.start - ..(region.buffer_range.start + range.end - region.range.start) + region.buffer_range.start + (range.start - region.range.start) + ..(region.buffer_range.start + (range.end - region.range.start)) .min(region.buffer_range.end), ); chunk = bytes.next().unwrap_or(&[][..]); @@ -3663,7 +4134,7 @@ impl MultiBufferSnapshot { } pub fn row_infos(&self, start_row: MultiBufferRow) -> MultiBufferRows<'_> { - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&Point::new(start_row.0, 0)); let mut result = MultiBufferRows { point: Point::new(0, 0), @@ -3682,7 +4153,7 @@ impl MultiBufferSnapshot { ) -> MultiBufferChunks<'_> { let mut chunks = MultiBufferChunks { excerpt_offset_range: ExcerptOffset::new(0)..ExcerptOffset::new(0), - range: 0..0, + range: MultiBufferOffset::ZERO..MultiBufferOffset::ZERO, excerpts: self.excerpts.cursor(()), diff_transforms: self.diff_transforms.cursor(()), diffs: &self.diffs, @@ -3696,7 +4167,7 @@ impl MultiBufferSnapshot { chunks } - pub fn clip_offset(&self, offset: usize, bias: Bias) -> usize { + pub fn clip_offset(&self, offset: MultiBufferOffset, bias: Bias) -> MultiBufferOffset { self.clip_dimension(offset, bias, text::BufferSnapshot::clip_offset) } @@ -3704,7 +4175,11 @@ impl MultiBufferSnapshot { self.clip_dimension(point, bias, text::BufferSnapshot::clip_point) } - pub fn clip_offset_utf16(&self, offset: OffsetUtf16, bias: Bias) -> OffsetUtf16 { + pub fn clip_offset_utf16( + &self, + offset: MultiBufferOffsetUtf16, + bias: Bias, + ) -> MultiBufferOffsetUtf16 { self.clip_dimension(offset, bias, text::BufferSnapshot::clip_offset_utf16) } @@ -3714,11 +4189,11 @@ impl MultiBufferSnapshot { }) } - pub fn offset_to_point(&self, offset: usize) -> Point { + pub fn offset_to_point(&self, offset: MultiBufferOffset) -> Point { self.convert_dimension(offset, text::BufferSnapshot::offset_to_point) } - pub fn offset_to_point_utf16(&self, offset: usize) -> PointUtf16 { + pub fn offset_to_point_utf16(&self, offset: MultiBufferOffset) -> PointUtf16 { self.convert_dimension(offset, text::BufferSnapshot::offset_to_point_utf16) } @@ -3730,40 +4205,41 @@ impl MultiBufferSnapshot { self.convert_dimension(point, text::BufferSnapshot::point_utf16_to_point) } - pub fn point_to_offset(&self, point: Point) -> usize { + pub fn point_to_offset(&self, point: Point) -> MultiBufferOffset { self.convert_dimension(point, text::BufferSnapshot::point_to_offset) } - pub fn point_to_offset_utf16(&self, point: Point) -> OffsetUtf16 { + pub fn point_to_offset_utf16(&self, point: Point) -> MultiBufferOffsetUtf16 { self.convert_dimension(point, text::BufferSnapshot::point_to_offset_utf16) } - pub fn offset_utf16_to_offset(&self, offset: OffsetUtf16) -> usize { + pub fn offset_utf16_to_offset(&self, offset: MultiBufferOffsetUtf16) -> MultiBufferOffset { self.convert_dimension(offset, text::BufferSnapshot::offset_utf16_to_offset) } - pub fn offset_to_offset_utf16(&self, offset: usize) -> OffsetUtf16 { + pub fn offset_to_offset_utf16(&self, offset: MultiBufferOffset) 
-> MultiBufferOffsetUtf16 { self.convert_dimension(offset, text::BufferSnapshot::offset_to_offset_utf16) } - pub fn point_utf16_to_offset(&self, point: PointUtf16) -> usize { + pub fn point_utf16_to_offset(&self, point: PointUtf16) -> MultiBufferOffset { self.convert_dimension(point, text::BufferSnapshot::point_utf16_to_offset) } - pub fn point_utf16_to_offset_utf16(&self, point: PointUtf16) -> OffsetUtf16 { + pub fn point_utf16_to_offset_utf16(&self, point: PointUtf16) -> MultiBufferOffsetUtf16 { self.convert_dimension(point, text::BufferSnapshot::point_utf16_to_offset_utf16) } - fn clip_dimension( + fn clip_dimension( &self, - position: D, + position: MBD, bias: Bias, - clip_buffer_position: fn(&text::BufferSnapshot, D, Bias) -> D, - ) -> D + clip_buffer_position: fn(&text::BufferSnapshot, BD, Bias) -> BD, + ) -> MBD where - D: TextDimension + Ord + Sub, + MBD: MultiBufferDimension + Ord + Sub + ops::AddAssign<::Output>, + BD: TextDimension + Sub::Output> + AddAssign<::Output>, { - let mut cursor = self.cursor(); + let mut cursor = self.cursor::(); cursor.seek(&position); if let Some(region) = cursor.region() { if position >= region.range.end { @@ -3771,27 +4247,29 @@ impl MultiBufferSnapshot { } let overshoot = position - region.range.start; let mut buffer_position = region.buffer_range.start; - buffer_position.add_assign(&overshoot); + buffer_position += overshoot; let clipped_buffer_position = clip_buffer_position(region.buffer, buffer_position, bias); let mut position = region.range.start; - position.add_assign(&(clipped_buffer_position - region.buffer_range.start)); + position += clipped_buffer_position - region.buffer_range.start; position } else { - D::from_text_summary(&self.text_summary()) + self.max_position() } } - fn convert_dimension( + fn convert_dimension( &self, - key: D1, - convert_buffer_dimension: fn(&text::BufferSnapshot, D1) -> D2, - ) -> D2 + key: MBR1, + convert_buffer_dimension: fn(&text::BufferSnapshot, BR1) -> BR2, + ) -> MBR2 where - D1: TextDimension + Ord + Sub, - D2: TextDimension + Ord + Sub, + MBR1: MultiBufferDimension + Ord + Sub + ops::AddAssign<::Output>, + BR1: TextDimension + Sub::Output> + AddAssign<::Output>, + MBR2: MultiBufferDimension + Ord + Sub + ops::AddAssign<::Output>, + BR2: TextDimension + Sub::Output> + AddAssign<::Output>, { - let mut cursor = self.cursor::>(); + let mut cursor = self.cursor::, DimensionPair>(); cursor.seek(&DimensionPair { key, value: None }); if let Some(region) = cursor.region() { if key >= region.range.end.key { @@ -3802,34 +4280,34 @@ impl MultiBufferSnapshot { let buffer_start_key = region.buffer_range.start.key; let buffer_start_value = region.buffer_range.start.value.unwrap(); let mut buffer_key = buffer_start_key; - buffer_key.add_assign(&(key - start_key)); + buffer_key += key - start_key; let buffer_value = convert_buffer_dimension(region.buffer, buffer_key); let mut result = start_value; - result.add_assign(&(buffer_value - buffer_start_value)); + result += buffer_value - buffer_start_value; result } else { - D2::from_text_summary(&self.text_summary()) + self.max_position() } } pub fn point_to_buffer_offset( &self, point: T, - ) -> Option<(&BufferSnapshot, usize)> { + ) -> Option<(&BufferSnapshot, BufferOffset)> { let offset = point.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&offset); let region = cursor.region()?; let overshoot = offset - region.range.start; let buffer_offset = region.buffer_range.start + overshoot; - if buffer_offset == 
region.buffer.len() + 1 + if buffer_offset == BufferOffset(region.buffer.len() + 1) && region.has_trailing_newline && !region.is_main_buffer { let main_buffer_position = cursor.main_buffer_position()?; let buffer_snapshot = &cursor.excerpt()?.buffer; return Some((buffer_snapshot, main_buffer_position)); - } else if buffer_offset > region.buffer.len() { + } else if buffer_offset > BufferOffset(region.buffer.len()) { return None; } Some((region.buffer, buffer_offset)) @@ -3839,7 +4317,7 @@ impl MultiBufferSnapshot { &self, point: Point, ) -> Option<(&BufferSnapshot, Point, ExcerptId)> { - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&point); let region = cursor.region()?; let overshoot = point - region.range.start; @@ -3864,7 +4342,7 @@ impl MultiBufferSnapshot { let mut result = BTreeMap::new(); let mut rows_for_excerpt = Vec::new(); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); let mut rows = rows.into_iter().peekable(); let mut prev_row = u32::MAX; let mut prev_language_indent_size = IndentSize::default(); @@ -4000,7 +4478,7 @@ impl MultiBufferSnapshot { &self, row: MultiBufferRow, ) -> Option<(&BufferSnapshot, Range)> { - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); let point = Point::new(row.0, 0); cursor.seek(&point); let region = cursor.region()?; @@ -4023,23 +4501,23 @@ impl MultiBufferSnapshot { MultiBufferRow(self.text_summary().lines.row) } - pub fn text_summary(&self) -> TextSummary { + pub fn text_summary(&self) -> MBTextSummary { self.diff_transforms.summary().output } - pub fn text_summary_for_range(&self, range: Range) -> D + pub fn text_summary_for_range(&self, range: Range) -> MBD where - D: TextDimension, + MBD: MultiBufferDimension + AddAssign, O: ToOffset, { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut cursor = self .diff_transforms - .cursor::>(()); + .cursor::>(()); cursor.seek(&range.start, Bias::Right); let Some(first_transform) = cursor.item() else { - return D::from_text_summary(&TextSummary::default()); + return MBD::from_summary(&MBTextSummary::default()); }; let diff_transform_start = cursor.start().0; @@ -4072,14 +4550,18 @@ impl MultiBufferSnapshot { buffer_end -= 1; } - let mut summary = - base_text.text_summary_for_range::(buffer_start..buffer_end); + let mut summary = base_text + .text_summary_for_range::(buffer_start..buffer_end); if include_trailing_newline { - summary.add_assign(&D::from_text_summary(&TextSummary::newline())) + summary.add_assign(&::from_text_summary( + &TextSummary::newline(), + )) } - summary + let mut result = MBD::default(); + result.add_text_dim(&summary); + result } }; if range.end < diff_transform_end { @@ -4087,9 +4569,11 @@ impl MultiBufferSnapshot { } cursor.next(); - result.add_assign(&D::from_text_summary( - &cursor.summary(&range.end, Bias::Right), - )); + result.add_mb_text_summary( + &cursor + .summary::<_, OutputDimension<_>>(&range.end, Bias::Right) + .0, + ); let Some(last_transform) = cursor.item() else { return result; @@ -4099,7 +4583,7 @@ impl MultiBufferSnapshot { let suffix = match last_transform { DiffTransform::BufferContent { .. 
} => { let end = cursor.start().1 + ExcerptOffset::new(overshoot); - self.text_summary_for_excerpt_offset_range::(cursor.start().1..end) + self.text_summary_for_excerpt_offset_range::(cursor.start().1..end) } DiffTransform::DeletedHunk { base_text_byte_range, @@ -4112,24 +4596,30 @@ impl MultiBufferSnapshot { panic!("{:?} is in non-existent deleted hunk", range.end) }; - let mut suffix = base_text - .text_summary_for_range::(base_text_byte_range.start..buffer_end); + let mut suffix = base_text.text_summary_for_range::( + base_text_byte_range.start..buffer_end, + ); if *has_trailing_newline && buffer_end == base_text_byte_range.end + 1 { - suffix.add_assign(&D::from_text_summary(&TextSummary::newline())) + suffix.add_assign(&::from_text_summary( + &TextSummary::from("\n"), + )) } - suffix + + let mut result = MBD::default(); + result.add_text_dim(&suffix); + result } }; - result.add_assign(&suffix); + result += suffix; result } - fn text_summary_for_excerpt_offset_range(&self, mut range: Range) -> D + fn text_summary_for_excerpt_offset_range(&self, mut range: Range) -> MBD where - D: TextDimension, + MBD: MultiBufferDimension + AddAssign, { - let mut summary = D::zero(()); + let mut summary = MBD::default(); let mut cursor = self.excerpts.cursor::(()); cursor.seek(&range.start, Bias::Right); if let Some(excerpt) = cursor.item() { @@ -4142,34 +4632,36 @@ impl MultiBufferSnapshot { let start_in_excerpt = excerpt_start + (range.start - *cursor.start()).value; let end_in_excerpt = excerpt_start + (cmp::min(end_before_newline, range.end) - *cursor.start()).value; - summary.add_assign( + summary.add_text_dim( &excerpt .buffer - .text_summary_for_range(start_in_excerpt..end_in_excerpt), + .text_summary_for_range::( + start_in_excerpt..end_in_excerpt, + ), ); if range.end > end_before_newline { - summary.add_assign(&D::from_text_summary(&TextSummary::from("\n"))); + summary.add_mb_text_summary(&MBTextSummary::from(TextSummary::newline())); } cursor.next(); } if range.end > *cursor.start() { - summary.add_assign( - &cursor - .summary::<_, ExcerptDimension>(&range.end, Bias::Right) - .0, - ); + summary += cursor + .summary::<_, ExcerptDimension>(&range.end, Bias::Right) + .0; if let Some(excerpt) = cursor.item() { range.end = cmp::max(*cursor.start(), range.end); let excerpt_start = excerpt.range.context.start.to_offset(&excerpt.buffer); let end_in_excerpt = excerpt_start + (range.end - *cursor.start()).value; - summary.add_assign( + summary.add_text_dim( &excerpt .buffer - .text_summary_for_range(excerpt_start..end_in_excerpt), + .text_summary_for_range::( + excerpt_start..end_in_excerpt, + ), ); } } @@ -4177,24 +4669,28 @@ impl MultiBufferSnapshot { summary } - pub fn summary_for_anchor(&self, anchor: &Anchor) -> D + pub fn summary_for_anchor(&self, anchor: &Anchor) -> MBD where - D: TextDimension + Ord + Sub, + MBD: MultiBufferDimension + + Ord + + Sub + + AddAssign, + MBD::TextDimension: Sub + Ord, { self.summaries_for_anchors([anchor])[0] } - fn resolve_summary_for_anchor( + fn resolve_summary_for_anchor( &self, anchor: &Anchor, - excerpt_position: D, + excerpt_position: MBD, diff_transforms: &mut Cursor< DiffTransform, - Dimensions, OutputDimension>, + Dimensions, OutputDimension>, >, - ) -> D + ) -> MBD where - D: TextDimension + Ord + Sub, + MBD: MultiBufferDimension + Ord + Sub + AddAssign<::Output>, { loop { let transform_end_position = diff_transforms.end().0.0; @@ -4221,10 +4717,11 @@ impl MultiBufferSnapshot { if base_text_offset >= base_text_byte_range.start && base_text_offset <= 
base_text_byte_range.end { - let position_in_hunk = base_text.text_summary_for_range::( - base_text_byte_range.start..base_text_offset, - ); - position.add_assign(&position_in_hunk); + let position_in_hunk = base_text + .text_summary_for_range::( + base_text_byte_range.start..base_text_offset, + ); + position.add_text_dim(&position_in_hunk); } else if at_transform_end { diff_transforms.next(); continue; @@ -4237,7 +4734,7 @@ impl MultiBufferSnapshot { continue; } let overshoot = excerpt_position - diff_transforms.start().0.0; - position.add_assign(&overshoot); + position += overshoot; } } @@ -4282,16 +4779,20 @@ impl MultiBufferSnapshot { excerpt_id } - pub fn summaries_for_anchors<'a, D, I>(&'a self, anchors: I) -> Vec + pub fn summaries_for_anchors<'a, MBD, I>(&'a self, anchors: I) -> Vec where - D: TextDimension + Ord + Sub, + MBD: MultiBufferDimension + + Ord + + Sub + + AddAssign, + MBD::TextDimension: Sub + Ord, I: 'a + IntoIterator, { let mut anchors = anchors.into_iter().peekable(); let mut cursor = self.excerpts.cursor::(()); let mut diff_transforms_cursor = self .diff_transforms - .cursor::, OutputDimension>>(()); + .cursor::, OutputDimension>>(()); diff_transforms_cursor.next(); let mut summaries = Vec::new(); @@ -4308,7 +4809,7 @@ impl MultiBufferSnapshot { cursor.prev(); } - let excerpt_start_position = D::from_text_summary(&cursor.start().text); + let excerpt_start_position = MBD::from_summary(&cursor.start().text); if let Some(excerpt) = cursor.item() { if excerpt.id != excerpt_id && excerpt_id != ExcerptId::max() { let position = self.resolve_summary_for_anchor( @@ -4319,19 +4820,26 @@ impl MultiBufferSnapshot { summaries.extend(excerpt_anchors.map(|_| position)); continue; } - let excerpt_buffer_start = - excerpt.range.context.start.summary::(&excerpt.buffer); - let excerpt_buffer_end = excerpt.range.context.end.summary::(&excerpt.buffer); + let excerpt_buffer_start = excerpt + .range + .context + .start + .summary::(&excerpt.buffer); + let excerpt_buffer_end = excerpt + .range + .context + .end + .summary::(&excerpt.buffer); for (buffer_summary, anchor) in excerpt .buffer - .summaries_for_anchors_with_payload::( + .summaries_for_anchors_with_payload::( excerpt_anchors.map(|a| (&a.text_anchor, a)), ) { let summary = cmp::min(excerpt_buffer_end, buffer_summary); let mut position = excerpt_start_position; if summary > excerpt_buffer_start { - position.add_assign(&(summary - excerpt_buffer_start)); + position += summary - excerpt_buffer_start; } if position > diff_transforms_cursor.start().0.0 { @@ -4360,14 +4868,14 @@ impl MultiBufferSnapshot { summaries } - pub fn dimensions_from_points<'a, D>( + pub fn dimensions_from_points<'a, MBD>( &'a self, points: impl 'a + IntoIterator, - ) -> impl 'a + Iterator + ) -> impl 'a + Iterator where - D: TextDimension + Sub, + MBD: MultiBufferDimension + Sub + AddAssign<::Output>, { - let mut cursor = self.cursor::>(); + let mut cursor = self.cursor::, Point>(); cursor.seek(&DimensionPair { key: Point::default(), value: None, @@ -4383,19 +4891,19 @@ impl MultiBufferSnapshot { if let Some(region) = cursor.region() { let overshoot = point - region.range.start.key; - let buffer_point = region.buffer_range.start.key + overshoot; + let buffer_point = region.buffer_range.start + overshoot; let mut position = region.range.start.value.unwrap(); - position.add_assign( + position.add_text_dim( ®ion .buffer - .text_summary_for_range(region.buffer_range.start.key..buffer_point), + .text_summary_for_range(region.buffer_range.start..buffer_point), ); if 
point == region.range.end.key && region.has_trailing_newline { - position.add_assign(&D::from_text_summary(&TextSummary::newline())); + position.add_mb_text_summary(&MBTextSummary::from(TextSummary::newline())); } Some(position) } else { - Some(D::from_text_summary(&self.text_summary())) + Some(MBD::from_summary(&self.text_summary())) } }) } @@ -4503,7 +5011,7 @@ impl MultiBufferSnapshot { // offset in the excerpts, and whether the position is within a deleted hunk. let mut diff_transforms = self .diff_transforms - .cursor::>(()); + .cursor::>(()); diff_transforms.seek(&offset, Bias::Right); if offset == diff_transforms.start().0 @@ -4651,7 +5159,11 @@ impl MultiBufferSnapshot { .map(|excerpt| (excerpt.id, &excerpt.buffer, excerpt.range.clone())) } - fn cursor(&self) -> MultiBufferCursor<'_, D> { + fn cursor<'a, MBD, BD>(&'a self) -> MultiBufferCursor<'a, MBD, BD> + where + MBD: MultiBufferDimension + Ord + Sub + ops::AddAssign<::Output>, + BD: TextDimension + AddAssign<::Output>, + { let excerpts = self.excerpts.cursor(()); let diff_transforms = self.diff_transforms.cursor(()); MultiBufferCursor { @@ -4666,11 +5178,13 @@ impl MultiBufferSnapshot { let start_locator = self.excerpt_locator_for_id(excerpt_id); let mut excerpts = self .excerpts - .cursor::, ExcerptDimension>>(()); + .cursor::, ExcerptDimension>>(()); excerpts.seek(&Some(start_locator), Bias::Left); excerpts.prev(); - let mut diff_transforms = self.diff_transforms.cursor::>(()); + let mut diff_transforms = self + .diff_transforms + .cursor::>(()); diff_transforms.seek(&excerpts.start().1, Bias::Left); if diff_transforms.end().excerpt_dimension < excerpts.start().1 { diff_transforms.next(); @@ -4680,8 +5194,8 @@ impl MultiBufferSnapshot { Some(MultiBufferExcerpt { excerpt, offset: diff_transforms.start().output_dimension.0, - buffer_offset: excerpt.range.context.start.to_offset(&excerpt.buffer), - excerpt_offset: excerpts.start().1.clone(), + buffer_offset: BufferOffset(excerpt.range.context.start.to_offset(&excerpt.buffer)), + excerpt_offset: excerpts.start().1.0, diff_transforms, }) } @@ -4704,7 +5218,7 @@ impl MultiBufferSnapshot { panic!("not supported") } Bound::Unbounded => { - start_offset = 0; + start_offset = MultiBufferOffset::ZERO; Bound::Unbounded } }; @@ -4714,8 +5228,7 @@ impl MultiBufferSnapshot { Bound::Unbounded => Bound::Unbounded, }; let bounds = (start, end); - - let mut cursor = self.cursor::>(); + let mut cursor = self.cursor::, BufferOffset>(); cursor.seek(&DimensionPair { key: start_offset, value: None, @@ -4798,8 +5311,10 @@ impl MultiBufferSnapshot { pub fn innermost_enclosing_bracket_ranges( &self, range: Range, - range_filter: Option<&dyn Fn(&BufferSnapshot, Range, Range) -> bool>, - ) -> Option<(Range, Range)> { + range_filter: Option< + &dyn Fn(&BufferSnapshot, Range, Range) -> bool, + >, + ) -> Option<(Range, Range)> { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut excerpt = self.excerpt_containing(range.clone())?; let buffer = excerpt.buffer(); @@ -4807,9 +5322,15 @@ impl MultiBufferSnapshot { // Filter to ranges contained in the excerpt let range_filter = |open: Range, close: Range| -> bool { - excerpt_buffer_range.contains(&open.start) - && excerpt_buffer_range.contains(&close.end) - && range_filter.is_none_or(|filter| filter(buffer, open, close)) + excerpt_buffer_range.contains(&BufferOffset(open.start)) + && excerpt_buffer_range.contains(&BufferOffset(close.end)) + && range_filter.is_none_or(|filter| { + filter( + buffer, + 
BufferOffset(open.start)..BufferOffset(close.end), + BufferOffset(close.start)..BufferOffset(close.end), + ) + }) }; let (open, close) = excerpt.buffer().innermost_enclosing_bracket_ranges( @@ -4818,8 +5339,8 @@ impl MultiBufferSnapshot { )?; Some(( - excerpt.map_range_from_buffer(open), - excerpt.map_range_from_buffer(close), + excerpt.map_range_from_buffer(BufferOffset(open.start)..BufferOffset(open.end)), + excerpt.map_range_from_buffer(BufferOffset(close.start)..BufferOffset(close.end)), )) } @@ -4828,7 +5349,8 @@ impl MultiBufferSnapshot { pub fn enclosing_bracket_ranges( &self, range: Range, - ) -> Option, Range)> + '_> { + ) -> Option, Range)> + '_> + { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut excerpt = self.excerpt_containing(range.clone())?; @@ -4837,10 +5359,14 @@ impl MultiBufferSnapshot { .buffer() .enclosing_bracket_ranges(excerpt.map_range_to_buffer(range)) .filter_map(move |pair| { - if excerpt.contains_buffer_range(pair.open_range.start..pair.close_range.end) { + let open_range = + BufferOffset(pair.open_range.start)..BufferOffset(pair.open_range.end); + let close_range = + BufferOffset(pair.close_range.start)..BufferOffset(pair.close_range.end); + if excerpt.contains_buffer_range(open_range.start..close_range.end) { Some(( - excerpt.map_range_from_buffer(pair.open_range), - excerpt.map_range_from_buffer(pair.close_range), + excerpt.map_range_from_buffer(open_range), + excerpt.map_range_from_buffer(close_range), )) } else { None @@ -4855,7 +5381,7 @@ impl MultiBufferSnapshot { &self, range: Range, options: TreeSitterOptions, - ) -> impl Iterator, TextObject)> + '_ { + ) -> impl Iterator, TextObject)> + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); self.excerpt_containing(range.clone()) .map(|mut excerpt| { @@ -4863,6 +5389,7 @@ impl MultiBufferSnapshot { .buffer() .text_object_ranges(excerpt.map_range_to_buffer(range), options) .filter_map(move |(range, text_object)| { + let range = BufferOffset(range.start)..BufferOffset(range.end); if excerpt.contains_buffer_range(range.clone()) { Some((excerpt.map_range_from_buffer(range), text_object)) } else { @@ -4879,7 +5406,8 @@ impl MultiBufferSnapshot { pub fn bracket_ranges( &self, range: Range, - ) -> Option, Range)> + '_> { + ) -> Option, Range)> + '_> + { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut excerpt = self.excerpt_containing(range.clone())?; @@ -4888,11 +5416,14 @@ impl MultiBufferSnapshot { .buffer() .bracket_ranges(excerpt.map_range_to_buffer(range)) .filter_map(move |pair| { - let buffer_range = pair.open_range.start..pair.close_range.end; - if excerpt.contains_buffer_range(buffer_range) { + let open_range = + BufferOffset(pair.open_range.start)..BufferOffset(pair.open_range.end); + let close_range = + BufferOffset(pair.close_range.start)..BufferOffset(pair.close_range.end); + if excerpt.contains_buffer_range(open_range.start..close_range.end) { Some(( - excerpt.map_range_from_buffer(pair.open_range), - excerpt.map_range_from_buffer(pair.close_range), + excerpt.map_range_from_buffer(open_range), + excerpt.map_range_from_buffer(close_range), )) } else { None @@ -4905,7 +5436,7 @@ impl MultiBufferSnapshot { &'a self, range: Range, redaction_enabled: impl Fn(Option<&Arc>) -> bool + 'a, - ) -> impl Iterator> + 'a { + ) -> impl Iterator> + 'a { let range = range.start.to_offset(self)..range.end.to_offset(self); self.lift_buffer_metadata(range, move |buffer, range| { if redaction_enabled(buffer.file()) { @@ -4920,7 +5451,7 @@ impl 
MultiBufferSnapshot { pub fn runnable_ranges( &self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator, language::RunnableRange)> + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); self.lift_buffer_metadata(range, move |buffer, range| { Some( @@ -4933,10 +5464,7 @@ impl MultiBufferSnapshot { .map(|runnable| (runnable.run_range.clone(), runnable)), ) }) - .map(|(run_range, runnable, _)| language::RunnableRange { - run_range, - ..runnable - }) + .map(|(run_range, runnable, _)| (run_range, runnable)) } pub fn line_indents( @@ -4945,7 +5473,7 @@ impl MultiBufferSnapshot { buffer_filter: impl Fn(&BufferSnapshot) -> bool, ) -> impl Iterator { let max_point = self.max_point(); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&Point::new(start_row.0, 0)); iter::from_fn(move || { let mut region = cursor.region()?; @@ -4983,7 +5511,7 @@ impl MultiBufferSnapshot { buffer_filter: impl Fn(&BufferSnapshot) -> bool, ) -> impl Iterator { let max_point = self.max_point(); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&Point::new(end_row.0, 0)); iter::from_fn(move || { let mut region = cursor.region()?; @@ -5290,7 +5818,7 @@ impl MultiBufferSnapshot { cx, ) }) - .unwrap_or_else(move || self.language_settings_at(0, cx)) + .unwrap_or_else(move || self.language_settings_at(MultiBufferOffset::ZERO, cx)) } pub fn language_settings_at<'a, T: ToOffset>( @@ -5350,34 +5878,44 @@ impl MultiBufferSnapshot { buffer_id: BufferId, group_id: usize, ) -> impl Iterator> + '_ { - self.lift_buffer_metadata(Point::zero()..self.max_point(), move |buffer, range| { - if buffer.remote_id() != buffer_id { - return None; - }; - Some( - buffer - .diagnostics_in_range(range, false) - .filter(move |diagnostic| diagnostic.diagnostic.group_id == group_id) - .map(move |DiagnosticEntryRef { diagnostic, range }| (range, diagnostic)), - ) - }) + self.lift_buffer_metadata::( + Point::zero()..self.max_point(), + move |buffer, range| { + if buffer.remote_id() != buffer_id { + return None; + }; + Some( + buffer + .diagnostics_in_range(range, false) + .filter(move |diagnostic| diagnostic.diagnostic.group_id == group_id) + .map(move |DiagnosticEntryRef { diagnostic, range }| (range, diagnostic)), + ) + }, + ) .map(|(range, diagnostic, _)| DiagnosticEntryRef { diagnostic, range }) } - pub fn diagnostics_in_range<'a, T>( + pub fn diagnostics_in_range<'a, MBD>( &'a self, - range: Range, - ) -> impl Iterator> + 'a + range: Range, + ) -> impl Iterator> + 'a where - T: 'a + MBD::TextDimension: 'a + text::ToOffset + text::FromAnchor - + TextDimension + + Sub + + fmt::Debug + + ops::Add + + ops::AddAssign + + Ord, + MBD: MultiBufferDimension + Ord - + Sub - + fmt::Debug, + + Sub + + ops::Add + + ops::AddAssign + + 'a, { - self.lift_buffer_metadata(range, move |buffer, buffer_range| { + self.lift_buffer_metadata::(range, move |buffer, buffer_range| { Some( buffer .diagnostics_in_range(buffer_range.start..buffer_range.end, false) @@ -5387,20 +5925,24 @@ impl MultiBufferSnapshot { .map(|(range, diagnostic, _)| DiagnosticEntryRef { diagnostic, range }) } - pub fn diagnostics_with_buffer_ids_in_range<'a, T>( + pub fn diagnostics_with_buffer_ids_in_range<'a, MBD>( &'a self, - range: Range, - ) -> impl Iterator)> + 'a + range: Range, + ) -> impl Iterator)> + 'a where - T: 'a + MBD: MultiBufferDimension + + Ord + + Sub + + ops::Add + + ops::AddAssign, + MBD::TextDimension: Sub + + ops::Add + text::ToOffset + text::FromAnchor - + TextDimension - + Ord - + 
Sub - + fmt::Debug, + + AddAssign + + Ord, { - self.lift_buffer_metadata(range, move |buffer, buffer_range| { + self.lift_buffer_metadata::(range, move |buffer, buffer_range| { Some( buffer .diagnostics_in_range(buffer_range.start..buffer_range.end, false) @@ -5413,41 +5955,20 @@ impl MultiBufferSnapshot { pub fn syntax_ancestor( &self, range: Range, - ) -> Option<(tree_sitter::Node<'_>, Range)> { + ) -> Option<(tree_sitter::Node<'_>, Range)> { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut excerpt = self.excerpt_containing(range.clone())?; let node = excerpt .buffer() .syntax_ancestor(excerpt.map_range_to_buffer(range))?; let node_range = node.byte_range(); + let node_range = BufferOffset(node_range.start)..BufferOffset(node_range.end); if !excerpt.contains_buffer_range(node_range.clone()) { return None; }; Some((node, excerpt.map_range_from_buffer(node_range))) } - pub fn syntax_next_sibling( - &self, - range: Range, - ) -> Option> { - let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut excerpt = self.excerpt_containing(range.clone())?; - excerpt - .buffer() - .syntax_next_sibling(excerpt.map_range_to_buffer(range)) - } - - pub fn syntax_prev_sibling( - &self, - range: Range, - ) -> Option> { - let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut excerpt = self.excerpt_containing(range.clone())?; - excerpt - .buffer() - .syntax_prev_sibling(excerpt.map_range_to_buffer(range)) - } - pub fn outline(&self, theme: Option<&SyntaxTheme>) -> Option> { let (excerpt_id, _, buffer) = self.as_singleton()?; let outline = buffer.outline(theme); @@ -5587,7 +6108,7 @@ impl MultiBufferSnapshot { sought_exact = true; } if sought_exact { - let start = cursor.start().1.clone(); + let start = cursor.start().1; let end = cursor.end().1; let mut diff_transforms = self .diff_transforms @@ -5625,7 +6146,7 @@ impl MultiBufferSnapshot { range: Range, ) -> Option> { let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut cursor = self.cursor::(); + let mut cursor = self.cursor::(); cursor.seek(&range.start); let start_excerpt = cursor.excerpt()?; @@ -5640,7 +6161,7 @@ impl MultiBufferSnapshot { let region = cursor.region()?; let offset = region.range.start; let buffer_offset = start_excerpt.buffer_start_offset(); - let excerpt_offset = cursor.excerpts.start().clone(); + let excerpt_offset = cursor.excerpts.start().0; Some(MultiBufferExcerpt { diff_transforms: cursor.diff_transforms, excerpt: start_excerpt, @@ -5761,7 +6282,11 @@ impl MultiBufferSnapshot { #[cfg(any(test, feature = "test-support"))] impl MultiBufferSnapshot { - pub fn random_byte_range(&self, start_offset: usize, rng: &mut impl rand::Rng) -> Range { + pub fn random_byte_range( + &self, + start_offset: MultiBufferOffset, + rng: &mut impl rand::Rng, + ) -> Range { let end = self.clip_offset(rng.random_range(start_offset..=self.len()), Bias::Right); let start = self.clip_offset(rng.random_range(start_offset..=end), Bias::Right); start..end @@ -5795,7 +6320,7 @@ impl MultiBufferSnapshot { if self.diff_transforms.summary().input != self.excerpts.summary().text { panic!( "incorrect input summary. expected {:?}, got {:?}. 
transforms: {:+?}", - self.excerpts.summary().text.len, + self.excerpts.summary().text, self.diff_transforms.summary().input, self.diff_transforms.items(()), ); @@ -5819,7 +6344,7 @@ impl MultiBufferSnapshot { self.diff_transforms.items(()) ); } - if summary.len == 0 && !self.is_empty() { + if summary.len == MultiBufferOffset(0) && !self.is_empty() { panic!("empty buffer content transform"); } } @@ -5828,11 +6353,12 @@ impl MultiBufferSnapshot { } } -impl<'a, D> MultiBufferCursor<'a, D> +impl<'a, MBD, BD> MultiBufferCursor<'a, MBD, BD> where - D: TextDimension + Ord + Sub, + MBD: MultiBufferDimension + Ord + Sub + ops::AddAssign<::Output>, + BD: TextDimension + AddAssign<::Output>, { - fn seek(&mut self, position: &D) { + fn seek(&mut self, position: &MBD) { self.cached_region.take(); self.diff_transforms .seek(&OutputDimension(*position), Bias::Right); @@ -5842,20 +6368,19 @@ where self.diff_transforms.prev(); } - let mut excerpt_position = self.diff_transforms.start().excerpt_dimension.0; + let mut excerpt_position = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { let overshoot = *position - self.diff_transforms.start().output_dimension.0; - excerpt_position.add_assign(&overshoot); + excerpt_position.0 += overshoot; } - self.excerpts - .seek(&ExcerptDimension(excerpt_position), Bias::Right); - if self.excerpts.item().is_none() && excerpt_position == self.excerpts.start().0 { + self.excerpts.seek(&excerpt_position, Bias::Right); + if self.excerpts.item().is_none() && excerpt_position == *self.excerpts.start() { self.excerpts.prev(); } } - fn seek_forward(&mut self, position: &D) { + fn seek_forward(&mut self, position: &MBD) { self.cached_region.take(); self.diff_transforms .seek_forward(&OutputDimension(*position), Bias::Right); @@ -5866,14 +6391,13 @@ where } let overshoot = *position - self.diff_transforms.start().output_dimension.0; - let mut excerpt_position = self.diff_transforms.start().excerpt_dimension.0; + let mut excerpt_position = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. 
}) = self.diff_transforms.item() { - excerpt_position.add_assign(&overshoot); + excerpt_position.0 += overshoot; } - self.excerpts - .seek_forward(&ExcerptDimension(excerpt_position), Bias::Right); - if self.excerpts.item().is_none() && excerpt_position == self.excerpts.start().0 { + self.excerpts.seek_forward(&excerpt_position, Bias::Right); + if self.excerpts.item().is_none() && excerpt_position == *self.excerpts.start() { self.excerpts.prev(); } } @@ -5949,7 +6473,7 @@ where } } - fn region(&mut self) -> Option> { + fn region(&mut self) -> Option> { if self.cached_region.is_none() { self.cached_region = self.build_region(); } @@ -5991,17 +6515,17 @@ where }) } - fn main_buffer_position(&self) -> Option { + fn main_buffer_position(&self) -> Option { let excerpt = self.excerpts.item()?; let buffer = &excerpt.buffer; - let buffer_context_start = excerpt.range.context.start.summary::(buffer); + let buffer_context_start = excerpt.range.context.start.summary::(buffer); let mut buffer_start = buffer_context_start; - let overshoot = self.diff_transforms.end().excerpt_dimension.0 - self.excerpts.start().0; - buffer_start.add_assign(&overshoot); + let overshoot = self.diff_transforms.end().excerpt_dimension - *self.excerpts.start(); + buffer_start += overshoot.0; Some(buffer_start) } - fn build_region(&self) -> Option> { + fn build_region(&self) -> Option> { let excerpt = self.excerpts.item()?; match self.diff_transforms.item()? { DiffTransform::DeletedHunk { @@ -6014,10 +6538,10 @@ where let diff = self.diffs.get(buffer_id)?; let buffer = diff.base_text(); let mut rope_cursor = buffer.as_rope().cursor(0); - let buffer_start = rope_cursor.summary::(base_text_byte_range.start); - let buffer_range_len = rope_cursor.summary::(base_text_byte_range.end); + let buffer_start = rope_cursor.summary::(base_text_byte_range.start); + let buffer_range_len = rope_cursor.summary::(base_text_byte_range.end); let mut buffer_end = buffer_start; - buffer_end.add_assign(&buffer_range_len); + TextDimension::add_assign(&mut buffer_end, &buffer_range_len); let start = self.diff_transforms.start().output_dimension.0; let end = self.diff_transforms.end().output_dimension.0; Some(MultiBufferRegion { @@ -6036,36 +6560,36 @@ where inserted_hunk_info, .. 
} => { let buffer = &excerpt.buffer; - let buffer_context_start = excerpt.range.context.start.summary::(buffer); + let buffer_context_start = excerpt.range.context.start.summary::(buffer); let mut start = self.diff_transforms.start().output_dimension.0; let mut buffer_start = buffer_context_start; if self.diff_transforms.start().excerpt_dimension < *self.excerpts.start() { let overshoot = - self.excerpts.start().0 - self.diff_transforms.start().excerpt_dimension.0; - start.add_assign(&overshoot); + *self.excerpts.start() - self.diff_transforms.start().excerpt_dimension; + start += overshoot.0; } else { let overshoot = - self.diff_transforms.start().excerpt_dimension.0 - self.excerpts.start().0; - buffer_start.add_assign(&overshoot); + self.diff_transforms.start().excerpt_dimension - *self.excerpts.start(); + buffer_start += overshoot.0; } let mut end; let mut buffer_end; let has_trailing_newline; - if self.diff_transforms.end().excerpt_dimension.0 < self.excerpts.end().0 { + if self.diff_transforms.end().excerpt_dimension < self.excerpts.end() { let overshoot = - self.diff_transforms.end().excerpt_dimension.0 - self.excerpts.start().0; + self.diff_transforms.end().excerpt_dimension - *self.excerpts.start(); end = self.diff_transforms.end().output_dimension.0; buffer_end = buffer_context_start; - buffer_end.add_assign(&overshoot); + buffer_end += overshoot.0; has_trailing_newline = false; } else { let overshoot = - self.excerpts.end().0 - self.diff_transforms.start().excerpt_dimension.0; + self.excerpts.end() - self.diff_transforms.start().excerpt_dimension; end = self.diff_transforms.start().output_dimension.0; - end.add_assign(&overshoot); - buffer_end = excerpt.range.context.end.summary::(buffer); + end += overshoot.0; + buffer_end = excerpt.range.context.end.summary::(buffer); has_trailing_newline = excerpt.has_trailing_newline; }; @@ -6171,12 +6695,12 @@ impl Excerpt { } /// The [`Excerpt`]'s start offset in its [`Buffer`] - fn buffer_start_offset(&self) -> usize { - self.range.context.start.to_offset(&self.buffer) + fn buffer_start_offset(&self) -> BufferOffset { + BufferOffset(self.range.context.start.to_offset(&self.buffer)) } /// The [`Excerpt`]'s end offset in its [`Buffer`] - fn buffer_end_offset(&self) -> usize { + fn buffer_end_offset(&self) -> BufferOffset { self.buffer_start_offset() + self.text_summary.len } } @@ -6210,27 +6734,28 @@ impl<'a> MultiBufferExcerpt<'a> { &self.excerpt.buffer } - pub fn buffer_range(&self) -> Range { + pub fn buffer_range(&self) -> Range { self.buffer_offset - ..self - .excerpt - .range - .context - .end - .to_offset(&self.excerpt.buffer.text) + ..BufferOffset( + self.excerpt + .range + .context + .end + .to_offset(&self.excerpt.buffer.text), + ) } - pub fn start_offset(&self) -> usize { + pub fn start_offset(&self) -> MultiBufferOffset { self.offset } /// Maps an offset within the [`MultiBuffer`] to an offset within the [`Buffer`] - pub fn map_offset_to_buffer(&mut self, offset: usize) -> usize { + pub fn map_offset_to_buffer(&mut self, offset: MultiBufferOffset) -> BufferOffset { self.map_range_to_buffer(offset..offset).start } /// Maps a range within the [`MultiBuffer`] to a range within the [`Buffer`] - pub fn map_range_to_buffer(&mut self, range: Range) -> Range { + pub fn map_range_to_buffer(&mut self, range: Range) -> Range { self.diff_transforms .seek(&OutputDimension(range.start), Bias::Right); let start = self.map_offset_to_buffer_internal(range.start); @@ -6244,47 +6769,52 @@ impl<'a> MultiBufferExcerpt<'a> { start..end } - fn 
map_offset_to_buffer_internal(&self, offset: usize) -> usize { - let mut excerpt_offset = self.diff_transforms.start().excerpt_dimension.clone(); + fn map_offset_to_buffer_internal(&self, offset: MultiBufferOffset) -> BufferOffset { + let mut excerpt_offset = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { excerpt_offset.0 += offset - self.diff_transforms.start().output_dimension.0; }; - let offset_in_excerpt = excerpt_offset.0.saturating_sub(self.excerpt_offset.0); + let offset_in_excerpt = excerpt_offset.0.0.saturating_sub(self.excerpt_offset.0); self.buffer_offset + offset_in_excerpt } /// Map an offset within the [`Buffer`] to an offset within the [`MultiBuffer`] - pub fn map_offset_from_buffer(&mut self, buffer_offset: usize) -> usize { + pub fn map_offset_from_buffer(&mut self, buffer_offset: BufferOffset) -> MultiBufferOffset { self.map_range_from_buffer(buffer_offset..buffer_offset) .start } /// Map a range within the [`Buffer`] to a range within the [`MultiBuffer`] - pub fn map_range_from_buffer(&mut self, buffer_range: Range) -> Range { + pub fn map_range_from_buffer( + &mut self, + buffer_range: Range, + ) -> Range { if buffer_range.start < self.buffer_offset { log::warn!( "Attempting to map a range from a buffer offset that starts before the current buffer offset" ); - return buffer_range; + return self.offset..self.offset; } let overshoot = buffer_range.start - self.buffer_offset; - let excerpt_offset = ExcerptDimension(self.excerpt_offset.0 + overshoot); - self.diff_transforms.seek(&excerpt_offset, Bias::Right); - if excerpt_offset.0 < self.diff_transforms.start().excerpt_dimension.0 { + let excerpt_offset = self.excerpt_offset + overshoot; + let excerpt_seek_dim = ExcerptDimension(excerpt_offset); + self.diff_transforms.seek(&excerpt_seek_dim, Bias::Right); + if excerpt_offset < self.diff_transforms.start().excerpt_dimension.0 { log::warn!( "Attempting to map a range from a buffer offset that starts before the current buffer offset" ); - return buffer_range; + return self.offset..self.offset; } - let overshoot = excerpt_offset.0 - self.diff_transforms.start().excerpt_dimension.0; + let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension.0; let start = self.diff_transforms.start().output_dimension.0 + overshoot; let end = if buffer_range.end > buffer_range.start { let overshoot = buffer_range.end - self.buffer_offset; - let excerpt_offset = ExcerptDimension(self.excerpt_offset.0 + overshoot); + let excerpt_offset = self.excerpt_offset + overshoot; + let excerpt_seek_dim = ExcerptDimension(excerpt_offset); self.diff_transforms - .seek_forward(&excerpt_offset, Bias::Right); - let overshoot = excerpt_offset.0 - self.diff_transforms.start().excerpt_dimension.0; + .seek_forward(&excerpt_seek_dim, Bias::Right); + let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension.0; self.diff_transforms.start().output_dimension.0 + overshoot } else { start @@ -6294,7 +6824,7 @@ impl<'a> MultiBufferExcerpt<'a> { } /// Returns true if the entirety of the given range is in the buffer's excerpt - pub fn contains_buffer_range(&self, range: Range) -> bool { + pub fn contains_buffer_range(&self, range: Range) -> bool { range.start >= self.excerpt.buffer_start_offset() && range.end <= self.excerpt.buffer_end_offset() } @@ -6359,7 +6889,7 @@ impl sum_tree::Item for Excerpt { excerpt_id: self.id, excerpt_locator: self.locator.clone(), widest_line_number: self.max_buffer_row, - 
text, + text: text.into(), } } } @@ -6400,9 +6930,9 @@ impl sum_tree::Item for DiffTransform { input: *summary, output: *summary, }, - DiffTransform::DeletedHunk { summary, .. } => DiffTransformSummary { - input: TextSummary::default(), - output: *summary, + &DiffTransform::DeletedHunk { summary, .. } => DiffTransformSummary { + input: MBTextSummary::default(), + output: summary.into(), }, } } @@ -6410,15 +6940,15 @@ impl sum_tree::Item for DiffTransform { impl DiffTransformSummary { fn excerpt_len(&self) -> ExcerptOffset { - ExcerptOffset::new(self.input.len) + ExcerptOffset::new(self.input.len.0) } } impl sum_tree::ContextLessSummary for DiffTransformSummary { fn zero() -> Self { DiffTransformSummary { - input: TextSummary::default(), - output: TextSummary::default(), + input: MBTextSummary::default(), + output: MBTextSummary::default(), } } @@ -6446,7 +6976,7 @@ impl sum_tree::ContextLessSummary for ExcerptSummary { fn add_summary(&mut self, summary: &Self) { debug_assert!(summary.excerpt_locator > self.excerpt_locator); self.excerpt_locator = summary.excerpt_locator.clone(); - Summary::add_summary(&mut self.text, &summary.text, ()); + self.text += summary.text; self.widest_line_number = cmp::max(self.widest_line_number, summary.widest_line_number); } } @@ -6457,13 +6987,41 @@ impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for ExcerptOffset { } fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { - self.value += summary.text.len; + self.value += summary.text.len.0; + } +} +impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for MultiBufferOffset { + fn zero((): ()) -> Self { + Default::default() + } + + fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { + *self += summary.text.len; + } +} + +impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for MultiBufferOffsetUtf16 { + fn zero((): ()) -> Self { + Default::default() + } + + fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { + self.0 += summary.text.len_utf16; + } +} +impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for PointUtf16 { + fn zero((): ()) -> Self { + Default::default() + } + + fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { + TextDimension::add_assign(self, &summary.text.lines_utf16()) } } impl sum_tree::SeekTarget<'_, ExcerptSummary, ExcerptSummary> for ExcerptOffset { fn cmp(&self, cursor_location: &ExcerptSummary, _: ()) -> cmp::Ordering { - Ord::cmp(&self.value, &cursor_location.text.len) + Ord::cmp(&self.value, &cursor_location.text.len.0) } } @@ -6489,15 +7047,25 @@ impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for ExcerptPoint { } } -impl<'a, D: TextDimension + Default> sum_tree::Dimension<'a, ExcerptSummary> - for ExcerptDimension +impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for Point { + fn zero(_cx: ()) -> Self { + Default::default() + } + + fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { + TextDimension::add_assign(self, &summary.text.lines); + } +} + +impl<'a, MBD: MultiBufferDimension + Default> sum_tree::Dimension<'a, ExcerptSummary> + for ExcerptDimension { fn zero(_: ()) -> Self { - ExcerptDimension(D::default()) + ExcerptDimension(MBD::default()) } fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { - self.0.add_assign(&D::from_text_summary(&summary.text)) + MultiBufferDimension::add_mb_text_summary(&mut self.0, &summary.text) } } @@ -6521,19 +7089,59 @@ impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for Option { } } -#[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] +#[derive(Copy, Clone, PartialOrd, Ord, Eq, 
PartialEq, Debug)] struct ExcerptDimension(T); +impl ops::Sub for ExcerptDimension +where + T: ops::Sub, +{ + type Output = ExcerptDimension; + + fn sub(self, other: Self) -> Self::Output { + ExcerptDimension(self.0 - other.0) + } +} + +impl AddAssign for ExcerptDimension +where + T: AddAssign, +{ + fn add_assign(&mut self, other: Self) { + self.0 += other.0; + } +} + #[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] struct OutputDimension(T); +impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for MultiBufferOffset { + fn zero(_: ()) -> Self { + MultiBufferOffset::ZERO + } + + fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { + *self += summary.output.len; + } +} + +impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for MultiBufferOffsetUtf16 { + fn zero(_: ()) -> Self { + MultiBufferOffsetUtf16(OffsetUtf16(0)) + } + + fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { + self.0 += summary.output.len_utf16; + } +} + impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for ExcerptOffset { fn zero(_: ()) -> Self { ExcerptOffset::new(0) } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.value += summary.input.len; + self.value += summary.input.len.0; } } @@ -6547,77 +7155,77 @@ impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for ExcerptPoint { } } -impl sum_tree::SeekTarget<'_, DiffTransformSummary, DiffTransformSummary> - for ExcerptDimension +impl sum_tree::SeekTarget<'_, DiffTransformSummary, DiffTransformSummary> + for ExcerptDimension +where + MBD: MultiBufferDimension + Ord, { fn cmp(&self, cursor_location: &DiffTransformSummary, _: ()) -> cmp::Ordering { - Ord::cmp(&self.0, &D::from_text_summary(&cursor_location.input)) + Ord::cmp(&self.0, &MBD::from_summary(&cursor_location.input)) } } -impl sum_tree::SeekTarget<'_, DiffTransformSummary, DiffTransforms> - for ExcerptDimension +impl<'a, MBD> sum_tree::SeekTarget<'a, DiffTransformSummary, DiffTransforms> + for ExcerptDimension +where + MBD: MultiBufferDimension + Ord, { - fn cmp(&self, cursor_location: &DiffTransforms, _: ()) -> cmp::Ordering { + fn cmp(&self, cursor_location: &DiffTransforms, _: ()) -> cmp::Ordering { Ord::cmp(&self.0, &cursor_location.excerpt_dimension.0) } } -impl<'a, D: TextDimension> sum_tree::Dimension<'a, DiffTransformSummary> for ExcerptDimension { +impl<'a, MBD: MultiBufferDimension> sum_tree::Dimension<'a, DiffTransformSummary> + for ExcerptDimension +{ fn zero(_: ()) -> Self { - ExcerptDimension(D::default()) + ExcerptDimension(MBD::default()) } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.0.add_assign(&D::from_text_summary(&summary.input)) + self.0.add_mb_text_summary(&summary.input) } } -impl sum_tree::SeekTarget<'_, DiffTransformSummary, DiffTransforms> - for OutputDimension +impl<'a, MBD> sum_tree::SeekTarget<'a, DiffTransformSummary, DiffTransforms> + for OutputDimension +where + MBD: MultiBufferDimension + Ord, { - fn cmp(&self, cursor_location: &DiffTransforms, _: ()) -> cmp::Ordering { + fn cmp(&self, cursor_location: &DiffTransforms, _: ()) -> cmp::Ordering { Ord::cmp(&self.0, &cursor_location.output_dimension.0) } } -impl<'a, D: TextDimension> sum_tree::Dimension<'a, DiffTransformSummary> for OutputDimension { - fn zero(_: ()) -> Self { - OutputDimension(D::default()) - } - - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.0.add_assign(&D::from_text_summary(&summary.output)) - } -} - -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for 
TextSummary { +impl<'a, MBD: MultiBufferDimension> sum_tree::Dimension<'a, DiffTransformSummary> + for OutputDimension +{ fn zero(_: ()) -> Self { - TextSummary::default() + OutputDimension(MBD::default()) } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output + self.0.add_mb_text_summary(&summary.output) } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for usize { +impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for Point { fn zero(_: ()) -> Self { - 0 + Point::new(0, 0) } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output.len + *self += summary.output.lines } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for Point { +impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for PointUtf16 { fn zero(_: ()) -> Self { - Point::new(0, 0) + PointUtf16::new(0, 0) } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output.lines + *self += summary.output.lines_utf16() } } @@ -6751,11 +7359,11 @@ impl Iterator for MultiBufferRows<'_> { } impl<'a> MultiBufferChunks<'a> { - pub fn offset(&self) -> usize { + pub fn offset(&self) -> MultiBufferOffset { self.range.start } - pub fn seek(&mut self, range: Range) { + pub fn seek(&mut self, range: Range) { self.diff_transforms.seek(&range.end, Bias::Right); let mut excerpt_end = self.diff_transforms.start().1; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { @@ -6859,7 +7467,13 @@ impl<'a> Iterator for MultiBufferChunks<'a> { let diff_transform_start = self.diff_transforms.start().0; let diff_transform_end = self.diff_transforms.end().0; - debug_assert!(self.range.start < diff_transform_end); + debug_assert!( + self.range.start < diff_transform_end, + "{:?} < {:?} of ({1:?}..{2:?})", + self.range.start, + diff_transform_end, + diff_transform_start + ); let diff_transform = self.diff_transforms.item()?; match diff_transform { @@ -6904,9 +7518,9 @@ impl<'a> Iterator for MultiBufferChunks<'a> { .. 
} => { let base_text_start = - base_text_byte_range.start + self.range.start - diff_transform_start; + base_text_byte_range.start + (self.range.start - diff_transform_start); let base_text_end = - base_text_byte_range.start + self.range.end - diff_transform_start; + base_text_byte_range.start + (self.range.end - diff_transform_start); let base_text_end = base_text_end.min(base_text_byte_range.end); let mut chunks = if let Some((_, mut chunks)) = self @@ -6959,7 +7573,7 @@ impl MultiBufferBytes<'_> { if let Some(region) = self.cursor.region() { let mut excerpt_bytes = region.buffer.bytes_in_range( region.buffer_range.start - ..(region.buffer_range.start + self.range.end - region.range.start) + ..(region.buffer_range.start + (self.range.end - region.range.start)) .min(region.buffer_range.end), ); self.chunk = excerpt_bytes.next().unwrap_or(&[]); @@ -7044,50 +7658,50 @@ impl<'a> Iterator for ExcerptChunks<'a> { } impl ToOffset for Point { - fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> usize { + fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset { snapshot.point_to_offset(*self) } - fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> OffsetUtf16 { + fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16 { snapshot.point_to_offset_utf16(*self) } } -impl ToOffset for usize { +impl ToOffset for MultiBufferOffset { #[track_caller] - fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> usize { + fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset { assert!( *self <= snapshot.len(), "offset {} is greater than the snapshot.len() {}", - *self, - snapshot.len(), + self.0, + snapshot.len().0, ); *self } - fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> OffsetUtf16 { + fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16 { snapshot.offset_to_offset_utf16(*self) } } -impl ToOffset for OffsetUtf16 { - fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> usize { +impl ToOffset for MultiBufferOffsetUtf16 { + fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset { snapshot.offset_utf16_to_offset(*self) } - fn to_offset_utf16(&self, _snapshot: &MultiBufferSnapshot) -> OffsetUtf16 { + fn to_offset_utf16(&self, _snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16 { *self } } impl ToOffset for PointUtf16 { - fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> usize { + fn to_offset<'a>(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffset { snapshot.point_utf16_to_offset(*self) } - fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> OffsetUtf16 { + fn to_offset_utf16(&self, snapshot: &MultiBufferSnapshot) -> MultiBufferOffsetUtf16 { snapshot.point_utf16_to_offset_utf16(*self) } } -impl ToPoint for usize { +impl ToPoint for MultiBufferOffset { fn to_point<'a>(&self, snapshot: &MultiBufferSnapshot) -> Point { snapshot.offset_to_point(*self) } @@ -7125,15 +7739,17 @@ pub mod debug { use super::*; pub trait ToMultiBufferDebugRanges { - fn to_multi_buffer_debug_ranges(&self, snapshot: &MultiBufferSnapshot) - -> Vec>; + fn to_multi_buffer_debug_ranges( + &self, + snapshot: &MultiBufferSnapshot, + ) -> Vec>; } impl ToMultiBufferDebugRanges for T { fn to_multi_buffer_debug_ranges( &self, snapshot: &MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { [self.to_offset(snapshot)].to_multi_buffer_debug_ranges(snapshot) } } @@ -7142,7 +7758,7 @@ pub mod debug { fn to_multi_buffer_debug_ranges( &self, snapshot: 
&MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { [self.start.to_offset(snapshot)..self.end.to_offset(snapshot)] .to_multi_buffer_debug_ranges(snapshot) } @@ -7152,7 +7768,7 @@ pub mod debug { fn to_multi_buffer_debug_ranges( &self, snapshot: &MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { self.as_slice().to_multi_buffer_debug_ranges(snapshot) } } @@ -7161,7 +7777,7 @@ pub mod debug { fn to_multi_buffer_debug_ranges( &self, snapshot: &MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { self.as_slice().to_multi_buffer_debug_ranges(snapshot) } } @@ -7170,7 +7786,7 @@ pub mod debug { fn to_multi_buffer_debug_ranges( &self, snapshot: &MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { self.iter() .map(|item| { let offset = item.to_offset(snapshot); @@ -7184,7 +7800,7 @@ pub mod debug { fn to_multi_buffer_debug_ranges( &self, snapshot: &MultiBufferSnapshot, - ) -> Vec> { + ) -> Vec> { self.iter() .map(|range| range.start.to_offset(snapshot)..range.end.to_offset(snapshot)) .collect() diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 22c041267f9c78c1f20609b74e2332516639f39b..0151805d065b779569b3a2f8f02157f3ce129295 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -130,8 +130,8 @@ fn test_excerpt_boundaries_and_clipping(cx: &mut App) { assert_eq!( subscription.consume().into_inner(), [Edit { - old: 0..0, - new: 0..10 + old: MultiBufferOffset(0)..MultiBufferOffset(0), + new: MultiBufferOffset(0)..MultiBufferOffset(10) }] ); @@ -148,8 +148,8 @@ fn test_excerpt_boundaries_and_clipping(cx: &mut App) { assert_eq!( subscription.consume().into_inner(), [Edit { - old: 10..10, - new: 10..22 + old: MultiBufferOffset(10)..MultiBufferOffset(10), + new: MultiBufferOffset(10)..MultiBufferOffset(22) }] ); @@ -282,8 +282,8 @@ fn test_excerpt_boundaries_and_clipping(cx: &mut App) { assert_eq!( subscription.consume().into_inner(), [Edit { - old: 6..8, - new: 6..7 + old: MultiBufferOffset(6)..MultiBufferOffset(8), + new: MultiBufferOffset(6)..MultiBufferOffset(7) }] ); @@ -925,7 +925,7 @@ fn test_empty_diff_excerpt(cx: &mut TestAppContext) { .next() .unwrap(); - assert_eq!(hunk.diff_base_byte_range.start, 0); + assert_eq!(hunk.diff_base_byte_range.start, BufferOffset(0)); let buf2 = cx.new(|cx| Buffer::local("X", cx)); multibuffer.update(cx, |multibuffer, cx| { @@ -971,10 +971,30 @@ fn test_singleton_multibuffer_anchors(cx: &mut App) { assert_eq!(old_snapshot.text(), "abcd"); assert_eq!(new_snapshot.text(), "XabcdY"); - assert_eq!(old_snapshot.anchor_before(0).to_offset(&new_snapshot), 0); - assert_eq!(old_snapshot.anchor_after(0).to_offset(&new_snapshot), 1); - assert_eq!(old_snapshot.anchor_before(4).to_offset(&new_snapshot), 5); - assert_eq!(old_snapshot.anchor_after(4).to_offset(&new_snapshot), 6); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(0)) + .to_offset(&new_snapshot), + MultiBufferOffset(0) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(0)) + .to_offset(&new_snapshot), + MultiBufferOffset(1) + ); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(4)) + .to_offset(&new_snapshot), + MultiBufferOffset(5) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(4)) + .to_offset(&new_snapshot), + MultiBufferOffset(6) + ); } #[gpui::test] @@ -989,12 +1009,28 @@ fn test_multibuffer_anchors(cx: &mut App) { }); let old_snapshot = multibuffer.read(cx).snapshot(cx); - 
assert_eq!(old_snapshot.anchor_before(0).to_offset(&old_snapshot), 0); - assert_eq!(old_snapshot.anchor_after(0).to_offset(&old_snapshot), 0); - assert_eq!(Anchor::min().to_offset(&old_snapshot), 0); - assert_eq!(Anchor::min().to_offset(&old_snapshot), 0); - assert_eq!(Anchor::max().to_offset(&old_snapshot), 10); - assert_eq!(Anchor::max().to_offset(&old_snapshot), 10); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(0)) + .to_offset(&old_snapshot), + MultiBufferOffset(0) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(0)) + .to_offset(&old_snapshot), + MultiBufferOffset(0) + ); + assert_eq!(Anchor::min().to_offset(&old_snapshot), MultiBufferOffset(0)); + assert_eq!(Anchor::min().to_offset(&old_snapshot), MultiBufferOffset(0)); + assert_eq!( + Anchor::max().to_offset(&old_snapshot), + MultiBufferOffset(10) + ); + assert_eq!( + Anchor::max().to_offset(&old_snapshot), + MultiBufferOffset(10) + ); buffer_1.update(cx, |buffer, cx| { buffer.edit([(0..0, "W")], None, cx); @@ -1009,16 +1045,66 @@ fn test_multibuffer_anchors(cx: &mut App) { assert_eq!(old_snapshot.text(), "abcd\nefghi"); assert_eq!(new_snapshot.text(), "WabcdX\nYefghiZ"); - assert_eq!(old_snapshot.anchor_before(0).to_offset(&new_snapshot), 0); - assert_eq!(old_snapshot.anchor_after(0).to_offset(&new_snapshot), 1); - assert_eq!(old_snapshot.anchor_before(1).to_offset(&new_snapshot), 2); - assert_eq!(old_snapshot.anchor_after(1).to_offset(&new_snapshot), 2); - assert_eq!(old_snapshot.anchor_before(2).to_offset(&new_snapshot), 3); - assert_eq!(old_snapshot.anchor_after(2).to_offset(&new_snapshot), 3); - assert_eq!(old_snapshot.anchor_before(5).to_offset(&new_snapshot), 7); - assert_eq!(old_snapshot.anchor_after(5).to_offset(&new_snapshot), 8); - assert_eq!(old_snapshot.anchor_before(10).to_offset(&new_snapshot), 13); - assert_eq!(old_snapshot.anchor_after(10).to_offset(&new_snapshot), 14); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(0)) + .to_offset(&new_snapshot), + MultiBufferOffset(0) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(0)) + .to_offset(&new_snapshot), + MultiBufferOffset(1) + ); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(1)) + .to_offset(&new_snapshot), + MultiBufferOffset(2) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(1)) + .to_offset(&new_snapshot), + MultiBufferOffset(2) + ); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(2)) + .to_offset(&new_snapshot), + MultiBufferOffset(3) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(2)) + .to_offset(&new_snapshot), + MultiBufferOffset(3) + ); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(5)) + .to_offset(&new_snapshot), + MultiBufferOffset(7) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(5)) + .to_offset(&new_snapshot), + MultiBufferOffset(8) + ); + assert_eq!( + old_snapshot + .anchor_before(MultiBufferOffset(10)) + .to_offset(&new_snapshot), + MultiBufferOffset(13) + ); + assert_eq!( + old_snapshot + .anchor_after(MultiBufferOffset(10)) + .to_offset(&new_snapshot), + MultiBufferOffset(14) + ); } #[gpui::test] @@ -1066,26 +1152,30 @@ fn test_resolving_anchors_after_replacing_their_excerpts(cx: &mut App) { // The current excerpts are from a different buffer, so we don't attempt to // resolve the old text anchor in the new buffer. 
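The assertions in this hunk exercise the new typed-offset API: positions are now created, resolved, and compared as `MultiBufferOffset` values rather than bare `usize` offsets. The crate's actual `MultiBufferOffset` and `BufferOffset` definitions are not part of this excerpt, so the snippet below is only a minimal, self-contained sketch, assuming they are `usize` newtypes with ordering and simple offset arithmetic; the names and trait impls shown here are illustrative assumptions, not the crate's real code.

```rust
use std::ops::{Add, AddAssign, Sub};

/// Sketch (assumed shape): an offset into the combined multibuffer text.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct MultiBufferOffset(usize);

/// Sketch (assumed shape): an offset into a single underlying buffer.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct BufferOffset(usize);

impl MultiBufferOffset {
    const ZERO: Self = MultiBufferOffset(0);
}

// Offset + length and offset - offset are the only arithmetic the call sites
// in this patch appear to need: advancing by a byte count and measuring a distance.
impl Add<usize> for MultiBufferOffset {
    type Output = Self;
    fn add(self, len: usize) -> Self {
        MultiBufferOffset(self.0 + len)
    }
}

impl AddAssign<usize> for MultiBufferOffset {
    fn add_assign(&mut self, len: usize) {
        self.0 += len;
    }
}

impl Sub for MultiBufferOffset {
    type Output = usize;
    fn sub(self, rhs: Self) -> usize {
        self.0 - rhs.0
    }
}

fn main() {
    let start = MultiBufferOffset::ZERO;
    let end = start + 10;
    assert_eq!(end - start, 10);
    assert_eq!(end, MultiBufferOffset(10));

    // Mixing the two offset spaces no longer compiles, which is the point of the change:
    // let _ = end - BufferOffset(3); // error: mismatched types
    let _buffer_pos = BufferOffset(3);
}
```

The real types in the hunks above additionally interoperate with `ExcerptDimension`, `OutputDimension`, and the sum-tree `Dimension` machinery, which this sketch deliberately leaves out.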
assert_eq!( - snapshot_2.summary_for_anchor::(&snapshot_1.anchor_before(2)), - 0 + snapshot_2.summary_for_anchor::( + &snapshot_1.anchor_before(MultiBufferOffset(2)) + ), + MultiBufferOffset(0) ); assert_eq!( - snapshot_2.summaries_for_anchors::(&[ - snapshot_1.anchor_before(2), - snapshot_1.anchor_after(3) + snapshot_2.summaries_for_anchors::(&[ + snapshot_1.anchor_before(MultiBufferOffset(2)), + snapshot_1.anchor_after(MultiBufferOffset(3)) ]), - vec![0, 0] + vec![MultiBufferOffset(0), MultiBufferOffset(0)] ); // Refresh anchors from the old snapshot. The return value indicates that both // anchors lost their original excerpt. - let refresh = - snapshot_2.refresh_anchors(&[snapshot_1.anchor_before(2), snapshot_1.anchor_after(3)]); + let refresh = snapshot_2.refresh_anchors(&[ + snapshot_1.anchor_before(MultiBufferOffset(2)), + snapshot_1.anchor_after(MultiBufferOffset(3)), + ]); assert_eq!( refresh, &[ - (0, snapshot_2.anchor_before(0), false), - (1, snapshot_2.anchor_after(0), false), + (0, snapshot_2.anchor_before(MultiBufferOffset(0)), false), + (1, snapshot_2.anchor_after(MultiBufferOffset(0)), false), ] ); @@ -1112,14 +1202,19 @@ fn test_resolving_anchors_after_replacing_their_excerpts(cx: &mut App) { // The third anchor can't be resolved, since its excerpt has been removed, // so it resolves to the same position as its predecessor. let anchors = [ - snapshot_2.anchor_before(0), - snapshot_2.anchor_after(2), - snapshot_2.anchor_after(6), - snapshot_2.anchor_after(14), + snapshot_2.anchor_before(MultiBufferOffset(0)), + snapshot_2.anchor_after(MultiBufferOffset(2)), + snapshot_2.anchor_after(MultiBufferOffset(6)), + snapshot_2.anchor_after(MultiBufferOffset(14)), ]; assert_eq!( - snapshot_3.summaries_for_anchors::(&anchors), - &[0, 2, 9, 13] + snapshot_3.summaries_for_anchors::(&anchors), + &[ + MultiBufferOffset(0), + MultiBufferOffset(2), + MultiBufferOffset(9), + MultiBufferOffset(13) + ] ); let new_anchors = snapshot_3.refresh_anchors(&anchors); @@ -1128,8 +1223,13 @@ fn test_resolving_anchors_after_replacing_their_excerpts(cx: &mut App) { &[(0, true), (1, true), (2, true), (3, true)] ); assert_eq!( - snapshot_3.summaries_for_anchors::(new_anchors.iter().map(|a| &a.1)), - &[0, 2, 7, 13] + snapshot_3.summaries_for_anchors::(new_anchors.iter().map(|a| &a.1)), + &[ + MultiBufferOffset(0), + MultiBufferOffset(2), + MultiBufferOffset(7), + MultiBufferOffset(13) + ] ); } @@ -1371,7 +1471,7 @@ fn test_basic_diff_hunks(cx: &mut TestAppContext) { assert_eq!( snapshot - .diff_hunks_in_range(0..snapshot.len()) + .diff_hunks_in_range(MultiBufferOffset(0)..snapshot.len()) .map(|hunk| hunk.row_range.start.0..hunk.row_range.end.0) .collect::>(), &[0..4, 5..7] @@ -2072,7 +2172,7 @@ fn test_diff_hunks_with_multiple_excerpts(cx: &mut TestAppContext) { assert_eq!( snapshot - .diff_hunks_in_range(0..snapshot.len()) + .diff_hunks_in_range(MultiBufferOffset(0)..snapshot.len()) .map(|hunk| hunk.row_range.start.0..hunk.row_range.end.0) .collect::>(), &[0..1, 2..4, 5..7, 9..10, 12..13, 14..17] @@ -2636,14 +2736,16 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { 30..=39 if !reference.excerpts.is_empty() => { let multibuffer = multibuffer.read_with(cx, |multibuffer, cx| multibuffer.snapshot(cx)); - let offset = - multibuffer.clip_offset(rng.random_range(0..=multibuffer.len()), Bias::Left); + let offset = multibuffer.clip_offset( + MultiBufferOffset(rng.random_range(0..=multibuffer.len().0)), + Bias::Left, + ); let bias = if rng.random() { Bias::Left } else { Bias::Right }; - 
log::info!("Creating anchor at {} with bias {:?}", offset, bias); + log::info!("Creating anchor at {} with bias {:?}", offset.0, bias); anchors.push(multibuffer.anchor_at(offset, bias)); anchors.sort_by(|a, b| a.cmp(b, &multibuffer)); } @@ -2796,7 +2898,7 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { let snapshot = multibuffer.read_with(cx, |multibuffer, cx| multibuffer.snapshot(cx)); let actual_text = snapshot.text(); let actual_boundary_rows = snapshot - .excerpt_boundaries_in_range(0..) + .excerpt_boundaries_in_range(MultiBufferOffset(0)..) .map(|b| b.row) .collect::>(); let actual_row_infos = snapshot.row_infos(MultiBufferRow(0)).collect::>(); @@ -2874,9 +2976,14 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { }) .collect::>() }); - for i in 0..snapshot.len() { - let excerpt = snapshot.excerpt_containing(i..i).unwrap(); - assert_eq!(excerpt.buffer_range(), reference_ranges[&excerpt.id()]); + for i in 0..snapshot.len().0 { + let excerpt = snapshot + .excerpt_containing(MultiBufferOffset(i)..MultiBufferOffset(i)) + .unwrap(); + assert_eq!( + excerpt.buffer_range().start.0..excerpt.buffer_range().end.0, + reference_ranges[&excerpt.id()] + ); } assert_consistent_line_numbers(&snapshot); @@ -2897,7 +3004,7 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { let start_ix = text_rope.clip_offset(rng.random_range(0..=end_ix), Bias::Left); let text_for_range = snapshot - .text_for_range(start_ix..end_ix) + .text_for_range(MultiBufferOffset(start_ix)..MultiBufferOffset(end_ix)) .collect::(); assert_eq!( text_for_range, @@ -2906,9 +3013,12 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { start_ix..end_ix ); - let expected_summary = TextSummary::from(&expected_text[start_ix..end_ix]); + let expected_summary = + MBTextSummary::from(TextSummary::from(&expected_text[start_ix..end_ix])); assert_eq!( - snapshot.text_summary_for_range::(start_ix..end_ix), + snapshot.text_summary_for_range::( + MultiBufferOffset(start_ix)..MultiBufferOffset(end_ix) + ), expected_summary, "incorrect summary for range {:?}", start_ix..end_ix @@ -2916,12 +3026,12 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { } // Anchor resolution - let summaries = snapshot.summaries_for_anchors::(&anchors); + let summaries = snapshot.summaries_for_anchors::(&anchors); assert_eq!(anchors.len(), summaries.len()); for (anchor, resolved_offset) in anchors.iter().zip(summaries) { assert!(resolved_offset <= snapshot.len()); assert_eq!( - snapshot.summary_for_anchor::(anchor), + snapshot.summary_for_anchor::(anchor), resolved_offset, "anchor: {:?}", anchor @@ -2931,7 +3041,9 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { for _ in 0..10 { let end_ix = text_rope.clip_offset(rng.random_range(0..=text_rope.len()), Bias::Right); assert_eq!( - snapshot.reversed_chars_at(end_ix).collect::(), + snapshot + .reversed_chars_at(MultiBufferOffset(end_ix)) + .collect::(), expected_text[..end_ix].chars().rev().collect::(), ); } @@ -2941,7 +3053,7 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { let start_ix = rng.random_range(0..=end_ix); assert_eq!( snapshot - .bytes_in_range(start_ix..end_ix) + .bytes_in_range(MultiBufferOffset(start_ix)..MultiBufferOffset(end_ix)) .flatten() .copied() .collect::>(), @@ -2964,8 +3076,13 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { let mut text = old_snapshot.text(); for 
edit in edits { - let new_text: String = snapshot.text_for_range(edit.new.clone()).collect(); - text.replace_range(edit.new.start..edit.new.start + edit.old.len(), &new_text); + let new_text: String = snapshot + .text_for_range(edit.new.start..edit.new.end) + .collect(); + text.replace_range( + edit.new.start.0..edit.new.start.0 + (edit.old.end.0 - edit.old.start.0), + &new_text, + ); } assert_eq!(text.to_string(), snapshot.text()); } @@ -3038,7 +3155,11 @@ fn test_history(cx: &mut App) { // Edit buffer 1 through the multibuffer now += 2 * group_interval; multibuffer.start_transaction_at(now, cx); - multibuffer.edit([(2..2, "C")], None, cx); + multibuffer.edit( + [(MultiBufferOffset(2)..MultiBufferOffset(2), "C")], + None, + cx, + ); multibuffer.end_transaction_at(now, cx); assert_eq!(multibuffer.read(cx).text(), "ABC1234\nAB5678"); @@ -3091,7 +3212,11 @@ fn test_history(cx: &mut App) { // Redo stack gets cleared after an edit. now += 2 * group_interval; multibuffer.start_transaction_at(now, cx); - multibuffer.edit([(0..0, "X")], None, cx); + multibuffer.edit( + [(MultiBufferOffset(0)..MultiBufferOffset(0), "X")], + None, + cx, + ); multibuffer.end_transaction_at(now, cx); assert_eq!(multibuffer.read(cx).text(), "XABCD1234\nAB5678"); multibuffer.redo(cx); @@ -3320,7 +3445,7 @@ fn test_trailing_deletion_without_newline(cx: &mut TestAppContext) { ); assert_eq!(snapshot.max_point(), Point::new(2, 0)); - assert_eq!(snapshot.len(), 8); + assert_eq!(snapshot.len().0, 8); assert_eq!( snapshot @@ -3330,7 +3455,7 @@ fn test_trailing_deletion_without_newline(cx: &mut TestAppContext) { ); let (_, translated_offset) = snapshot.point_to_buffer_offset(Point::new(2, 0)).unwrap(); - assert_eq!(translated_offset, "one\n".len()); + assert_eq!(translated_offset.0, "one\n".len()); let (_, translated_point, _) = snapshot.point_to_buffer_point(Point::new(2, 0)).unwrap(); assert_eq!(translated_point, Point::new(1, 0)); @@ -3371,7 +3496,7 @@ fn test_trailing_deletion_without_newline(cx: &mut TestAppContext) { let buffer_1_id = buffer_1.read_with(cx, |buffer_1, _| buffer_1.remote_id()); let (buffer, translated_offset) = snapshot.point_to_buffer_offset(Point::new(2, 0)).unwrap(); assert_eq!(buffer.remote_id(), buffer_1_id); - assert_eq!(translated_offset, "one\n".len()); + assert_eq!(translated_offset.0, "one\n".len()); let (buffer, translated_point, _) = snapshot.point_to_buffer_point(Point::new(2, 0)).unwrap(); assert_eq!(buffer.remote_id(), buffer_1_id); assert_eq!(translated_point, Point::new(1, 0)); @@ -3439,7 +3564,7 @@ fn assert_excerpts_match( fn assert_new_snapshot( multibuffer: &Entity, snapshot: &mut MultiBufferSnapshot, - subscription: &mut Subscription, + subscription: &mut Subscription, cx: &mut TestAppContext, expected_diff: &str, ) { @@ -3462,15 +3587,15 @@ fn assert_new_snapshot( fn check_edits( old_snapshot: &MultiBufferSnapshot, new_snapshot: &MultiBufferSnapshot, - edits: &[Edit], + edits: &[Edit], ) { let mut text = old_snapshot.text(); let new_text = new_snapshot.text(); for edit in edits.iter().rev() { - if !text.is_char_boundary(edit.old.start) - || !text.is_char_boundary(edit.old.end) - || !new_text.is_char_boundary(edit.new.start) - || !new_text.is_char_boundary(edit.new.end) + if !text.is_char_boundary(edit.old.start.0) + || !text.is_char_boundary(edit.old.end.0) + || !new_text.is_char_boundary(edit.new.start.0) + || !new_text.is_char_boundary(edit.new.end.0) { panic!( "invalid edits: {:?}\nold text: {:?}\nnew text: {:?}", @@ -3479,8 +3604,8 @@ fn check_edits( } text.replace_range( - 
edit.old.start..edit.old.end, - &new_text[edit.new.start..edit.new.end], + edit.old.start.0..edit.old.end.0, + &new_text[edit.new.start.0..edit.new.end.0], ); } @@ -3491,8 +3616,8 @@ fn check_edits( fn assert_chunks_in_ranges(snapshot: &MultiBufferSnapshot) { let full_text = snapshot.text(); for ix in 0..full_text.len() { - let mut chunks = snapshot.chunks(0..snapshot.len(), false); - chunks.seek(ix..snapshot.len()); + let mut chunks = snapshot.chunks(MultiBufferOffset(0)..snapshot.len(), false); + chunks.seek(MultiBufferOffset(ix)..snapshot.len()); let tail = chunks.map(|chunk| chunk.text).collect::(); assert_eq!(tail, &full_text[ix..], "seek to range: {:?}", ix..); } @@ -3522,44 +3647,49 @@ fn assert_position_translation(snapshot: &MultiBufferSnapshot) { let mut offsets = Vec::new(); let mut points = Vec::new(); for offset in 0..=text.len() + 1 { + let offset = MultiBufferOffset(offset); let clipped_left = snapshot.clip_offset(offset, Bias::Left); let clipped_right = snapshot.clip_offset(offset, Bias::Right); assert_eq!( - clipped_left, - text.clip_offset(offset, Bias::Left), + clipped_left.0, + text.clip_offset(offset.0, Bias::Left), "clip_offset({offset:?}, Left)" ); assert_eq!( - clipped_right, - text.clip_offset(offset, Bias::Right), + clipped_right.0, + text.clip_offset(offset.0, Bias::Right), "clip_offset({offset:?}, Right)" ); assert_eq!( snapshot.offset_to_point(clipped_left), - text.offset_to_point(clipped_left), - "offset_to_point({clipped_left})" + text.offset_to_point(clipped_left.0), + "offset_to_point({})", + clipped_left.0 ); assert_eq!( snapshot.offset_to_point(clipped_right), - text.offset_to_point(clipped_right), - "offset_to_point({clipped_right})" + text.offset_to_point(clipped_right.0), + "offset_to_point({})", + clipped_right.0 ); let anchor_after = snapshot.anchor_after(clipped_left); assert_eq!( anchor_after.to_offset(snapshot), clipped_left, - "anchor_after({clipped_left}).to_offset {anchor_after:?}" + "anchor_after({}).to_offset {anchor_after:?}", + clipped_left.0 ); let anchor_before = snapshot.anchor_before(clipped_left); assert_eq!( anchor_before.to_offset(snapshot), clipped_left, - "anchor_before({clipped_left}).to_offset" + "anchor_before({}).to_offset", + clipped_left.0 ); left_anchors.push(anchor_before); right_anchors.push(anchor_after); offsets.push(clipped_left); - points.push(text.offset_to_point(clipped_left)); + points.push(text.offset_to_point(clipped_left.0)); } for row in 0..text.max_point().row { @@ -3578,12 +3708,12 @@ fn assert_position_translation(snapshot: &MultiBufferSnapshot) { "clip_point({point:?}, Right)" ); assert_eq!( - snapshot.point_to_offset(clipped_left), + snapshot.point_to_offset(clipped_left).0, text.point_to_offset(clipped_left), "point_to_offset({clipped_left:?})" ); assert_eq!( - snapshot.point_to_offset(clipped_right), + snapshot.point_to_offset(clipped_right).0, text.point_to_offset(clipped_right), "point_to_offset({clipped_right:?})" ); @@ -3591,7 +3721,7 @@ fn assert_position_translation(snapshot: &MultiBufferSnapshot) { } assert_eq!( - snapshot.summaries_for_anchors::(&left_anchors), + snapshot.summaries_for_anchors::(&left_anchors), offsets, "left_anchors <-> offsets" ); @@ -3601,7 +3731,7 @@ fn assert_position_translation(snapshot: &MultiBufferSnapshot) { "left_anchors <-> points" ); assert_eq!( - snapshot.summaries_for_anchors::(&right_anchors), + snapshot.summaries_for_anchors::(&right_anchors), offsets, "right_anchors <-> offsets" ); @@ -3613,7 +3743,7 @@ fn assert_position_translation(snapshot: 
&MultiBufferSnapshot) { for (anchors, bias) in [(&left_anchors, Bias::Left), (&right_anchors, Bias::Right)] { for (ix, (offset, anchor)) in offsets.iter().zip(anchors).enumerate() { - if ix > 0 && *offset == 252 && offset > &offsets[ix - 1] { + if ix > 0 && *offset == MultiBufferOffset(252) && offset > &offsets[ix - 1] { let prev_anchor = left_anchors[ix - 1]; assert!( anchor.cmp(&prev_anchor, snapshot).is_gt(), @@ -3632,7 +3762,7 @@ fn assert_position_translation(snapshot: &MultiBufferSnapshot) { } if let Some((buffer, offset)) = snapshot.point_to_buffer_offset(snapshot.max_point()) { - assert!(offset <= buffer.len()); + assert!(offset.0 <= buffer.len()); } if let Some((buffer, point, _)) = snapshot.point_to_buffer_point(snapshot.max_point()) { assert!(point <= buffer.max_point()); @@ -3747,7 +3877,7 @@ fn test_random_chunk_bitmaps(cx: &mut App, mut rng: StdRng) { let snapshot = multibuffer.read(cx).snapshot(cx); - let chunks = snapshot.chunks(0..snapshot.len(), false); + let chunks = snapshot.chunks(MultiBufferOffset(0)..snapshot.len(), false); for chunk in chunks { let chunk_text = chunk.text; @@ -3879,24 +4009,24 @@ fn test_random_chunk_bitmaps_with_diffs(cx: &mut App, mut rng: StdRng) { let mut ranges = Vec::new(); for _ in 0..rng.random_range(1..5) { - if snapshot.len() == 0 { + if snapshot.len().0 == 0 { break; } let diff_size = rng.random_range(5..1000); - let mut start = rng.random_range(0..snapshot.len()); + let mut start = rng.random_range(0..snapshot.len().0); while !text.is_char_boundary(start) { start = start.saturating_sub(1); } - let mut end = rng.random_range(start..snapshot.len().min(start + diff_size)); + let mut end = rng.random_range(start..snapshot.len().0.min(start + diff_size)); while !text.is_char_boundary(end) { end = end.saturating_add(1); } - let start_anchor = snapshot.anchor_after(start); - let end_anchor = snapshot.anchor_before(end); + let start_anchor = snapshot.anchor_after(MultiBufferOffset(start)); + let end_anchor = snapshot.anchor_before(MultiBufferOffset(end)); ranges.push(start_anchor..end_anchor); } multibuffer.expand_diff_hunks(ranges, cx); @@ -3905,7 +4035,7 @@ fn test_random_chunk_bitmaps_with_diffs(cx: &mut App, mut rng: StdRng) { let snapshot = multibuffer.read(cx).snapshot(cx); - let chunks = snapshot.chunks(0..snapshot.len(), false); + let chunks = snapshot.chunks(MultiBufferOffset(0)..snapshot.len(), false); for chunk in chunks { let chunk_text = chunk.text; diff --git a/crates/multi_buffer/src/transaction.rs b/crates/multi_buffer/src/transaction.rs index 062d25d8233777190113aaa3e6a7f62396cfd08f..a65e394c8f1834a95ccbc70532aa03d2a3e6e34c 100644 --- a/crates/multi_buffer/src/transaction.rs +++ b/crates/multi_buffer/src/transaction.rs @@ -1,14 +1,14 @@ use gpui::{App, Context, Entity}; -use language::{self, Buffer, TextDimension, TransactionId}; +use language::{self, Buffer, TransactionId}; use std::{ collections::HashMap, - ops::{Range, Sub}, + ops::{AddAssign, Range, Sub}, time::{Duration, Instant}, }; use sum_tree::Bias; use text::BufferId; -use crate::BufferState; +use crate::{BufferState, MultiBufferDimension}; use super::{Event, ExcerptSummary, MultiBuffer}; @@ -320,7 +320,11 @@ impl MultiBuffer { cx: &App, ) -> Vec> where - D: TextDimension + Ord + Sub, + D: MultiBufferDimension + + Ord + + Sub + + AddAssign, + D::TextDimension: PartialOrd + Sub, { let Some(transaction) = self.history.transaction(transaction_id) else { return Vec::new(); @@ -336,24 +340,34 @@ impl MultiBuffer { }; let buffer = buffer_state.buffer.read(cx); - for range in 
buffer.edited_ranges_for_transaction_id::(*buffer_transaction) { + for range in + buffer.edited_ranges_for_transaction_id::(*buffer_transaction) + { for excerpt_id in &buffer_state.excerpts { cursor.seek(excerpt_id, Bias::Left); if let Some(excerpt) = cursor.item() && excerpt.locator == *excerpt_id { - let excerpt_buffer_start = excerpt.range.context.start.summary::(buffer); - let excerpt_buffer_end = excerpt.range.context.end.summary::(buffer); + let excerpt_buffer_start = excerpt + .range + .context + .start + .summary::(buffer); + let excerpt_buffer_end = excerpt + .range + .context + .end + .summary::(buffer); let excerpt_range = excerpt_buffer_start..excerpt_buffer_end; if excerpt_range.contains(&range.start) && excerpt_range.contains(&range.end) { - let excerpt_start = D::from_text_summary(&cursor.start().text); + let excerpt_start = D::from_summary(&cursor.start().text); let mut start = excerpt_start; - start.add_assign(&(range.start - excerpt_buffer_start)); + start += range.start - excerpt_buffer_start; let mut end = excerpt_start; - end.add_assign(&(range.end - excerpt_buffer_start)); + end += range.end - excerpt_buffer_start; ranges.push(start..end); break; diff --git a/crates/outline/src/outline.rs b/crates/outline/src/outline.rs index 7ffbd5ef440996718337d839730a159b1f6593b7..7127627226d3aa55877f067038b69e6e848e1c3a 100644 --- a/crates/outline/src/outline.rs +++ b/crates/outline/src/outline.rs @@ -6,7 +6,7 @@ use std::{ use editor::scroll::ScrollOffset; use editor::{Anchor, AnchorRangeExt, Editor, scroll::Autoscroll}; -use editor::{RowHighlightOptions, SelectionEffects}; +use editor::{MultiBufferOffset, RowHighlightOptions, SelectionEffects}; use fuzzy::StringMatch; use gpui::{ App, Context, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, HighlightStyle, @@ -247,7 +247,7 @@ impl PickerDelegate for OutlineViewDelegate { let buffer = editor.buffer().read(cx).snapshot(cx); let cursor_offset = editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .head(); (buffer, cursor_offset) }); @@ -259,8 +259,8 @@ impl PickerDelegate for OutlineViewDelegate { .map(|(ix, item)| { let range = item.range.to_offset(&buffer); let distance_to_closest_endpoint = cmp::min( - (range.start as isize - cursor_offset as isize).abs(), - (range.end as isize - cursor_offset as isize).abs(), + (range.start.0 as isize - cursor_offset.0 as isize).abs(), + (range.end.0 as isize - cursor_offset.0 as isize).abs(), ); let depth = if range.contains(&cursor_offset) { Some(item.depth) diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index b21fd02a05c7fca93f09b436488318fdc3bd33c4..6a7036fce81eee5810dfbc41f57119efd22cfdca 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -7,7 +7,7 @@ use collections::{BTreeSet, HashMap, hash_map}; use command_palette_hooks::CommandPaletteFilter; use db::kvp::KEY_VALUE_STORE; use editor::{ - Editor, EditorEvent, + Editor, EditorEvent, MultiBufferOffset, items::{ entry_diagnostic_aware_icon_decoration_and_color, entry_diagnostic_aware_icon_name_and_color, entry_git_aware_label_color, @@ -1925,7 +1925,9 @@ impl ProjectPanel { self.filename_editor.update(cx, |editor, cx| { editor.set_text(file_name, window, cx); editor.change_selections(Default::default(), window, cx, |s| { - s.select_ranges([selection]) + s.select_ranges([ + MultiBufferOffset(selection.start)..MultiBufferOffset(selection.end) + ]) }); }); 
self.update_visible_entries(None, true, true, window, cx); diff --git a/crates/project_panel/src/project_panel_tests.rs b/crates/project_panel/src/project_panel_tests.rs index a85ba36c5297d7f40eb08ff42ddf086408a01316..6cf487bf9849a9252abc21504171b8c6bdf7e298 100644 --- a/crates/project_panel/src/project_panel_tests.rs +++ b/crates/project_panel/src/project_panel_tests.rs @@ -1,5 +1,6 @@ use super::*; use collections::HashSet; +use editor::MultiBufferOffset; use gpui::{Empty, Entity, TestAppContext, VisualTestContext, WindowHandle}; use pretty_assertions::assert_eq; use project::FakeFs; @@ -658,7 +659,9 @@ async fn test_editing_files(cx: &mut gpui::TestAppContext) { let confirm = panel.update_in(cx, |panel, window, cx| { panel.filename_editor.update(cx, |editor, cx| { - let file_name_selections = editor.selections.all::(&editor.display_snapshot(cx)); + let file_name_selections = editor + .selections + .all::(&editor.display_snapshot(cx)); assert_eq!( file_name_selections.len(), 1, @@ -666,12 +669,13 @@ async fn test_editing_files(cx: &mut gpui::TestAppContext) { ); let file_name_selection = &file_name_selections[0]; assert_eq!( - file_name_selection.start, 0, + file_name_selection.start, + MultiBufferOffset(0), "Should select the file name from the start" ); assert_eq!( file_name_selection.end, - "another-filename".len(), + MultiBufferOffset("another-filename".len()), "Should not select file extension" ); @@ -732,11 +736,11 @@ async fn test_editing_files(cx: &mut gpui::TestAppContext) { panel.update_in(cx, |panel, window, cx| { panel.filename_editor.update(cx, |editor, cx| { - let file_name_selections = editor.selections.all::(&editor.display_snapshot(cx)); + let file_name_selections = editor.selections.all::(&editor.display_snapshot(cx)); assert_eq!(file_name_selections.len(), 1, "File editing should have a single selection, but got: {file_name_selections:?}"); let file_name_selection = &file_name_selections[0]; - assert_eq!(file_name_selection.start, 0, "Should select the file name from the start"); - assert_eq!(file_name_selection.end, "a-different-filename.tar".len(), "Should not select file extension, but still may select anything up to the last dot.."); + assert_eq!(file_name_selection.start, MultiBufferOffset(0), "Should select the file name from the start"); + assert_eq!(file_name_selection.end, MultiBufferOffset("a-different-filename.tar".len()), "Should not select file extension, but still may select anything up to the last dot.."); }); panel.cancel(&menu::Cancel, window, cx) @@ -1218,7 +1222,9 @@ async fn test_copy_paste(cx: &mut gpui::TestAppContext) { panel.update_in(cx, |panel, window, cx| { panel.filename_editor.update(cx, |editor, cx| { - let file_name_selections = editor.selections.all::(&editor.display_snapshot(cx)); + let file_name_selections = editor + .selections + .all::(&editor.display_snapshot(cx)); assert_eq!( file_name_selections.len(), 1, @@ -1227,12 +1233,12 @@ async fn test_copy_paste(cx: &mut gpui::TestAppContext) { let file_name_selection = &file_name_selections[0]; assert_eq!( file_name_selection.start, - "one".len(), + MultiBufferOffset("one".len()), "Should select the file name disambiguation after the original file name" ); assert_eq!( file_name_selection.end, - "one copy".len(), + MultiBufferOffset("one copy".len()), "Should select the file name disambiguation until the extension" ); }); diff --git a/crates/repl/src/repl_editor.rs b/crates/repl/src/repl_editor.rs index 84293fb27fdac7cfec7c3b7d38ecbc6527345e5b..9e52637ab75c02b14e798600584fe18ca3f55805 
100644 --- a/crates/repl/src/repl_editor.rs +++ b/crates/repl/src/repl_editor.rs @@ -4,7 +4,7 @@ use std::ops::Range; use std::sync::Arc; use anyhow::{Context as _, Result}; -use editor::Editor; +use editor::{Editor, MultiBufferOffset}; use gpui::{App, Entity, WeakEntity, Window, prelude::*}; use language::{BufferSnapshot, Language, LanguageName, Point}; use project::{ProjectItem as _, WorktreeId}; @@ -478,7 +478,9 @@ fn get_language(editor: WeakEntity, cx: &mut App) -> Option(&display_snapshot); + let selection = editor + .selections + .newest::(&display_snapshot); display_snapshot .buffer_snapshot() .language_at(selection.head()) diff --git a/crates/rope/src/offset_utf16.rs b/crates/rope/src/offset_utf16.rs index 9a52b3c3f900788262696fe136dcc7d5995b3a5a..1223fbbe3898285cb7da5124f2f5a2cbe45a1f64 100644 --- a/crates/rope/src/offset_utf16.rs +++ b/crates/rope/src/offset_utf16.rs @@ -32,7 +32,6 @@ impl Sub for OffsetUtf16 { type Output = OffsetUtf16; fn sub(self, other: Self) -> Self::Output { - debug_assert!(other <= self); Self(self.0 - other.0) } } diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index a5699554a32b552e395001ded24512e10d645d4b..ad39022c0d6181bd5d5f4fdfc1b84ea4a667340d 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -1534,39 +1534,63 @@ where } } -impl ops::Sub for DimensionPair +impl ops::Sub for DimensionPair where - K: ops::Sub, - V: ops::Sub, + K: ops::Sub, + V: ops::Sub, { - type Output = Self; + type Output = DimensionPair; fn sub(self, rhs: Self) -> Self::Output { - Self { + DimensionPair { key: self.key - rhs.key, value: self.value.zip(rhs.value).map(|(a, b)| a - b), } } } +impl ops::AddAssign> for DimensionPair +where + K: ops::AddAssign, + V: ops::AddAssign, +{ + fn add_assign(&mut self, rhs: DimensionPair) { + self.key += rhs.key; + if let Some(value) = &mut self.value { + if let Some(other_value) = rhs.value { + *value += other_value; + } else { + self.value.take(); + } + } + } +} + +impl std::ops::AddAssign> for Point { + fn add_assign(&mut self, rhs: DimensionPair) { + *self += rhs.key; + } +} + impl cmp::Eq for DimensionPair where K: cmp::Eq {} -impl<'a, K, V> sum_tree::Dimension<'a, ChunkSummary> for DimensionPair +impl<'a, K, V, S> sum_tree::Dimension<'a, S> for DimensionPair where - K: sum_tree::Dimension<'a, ChunkSummary>, - V: sum_tree::Dimension<'a, ChunkSummary>, + S: sum_tree::Summary, + K: sum_tree::Dimension<'a, S>, + V: sum_tree::Dimension<'a, S>, { - fn zero(_cx: ()) -> Self { + fn zero(cx: S::Context<'_>) -> Self { Self { - key: K::zero(_cx), - value: Some(V::zero(_cx)), + key: K::zero(cx), + value: Some(V::zero(cx)), } } - fn add_summary(&mut self, summary: &'a ChunkSummary, _cx: ()) { - self.key.add_summary(summary, _cx); + fn add_summary(&mut self, summary: &'a S, cx: S::Context<'_>) { + self.key.add_summary(summary, cx); if let Some(value) = &mut self.value { - value.add_summary(summary, _cx); + value.add_summary(summary, cx); } } } diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index a601f5a683f2c464e792c351c566358212bdf312..0b45455faea1c6cd4474ac630d725ee57e1021f4 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -10,7 +10,7 @@ use any_vec::AnyVec; use anyhow::Context as _; use collections::HashMap; use editor::{ - DisplayPoint, Editor, EditorSettings, + DisplayPoint, Editor, EditorSettings, MultiBufferOffset, actions::{Backtab, Tab}, }; use futures::channel::oneshot; @@ -868,7 +868,11 @@ impl BufferSearchBar { .buffer() 
.update(cx, |replacement_buffer, cx| { let len = replacement_buffer.len(cx); - replacement_buffer.edit([(0..len, replacement.unwrap())], None, cx); + replacement_buffer.edit( + [(MultiBufferOffset(0)..len, replacement.unwrap())], + None, + cx, + ); }); }); } @@ -892,7 +896,7 @@ impl BufferSearchBar { self.query_editor.update(cx, |query_editor, cx| { query_editor.buffer().update(cx, |query_buffer, cx| { let len = query_buffer.len(cx); - query_buffer.edit([(0..len, query)], None, cx); + query_buffer.edit([(MultiBufferOffset(0)..len, query)], None, cx); }); }); self.set_search_options(options, cx); diff --git a/crates/tasks_ui/src/tasks_ui.rs b/crates/tasks_ui/src/tasks_ui.rs index b0185f98568399326ab0e45ffe713f7f1dc504fb..35c8a2ee220c6dba3732ca0f323bc50eb592ce19 100644 --- a/crates/tasks_ui/src/tasks_ui.rs +++ b/crates/tasks_ui/src/tasks_ui.rs @@ -392,7 +392,7 @@ fn worktree_context(worktree_abs_path: &Path) -> TaskContext { mod tests { use std::{collections::HashMap, sync::Arc}; - use editor::{Editor, SelectionEffects}; + use editor::{Editor, MultiBufferOffset, SelectionEffects}; use gpui::TestAppContext; use language::{Language, LanguageConfig}; use project::{BasicContextProvider, FakeFs, Project, task_store::TaskStore}; @@ -539,7 +539,7 @@ mod tests { // And now, let's select an identifier. editor2.update_in(cx, |editor, window, cx| { editor.change_selections(SelectionEffects::no_scroll(), window, cx, |selections| { - selections.select_ranges([14..18]) + selections.select_ranges([MultiBufferOffset(14)..MultiBufferOffset(18)]) }) }); diff --git a/crates/text/src/selection.rs b/crates/text/src/selection.rs index 349d557fab8c980901149698223cae78739797d8..e355f70c492ff3cdf2632f4e6204723fb05c9235 100644 --- a/crates/text/src/selection.rs +++ b/crates/text/src/selection.rs @@ -130,9 +130,15 @@ impl Selection { } } -impl Selection { +impl Selection { + pub fn len(&self) -> ::Output { + self.end - self.start + } +} + +impl Selection { #[cfg(feature = "test-support")] - pub fn from_offset(offset: usize) -> Self { + pub fn from_offset(offset: T) -> Self { Selection { id: 0, start: offset, @@ -142,7 +148,7 @@ impl Selection { } } - pub fn equals(&self, offset_range: &Range) -> bool { + pub fn equals(&self, offset_range: &Range) -> bool { self.start == offset_range.start && self.end == offset_range.end } } diff --git a/crates/text/src/subscription.rs b/crates/text/src/subscription.rs index 878e8a2cfe0a82782300089881b8cf31b428d2c2..50857a2de4ca2f9a89514a482973a0d14cce2163 100644 --- a/crates/text/src/subscription.rs +++ b/crates/text/src/subscription.rs @@ -6,36 +6,55 @@ use std::{ }; #[derive(Default)] -pub struct Topic(Mutex>>>>); +pub struct Topic(Mutex>>>>); -pub struct Subscription(Arc>>); +pub struct Subscription(Arc>>); -impl Topic { - pub fn subscribe(&mut self) -> Subscription { +impl Topic +where + T: 'static + + Copy + + Ord + + std::ops::Sub + + std::ops::Add + + std::ops::AddAssign + + Default, + TDelta: Ord + Copy, +{ + pub fn subscribe(&mut self) -> Subscription { let subscription = Subscription(Default::default()); self.0.get_mut().push(Arc::downgrade(&subscription.0)); subscription } - pub fn publish(&self, edits: impl Clone + IntoIterator>) { + pub fn publish(&self, edits: impl Clone + IntoIterator>) { publish(&mut self.0.lock(), edits); } - pub fn publish_mut(&mut self, edits: impl Clone + IntoIterator>) { + pub fn publish_mut(&mut self, edits: impl Clone + IntoIterator>) { publish(self.0.get_mut(), edits); } } -impl Subscription { - pub fn consume(&self) -> Patch { +impl 
Subscription { + pub fn consume(&self) -> Patch { mem::take(&mut *self.0.lock()) } } -fn publish( - subscriptions: &mut Vec>>>, - edits: impl Clone + IntoIterator>, -) { +fn publish( + subscriptions: &mut Vec>>>, + edits: impl Clone + IntoIterator>, +) where + T: 'static + + Copy + + Ord + + std::ops::Sub + + std::ops::Add + + std::ops::AddAssign + + Default, + TDelta: Ord + Copy, +{ subscriptions.retain(|subscription| { if let Some(subscription) = subscription.upgrade() { let mut patch = subscription.lock(); diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 316bdb59faed8438d9664a904c7900491c59376b..e476103879d700dc6121882055bc7e2cabf3ed5a 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -54,7 +54,7 @@ pub struct Buffer { deferred_ops: OperationQueue, deferred_replicas: HashSet, pub lamport_clock: clock::Lamport, - subscriptions: Topic, + subscriptions: Topic, edit_id_resolvers: HashMap>>, wait_for_version_txs: Vec<(clock::Global, oneshot::Sender<()>)>, } @@ -1619,7 +1619,7 @@ impl Buffer { self.edited_ranges_for_edit_ids(&transaction.edit_ids) } - pub fn subscribe(&mut self) -> Subscription { + pub fn subscribe(&mut self) -> Subscription { self.subscriptions.subscribe() } diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index e1cc58b89560e46a68e05fe6c8e75dbefb4e3e83..eb0749794adb321d8ce19f8ad5adcf67b9a41bba 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -6,8 +6,8 @@ mod select; use editor::display_map::DisplaySnapshot; use editor::{ - DisplayPoint, Editor, EditorSettings, HideMouseCursorOrigin, SelectionEffects, ToOffset, - ToPoint, movement, + DisplayPoint, Editor, EditorSettings, HideMouseCursorOrigin, MultiBufferOffset, + SelectionEffects, ToOffset, ToPoint, movement, }; use gpui::actions; use gpui::{Context, Window}; @@ -523,7 +523,7 @@ impl Vim { ..range.end.to_offset(&display_map, Bias::Left); if !byte_range.is_empty() { - let replacement_text = text.repeat(byte_range.len()); + let replacement_text = text.repeat(byte_range.end - byte_range.start); edits.push((byte_range, replacement_text)); } } @@ -620,7 +620,7 @@ impl Vim { self.update_editor(cx, |_, editor, cx| { let newest = editor .selections - .newest::(&editor.display_snapshot(cx)); + .newest::(&editor.display_snapshot(cx)); editor.change_selections(Default::default(), window, cx, |s| s.select(vec![newest])); }); } diff --git a/crates/vim/src/helix/boundary.rs b/crates/vim/src/helix/boundary.rs index a6de926bc5a10415dad6584f3d07476b2bf0e5d7..0c2ebbeef00a306a388756455bfb6ffcd40395e8 100644 --- a/crates/vim/src/helix/boundary.rs +++ b/crates/vim/src/helix/boundary.rs @@ -1,10 +1,7 @@ -use std::{ - cmp::Ordering, - ops::{Deref, DerefMut, Range}, -}; +use std::{cmp::Ordering, ops::Range}; use editor::{ - DisplayPoint, + DisplayPoint, MultiBufferOffset, display_map::{DisplaySnapshot, ToDisplayPoint}, movement, }; @@ -104,8 +101,8 @@ trait BoundedObject { let next_end = self.next_end(map, end_search_start, outer)?; let maybe_next_start = self.next_start(map, start_search_start, outer); if let Some(next_start) = maybe_next_start - && (*next_start < *next_end - || *next_start == *next_end && self.can_be_zero_width(outer)) + && (next_start.0 < next_end.0 + || next_start.0 == next_end.0 && self.can_be_zero_width(outer)) && !self.ambiguous_outer() { let closing = self.close_at_end(next_start, map, outer)?; @@ -133,8 +130,8 @@ trait BoundedObject { let previous_start = self.previous_start(map, start_search_end, outer)?; let maybe_previous_end = 
self.previous_end(map, end_search_end, outer); if let Some(previous_end) = maybe_previous_end - && (*previous_end > *previous_start - || *previous_end == *previous_start && self.can_be_zero_width(outer)) + && (previous_end.0 > previous_start.0 + || previous_end.0 == previous_start.0 && self.can_be_zero_width(outer)) && !self.ambiguous_outer() { let closing = self.close_at_start(previous_end, map, outer)?; @@ -151,30 +148,22 @@ trait BoundedObject { } } -#[derive(Clone, Copy, PartialEq, Debug)] -struct Offset(usize); -impl Deref for Offset { - type Target = usize; - fn deref(&self) -> &Self::Target { - &self.0 - } -} -impl DerefMut for Offset { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } -} +#[derive(Clone, Copy, PartialEq, Debug, PartialOrd, Ord, Eq)] +struct Offset(MultiBufferOffset); impl Offset { fn next(self, map: &DisplaySnapshot) -> Option { - let next = Self(map.buffer_snapshot().clip_offset(*self + 1, Bias::Right)); - (*next > *self).then(|| next) + let next = Self( + map.buffer_snapshot() + .clip_offset(self.0 + 1usize, Bias::Right), + ); + (next.0 > self.0).then(|| next) } fn previous(self, map: &DisplaySnapshot) -> Option { - if *self == 0 { + if self.0 == MultiBufferOffset(0) { return None; } Some(Self( - map.buffer_snapshot().clip_offset(*self - 1, Bias::Left), + map.buffer_snapshot().clip_offset(self.0 - 1, Bias::Left), )) } fn range( @@ -211,7 +200,7 @@ impl HelixTextObject for B { let max_end = self.close_at_end(search_start, map, find_outer)?; let min_start = self.close_at_start(max_end, map, find_outer)?; - (*min_start <= *relative_to.start).then(|| min_start..max_end) + (min_start <= relative_to.start).then(|| min_start..max_end) }) } @@ -279,8 +268,8 @@ fn relative_range( min_start..max_end }; - let start = wanted_range.start.clone().to_display_point(map); - let end = wanted_range.end.clone().to_display_point(map); + let start = wanted_range.start.0.to_display_point(map); + let end = wanted_range.end.0.to_display_point(map); Some(start..end) } @@ -390,7 +379,7 @@ impl ImmediateBoundary { impl BoundedObject for ImmediateBoundary { fn next_start(&self, map: &DisplaySnapshot, from: Offset, outer: bool) -> Option { try_find_boundary(map, from, |left, right| { - let classifier = map.buffer_snapshot().char_classifier_at(*from); + let classifier = map.buffer_snapshot().char_classifier_at(from.0); if outer { self.is_outer_start(left, right, classifier) } else { @@ -400,7 +389,7 @@ impl BoundedObject for ImmediateBoundary { } fn next_end(&self, map: &DisplaySnapshot, from: Offset, outer: bool) -> Option { try_find_boundary(map, from, |left, right| { - let classifier = map.buffer_snapshot().char_classifier_at(*from); + let classifier = map.buffer_snapshot().char_classifier_at(from.0); if outer { self.is_outer_end(left, right, classifier) } else { @@ -410,7 +399,7 @@ impl BoundedObject for ImmediateBoundary { } fn previous_start(&self, map: &DisplaySnapshot, from: Offset, outer: bool) -> Option { try_find_preceding_boundary(map, from, |left, right| { - let classifier = map.buffer_snapshot().char_classifier_at(*from); + let classifier = map.buffer_snapshot().char_classifier_at(from.0); if outer { self.is_outer_start(left, right, classifier) } else { @@ -420,7 +409,7 @@ impl BoundedObject for ImmediateBoundary { } fn previous_end(&self, map: &DisplaySnapshot, from: Offset, outer: bool) -> Option { try_find_preceding_boundary(map, from, |left, right| { - let classifier = map.buffer_snapshot().char_classifier_at(*from); + let classifier = 
map.buffer_snapshot().char_classifier_at(from.0); if outer { self.is_outer_end(left, right, classifier) } else { @@ -572,7 +561,7 @@ impl FuzzyBoundary { boundary_kind: Boundary, ) -> Option { let generate_boundary_data = |left, right, point: Offset| { - let classifier = map.buffer_snapshot().char_classifier_at(*from); + let classifier = map.buffer_snapshot().char_classifier_at(from.0); let reach_boundary = if outer && boundary_kind == Boundary::Start { self.is_near_potential_outer_start(left, right, &classifier) } else if !outer && boundary_kind == Boundary::Start { @@ -598,9 +587,9 @@ impl FuzzyBoundary { Ordering::Greater => !backward, }); if backward { - boundaries.max_by_key(|boundary| **boundary) + boundaries.max_by_key(|boundary| *boundary) } else { - boundaries.min_by_key(|boundary| **boundary) + boundaries.min_by_key(|boundary| *boundary) } } } @@ -662,15 +651,15 @@ fn try_find_boundary_data( ) -> Option { let mut prev_ch = map .buffer_snapshot() - .reversed_chars_at(*from) + .reversed_chars_at(from.0) .next() .unwrap_or('\0'); - for ch in map.buffer_snapshot().chars_at(*from).chain(['\0']) { + for ch in map.buffer_snapshot().chars_at(from.0).chain(['\0']) { if let Some(boundary_information) = boundary_information(prev_ch, ch, from) { return Some(boundary_information); } - *from += ch.len_utf8(); + from.0 += ch.len_utf8(); prev_ch = ch; } @@ -702,13 +691,21 @@ fn try_find_preceding_boundary_data( mut from: Offset, is_boundary: impl Fn(char, char, Offset) -> Option, ) -> Option { - let mut prev_ch = map.buffer_snapshot().chars_at(*from).next().unwrap_or('\0'); + let mut prev_ch = map + .buffer_snapshot() + .chars_at(from.0) + .next() + .unwrap_or('\0'); - for ch in map.buffer_snapshot().reversed_chars_at(*from).chain(['\0']) { + for ch in map + .buffer_snapshot() + .reversed_chars_at(from.0) + .chain(['\0']) + { if let Some(boundary_information) = is_boundary(ch, prev_ch, from) { return Some(boundary_information); } - from.0 = from.0.saturating_sub(ch.len_utf8()); + from.0.0 = from.0.0.saturating_sub(ch.len_utf8()); prev_ch = ch; } diff --git a/crates/vim/src/helix/duplicate.rs b/crates/vim/src/helix/duplicate.rs index 1b1f10b00b6a7381f22c6ec3be674dc2c085eff6..37796c57aa0b9e27f2d7d786c9b8870e49d5871e 100644 --- a/crates/vim/src/helix/duplicate.rs +++ b/crates/vim/src/helix/duplicate.rs @@ -1,6 +1,6 @@ use std::ops::Range; -use editor::{DisplayPoint, display_map::DisplaySnapshot}; +use editor::{DisplayPoint, MultiBufferOffset, display_map::DisplaySnapshot}; use gpui::Context; use text::Bias; use ui::Window; @@ -111,7 +111,7 @@ fn find_next_valid_duplicate_space( fn display_point_range_to_offset_range( range: &Range, map: &DisplaySnapshot, -) -> Range { +) -> Range { range.start.to_offset(map, Bias::Left)..range.end.to_offset(map, Bias::Right) } diff --git a/crates/vim/src/helix/paste.rs b/crates/vim/src/helix/paste.rs index 67af7650011b0220f4ad05cebb6badf5d0ba7aa7..d91b138853abb07dc10957a4ee1f5af158066e06 100644 --- a/crates/vim/src/helix/paste.rs +++ b/crates/vim/src/helix/paste.rs @@ -125,7 +125,7 @@ impl Vim { s.select_ranges(new_selections.into_iter().map(|(anchor, len)| { let offset = anchor.to_offset(&snapshot); if action.before { - offset.saturating_sub(len)..offset + offset.saturating_sub_usize(len)..offset } else { offset..(offset + len) } diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index fd4171a36bc5baf0dc1cc60efe707fa275e4be81..b0faa7bb068135a3feafc507e4f8a6ed97863e8c 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -1,5 +1,5 @@ 
use editor::{ - Anchor, Bias, DisplayPoint, Editor, RowExt, ToOffset, ToPoint, + Anchor, Bias, BufferOffset, DisplayPoint, Editor, MultiBufferOffset, RowExt, ToOffset, ToPoint, display_map::{DisplayRow, DisplaySnapshot, FoldPoint, ToDisplayPoint}, movement::{ self, FindRange, TextLayoutDetails, find_boundary, find_preceding_boundary_display_point, @@ -2143,7 +2143,7 @@ pub(crate) fn sentence_backwards( if start_of_next_sentence < start { times = times.saturating_sub(1); } - if times == 0 || offset == 0 { + if times == 0 || offset.0 == 0 { return map.clip_point( start_of_next_sentence .to_offset(&map.buffer_snapshot()) @@ -2207,7 +2207,7 @@ pub(crate) fn sentence_forwards( map.max_point() } -fn next_non_blank(map: &DisplaySnapshot, start: usize) -> usize { +fn next_non_blank(map: &DisplaySnapshot, start: MultiBufferOffset) -> MultiBufferOffset { for (c, o) in map.buffer_chars_at(start) { if c == '\n' || !c.is_whitespace() { return o; @@ -2219,7 +2219,10 @@ fn next_non_blank(map: &DisplaySnapshot, start: usize) -> usize { // given the offset after a ., !, or ? find the start of the next sentence. // if this is not a sentence boundary, returns None. -fn start_of_next_sentence(map: &DisplaySnapshot, end_of_sentence: usize) -> Option { +fn start_of_next_sentence( + map: &DisplaySnapshot, + end_of_sentence: MultiBufferOffset, +) -> Option { let chars = map.buffer_chars_at(end_of_sentence); let mut seen_space = false; @@ -2253,10 +2256,10 @@ fn go_to_line(map: &DisplaySnapshot, display_point: DisplayPoint, line: usize) - .clip_point(Point::new((line - 1) as u32, point.column), Bias::Left), ); let buffer_range = excerpt.buffer_range(); - if offset >= buffer_range.start && offset <= buffer_range.end { + if offset >= buffer_range.start.0 && offset <= buffer_range.end.0 { let point = map .buffer_snapshot() - .offset_to_point(excerpt.map_offset_from_buffer(offset)); + .offset_to_point(excerpt.map_offset_from_buffer(BufferOffset(offset))); return map.clip_point(map.point_to_display_point(point, Bias::Left), Bias::Left); } let mut last_position = None; @@ -2360,6 +2363,9 @@ fn matching_tag(map: &DisplaySnapshot, head: DisplayPoint) -> Option DisplayPoint { + if !map.is_singleton() { + return display_point; + } // https://github.com/vim/vim/blob/1d87e11a1ef201b26ed87585fba70182ad0c468a/runtime/doc/motion.txt#L1200 let display_point = map.clip_at_line_end(display_point); let point = display_point.to_point(map); @@ -2375,9 +2381,10 @@ fn matching(map: &DisplaySnapshot, display_point: DisplayPoint) -> DisplayPoint // Attempt to find the smallest enclosing bracket range that also contains // the offset, which only happens if the cursor is currently in a bracket. 
let range_filter = |_buffer: &language::BufferSnapshot, - opening_range: Range, - closing_range: Range| { - opening_range.contains(&offset) || closing_range.contains(&offset) + opening_range: Range, + closing_range: Range| { + opening_range.contains(&BufferOffset(offset.0)) + || closing_range.contains(&BufferOffset(offset.0)) }; let bracket_ranges = snapshot @@ -2840,7 +2847,7 @@ fn method_motion( for _ in 0..times { let point = map.display_point_to_point(display_point, Bias::Left); - let offset = point.to_offset(&map.buffer_snapshot()); + let offset = point.to_offset(&map.buffer_snapshot()).0; let range = if direction == Direction::Prev { 0..offset } else { @@ -2869,7 +2876,7 @@ fn method_motion( } else { possibilities.min().unwrap_or(offset) }; - let new_point = map.clip_point(dest.to_display_point(map), Bias::Left); + let new_point = map.clip_point(MultiBufferOffset(dest).to_display_point(map), Bias::Left); if new_point == display_point { break; } @@ -2890,7 +2897,7 @@ fn comment_motion( for _ in 0..times { let point = map.display_point_to_point(display_point, Bias::Left); - let offset = point.to_offset(&map.buffer_snapshot()); + let offset = point.to_offset(&map.buffer_snapshot()).0; let range = if direction == Direction::Prev { 0..offset } else { @@ -2923,7 +2930,7 @@ fn comment_motion( } else { possibilities.min().unwrap_or(offset) }; - let new_point = map.clip_point(dest.to_display_point(map), Bias::Left); + let new_point = map.clip_point(MultiBufferOffset(dest).to_display_point(map), Bias::Left); if new_point == display_point { break; } @@ -2946,7 +2953,7 @@ fn section_motion( .display_point_to_point(display_point, Bias::Left) .to_offset(&map.buffer_snapshot()); let range = if direction == Direction::Prev { - 0..offset + MultiBufferOffset(0)..offset } else { offset..map.buffer_snapshot().len() }; @@ -2977,7 +2984,7 @@ fn section_motion( let relevant = if is_start { range.start } else { range.end }; if direction == Direction::Prev && relevant < offset { Some(relevant) - } else if direction == Direction::Next && relevant > offset + 1 { + } else if direction == Direction::Next && relevant > offset + 1usize { Some(relevant) } else { None @@ -2985,7 +2992,7 @@ fn section_motion( }); let offset = if direction == Direction::Prev { - possibilities.max().unwrap_or(0) + possibilities.max().unwrap_or(MultiBufferOffset(0)) } else { possibilities.min().unwrap_or(map.buffer_snapshot().len()) }; diff --git a/crates/vim/src/normal/increment.rs b/crates/vim/src/normal/increment.rs index 7eadf5053a15f33946031a332cb8a7f2dcb8ed52..a6a76b22aa16d6fd774dc32a8b4988804ad8e42c 100644 --- a/crates/vim/src/normal/increment.rs +++ b/crates/vim/src/normal/increment.rs @@ -211,9 +211,14 @@ fn find_target( let mut pre_char = String::new(); // Backward scan to find the start of the number, but stop at start_offset - for ch in snapshot.reversed_chars_at(offset + if offset < snapshot.len() { 1 } else { 0 }) { + let next_offset = if offset < snapshot.len() { + offset + 1usize + } else { + offset + }; + for ch in snapshot.reversed_chars_at(next_offset) { // Search boundaries - if offset == 0 || ch.is_whitespace() || (need_range && offset <= start_offset) { + if offset.0 == 0 || ch.is_whitespace() || (need_range && offset <= start_offset) { break; } diff --git a/crates/vim/src/normal/paste.rs b/crates/vim/src/normal/paste.rs index 74a28322d13b6ab0f563e6953f6b1edbfea66740..77305ea783c34e340a7ed840658088f3e6191abb 100644 --- a/crates/vim/src/normal/paste.rs +++ b/crates/vim/src/normal/paste.rs @@ -1,4 +1,7 @@ -use 
editor::{DisplayPoint, RowExt, SelectionEffects, display_map::ToDisplayPoint, movement}; +use editor::{ + DisplayPoint, MultiBufferOffset, RowExt, SelectionEffects, display_map::ToDisplayPoint, + movement, +}; use gpui::{Action, Context, Window}; use language::{Bias, SelectionGoal}; use schemars::JsonSchema; @@ -174,7 +177,10 @@ impl Vim { original_indent_columns.push(original_indent_column); } - let cursor_offset = editor.selections.last::(&display_map).head(); + let cursor_offset = editor + .selections + .last::(&display_map) + .head(); if editor .buffer() .read(cx) diff --git a/crates/vim/src/object.rs b/crates/vim/src/object.rs index f361dd8f274879f067c49bf04c0a73ebbc34be06..2f5ccac07bfe5f6f11b048e317523292dd74294d 100644 --- a/crates/vim/src/object.rs +++ b/crates/vim/src/object.rs @@ -6,7 +6,7 @@ use crate::{ state::{Mode, Operator}, }; use editor::{ - Bias, DisplayPoint, Editor, ToOffset, + Bias, BufferOffset, DisplayPoint, Editor, MultiBufferOffset, ToOffset, display_map::{DisplaySnapshot, ToDisplayPoint}, movement::{self, FindRange}, }; @@ -81,8 +81,8 @@ pub struct CandidateRange { #[derive(Debug, Clone)] pub struct CandidateWithRanges { candidate: CandidateRange, - open_range: Range, - close_range: Range, + open_range: Range, + close_range: Range, } /// Selects text at the same indentation level. @@ -120,7 +120,7 @@ struct CurlyBrackets { opening: bool, } -fn cover_or_next, Range)>>( +fn cover_or_next, Range)>>( candidates: Option, caret: DisplayPoint, map: &DisplaySnapshot, @@ -128,7 +128,7 @@ fn cover_or_next, Range)>>( let caret_offset = caret.to_offset(map, Bias::Left); let mut covering = vec![]; let mut next_ones = vec![]; - let snapshot = &map.buffer_snapshot(); + let snapshot = map.buffer_snapshot(); if let Some(ranges) = candidates { for (open_range, close_range) in ranges { @@ -171,7 +171,7 @@ fn cover_or_next, Range)>>( if !next_ones.is_empty() { return next_ones.into_iter().min_by_key(|r| { let start = r.candidate.start.to_offset(map, Bias::Left); - (start as isize - caret_offset as isize).abs() + (start.0 as isize - caret_offset.0 as isize).abs() }); } @@ -181,8 +181,8 @@ fn cover_or_next, Range)>>( type DelimiterPredicate = dyn Fn(&BufferSnapshot, usize, usize) -> bool; struct DelimiterRange { - open: Range, - close: Range, + open: Range, + close: Range, } impl DelimiterRange { @@ -221,14 +221,14 @@ fn find_mini_delimiters( .buffer_snapshot() .bracket_ranges(visible_line_range) .map(|ranges| { - ranges.filter_map(move |(open, close)| { + ranges.filter_map(|(open, close)| { // Convert the ranges from multibuffer space to buffer space as // that is what `is_valid_delimiter` expects, otherwise it might // panic as the values might be out of bounds. 
let buffer_open = excerpt.map_range_to_buffer(open.clone()); let buffer_close = excerpt.map_range_to_buffer(close.clone()); - if is_valid_delimiter(buffer, buffer_open.start, buffer_close.start) { + if is_valid_delimiter(buffer, buffer_open.start.0, buffer_close.start.0) { Some((open, close)) } else { None @@ -252,8 +252,12 @@ fn find_mini_delimiters( Some( DelimiterRange { - open: open_bracket, - close: close_bracket, + open: excerpt.map_range_from_buffer( + BufferOffset(open_bracket.start)..BufferOffset(open_bracket.end), + ), + close: excerpt.map_range_from_buffer( + BufferOffset(close_bracket.start)..BufferOffset(close_bracket.end), + ), } .to_display_range(map, around), ) @@ -899,7 +903,7 @@ pub fn surrounding_html_tag( // Find the most closest to current offset let mut cursor = buffer.syntax_layer_at(offset)?.node().walk(); let mut last_child_node = cursor.node(); - while cursor.goto_first_child_for_byte(offset).is_some() { + while cursor.goto_first_child_for_byte(offset.0).is_some() { last_child_node = cursor.node(); } @@ -916,10 +920,16 @@ pub fn surrounding_html_tag( - range.start.to_offset(map, Bias::Left) <= 1 { - offset <= last_child.end_byte() + offset.0 <= last_child.end_byte() } else { - range.start.to_offset(map, Bias::Left) >= first_child.start_byte() - && range.end.to_offset(map, Bias::Left) <= last_child.start_byte() + 1 + excerpt + .map_offset_to_buffer(range.start.to_offset(map, Bias::Left)) + .0 + >= first_child.start_byte() + && excerpt + .map_offset_to_buffer(range.end.to_offset(map, Bias::Left)) + .0 + <= last_child.start_byte() + 1 }; if open_tag.is_some() && open_tag == close_tag && is_valid { let range = if around { @@ -927,6 +937,7 @@ pub fn surrounding_html_tag( } else { first_child.byte_range().end..last_child.byte_range().start }; + let range = BufferOffset(range.start)..BufferOffset(range.end); if excerpt.contains_buffer_range(range.clone()) { let result = excerpt.map_range_from_buffer(range); return Some( @@ -1093,7 +1104,8 @@ fn text_object( .collect(); matches.sort_by_key(|r| r.end - r.start); if let Some(buffer_range) = matches.first() { - let range = excerpt.map_range_from_buffer(buffer_range.clone()); + let buffer_range = BufferOffset(buffer_range.start)..BufferOffset(buffer_range.end); + let range = excerpt.map_range_from_buffer(buffer_range); return Some(range.start.to_display_point(map)..range.end.to_display_point(map)); } @@ -1113,10 +1125,12 @@ fn text_object( if let Some(buffer_range) = matches.first() && !buffer_range.is_empty() { - let range = excerpt.map_range_from_buffer(buffer_range.clone()); + let buffer_range = BufferOffset(buffer_range.start)..BufferOffset(buffer_range.end); + let range = excerpt.map_range_from_buffer(buffer_range); return Some(range.start.to_display_point(map)..range.end.to_display_point(map)); } - let buffer_range = excerpt.map_range_from_buffer(around_range.clone()); + let around_range = BufferOffset(around_range.start)..BufferOffset(around_range.end); + let buffer_range = excerpt.map_range_from_buffer(around_range); return Some(buffer_range.start.to_display_point(map)..buffer_range.end.to_display_point(map)); } @@ -1134,9 +1148,9 @@ fn argument( fn comma_delimited_range_at( buffer: &BufferSnapshot, - mut offset: usize, + mut offset: BufferOffset, include_comma: bool, - ) -> Option> { + ) -> Option> { // Seek to the first non-whitespace character offset += buffer .chars_at(offset) @@ -1151,7 +1165,7 @@ fn argument( } // If the cursor is outside the brackets, ignore them - if open.start == offset || close.end == offset 
{ + if open.start == offset.0 || close.end == offset.0 { return false; } @@ -1167,7 +1181,7 @@ fn argument( let (open_bracket, close_bracket) = buffer.innermost_enclosing_bracket_ranges(offset..offset, Some(&bracket_filter))?; - let inner_bracket_range = open_bracket.end..close_bracket.start; + let inner_bracket_range = BufferOffset(open_bracket.end)..BufferOffset(close_bracket.start); let layer = buffer.syntax_layer_at(offset)?; let node = layer.node(); @@ -1186,7 +1200,7 @@ fn argument( parent_covers_bracket_range = covers_bracket_range; // Unable to find a child node with a parent that covers the bracket range, so no argument to select - cursor.goto_first_child_for_byte(offset)?; + cursor.goto_first_child_for_byte(offset.0)?; } let mut argument_node = cursor.node(); @@ -1256,7 +1270,7 @@ fn argument( } } - Some(start..end) + Some(BufferOffset(start)..BufferOffset(end)) } let result = comma_delimited_range_at(buffer, excerpt.map_offset_to_buffer(offset), around)?; @@ -1387,7 +1401,7 @@ fn is_possible_sentence_start(character: char) -> bool { const SENTENCE_END_PUNCTUATION: &[char] = &['.', '!', '?']; const SENTENCE_END_FILLERS: &[char] = &[')', ']', '"', '\'']; const SENTENCE_END_WHITESPACE: &[char] = &[' ', '\t', '\n']; -fn is_sentence_end(map: &DisplaySnapshot, offset: usize) -> bool { +fn is_sentence_end(map: &DisplaySnapshot, offset: MultiBufferOffset) -> bool { let mut next_chars = map.buffer_chars_at(offset).peekable(); if let Some((char, _)) = next_chars.next() { // We are at a double newline. This position is a sentence end. diff --git a/crates/vim/src/surrounds.rs b/crates/vim/src/surrounds.rs index 579ab7842096f1e5cb1bb4c70e2fd8f4256355d0..b3f9307aac3df18334cf24a619dc640ccb625e24 100644 --- a/crates/vim/src/surrounds.rs +++ b/crates/vim/src/surrounds.rs @@ -4,7 +4,7 @@ use crate::{ object::{Object, surrounding_markers}, state::Mode, }; -use editor::{Bias, movement}; +use editor::{Bias, MultiBufferOffset, movement}; use gpui::{Context, Window}; use language::BracketPair; @@ -175,7 +175,7 @@ impl Vim { while let Some((ch, offset)) = chars_and_offset.next() { if ch.to_string() == pair.start { let start = offset; - let mut end = start + 1; + let mut end = start + 1usize; if surround && let Some((next_ch, _)) = chars_and_offset.peek() && next_ch.eq(&' ') @@ -193,7 +193,7 @@ impl Vim { while let Some((ch, offset)) = reverse_chars_and_offsets.next() { if ch.to_string() == pair.end { let mut start = offset; - let end = start + 1; + let end = start + 1usize; if surround && let Some((next_ch, _)) = reverse_chars_and_offsets.peek() && next_ch.eq(&' ') @@ -282,7 +282,7 @@ impl Vim { // that the end replacement string does not exceed // this value. Helpful when dealing with newlines. 
let mut edit_len = 0; - let mut open_range_end = 0; + let mut open_range_end = MultiBufferOffset(0); let mut chars_and_offset = display_map .buffer_chars_at(range.start.to_offset(&display_map, Bias::Left)) .peekable(); @@ -291,7 +291,7 @@ impl Vim { if ch.to_string() == will_replace_pair.start { let mut open_str = pair.start.clone(); let start = offset; - open_range_end = start + 1; + open_range_end = start + 1usize; while let Some((next_ch, _)) = chars_and_offset.next() && next_ch == ' ' { @@ -322,7 +322,7 @@ impl Vim { if ch.to_string() == will_replace_pair.end { let mut close_str = String::new(); let mut start = offset; - let end = start + 1; + let end = start + 1usize; while let Some((next_ch, _)) = reverse_chars_and_offsets.next() && next_ch == ' ' && close_str.len() < edit_len - 1 diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs index 3cd0646ff4fc0a6966f12db75b64999e3655ab98..5a98ec47b122e0d1ed7fd1edfc7c5e2265c40d90 100644 --- a/crates/vim/src/test.rs +++ b/crates/vim/src/test.rs @@ -7,7 +7,7 @@ use std::{sync::Arc, time::Duration}; use collections::HashMap; use command_palette::CommandPalette; use editor::{ - AnchorRangeExt, DisplayPoint, Editor, EditorMode, MultiBuffer, + AnchorRangeExt, DisplayPoint, Editor, EditorMode, MultiBuffer, MultiBufferOffset, actions::{DeleteLine, WrapSelectionsInTag}, code_context_menus::CodeContextMenu, display_map::DisplayRow, @@ -908,6 +908,9 @@ fn assert_pending_input(cx: &mut VimTestContext, expected: &str) { .map(|highlight| highlight.to_offset(&snapshot.buffer_snapshot())) .collect::>(), ranges + .iter() + .map(|range| MultiBufferOffset(range.start)..MultiBufferOffset(range.end)) + .collect::>() ) }); } @@ -967,7 +970,7 @@ async fn test_jk_delay(cx: &mut gpui::TestAppContext) { .iter() .map(|highlight| highlight.to_offset(&snapshot.buffer_snapshot())) .collect::>(), - vec![0..1] + vec![MultiBufferOffset(0)..MultiBufferOffset(1)] ) }); cx.executor().advance_clock(Duration::from_millis(500)); diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index 14ee4709a74ba68e92f07dd53182416ea93ed6d5..b633c9ef3c5aa13286277d602ec08efc3ab03373 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -21,8 +21,8 @@ mod visual; use collections::HashMap; use editor::{ - Anchor, Bias, Editor, EditorEvent, EditorSettings, HideMouseCursorOrigin, SelectionEffects, - ToPoint, + Anchor, Bias, Editor, EditorEvent, EditorSettings, HideMouseCursorOrigin, MultiBufferOffset, + SelectionEffects, ToPoint, actions::Paste, movement::{self, FindRange}, }; @@ -1388,7 +1388,7 @@ impl Vim { let newest_selection_empty = editor.update(cx, |editor, cx| { editor .selections - .newest::(&editor.display_snapshot(cx)) + .newest::(&editor.display_snapshot(cx)) .is_empty() }); let editor = editor.read(cx); @@ -1488,7 +1488,7 @@ impl Vim { let snapshot = &editor.snapshot(window, cx); let selection = editor .selections - .newest::(&snapshot.display_snapshot); + .newest::(&snapshot.display_snapshot); let snapshot = snapshot.buffer_snapshot(); let (range, kind) = diff --git a/crates/vim/src/visual.rs b/crates/vim/src/visual.rs index 0abba86e993a76b6c2a1c18f02d68d72d092e78c..3c6f237435e3924a907e059ed1a878641c287e7e 100644 --- a/crates/vim/src/visual.rs +++ b/crates/vim/src/visual.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use collections::HashMap; use editor::{ - Bias, DisplayPoint, Editor, SelectionEffects, + Bias, DisplayPoint, Editor, MultiBufferOffset, SelectionEffects, display_map::{DisplaySnapshot, ToDisplayPoint}, movement, }; @@ -778,7 +778,7 @@ impl Vim { { let 
range = row_range.start.to_offset(&display_map, Bias::Right) ..row_range.end.to_offset(&display_map, Bias::Right); - let text = text.repeat(range.len()); + let text = text.repeat(range.end - range.start); edits.push((range, text)); } } @@ -844,8 +844,8 @@ impl Vim { return; }; let vim_is_normal = self.mode == Mode::Normal; - let mut start_selection = 0usize; - let mut end_selection = 0usize; + let mut start_selection = MultiBufferOffset(0); + let mut end_selection = MultiBufferOffset(0); self.update_editor(cx, |_, editor, _| { editor.set_collapse_matches(false); @@ -868,7 +868,7 @@ impl Vim { self.update_editor(cx, |_, editor, cx| { let latest = editor .selections - .newest::(&editor.display_snapshot(cx)); + .newest::(&editor.display_snapshot(cx)); start_selection = latest.start; end_selection = latest.end; }); @@ -891,7 +891,7 @@ impl Vim { self.update_editor(cx, |_, editor, cx| { let latest = editor .selections - .newest::(&editor.display_snapshot(cx)); + .newest::(&editor.display_snapshot(cx)); if vim_is_normal { start_selection = latest.start; end_selection = latest.end; diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 18f4a12b5d4abf8a11ae825bed2ad44feb1563ec..e90cf59f38e69be74e67969d230d5532b72dbba7 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -2241,7 +2241,9 @@ mod tests { use super::*; use assets::Assets; use collections::HashSet; - use editor::{DisplayPoint, Editor, SelectionEffects, display_map::DisplayRow}; + use editor::{ + DisplayPoint, Editor, MultiBufferOffset, SelectionEffects, display_map::DisplayRow, + }; use gpui::{ Action, AnyWindowHandle, App, AssetSource, BorrowAppContext, SemanticVersion, TestAppContext, UpdateGlobal, VisualTestContext, WindowHandle, actions, @@ -3508,7 +3510,11 @@ mod tests { assert!(!editor.is_dirty(cx)); assert_eq!(editor.title(cx), "untitled"); assert!(Arc::ptr_eq( - &editor.buffer().read(cx).language_at(0, cx).unwrap(), + &editor + .buffer() + .read(cx) + .language_at(MultiBufferOffset(0), cx) + .unwrap(), &languages::PLAIN_TEXT )); editor.handle_input("hi", window, cx); @@ -3542,7 +3548,12 @@ mod tests { assert!(!editor.is_dirty(cx)); assert_eq!(editor.title(cx), "the-new-name.rs"); assert_eq!( - editor.buffer().read(cx).language_at(0, cx).unwrap().name(), + editor + .buffer() + .read(cx) + .language_at(MultiBufferOffset(0), cx) + .unwrap() + .name(), "Rust".into() ); }); @@ -3648,7 +3659,11 @@ mod tests { .update(cx, |_, window, cx| { editor.update(cx, |editor, cx| { assert!(Arc::ptr_eq( - &editor.buffer().read(cx).language_at(0, cx).unwrap(), + &editor + .buffer() + .read(cx) + .language_at(MultiBufferOffset(0), cx) + .unwrap(), &languages::PLAIN_TEXT )); editor.handle_input("hi", window, cx); @@ -3672,7 +3687,12 @@ mod tests { editor.update(cx, |editor, cx| { assert!(!editor.is_dirty(cx)); assert_eq!( - editor.buffer().read(cx).language_at(0, cx).unwrap().name(), + editor + .buffer() + .read(cx) + .language_at(MultiBufferOffset(0), cx) + .unwrap() + .name(), "Rust".into() ) }); From c70f2d16adced88fcea30a989939a9d424add68e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 19 Nov 2025 23:40:17 +0100 Subject: [PATCH 0235/1030] lsp_button: Do not surface language servers from different windows in current workspace (#42733) This led to a problem where we'd have a zombie entries in LSP dropdown because they were treated as if they originated from an unknown worktree. 
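The gist of the fix is that the LSP button now only lists servers it can attribute to a worktree of the current workspace; servers it cannot attribute (for example ones reported by the shared `LspStore` on behalf of other windows) are dropped instead of being grouped under an "Unknown worktree" header. Below is a minimal, self-contained sketch of that grouping idea in plain Rust — `ServerEntry`, `group_servers_for_workspace`, and the integer worktree ids are simplified placeholders for illustration, not the actual Zed types:

```rust
use std::collections::{BTreeMap, HashSet};

// Hypothetical, simplified stand-in for the real server/worktree bookkeeping.
#[derive(Debug)]
struct ServerEntry {
    name: String,
    worktree: Option<u64>, // None => could not be attributed to this workspace
}

/// Keep only servers that belong to a worktree of the current workspace,
/// grouping them per worktree; unattributable servers are skipped entirely.
fn group_servers_for_workspace(
    servers: Vec<ServerEntry>,
    workspace_worktrees: &HashSet<u64>,
) -> BTreeMap<u64, Vec<ServerEntry>> {
    let mut per_worktree: BTreeMap<u64, Vec<ServerEntry>> = BTreeMap::new();
    for server in servers {
        if let Some(worktree) = server.worktree {
            if workspace_worktrees.contains(&worktree) {
                per_worktree.entry(worktree).or_default().push(server);
            }
        }
        // Servers without a known worktree used to be shown under an
        // "Unknown worktree" header; now they are simply not surfaced.
    }
    per_worktree
}

fn main() {
    let worktrees: HashSet<u64> = [1, 2].into_iter().collect();
    let servers = vec![
        ServerEntry { name: "rust-analyzer".into(), worktree: Some(1) },
        ServerEntry { name: "stale-from-other-window".into(), worktree: None },
    ];
    let grouped = group_servers_for_workspace(servers, &worktrees);
    assert_eq!(grouped.len(), 1); // the unattributable entry is dropped
    println!("{grouped:?}");
}
```

The actual change in `lsp_button.rs` additionally makes `ServerInfo.id` a required `LanguageServerId` rather than an `Option`, since every entry that survives the grouping is now backed by a concrete server id.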
Closes #42077 Release Notes: - Fixed LSP status list containing zombie entries for LSPs in other windows --------- Co-authored-by: HactarCE <6060305+HactarCE@users.noreply.github.com> Co-authored-by: Kirill Bulatov --- crates/language_tools/src/lsp_button.rs | 83 ++++++++----------------- 1 file changed, 26 insertions(+), 57 deletions(-) diff --git a/crates/language_tools/src/lsp_button.rs b/crates/language_tools/src/lsp_button.rs index ee49114b787e764989453fae1d12f61253eea099..335381c6f79d950498a0f0c1d330cb21c681f32e 100644 --- a/crates/language_tools/src/lsp_button.rs +++ b/crates/language_tools/src/lsp_button.rs @@ -94,7 +94,7 @@ struct LanguageServerBinaryStatus { #[derive(Debug)] struct ServerInfo { name: LanguageServerName, - id: Option, + id: LanguageServerId, health: Option, binary_status: Option, message: Option, @@ -102,9 +102,7 @@ struct ServerInfo { impl ServerInfo { fn server_selector(&self) -> LanguageServerSelector { - self.id - .map(LanguageServerSelector::Id) - .unwrap_or_else(|| LanguageServerSelector::Name(self.name.clone())) + LanguageServerSelector::Id(self.id) } } @@ -214,7 +212,6 @@ impl LanguageServerState { let Some(server_info) = item.server_info() else { continue; }; - let server_selector = server_info.server_selector(); let is_remote = self .lsp_store @@ -430,7 +427,7 @@ enum ServerData<'a> { binary_status: Option<&'a LanguageServerBinaryStatus>, }, WithBinaryStatus { - server_id: Option, + server_id: LanguageServerId, server_name: &'a LanguageServerName, binary_status: &'a LanguageServerBinaryStatus, }, @@ -444,7 +441,7 @@ enum LspMenuItem { binary_status: Option, }, WithBinaryStatus { - server_id: Option, + server_id: LanguageServerId, server_name: LanguageServerName, binary_status: LanguageServerBinaryStatus, }, @@ -469,7 +466,7 @@ impl LspMenuItem { .. } => Some(ServerInfo { name: health.name.clone(), - id: Some(*server_id), + id: *server_id, health: health.health(), binary_status: binary_status.clone(), message: health.message(), @@ -591,6 +588,8 @@ impl LspButton { }; let mut updated = false; + // TODO `LspStore` is global and reports status from all language servers, even from the other windows. + // Also, we do not get "LSP removed" events so LSPs are never removed. match e { LspStoreEvent::LanguageServerUpdate { language_server_id, @@ -758,7 +757,6 @@ impl LspButton { .ok(); let mut servers_per_worktree = BTreeMap::>::new(); - let mut servers_without_worktree = Vec::::new(); let mut servers_with_health_checks = HashSet::default(); for (server_id, health) in &state.language_servers.health_statuses { @@ -780,12 +778,11 @@ impl LspButton { health, binary_status, }; - match worktree_name { - Some(worktree_name) => servers_per_worktree + if let Some(worktree_name) = worktree_name { + servers_per_worktree .entry(worktree_name.clone()) .or_default() - .push(server_data), - None => servers_without_worktree.push(server_data), + .push(server_data); } } @@ -822,42 +819,25 @@ impl LspButton { BinaryStatus::Failed { .. 
} => {} } - match server_names_to_worktrees.get(server_name) { - Some(worktrees_for_name) => { - match worktrees_for_name - .iter() - .find(|(worktree, _)| active_worktrees.contains(worktree)) - .or_else(|| worktrees_for_name.iter().next()) - { - Some((worktree, server_id)) => { - let worktree_name = - SharedString::new(worktree.read(cx).root_name_str()); - servers_per_worktree - .entry(worktree_name.clone()) - .or_default() - .push(ServerData::WithBinaryStatus { - server_name, - binary_status, - server_id: Some(*server_id), - }); - } - None => servers_without_worktree.push(ServerData::WithBinaryStatus { - server_name, - binary_status, - server_id: None, - }), - } - } - None => servers_without_worktree.push(ServerData::WithBinaryStatus { - server_name, - binary_status, - server_id: None, - }), + if let Some(worktrees_for_name) = server_names_to_worktrees.get(server_name) + && let Some((worktree, server_id)) = worktrees_for_name + .iter() + .find(|(worktree, _)| active_worktrees.contains(worktree)) + .or_else(|| worktrees_for_name.iter().next()) + { + let worktree_name = SharedString::new(worktree.read(cx).root_name_str()); + servers_per_worktree + .entry(worktree_name.clone()) + .or_default() + .push(ServerData::WithBinaryStatus { + server_name, + binary_status, + server_id: *server_id, + }); } } - let mut new_lsp_items = - Vec::with_capacity(servers_per_worktree.len() + servers_without_worktree.len() + 2); + let mut new_lsp_items = Vec::with_capacity(servers_per_worktree.len() + 1); for (worktree_name, worktree_servers) in servers_per_worktree { if worktree_servers.is_empty() { continue; @@ -868,17 +848,6 @@ impl LspButton { }); new_lsp_items.extend(worktree_servers.into_iter().map(ServerData::into_lsp_item)); } - if !servers_without_worktree.is_empty() { - new_lsp_items.push(LspMenuItem::Header { - header: Some(SharedString::from("Unknown worktree")), - separator: false, - }); - new_lsp_items.extend( - servers_without_worktree - .into_iter() - .map(ServerData::into_lsp_item), - ); - } if !new_lsp_items.is_empty() { if can_stop_all { new_lsp_items.push(LspMenuItem::ToggleServersButton { restart: true }); From f2f40a5099db9eeb9fdbacc0f800aa4ad7f233ee Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Wed, 19 Nov 2025 15:40:06 -0800 Subject: [PATCH 0236/1030] zeta2: Merge Sweep and Zeta2 Providers (#43097) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... 
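At a high level, the merge keeps a single `Zeta` entity and selects the backing model through a `ZetaEditPredictionModel` enum (`ZedCloud` or `Sweep`), with the Sweep path gated on a `SWEEP_AI_TOKEN`. A minimal standalone sketch of that selection pattern follows; `Prediction` and the request bodies are placeholders here, not the real zeta2 API:

```rust
// Minimal sketch of the model-selection pattern; `Prediction` and the request
// bodies are placeholders, not the real zeta2 API.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ZetaEditPredictionModel {
    ZedCloud,
    Sweep,
}

#[derive(Debug)]
struct Prediction(String);

struct Zeta {
    model: ZetaEditPredictionModel,
    sweep_api_token: Option<String>,
}

impl Zeta {
    fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) {
        if model == ZetaEditPredictionModel::Sweep {
            // The Sweep backend is only usable when a token is present.
            self.sweep_api_token = std::env::var("SWEEP_AI_TOKEN").ok();
        }
        self.model = model;
    }

    fn request_prediction(&self) -> Option<Prediction> {
        match self.model {
            ZetaEditPredictionModel::ZedCloud => Some(Prediction("from Zed cloud".into())),
            ZetaEditPredictionModel::Sweep => self
                .sweep_api_token
                .as_ref()
                .map(|_| Prediction("from Sweep".into())),
        }
    }
}

fn main() {
    let mut zeta = Zeta {
        model: ZetaEditPredictionModel::ZedCloud,
        sweep_api_token: None,
    };
    zeta.set_edit_prediction_model(ZetaEditPredictionModel::Sweep);
    println!("{:?}", zeta.request_prediction());
}
```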
--------- Co-authored-by: Max Brunsfeld --- Cargo.lock | 32 +- Cargo.toml | 2 - crates/edit_prediction_button/Cargo.toml | 2 +- .../src/edit_prediction_button.rs | 2 +- crates/sweep_ai/Cargo.toml | 43 - crates/sweep_ai/LICENSE-GPL | 1 - crates/sweep_ai/src/sweep_ai.rs | 784 ------------------ crates/zed/Cargo.toml | 1 - .../zed/src/zed/edit_prediction_registry.rs | 71 +- crates/zeta2/Cargo.toml | 1 + crates/zeta2/src/provider.rs | 11 +- .../src/api.rs => zeta2/src/sweep_ai.rs} | 48 ++ crates/zeta2/src/zeta2.rs | 358 ++++++-- crates/zeta_cli/Cargo.toml | 1 - crates/zeta_cli/src/evaluate.rs | 68 +- crates/zeta_cli/src/main.rs | 2 +- crates/zeta_cli/src/predict.rs | 334 +++----- 17 files changed, 558 insertions(+), 1203 deletions(-) delete mode 100644 crates/sweep_ai/Cargo.toml delete mode 120000 crates/sweep_ai/LICENSE-GPL delete mode 100644 crates/sweep_ai/src/sweep_ai.rs rename crates/{sweep_ai/src/api.rs => zeta2/src/sweep_ai.rs} (59%) diff --git a/Cargo.lock b/Cargo.lock index 4f9a3f26e9a20df498bd3b735cfec54aa77c77cd..873fcdbb63fcabee0f722ae27beac486d0ce8670 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5314,13 +5314,13 @@ dependencies = [ "serde_json", "settings", "supermaven", - "sweep_ai", "telemetry", "theme", "ui", "workspace", "zed_actions", "zeta", + "zeta2", ] [[package]] @@ -16590,33 +16590,6 @@ dependencies = [ "zeno", ] -[[package]] -name = "sweep_ai" -version = "0.1.0" -dependencies = [ - "anyhow", - "arrayvec", - "brotli", - "client", - "collections", - "edit_prediction", - "feature_flags", - "futures 0.3.31", - "gpui", - "http_client", - "indoc", - "language", - "project", - "release_channel", - "reqwest_client", - "serde", - "serde_json", - "tree-sitter-rust", - "util", - "workspace", - "zlog", -] - [[package]] name = "symphonia" version = "0.5.5" @@ -21343,7 +21316,6 @@ dependencies = [ "snippets_ui", "supermaven", "svg_preview", - "sweep_ai", "sysinfo 0.37.2", "system_specs", "tab_switcher", @@ -21754,6 +21726,7 @@ version = "0.1.0" dependencies = [ "anyhow", "arrayvec", + "brotli", "chrono", "client", "clock", @@ -21864,7 +21837,6 @@ dependencies = [ "shellexpand 2.1.2", "smol", "soa-rs", - "sweep_ai", "terminal_view", "toml 0.8.23", "util", diff --git a/Cargo.toml b/Cargo.toml index 03a86c9e25bd8f5a1bb8498b3cb0169055672ad4..a4c9caccd9539ffde7d57d36dcfaf4cf162c7e92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -165,7 +165,6 @@ members = [ "crates/sum_tree", "crates/supermaven", "crates/supermaven_api", - "crates/sweep_ai", "crates/codestral", "crates/svg_preview", "crates/system_specs", @@ -399,7 +398,6 @@ streaming_diff = { path = "crates/streaming_diff" } sum_tree = { path = "crates/sum_tree" } supermaven = { path = "crates/supermaven" } supermaven_api = { path = "crates/supermaven_api" } -sweep_ai = { path = "crates/sweep_ai" } codestral = { path = "crates/codestral" } system_specs = { path = "crates/system_specs" } tab_switcher = { path = "crates/tab_switcher" } diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 3ed3d9411510ad2d978b221d8cb3412465a66879..9877b70161b3fdd16a0f667d85085520c9fe4f86 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -30,12 +30,12 @@ project.workspace = true regex.workspace = true settings.workspace = true supermaven.workspace = true -sweep_ai.workspace = true telemetry.workspace = true ui.workspace = true workspace.workspace = true zed_actions.workspace = true zeta.workspace = true +zeta2.workspace = true [dev-dependencies] copilot = { 
workspace = true, features = ["test-support"] } diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 4f5f60d5a2328e5e56d65e87add7338b7e572346..ba00e95c488dc8e8704274638087c8334f96e1a3 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -28,7 +28,6 @@ use std::{ time::Duration, }; use supermaven::{AccountStatus, Supermaven}; -use sweep_ai::SweepFeatureFlag; use ui::{ Clickable, ContextMenu, ContextMenuEntry, DocumentationEdge, DocumentationSide, IconButton, IconButtonShape, Indicator, PopoverMenu, PopoverMenuHandle, ProgressBar, Tooltip, prelude::*, @@ -39,6 +38,7 @@ use workspace::{ }; use zed_actions::OpenBrowser; use zeta::RateCompletions; +use zeta2::SweepFeatureFlag; actions!( edit_prediction, diff --git a/crates/sweep_ai/Cargo.toml b/crates/sweep_ai/Cargo.toml deleted file mode 100644 index 4edf7ea1bb6af9a6657ccfe310c0253b118ec2e7..0000000000000000000000000000000000000000 --- a/crates/sweep_ai/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "sweep_ai" -version = "0.1.0" -edition.workspace = true -publish.workspace = true -license = "GPL-3.0-or-later" -exclude = ["fixtures"] - -[lints] -workspace = true - -[lib] -path = "src/sweep_ai.rs" -doctest = false - -[dependencies] -anyhow.workspace = true -arrayvec.workspace = true -brotli.workspace = true -client.workspace = true -collections.workspace = true -edit_prediction.workspace = true -feature_flags.workspace = true -futures.workspace = true -gpui.workspace = true -http_client.workspace = true -language.workspace = true -project.workspace = true -release_channel.workspace = true -serde.workspace = true -serde_json.workspace = true -util.workspace = true -workspace.workspace = true - -[dev-dependencies] -gpui = { workspace = true, features = ["test-support"] } -http_client = { workspace = true, features = ["test-support"] } -indoc.workspace = true -language = { workspace = true, features = ["test-support"] } -reqwest_client = { workspace = true, features = ["test-support"] } -tree-sitter-rust.workspace = true -workspace = { workspace = true, features = ["test-support"] } -zlog.workspace = true diff --git a/crates/sweep_ai/LICENSE-GPL b/crates/sweep_ai/LICENSE-GPL deleted file mode 120000 index 89e542f750cd3860a0598eff0dc34b56d7336dc4..0000000000000000000000000000000000000000 --- a/crates/sweep_ai/LICENSE-GPL +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-GPL \ No newline at end of file diff --git a/crates/sweep_ai/src/sweep_ai.rs b/crates/sweep_ai/src/sweep_ai.rs deleted file mode 100644 index 1b4c92120d866a218987f36161e9520a0f3f703a..0000000000000000000000000000000000000000 --- a/crates/sweep_ai/src/sweep_ai.rs +++ /dev/null @@ -1,784 +0,0 @@ -mod api; - -use anyhow::{Context as _, Result}; -use arrayvec::ArrayVec; -use client::telemetry; -use collections::HashMap; -use feature_flags::FeatureFlag; -use futures::AsyncReadExt as _; -use gpui::{App, AppContext, Context, Entity, EntityId, Global, Task, WeakEntity}; -use http_client::{AsyncBody, Method}; -use language::{ - Anchor, Buffer, BufferSnapshot, EditPreview, Point, ToOffset as _, ToPoint, text_diff, -}; -use project::{Project, ProjectPath}; -use release_channel::{AppCommitSha, AppVersion}; -use std::collections::{VecDeque, hash_map}; -use std::fmt::{self, Display}; -use std::mem; -use std::{ - cmp, - fmt::Write, - ops::Range, - path::Path, - sync::Arc, - time::{Duration, Instant}, -}; -use 
util::ResultExt; -use util::rel_path::RelPath; -use workspace::Workspace; - -use crate::api::{AutocompleteRequest, AutocompleteResponse, FileChunk}; - -const CHANGE_GROUPING_LINE_SPAN: u32 = 8; -const MAX_EVENT_COUNT: usize = 6; - -const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - -pub struct SweepFeatureFlag; - -impl FeatureFlag for SweepFeatureFlag { - const NAME: &str = "sweep-ai"; -} - -#[derive(Clone)] -struct SweepAiGlobal(Entity); - -impl Global for SweepAiGlobal {} - -#[derive(Clone)] -pub struct EditPrediction { - pub id: EditPredictionId, - pub path: Arc, - pub edits: Arc<[(Range, Arc)]>, - pub snapshot: BufferSnapshot, - pub edit_preview: EditPreview, -} - -impl EditPrediction { - fn interpolate(&self, new_snapshot: &BufferSnapshot) -> Option, Arc)>> { - edit_prediction::interpolate_edits(&self.snapshot, new_snapshot, &self.edits) - } -} - -impl fmt::Debug for EditPrediction { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("EditPrediction") - .field("path", &self.path) - .field("edits", &self.edits) - .finish_non_exhaustive() - } -} - -#[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] -pub struct EditPredictionId(String); - -impl Display for EditPredictionId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -pub struct SweepAi { - projects: HashMap, - debug_info: Arc, - api_token: Option, -} - -struct SweepAiProject { - events: VecDeque, - registered_buffers: HashMap, -} - -impl SweepAi { - pub fn global(cx: &mut App) -> Option> { - cx.try_global::() - .map(|global| global.0.clone()) - } - - pub fn register(cx: &mut App) -> Entity { - Self::global(cx).unwrap_or_else(|| { - let entity = cx.new(|cx| Self::new(cx)); - cx.set_global(SweepAiGlobal(entity.clone())); - entity - }) - } - - pub fn clear_history(&mut self) { - for sweep_ai_project in self.projects.values_mut() { - sweep_ai_project.events.clear(); - } - } - - pub fn new(cx: &mut Context) -> Self { - Self { - api_token: std::env::var("SWEEP_AI_TOKEN").ok(), - projects: HashMap::default(), - debug_info: format!( - "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", - version = AppVersion::global(cx), - sha = AppCommitSha::try_global(cx).map_or("unknown".to_string(), |sha| sha.full()), - os = telemetry::os_name(), - ) - .into(), - } - } - - fn get_or_init_sweep_ai_project( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> &mut SweepAiProject { - let project_id = project.entity_id(); - match self.projects.entry(project_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - cx.observe_release(project, move |this, _, _cx| { - this.projects.remove(&project_id); - }) - .detach(); - entry.insert(SweepAiProject { - events: VecDeque::with_capacity(MAX_EVENT_COUNT), - registered_buffers: HashMap::default(), - }) - } - } - } - - pub fn register_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let sweep_ai_project = self.get_or_init_sweep_ai_project(project, cx); - Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); - } - - fn register_buffer_impl<'a>( - sweep_ai_project: &'a mut SweepAiProject, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) -> &'a mut RegisteredBuffer { - let buffer_id = buffer.entity_id(); - match sweep_ai_project.registered_buffers.entry(buffer_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - let snapshot = 
buffer.read(cx).snapshot(); - let project_entity_id = project.entity_id(); - entry.insert(RegisteredBuffer { - snapshot, - _subscriptions: [ - cx.subscribe(buffer, { - let project = project.downgrade(); - move |this, buffer, event, cx| { - if let language::BufferEvent::Edited = event - && let Some(project) = project.upgrade() - { - this.report_changes_for_buffer(&buffer, &project, cx); - } - } - }), - cx.observe_release(buffer, move |this, _buffer, _cx| { - let Some(sweep_ai_project) = this.projects.get_mut(&project_entity_id) - else { - return; - }; - sweep_ai_project.registered_buffers.remove(&buffer_id); - }), - ], - }) - } - } - } - - pub fn request_completion( - &mut self, - project: &Entity, - recent_buffers: impl Iterator, - active_buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - let snapshot = active_buffer.read(cx).snapshot(); - let debug_info = self.debug_info.clone(); - let Some(api_token) = self.api_token.clone() else { - return Task::ready(Ok(None)); - }; - let full_path: Arc = snapshot - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| "untitled".into()) - .into(); - - let project_file = project::File::from_dyn(snapshot.file()); - let repo_name = project_file - .map(|file| file.worktree.read(cx).root_name_str()) - .unwrap_or("untitled") - .into(); - let offset = position.to_offset(&snapshot); - - let project_state = self.get_or_init_sweep_ai_project(project, cx); - let events = project_state.events.clone(); - let http_client = cx.http_client(); - - let recent_buffer_snapshots = recent_buffers - .filter_map(|project_path| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - .collect::>(); - - let result = cx.background_spawn({ - let full_path = full_path.clone(); - async move { - let text = snapshot.text(); - - let mut recent_changes = String::new(); - - for event in events { - writeln!(&mut recent_changes, "{event}")?; - } - - let file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = language::Point::new(30, 0).min(snapshot.max_point()); - FileChunk { - content: snapshot - .text_for_range(language::Point::zero()..end_point) - .collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), - start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? 
- .0, - ) - }), - } - }) - .collect(); - - eprintln!("{recent_changes}"); - - let request_body = AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; - - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); - - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; - - let mut response = http_client.send(request).await?; - - let mut body: Vec = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; - - let response: AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); - - anyhow::Ok((response.autocomplete_id, edits, snapshot)) - } - }); - - let buffer = active_buffer.clone(); - - cx.spawn(async move |_, cx| { - let (id, edits, old_snapshot) = result.await?; - - if edits.is_empty() { - return anyhow::Ok(None); - } - - let Some((edits, new_snapshot, preview_task)) = - buffer.read_with(cx, |buffer, cx| { - let new_snapshot = buffer.snapshot(); - - let edits: Arc<[(Range, Arc)]> = - edit_prediction::interpolate_edits(&old_snapshot, &new_snapshot, &edits)? - .into(); - let preview_task = buffer.preview_edits(edits.clone(), cx); - - Some((edits, new_snapshot, preview_task)) - })? - else { - return anyhow::Ok(None); - }; - - let prediction = EditPrediction { - id: EditPredictionId(id), - path: full_path, - edits, - snapshot: new_snapshot, - edit_preview: preview_task.await, - }; - - anyhow::Ok(Some(prediction)) - }) - } - - fn report_changes_for_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let sweep_ai_project = self.get_or_init_sweep_ai_project(project, cx); - let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); - - let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version == registered_buffer.snapshot.version { - return; - } - - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - let end_edit_anchor = new_snapshot - .anchored_edits_since::(&old_snapshot.version) - .last() - .map(|(_, range)| range.end); - let events = &mut sweep_ai_project.events; - - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - end_edit_anchor: last_end_edit_anchor, - .. 
- }) = events.back_mut() - { - let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() - == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version; - - let should_coalesce = is_next_snapshot_of_same_buffer - && end_edit_anchor - .as_ref() - .zip(last_end_edit_anchor.as_ref()) - .is_some_and(|(a, b)| { - let a = a.to_point(&new_snapshot); - let b = b.to_point(&new_snapshot); - a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN - }); - - if should_coalesce { - *last_end_edit_anchor = end_edit_anchor; - *last_new_snapshot = new_snapshot; - return; - } - } - - if events.len() >= MAX_EVENT_COUNT { - events.pop_front(); - } - - events.push_back(Event::BufferChange { - old_snapshot, - new_snapshot, - end_edit_anchor, - }); - } -} - -struct RegisteredBuffer { - snapshot: BufferSnapshot, - _subscriptions: [gpui::Subscription; 2], -} - -#[derive(Clone)] -pub enum Event { - BufferChange { - old_snapshot: BufferSnapshot, - new_snapshot: BufferSnapshot, - end_edit_anchor: Option, - }, -} - -impl Display for Event { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - let old_path = old_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - let new_path = new_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - if old_path != new_path { - // TODO confirm how to do this for sweep - // writeln!(f, "User renamed {:?} to {:?}\n", old_path, new_path)?; - } - - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); - if !diff.is_empty() { - write!( - f, - "File: {}:\n{}\n", - new_path.display(util::paths::PathStyle::Posix), - diff - )? 
- } - - fmt::Result::Ok(()) - } - } - } -} - -#[derive(Debug, Clone)] -struct CurrentEditPrediction { - buffer_id: EntityId, - completion: EditPrediction, -} - -impl CurrentEditPrediction { - fn should_replace_completion(&self, old_completion: &Self, snapshot: &BufferSnapshot) -> bool { - if self.buffer_id != old_completion.buffer_id { - return true; - } - - let Some(old_edits) = old_completion.completion.interpolate(snapshot) else { - return true; - }; - let Some(new_edits) = self.completion.interpolate(snapshot) else { - return false; - }; - - if old_edits.len() == 1 && new_edits.len() == 1 { - let (old_range, old_text) = &old_edits[0]; - let (new_range, new_text) = &new_edits[0]; - new_range == old_range && new_text.starts_with(old_text.as_ref()) - } else { - true - } - } -} - -struct PendingCompletion { - id: usize, - _task: Task<()>, -} - -pub struct SweepAiEditPredictionProvider { - workspace: WeakEntity, - sweep_ai: Entity, - pending_completions: ArrayVec, - next_pending_completion_id: usize, - current_completion: Option, - last_request_timestamp: Instant, - project: Entity, -} - -impl SweepAiEditPredictionProvider { - pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); - - pub fn new( - sweep_ai: Entity, - workspace: WeakEntity, - project: Entity, - ) -> Self { - Self { - sweep_ai, - pending_completions: ArrayVec::new(), - next_pending_completion_id: 0, - current_completion: None, - last_request_timestamp: Instant::now(), - project, - workspace, - } - } -} - -impl edit_prediction::EditPredictionProvider for SweepAiEditPredictionProvider { - fn name() -> &'static str { - "zed-predict" - } - - fn display_name() -> &'static str { - "Zed's Edit Predictions" - } - - fn show_completions_in_menu() -> bool { - true - } - - fn show_tab_accept_marker() -> bool { - true - } - - fn is_enabled( - &self, - _buffer: &Entity, - _cursor_position: language::Anchor, - cx: &App, - ) -> bool { - self.sweep_ai.read(cx).api_token.is_some() - } - - fn is_refreshing(&self) -> bool { - !self.pending_completions.is_empty() - } - - fn refresh( - &mut self, - buffer: Entity, - position: language::Anchor, - _debounce: bool, - cx: &mut Context, - ) { - if let Some(current_completion) = self.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if current_completion - .completion - .interpolate(&snapshot) - .is_some() - { - return; - } - } - - let pending_completion_id = self.next_pending_completion_id; - self.next_pending_completion_id += 1; - let last_request_timestamp = self.last_request_timestamp; - - let project = self.project.clone(); - let workspace = self.workspace.clone(); - let task = cx.spawn(async move |this, cx| { - if let Some(timeout) = (last_request_timestamp + Self::THROTTLE_TIMEOUT) - .checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } - - let completion_request = this.update(cx, |this, cx| { - this.last_request_timestamp = Instant::now(); - - this.sweep_ai.update(cx, |sweep_ai, cx| { - let Some(recent_buffers) = workspace - .read_with(cx, |workspace, cx| { - workspace.recent_navigation_history_iter(cx) - }) - .log_err() - else { - return Task::ready(Ok(None)); - }; - sweep_ai.request_completion( - &project, - recent_buffers.map(move |(project_path, _)| project_path), - &buffer, - position, - cx, - ) - }) - }); - - let completion = match completion_request { - Ok(completion_request) => { - let completion_request = completion_request.await; - completion_request.map(|c| { - c.map(|completion| CurrentEditPrediction { 
- buffer_id: buffer.entity_id(), - completion, - }) - }) - } - Err(error) => Err(error), - }; - - let Some(new_completion) = completion - .context("edit prediction failed") - .log_err() - .flatten() - else { - this.update(cx, |this, cx| { - if this.pending_completions[0].id == pending_completion_id { - this.pending_completions.remove(0); - } else { - this.pending_completions.clear(); - } - - cx.notify(); - }) - .ok(); - return; - }; - - this.update(cx, |this, cx| { - if this.pending_completions[0].id == pending_completion_id { - this.pending_completions.remove(0); - } else { - this.pending_completions.clear(); - } - - if let Some(old_completion) = this.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if new_completion.should_replace_completion(old_completion, &snapshot) { - this.current_completion = Some(new_completion); - } - } else { - this.current_completion = Some(new_completion); - } - - cx.notify(); - }) - .ok(); - }); - - // We always maintain at most two pending completions. When we already - // have two, we replace the newest one. - if self.pending_completions.len() <= 1 { - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - _task: task, - }); - } else if self.pending_completions.len() == 2 { - self.pending_completions.pop(); - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - _task: task, - }); - } - } - - fn cycle( - &mut self, - _buffer: Entity, - _cursor_position: language::Anchor, - _direction: edit_prediction::Direction, - _cx: &mut Context, - ) { - // Right now we don't support cycling. - } - - fn accept(&mut self, _cx: &mut Context) { - self.pending_completions.clear(); - } - - fn discard(&mut self, _cx: &mut Context) { - self.pending_completions.clear(); - self.current_completion.take(); - } - - fn suggest( - &mut self, - buffer: &Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) -> Option { - let CurrentEditPrediction { - buffer_id, - completion, - .. - } = self.current_completion.as_mut()?; - - // Invalidate previous completion if it was generated for a different buffer. - if *buffer_id != buffer.entity_id() { - self.current_completion.take(); - return None; - } - - let buffer = buffer.read(cx); - let Some(edits) = completion.interpolate(&buffer.snapshot()) else { - self.current_completion.take(); - return None; - }; - - let cursor_row = cursor_position.to_point(buffer).row; - let (closest_edit_ix, (closest_edit_range, _)) = - edits.iter().enumerate().min_by_key(|(_, (range, _))| { - let distance_from_start = cursor_row.abs_diff(range.start.to_point(buffer).row); - let distance_from_end = cursor_row.abs_diff(range.end.to_point(buffer).row); - cmp::min(distance_from_start, distance_from_end) - })?; - - let mut edit_start_ix = closest_edit_ix; - for (range, _) in edits[..edit_start_ix].iter().rev() { - let distance_from_closest_edit = - closest_edit_range.start.to_point(buffer).row - range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_start_ix -= 1; - } else { - break; - } - } - - let mut edit_end_ix = closest_edit_ix + 1; - for (range, _) in &edits[edit_end_ix..] 
{ - let distance_from_closest_edit = - range.start.to_point(buffer).row - closest_edit_range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_end_ix += 1; - } else { - break; - } - } - - Some(edit_prediction::EditPrediction::Local { - id: Some(completion.id.to_string().into()), - edits: edits[edit_start_ix..edit_end_ix].to_vec(), - edit_preview: Some(completion.edit_preview.clone()), - }) - } -} diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 78d650793ebb98cdc9a52e50adc9fa57c7c24b4f..b0a4f6344c9a710af5cf6a391d7b2c0f03efe7b1 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -133,7 +133,6 @@ snippet_provider.workspace = true snippets_ui.workspace = true supermaven.workspace = true svg_preview.workspace = true -sweep_ai.workspace = true sysinfo.workspace = true tab_switcher.workspace = true task.workspace = true diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index 1723ca91f143c8529e14e24e0bdd85dc7b1c14d4..250a2b5a0e585d5acad7658a25f89bce12f766d2 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -10,9 +10,9 @@ use language_models::MistralLanguageModelProvider; use settings::{EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore}; use std::{cell::RefCell, rc::Rc, sync::Arc}; use supermaven::{Supermaven, SupermavenCompletionProvider}; -use sweep_ai::{SweepAiEditPredictionProvider, SweepFeatureFlag}; use ui::Window; use zeta::ZetaEditPredictionProvider; +use zeta2::SweepFeatureFlag; use zeta2::Zeta2FeatureFlag; pub fn init(client: Arc, user_store: Entity, cx: &mut App) { @@ -203,55 +203,41 @@ fn assign_edit_prediction_provider( let provider = cx.new(|_| CodestralCompletionProvider::new(http_client)); editor.set_edit_prediction_provider(Some(provider), window, cx); } - EditPredictionProvider::Experimental(name) => { - if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME - && cx.has_flag::() - { - if let Some(project) = editor.project() - && let Some(workspace) = editor.workspace() + value @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { + if let Some(project) = editor.project() { + let mut worktree = None; + if let Some(buffer) = &singleton_buffer + && let Some(file) = buffer.read(cx).file() { - let sweep_ai = sweep_ai::SweepAi::register(cx); - - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() - { - sweep_ai.update(cx, |sweep_ai, cx| { - sweep_ai.register_buffer(buffer, project, cx); - }); - } + let id = file.worktree_id(cx); + worktree = project.read(cx).worktree_for_id(id, cx); + } - let provider = cx.new(|_| { - sweep_ai::SweepAiEditPredictionProvider::new( - sweep_ai, - workspace.downgrade(), + if let EditPredictionProvider::Experimental(name) = value + && name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() + { + let zeta2 = zeta2::Zeta::global(client, &user_store, cx); + let provider = cx.new(|cx| { + zeta2::ZetaEditPredictionProvider::new( project.clone(), + &client, + &user_store, + cx, ) }); - editor.set_edit_prediction_provider(Some(provider), window, cx); - } - } else { - editor.set_edit_prediction_provider::( - None, window, cx, - ); - } - } - EditPredictionProvider::Zed => { - if user_store.read(cx).current_user().is_some() { - let mut worktree = None; - if let Some(buffer) = &singleton_buffer - && let Some(file) = buffer.read(cx).file() - { - let id = file.worktree_id(cx); - if let Some(inner_worktree) 
= editor - .project() - .and_then(|project| project.read(cx).worktree_for_id(id, cx)) + if let Some(buffer) = &singleton_buffer + && buffer.read(cx).file().is_some() { - worktree = Some(inner_worktree); + zeta2.update(cx, |zeta, cx| { + zeta.set_edit_prediction_model(zeta2::ZetaEditPredictionModel::Sweep); + zeta.register_buffer(buffer, project, cx); + }); } - } - if let Some(project) = editor.project() { + editor.set_edit_prediction_provider(Some(provider), window, cx); + } else if user_store.read(cx).current_user().is_some() { if cx.has_flag::() { let zeta = zeta2::Zeta::global(client, &user_store, cx); let provider = cx.new(|cx| { @@ -268,6 +254,9 @@ fn assign_edit_prediction_provider( && buffer.read(cx).file().is_some() { zeta.update(cx, |zeta, cx| { + zeta.set_edit_prediction_model( + zeta2::ZetaEditPredictionModel::ZedCloud, + ); zeta.register_buffer(buffer, project, cx); }); } diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml index 1eef507e6def3d80560ff1515623d0c42687d74a..0f156f68fac881d65d76f178315f40df1dba9d7f 100644 --- a/crates/zeta2/Cargo.toml +++ b/crates/zeta2/Cargo.toml @@ -17,6 +17,7 @@ eval-support = [] [dependencies] anyhow.workspace = true arrayvec.workspace = true +brotli.workspace = true chrono.workspace = true client.workspace = true cloud_llm_client.workspace = true diff --git a/crates/zeta2/src/provider.rs b/crates/zeta2/src/provider.rs index a19e7f9a1da5e1808c48e3ce0469d8b390698760..1b82826f663b092b5763935d9a7a2d4bb9607ebf 100644 --- a/crates/zeta2/src/provider.rs +++ b/crates/zeta2/src/provider.rs @@ -12,7 +12,7 @@ use language::ToPoint as _; use project::Project; use util::ResultExt as _; -use crate::{BufferEditPrediction, Zeta}; +use crate::{BufferEditPrediction, Zeta, ZetaEditPredictionModel}; pub struct ZetaEditPredictionProvider { zeta: Entity, @@ -85,9 +85,14 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { &self, _buffer: &Entity, _cursor_position: language::Anchor, - _cx: &App, + cx: &App, ) -> bool { - true + let zeta = self.zeta.read(cx); + if zeta.edit_prediction_model == ZetaEditPredictionModel::Sweep { + zeta.sweep_api_token.is_some() + } else { + true + } } fn is_refreshing(&self) -> bool { diff --git a/crates/sweep_ai/src/api.rs b/crates/zeta2/src/sweep_ai.rs similarity index 59% rename from crates/sweep_ai/src/api.rs rename to crates/zeta2/src/sweep_ai.rs index edb392885e476e3924d285613af1f0a4e8be8599..c56d7409fa212734c5f5a73a6b24319c27c7494f 100644 --- a/crates/sweep_ai/src/api.rs +++ b/crates/zeta2/src/sweep_ai.rs @@ -1,6 +1,8 @@ +use std::fmt; use std::{path::Path, sync::Arc}; use serde::{Deserialize, Serialize}; +use util::rel_path::RelPath; #[derive(Debug, Clone, Serialize)] pub struct AutocompleteRequest { @@ -88,3 +90,49 @@ pub struct AdditionalCompletion { pub logprobs: Option, pub finish_reason: Option, } + +pub(crate) fn write_event(event: crate::Event, f: &mut impl fmt::Write) -> fmt::Result { + match event { + crate::Event::BufferChange { + old_snapshot, + new_snapshot, + .. 
+ } => { + let old_path = old_snapshot + .file() + .map(|f| f.path().as_ref()) + .unwrap_or(RelPath::unix("untitled").unwrap()); + let new_path = new_snapshot + .file() + .map(|f| f.path().as_ref()) + .unwrap_or(RelPath::unix("untitled").unwrap()); + if old_path != new_path { + // TODO confirm how to do this for sweep + // writeln!(f, "User renamed {:?} to {:?}\n", old_path, new_path)?; + } + + let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); + if !diff.is_empty() { + write!( + f, + "File: {}:\n{}\n", + new_path.display(util::paths::PathStyle::Posix), + diff + )? + } + + fmt::Result::Ok(()) + } + } +} + +pub(crate) fn debug_info(cx: &gpui::App) -> Arc { + format!( + "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", + version = release_channel::AppVersion::global(cx), + sha = release_channel::AppCommitSha::try_global(cx) + .map_or("unknown".to_string(), |sha| sha.full()), + os = client::telemetry::os_name(), + ) + .into() +} diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 099cd95134ec3d1fd59bbc33306bc439c0a8ee1a..6eacc5190f403594ad20f7365512b011d2226719 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -22,30 +22,31 @@ use gpui::{ App, Entity, EntityId, Global, SemanticVersion, SharedString, Subscription, Task, WeakEntity, http_client, prelude::*, }; -use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, ToOffset as _, ToPoint}; +use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, Point, ToOffset as _, ToPoint}; use language::{BufferSnapshot, OffsetRangeExt}; use language_model::{LlmApiToken, RefreshLlmTokenListener}; use open_ai::FunctionDefinition; -use project::Project; +use project::{Project, ProjectPath}; use release_channel::AppVersion; use serde::de::DeserializeOwned; use std::collections::{VecDeque, hash_map}; -use std::env; use std::ops::Range; use std::path::Path; use std::str::FromStr as _; use std::sync::{Arc, LazyLock}; use std::time::{Duration, Instant}; +use std::{env, mem}; use thiserror::Error; use util::rel_path::RelPathBuf; -use util::{LogErrorFuture, TryFutureExt}; +use util::{LogErrorFuture, ResultExt as _, TryFutureExt}; use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; pub mod assemble_excerpts; mod prediction; mod provider; pub mod retrieval_search; +mod sweep_ai; pub mod udiff; mod xml_edits; @@ -55,8 +56,15 @@ pub use crate::prediction::EditPredictionId; pub use provider::ZetaEditPredictionProvider; /// Maximum number of events to track. 
-const MAX_EVENT_COUNT: usize = 16; +const EVENT_COUNT_MAX_SWEEP: usize = 6; +const EVENT_COUNT_MAX_ZETA: usize = 16; +const CHANGE_GROUPING_LINE_SPAN: u32 = 8; +pub struct SweepFeatureFlag; + +impl FeatureFlag for SweepFeatureFlag { + const NAME: &str = "sweep-ai"; +} pub const DEFAULT_EXCERPT_OPTIONS: EditPredictionExcerptOptions = EditPredictionExcerptOptions { max_bytes: 512, min_bytes: 128, @@ -143,6 +151,15 @@ pub struct Zeta { debug_tx: Option>, #[cfg(feature = "eval-support")] eval_cache: Option>, + edit_prediction_model: ZetaEditPredictionModel, + sweep_api_token: Option, + sweep_ai_debug_info: Arc, +} + +#[derive(PartialEq, Eq)] +pub enum ZetaEditPredictionModel { + ZedCloud, + Sweep, } #[derive(Debug, Clone, PartialEq)] @@ -219,12 +236,14 @@ pub type RequestDebugInfo = predict_edits_v3::DebugInfo; struct ZetaProject { syntax_index: Option>, events: VecDeque, + recent_paths: VecDeque, registered_buffers: HashMap, current_prediction: Option, context: Option, Vec>>>, refresh_context_task: Option>>>, refresh_context_debounce_task: Option>>, refresh_context_timestamp: Option, + _subscription: gpui::Subscription, } #[derive(Debug, Clone)] @@ -287,6 +306,7 @@ pub enum Event { BufferChange { old_snapshot: BufferSnapshot, new_snapshot: BufferSnapshot, + end_edit_anchor: Option, timestamp: Instant, }, } @@ -381,7 +401,19 @@ impl Zeta { debug_tx: None, #[cfg(feature = "eval-support")] eval_cache: None, + edit_prediction_model: ZetaEditPredictionModel::ZedCloud, + sweep_api_token: None, + sweep_ai_debug_info: sweep_ai::debug_info(cx), + } + } + + pub fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { + if model == ZetaEditPredictionModel::Sweep { + self.sweep_api_token = std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN environment variable set") + .log_err(); } + self.edit_prediction_model = model; } #[cfg(feature = "eval-support")] @@ -443,7 +475,7 @@ impl Zeta { self.user_store.read(cx).edit_prediction_usage() } - pub fn register_project(&mut self, project: &Entity, cx: &mut App) { + pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { self.get_or_init_zeta_project(project, cx); } @@ -460,7 +492,7 @@ impl Zeta { fn get_or_init_zeta_project( &mut self, project: &Entity, - cx: &mut App, + cx: &mut Context, ) -> &mut ZetaProject { self.projects .entry(project.entity_id()) @@ -473,12 +505,31 @@ impl Zeta { None }, events: VecDeque::new(), + recent_paths: VecDeque::new(), registered_buffers: HashMap::default(), current_prediction: None, context: None, refresh_context_task: None, refresh_context_debounce_task: None, refresh_context_timestamp: None, + _subscription: cx.subscribe(&project, |this, project, event, cx| { + // TODO [zeta2] init with recent paths + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + if let project::Event::ActiveEntryChanged(Some(active_entry_id)) = event { + let path = project.read(cx).path_for_entry(*active_entry_id, cx); + if let Some(path) = path { + if let Some(ix) = zeta_project + .recent_paths + .iter() + .position(|probe| probe == &path) + { + zeta_project.recent_paths.remove(ix); + } + zeta_project.recent_paths.push_front(path); + } + } + } + }), }) } @@ -525,66 +576,64 @@ impl Zeta { buffer: &Entity, project: &Entity, cx: &mut Context, - ) -> BufferSnapshot { - let buffer_change_grouping_interval = self.options.buffer_change_grouping_interval; - let zeta_project = self.get_or_init_zeta_project(project, cx); - let registered_buffer = Self::register_buffer_impl(zeta_project, buffer, 
project, cx); + ) { + let event_count_max = match self.edit_prediction_model { + ZetaEditPredictionModel::ZedCloud => EVENT_COUNT_MAX_ZETA, + ZetaEditPredictionModel::Sweep => EVENT_COUNT_MAX_SWEEP, + }; + + let sweep_ai_project = self.get_or_init_zeta_project(project, cx); + let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version != registered_buffer.snapshot.version { - let old_snapshot = - std::mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - Self::push_event( - zeta_project, - buffer_change_grouping_interval, - Event::BufferChange { - old_snapshot, - new_snapshot: new_snapshot.clone(), - timestamp: Instant::now(), - }, - ); + if new_snapshot.version == registered_buffer.snapshot.version { + return; } - new_snapshot - } - - fn push_event( - zeta_project: &mut ZetaProject, - buffer_change_grouping_interval: Duration, - event: Event, - ) { - let events = &mut zeta_project.events; + let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); + let end_edit_anchor = new_snapshot + .anchored_edits_since::(&old_snapshot.version) + .last() + .map(|(_, range)| range.end); + let events = &mut sweep_ai_project.events; - if buffer_change_grouping_interval > Duration::ZERO - && let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - timestamp: last_timestamp, - .. - }) = events.back_mut() + if let Some(Event::BufferChange { + new_snapshot: last_new_snapshot, + end_edit_anchor: last_end_edit_anchor, + .. + }) = events.back_mut() { - // Coalesce edits for the same buffer when they happen one after the other. - let Event::BufferChange { - old_snapshot, - new_snapshot, - timestamp, - } = &event; - - if timestamp.duration_since(*last_timestamp) <= buffer_change_grouping_interval - && old_snapshot.remote_id() == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version - { - *last_new_snapshot = new_snapshot.clone(); - *last_timestamp = *timestamp; + let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() + == last_new_snapshot.remote_id() + && old_snapshot.version == last_new_snapshot.version; + + let should_coalesce = is_next_snapshot_of_same_buffer + && end_edit_anchor + .as_ref() + .zip(last_end_edit_anchor.as_ref()) + .is_some_and(|(a, b)| { + let a = a.to_point(&new_snapshot); + let b = b.to_point(&new_snapshot); + a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN + }); + + if should_coalesce { + *last_end_edit_anchor = end_edit_anchor; + *last_new_snapshot = new_snapshot; return; } } - if events.len() >= MAX_EVENT_COUNT { - // These are halved instead of popping to improve prompt caching. 
- events.drain(..MAX_EVENT_COUNT / 2); + if events.len() >= event_count_max { + events.pop_front(); } - events.push_back(event); + events.push_back(Event::BufferChange { + old_snapshot, + new_snapshot, + end_edit_anchor, + timestamp: Instant::now(), + }); } fn current_prediction_for_buffer( @@ -706,6 +755,203 @@ impl Zeta { active_buffer: &Entity, position: language::Anchor, cx: &mut Context, + ) -> Task>> { + match self.edit_prediction_model { + ZetaEditPredictionModel::ZedCloud => { + self.request_prediction_with_zed_cloud(project, active_buffer, position, cx) + } + ZetaEditPredictionModel::Sweep => { + self.request_prediction_with_sweep(project, active_buffer, position, cx) + } + } + } + + fn request_prediction_with_sweep( + &mut self, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, + cx: &mut Context, + ) -> Task>> { + let snapshot = active_buffer.read(cx).snapshot(); + let debug_info = self.sweep_ai_debug_info.clone(); + let Some(api_token) = self.sweep_api_token.clone() else { + return Task::ready(Ok(None)); + }; + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let project_state = self.get_or_init_zeta_project(project, cx); + let events = project_state.events.clone(); + let recent_buffers = project_state.recent_paths.iter().cloned(); + let http_client = cx.http_client(); + + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>(); + + let result = cx.background_spawn(async move { + let text = snapshot.text(); + + let mut recent_changes = String::new(); + for event in events { + sweep_ai::write_event(event, &mut recent_changes).unwrap(); + } + + let file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = language::Point::new(30, 0).min(snapshot.max_point()); + sweep_ai::FileChunk { + content: snapshot + .text_for_range(language::Point::zero()..end_point) + .collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? 
+ .0, + ) + }), + } + }) + .collect(); + + let request_body = sweep_ai::AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + const SWEEP_API_URL: &str = + "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok((response.autocomplete_id, edits, snapshot)) + }); + + let buffer = active_buffer.clone(); + + cx.spawn(async move |_, cx| { + let (id, edits, old_snapshot) = result.await?; + + if edits.is_empty() { + return anyhow::Ok(None); + } + + let Some((edits, new_snapshot, preview_task)) = + buffer.read_with(cx, |buffer, cx| { + let new_snapshot = buffer.snapshot(); + + let edits: Arc<[(Range, Arc)]> = + edit_prediction::interpolate_edits(&old_snapshot, &new_snapshot, &edits)? + .into(); + let preview_task = buffer.preview_edits(edits.clone(), cx); + + Some((edits, new_snapshot, preview_task)) + })? 
+ else { + return anyhow::Ok(None); + }; + + let prediction = EditPrediction { + id: EditPredictionId(id.into()), + edits, + snapshot: new_snapshot, + edit_preview: preview_task.await, + buffer, + }; + + anyhow::Ok(Some(prediction)) + }) + } + + fn request_prediction_with_zed_cloud( + &mut self, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, + cx: &mut Context, ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); @@ -1653,7 +1899,7 @@ impl Zeta { pub fn wait_for_initial_indexing( &mut self, project: &Entity, - cx: &mut App, + cx: &mut Context, ) -> Task> { let zeta_project = self.get_or_init_zeta_project(project, cx); if let Some(syntax_index) = &zeta_project.syntax_index { diff --git a/crates/zeta_cli/Cargo.toml b/crates/zeta_cli/Cargo.toml index 35fbcb1c61097156d2f0e172d700ed12d3d3894e..e18cf54787ca98e2be60db4977dd2de18e9c09e2 100644 --- a/crates/zeta_cli/Cargo.toml +++ b/crates/zeta_cli/Cargo.toml @@ -49,7 +49,6 @@ settings.workspace = true shellexpand.workspace = true smol.workspace = true soa-rs = "0.8.1" -sweep_ai.workspace = true terminal_view.workspace = true toml.workspace = true util.workspace = true diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 09fbbb29dd6cf58910a2b6e6ff7fb4a31fc4a10a..a9d7acaee2287450eac828bd2d770b88a8150940 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -8,16 +8,15 @@ use anyhow::Result; use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; -use sweep_ai::SweepAi; use util::ResultExt as _; use zeta2::{Zeta, udiff::DiffLine}; use crate::{ - EvaluateArguments, PredictionOptions, PredictionProvider, + EvaluateArguments, PredictionOptions, example::{Example, NamedExample}, headless::ZetaCliAppState, paths::print_run_data_dir, - predict::{PredictionDetails, perform_predict, setup_sweep, setup_zeta}, + predict::{PredictionDetails, perform_predict, setup_zeta}, }; #[derive(Debug)] @@ -46,46 +45,35 @@ pub async fn run_evaluate( let project = example.setup_project(&app_state, cx).await.unwrap(); let providers = (0..args.repetitions) - .map(|_| { - ( - setup_zeta(&project, &app_state, cx).unwrap(), - if matches!(args.options.provider, PredictionProvider::Sweep) { - Some(setup_sweep(&project, cx).unwrap()) - } else { - None - }, - ) - }) + .map(|_| setup_zeta(args.options.provider, &project, &app_state, cx).unwrap()) .collect::>(); let _edited_buffers = example.apply_edit_history(&project, cx).await.unwrap(); - let tasks = - providers - .into_iter() - .enumerate() - .map(move |(repetition_ix, (zeta, sweep))| { - let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); - let example = example.clone(); - let project = project.clone(); - let options = options.clone(); - - cx.spawn(async move |cx| { - let name = example.name.clone(); - run_evaluate_one( - example, - repetition_ix, - project, - zeta, - sweep, - options, - !args.skip_prediction, - cx, - ) - .await - .map_err(|err| (err, name, repetition_ix)) - }) - }); + let tasks = providers + .into_iter() + .enumerate() + .map(move |(repetition_ix, zeta)| { + let repetition_ix = (args.repetitions > 1).then(|| repetition_ix as u16); + let example = example.clone(); + let project = project.clone(); + let options = options.clone(); + + cx.spawn(async move |cx| { + let name = example.name.clone(); + run_evaluate_one( + example, + repetition_ix, + project, + zeta, + options, + !args.skip_prediction, + cx, + ) + .await + .map_err(|err| (err, name, repetition_ix)) 
+ }) + }); futures::future::join_all(tasks).await }) }); @@ -177,7 +165,6 @@ pub async fn run_evaluate_one( repetition_ix: Option, project: Entity, zeta: Entity, - sweep: Option>, prediction_options: PredictionOptions, predict: bool, cx: &mut AsyncApp, @@ -186,7 +173,6 @@ pub async fn run_evaluate_one( example.clone(), project, zeta, - sweep, repetition_ix, prediction_options, cx, diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 803e02b10cfb7533341a3009e0325a7bcf13df1e..53f231599b7d0449b1f2a9cdef8227a7c3e6bbd5 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -191,7 +191,7 @@ pub struct EvaluateArguments { skip_prediction: bool, } -#[derive(clap::ValueEnum, Default, Debug, Clone, Copy)] +#[derive(clap::ValueEnum, Default, Debug, Clone, Copy, PartialEq)] enum PredictionProvider { #[default] Zeta2, diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 4505035eaf992751e85216a314b731a12ffbd342..c792b318cec6de42e518793ed5400df0010ae5ea 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -21,7 +21,6 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; -use sweep_ai::SweepAi; use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; pub async fn run_predict( @@ -31,14 +30,9 @@ pub async fn run_predict( ) { let example = NamedExample::load(args.example_path).unwrap(); let project = example.setup_project(app_state, cx).await.unwrap(); - let zeta = setup_zeta(&project, app_state, cx).unwrap(); - let sweep = if matches!(args.options.provider, PredictionProvider::Sweep) { - Some(setup_sweep(&project, cx).unwrap()) - } else { - None - }; + let zeta = setup_zeta(args.options.provider, &project, app_state, cx).unwrap(); let _edited_buffers = example.apply_edit_history(&project, cx).await.unwrap(); - let result = perform_predict(example, project, zeta, sweep, None, args.options, cx) + let result = perform_predict(example, project, zeta, None, args.options, cx) .await .unwrap(); result.write(args.format, std::io::stdout()).unwrap(); @@ -47,6 +41,7 @@ pub async fn run_predict( } pub fn setup_zeta( + provider: PredictionProvider, project: &Entity, app_state: &Arc, cx: &mut AsyncApp, @@ -54,6 +49,14 @@ pub fn setup_zeta( let zeta = cx.new(|cx| zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; + zeta.update(cx, |zeta, _cx| { + let model = match provider { + PredictionProvider::Zeta2 => zeta2::ZetaEditPredictionModel::ZedCloud, + PredictionProvider::Sweep => zeta2::ZetaEditPredictionModel::Sweep, + }; + zeta.set_edit_prediction_model(model); + })?; + let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; cx.subscribe(&buffer_store, { @@ -71,31 +74,10 @@ pub fn setup_zeta( anyhow::Ok(zeta) } -pub fn setup_sweep(project: &Entity, cx: &mut AsyncApp) -> Result> { - let sweep = cx.new(|cx| SweepAi::new(cx))?; - - let buffer_store = project.read_with(cx, |project, _| project.buffer_store().clone())?; - - cx.subscribe(&buffer_store, { - let project = project.clone(); - let sweep = sweep.clone(); - move |_, event, cx| match event { - BufferStoreEvent::BufferAdded(buffer) => { - sweep.update(cx, |sweep, cx| sweep.register_buffer(&buffer, &project, cx)); - } - _ => {} - } - })? 
- .detach(); - - anyhow::Ok(sweep) -} - pub async fn perform_predict( example: NamedExample, project: Entity, zeta: Entity, - sweep: Option>, repetition_ix: Option, options: PredictionOptions, cx: &mut AsyncApp, @@ -147,194 +129,152 @@ pub async fn perform_predict( zeta.set_options(options); })?; - let prediction = match options.provider { - crate::PredictionProvider::Zeta2 => { - let mut debug_rx = zeta.update(cx, |zeta, _| zeta.debug_info())?; - - let debug_task = cx.background_spawn({ - let result = result.clone(); - async move { - let mut start_time = None; - let mut search_queries_generated_at = None; - let mut search_queries_executed_at = None; - while let Some(event) = debug_rx.next().await { - match event { - zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { - start_time = Some(info.timestamp); - fs::write( - example_run_dir.join("search_prompt.md"), - &info.search_prompt, - )?; - } - zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { - search_queries_generated_at = Some(info.timestamp); - fs::write( - example_run_dir.join("search_queries.json"), - serde_json::to_string_pretty(&info.search_queries).unwrap(), - )?; - } - zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { - search_queries_executed_at = Some(info.timestamp); - } - zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} - zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { - let prediction_started_at = Instant::now(); - start_time.get_or_insert(prediction_started_at); - let prompt = request.local_prompt.unwrap_or_default(); - fs::write(example_run_dir.join("prediction_prompt.md"), &prompt)?; - - { - let mut result = result.lock().unwrap(); - result.prompt_len = prompt.chars().count(); - - for included_file in request.request.included_files { - let insertions = - vec![(request.request.cursor_point, CURSOR_MARKER)]; - result.excerpts.extend(included_file.excerpts.iter().map( - |excerpt| { - ActualExcerpt { - path: included_file - .path - .components() - .skip(1) - .collect(), - text: String::from(excerpt.text.as_ref()), - } - }, - )); - write_codeblock( - &included_file.path, - included_file.excerpts.iter(), - if included_file.path == request.request.excerpt_path { - &insertions - } else { - &[] - }, - included_file.max_row, - false, - &mut result.excerpts_text, - ); - } - } - - let response = - request.response_rx.await?.0.map_err(|err| anyhow!(err))?; - let response = - zeta2::text_from_response(response).unwrap_or_default(); - let prediction_finished_at = Instant::now(); - fs::write( - example_run_dir.join("prediction_response.md"), - &response, - )?; - + let mut debug_task = gpui::Task::ready(Ok(())); + + if options.provider == crate::PredictionProvider::Zeta2 { + let mut debug_rx = zeta.update(cx, |zeta, _| zeta.debug_info())?; + + debug_task = cx.background_spawn({ + let result = result.clone(); + async move { + let mut start_time = None; + let mut search_queries_generated_at = None; + let mut search_queries_executed_at = None; + while let Some(event) = debug_rx.next().await { + match event { + zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { + start_time = Some(info.timestamp); + fs::write( + example_run_dir.join("search_prompt.md"), + &info.search_prompt, + )?; + } + zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { + search_queries_generated_at = Some(info.timestamp); + fs::write( + example_run_dir.join("search_queries.json"), + serde_json::to_string_pretty(&info.search_queries).unwrap(), + )?; + } + zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { + 
search_queries_executed_at = Some(info.timestamp); + } + zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} + zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { + let prediction_started_at = Instant::now(); + start_time.get_or_insert(prediction_started_at); + let prompt = request.local_prompt.unwrap_or_default(); + fs::write(example_run_dir.join("prediction_prompt.md"), &prompt)?; + + { let mut result = result.lock().unwrap(); - result.generated_len = response.chars().count(); - - if !options.use_expected_context { - result.planning_search_time = Some( - search_queries_generated_at.unwrap() - start_time.unwrap(), - ); - result.running_search_time = Some( - search_queries_executed_at.unwrap() - - search_queries_generated_at.unwrap(), + result.prompt_len = prompt.chars().count(); + + for included_file in request.request.included_files { + let insertions = + vec![(request.request.cursor_point, CURSOR_MARKER)]; + result.excerpts.extend(included_file.excerpts.iter().map( + |excerpt| ActualExcerpt { + path: included_file.path.components().skip(1).collect(), + text: String::from(excerpt.text.as_ref()), + }, + )); + write_codeblock( + &included_file.path, + included_file.excerpts.iter(), + if included_file.path == request.request.excerpt_path { + &insertions + } else { + &[] + }, + included_file.max_row, + false, + &mut result.excerpts_text, ); } - result.prediction_time = - prediction_finished_at - prediction_started_at; - result.total_time = prediction_finished_at - start_time.unwrap(); + } - break; + let response = + request.response_rx.await?.0.map_err(|err| anyhow!(err))?; + let response = zeta2::text_from_response(response).unwrap_or_default(); + let prediction_finished_at = Instant::now(); + fs::write(example_run_dir.join("prediction_response.md"), &response)?; + + let mut result = result.lock().unwrap(); + result.generated_len = response.chars().count(); + + if !options.use_expected_context { + result.planning_search_time = Some( + search_queries_generated_at.unwrap() - start_time.unwrap(), + ); + result.running_search_time = Some( + search_queries_executed_at.unwrap() + - search_queries_generated_at.unwrap(), + ); } + result.prediction_time = prediction_finished_at - prediction_started_at; + result.total_time = prediction_finished_at - start_time.unwrap(); + + break; } } - anyhow::Ok(()) } - }); - - if options.use_expected_context { - let context_excerpts_tasks = example - .example - .expected_context - .iter() - .flat_map(|section| { - section.alternatives[0].excerpts.iter().map(|excerpt| { - resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) - }) + anyhow::Ok(()) + } + }); + + if options.use_expected_context { + let context_excerpts_tasks = example + .example + .expected_context + .iter() + .flat_map(|section| { + section.alternatives[0].excerpts.iter().map(|excerpt| { + resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) }) - .collect::>(); - let context_excerpts_vec = - futures::future::try_join_all(context_excerpts_tasks).await?; - - let mut context_excerpts = HashMap::default(); - for (buffer, mut excerpts) in context_excerpts_vec { - context_excerpts - .entry(buffer) - .or_insert(Vec::new()) - .append(&mut excerpts); - } - - zeta.update(cx, |zeta, _cx| { - zeta.set_context(project.clone(), context_excerpts) - })?; - } else { - zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) - })? 
- .await?; + }) + .collect::>(); + let context_excerpts_vec = + futures::future::try_join_all(context_excerpts_tasks).await?; + + let mut context_excerpts = HashMap::default(); + for (buffer, mut excerpts) in context_excerpts_vec { + context_excerpts + .entry(buffer) + .or_insert(Vec::new()) + .append(&mut excerpts); } - let prediction = zeta - .update(cx, |zeta, cx| { - zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) - })? - .await? - .map(|prediction| (prediction.buffer, prediction.snapshot, prediction.edits)); - - debug_task.await?; - - prediction + zeta.update(cx, |zeta, _cx| { + zeta.set_context(project.clone(), context_excerpts) + })?; + } else { + zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) + })? + .await?; } - crate::PredictionProvider::Sweep => sweep - .unwrap() - .update(cx, |sweep, cx| { - let mut recent_paths = Vec::new(); - for path in zeta - .read(cx) - .history_for_project(&project) - .rev() - .filter_map(|event| event.project_path(cx)) - { - if !recent_paths.contains(&path) { - recent_paths.push(path); - } - } + } - sweep.request_completion( - &project, - recent_paths.into_iter(), - &cursor_buffer, - cursor_anchor, - cx, - ) - })? - .await? - .map( - |sweep_ai::EditPrediction { - edits, snapshot, .. - }| { (cursor_buffer.clone(), snapshot, edits) }, - ), - }; + let prediction = zeta + .update(cx, |zeta, cx| { + zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) + })? + .await?; + + debug_task.await?; let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); result.diff = prediction - .map(|(buffer, snapshot, edits)| { - let old_text = snapshot.text(); - let new_text = buffer + .map(|prediction| { + let old_text = prediction.snapshot.text(); + let new_text = prediction + .buffer .update(cx, |buffer, cx| { let branch = buffer.branch(cx); branch.update(cx, |branch, cx| { - branch.edit(edits.iter().cloned(), None, cx); + branch.edit(prediction.edits.iter().cloned(), None, cx); branch.text() }) }) From 1fab43d467c3e610bd72cfb94ac12d09d791f397 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Thu, 20 Nov 2025 00:40:54 +0100 Subject: [PATCH 0237/1030] Allow styling the container of markdown elements (#43107) Closes #43033 Release Notes: - FIxed an issue where the padding on info popovers would overlay text when the content was scrollable. 
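For illustration, a minimal sketch of what this enables at a call site like the hover popover below: `MarkdownElement` now implements `Styled`, so container styles are collected into `MarkdownStyle::container_style` and applied to the element's own root div rather than to a wrapping container. The constructor shape is assumed for this sketch; only `.on_url_click` and `.p_2` come verbatim from the diff.

```
// Padding applied to the markdown element refines its container style, so it
// scrolls with the rendered content instead of overlaying it when scrollable.
let rendered = MarkdownElement::new(markdown, markdown_style) // constructor assumed for this sketch
    .on_url_click(open_markdown_url)
    .p_2(); // previously this padding sat on the popover div, outside the scroll area
```
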
--- crates/editor/src/hover_popover.rs | 4 ++-- crates/markdown/src/markdown.rs | 21 +++++++++++++++++++-- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/crates/editor/src/hover_popover.rs b/crates/editor/src/hover_popover.rs index ef16fc92d847763ecbc764c3913266fd84a26006..5f831341bab2a4e37410a1e3e168bcf72bba93a8 100644 --- a/crates/editor/src/hover_popover.rs +++ b/crates/editor/src/hover_popover.rs @@ -893,7 +893,6 @@ impl InfoPopover { *keyboard_grace = false; cx.stop_propagation(); }) - .p_2() .when_some(self.parsed_content.clone(), |this, markdown| { this.child( div() @@ -909,7 +908,8 @@ impl InfoPopover { copy_button_on_hover: false, border: false, }) - .on_url_click(open_markdown_url), + .on_url_click(open_markdown_url) + .p_2(), ), ) .custom_scrollbars( diff --git a/crates/markdown/src/markdown.rs b/crates/markdown/src/markdown.rs index 9a1596092ae0497fe2d45a1d756a34e81d601b7c..1de6d494ffbf445ca8ee3df9d1e83b5575f8224e 100644 --- a/crates/markdown/src/markdown.rs +++ b/crates/markdown/src/markdown.rs @@ -54,6 +54,7 @@ pub struct HeadingLevelStyles { #[derive(Clone)] pub struct MarkdownStyle { pub base_text_style: TextStyle, + pub container_style: StyleRefinement, pub code_block: StyleRefinement, pub code_block_overflow_x_scroll: bool, pub inline_code: TextStyleRefinement, @@ -74,6 +75,7 @@ impl Default for MarkdownStyle { fn default() -> Self { Self { base_text_style: Default::default(), + container_style: Default::default(), code_block: Default::default(), code_block_overflow_x_scroll: false, inline_code: Default::default(), @@ -748,6 +750,12 @@ impl MarkdownElement { } } +impl Styled for MarkdownElement { + fn style(&mut self) -> &mut StyleRefinement { + &mut self.style.container_style + } +} + impl Element for MarkdownElement { type RequestLayoutState = RenderedMarkdown; type PrepaintState = Hitbox; @@ -768,6 +776,7 @@ impl Element for MarkdownElement { cx: &mut App, ) -> (gpui::LayoutId, Self::RequestLayoutState) { let mut builder = MarkdownElementBuilder::new( + &self.style.container_style, self.style.base_text_style.clone(), self.style.syntax.clone(), ); @@ -1441,9 +1450,17 @@ struct ListStackEntry { } impl MarkdownElementBuilder { - fn new(base_text_style: TextStyle, syntax_theme: Arc) -> Self { + fn new( + container_style: &StyleRefinement, + base_text_style: TextStyle, + syntax_theme: Arc, + ) -> Self { Self { - div_stack: vec![div().debug_selector(|| "inner".into()).into()], + div_stack: vec![{ + let mut base_div = div(); + base_div.style().refine(container_style); + base_div.debug_selector(|| "inner".into()).into() + }], rendered_lines: Vec::new(), pending_line: PendingLine::default(), rendered_links: Vec::new(), From ba596267d8a234c42d73cef52b748ed4cd3720d7 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 01:28:25 -0300 Subject: [PATCH 0238/1030] ui: Remove the `ToggleButton` component (#43115) This PR removes the old `ToggleButton` component, replacing it with the newer `ToggleButtonGroup` component in the couple of places that used to use it. Ended up also adding a few more methods to the newer toggle button group so the UI for the extensions page and the debugger main picker didn't get visually impacted much. Then, as I was already in the extensions page, decided to bake in some reasonably small UI improvements to it as well. 
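For reference, the replacement pattern looks roughly like this (condensed from the extensions page diff further down; the closure bodies are trimmed and `selected_index` stands in for the `match` on the current filter):

```
// One ToggleButtonGroup replaces a hand-rolled row of ToggleButton first()/middle()/last() calls.
ToggleButtonGroup::single_row(
    "filter-buttons",
    [
        ToggleButtonSimple::new("All", cx.listener(|this, _, _, cx| {
            this.filter = ExtensionFilter::All;
            this.filter_extension_entries(cx);
        })),
        ToggleButtonSimple::new("Installed", cx.listener(|this, _, _, cx| {
            this.filter = ExtensionFilter::Installed;
            this.filter_extension_entries(cx);
        })),
        ToggleButtonSimple::new("Not Installed", cx.listener(|this, _, _, cx| {
            this.filter = ExtensionFilter::NotInstalled;
            this.filter_extension_entries(cx);
        })),
    ],
)
.style(ToggleButtonGroupStyle::Outlined)
.label_size(LabelSize::Default) // added in this PR
.auto_width()                   // added in this PR
.selected_index(selected_index)
```
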
Release Notes: - N/A --- crates/debugger_ui/src/new_process_modal.rs | 131 ++++--- crates/extensions_ui/src/extensions_ui.rs | 151 ++++---- crates/storybook/src/story_selector.rs | 2 - .../ui/src/components/button/toggle_button.rs | 327 ++---------------- crates/ui/src/components/stories.rs | 2 - .../src/components/stories/toggle_button.rs | 93 ----- 6 files changed, 174 insertions(+), 532 deletions(-) delete mode 100644 crates/ui/src/components/stories/toggle_button.rs diff --git a/crates/debugger_ui/src/new_process_modal.rs b/crates/debugger_ui/src/new_process_modal.rs index 9960ae8a3642f727069661871e70b7f02fcb3f95..174b8759b418946e1426d6194351a8212888c1d6 100644 --- a/crates/debugger_ui/src/new_process_modal.rs +++ b/crates/debugger_ui/src/new_process_modal.rs @@ -25,12 +25,9 @@ use settings::Settings; use task::{DebugScenario, RevealTarget, VariableName, ZedDebugConfig}; use theme::ThemeSettings; use ui::{ - ActiveTheme, Button, ButtonCommon, ButtonSize, CheckboxWithLabel, Clickable, Color, Context, - ContextMenu, Disableable, DropdownMenu, FluentBuilder, Icon, IconName, IconSize, - IconWithIndicator, Indicator, InteractiveElement, IntoElement, KeyBinding, Label, - LabelCommon as _, LabelSize, ListItem, ListItemSpacing, ParentElement, RenderOnce, - SharedString, Styled, StyledExt, ToggleButton, ToggleState, Toggleable, Tooltip, Window, div, - h_flex, relative, rems, v_flex, + CheckboxWithLabel, ContextMenu, DropdownMenu, FluentBuilder, IconWithIndicator, Indicator, + KeyBinding, ListItem, ListItemSpacing, ToggleButtonGroup, ToggleButtonSimple, ToggleState, + Tooltip, prelude::*, }; use util::{ResultExt, rel_path::RelPath, shell::ShellKind}; use workspace::{ModalView, Workspace, notifications::DetachAndPromptErr, pane}; @@ -620,72 +617,64 @@ impl Render for NewProcessModal { .border_b_1() .border_color(cx.theme().colors().border_variant) .child( - ToggleButton::new( - "debugger-session-ui-tasks-button", - NewProcessMode::Task.to_string(), - ) - .size(ButtonSize::Default) - .toggle_state(matches!(self.mode, NewProcessMode::Task)) - .style(ui::ButtonStyle::Subtle) - .on_click(cx.listener(|this, _, window, cx| { - this.mode = NewProcessMode::Task; - this.mode_focus_handle(cx).focus(window); - cx.notify(); - })) - .tooltip(Tooltip::text("Run predefined task")) - .first(), - ) - .child( - ToggleButton::new( - "debugger-session-ui-launch-button", - NewProcessMode::Debug.to_string(), - ) - .size(ButtonSize::Default) - .style(ui::ButtonStyle::Subtle) - .toggle_state(matches!(self.mode, NewProcessMode::Debug)) - .on_click(cx.listener(|this, _, window, cx| { - this.mode = NewProcessMode::Debug; - this.mode_focus_handle(cx).focus(window); - cx.notify(); - })) - .tooltip(Tooltip::text("Start a predefined debug scenario")) - .middle(), - ) - .child( - ToggleButton::new( - "debugger-session-ui-attach-button", - NewProcessMode::Attach.to_string(), - ) - .size(ButtonSize::Default) - .toggle_state(matches!(self.mode, NewProcessMode::Attach)) - .style(ui::ButtonStyle::Subtle) - .on_click(cx.listener(|this, _, window, cx| { - this.mode = NewProcessMode::Attach; - - if let Some(debugger) = this.debugger.as_ref() { - Self::update_attach_picker(&this.attach_mode, debugger, window, cx); - } - this.mode_focus_handle(cx).focus(window); - cx.notify(); - })) - .tooltip(Tooltip::text("Attach the debugger to a running process")) - .middle(), - ) - .child( - ToggleButton::new( - "debugger-session-ui-custom-button", - NewProcessMode::Launch.to_string(), + ToggleButtonGroup::single_row( + "debugger-mode-buttons", + [ 
+ ToggleButtonSimple::new( + NewProcessMode::Task.to_string(), + cx.listener(|this, _, window, cx| { + this.mode = NewProcessMode::Task; + this.mode_focus_handle(cx).focus(window); + cx.notify(); + }), + ) + .tooltip(Tooltip::text("Run predefined task")), + ToggleButtonSimple::new( + NewProcessMode::Debug.to_string(), + cx.listener(|this, _, window, cx| { + this.mode = NewProcessMode::Debug; + this.mode_focus_handle(cx).focus(window); + cx.notify(); + }), + ) + .tooltip(Tooltip::text("Start a predefined debug scenario")), + ToggleButtonSimple::new( + NewProcessMode::Attach.to_string(), + cx.listener(|this, _, window, cx| { + this.mode = NewProcessMode::Attach; + + if let Some(debugger) = this.debugger.as_ref() { + Self::update_attach_picker( + &this.attach_mode, + debugger, + window, + cx, + ); + } + this.mode_focus_handle(cx).focus(window); + cx.notify(); + }), + ) + .tooltip(Tooltip::text("Attach the debugger to a running process")), + ToggleButtonSimple::new( + NewProcessMode::Launch.to_string(), + cx.listener(|this, _, window, cx| { + this.mode = NewProcessMode::Launch; + this.mode_focus_handle(cx).focus(window); + cx.notify(); + }), + ) + .tooltip(Tooltip::text("Launch a new process with a debugger")), + ], ) - .size(ButtonSize::Default) - .toggle_state(matches!(self.mode, NewProcessMode::Launch)) - .style(ui::ButtonStyle::Subtle) - .on_click(cx.listener(|this, _, window, cx| { - this.mode = NewProcessMode::Launch; - this.mode_focus_handle(cx).focus(window); - cx.notify(); - })) - .tooltip(Tooltip::text("Launch a new process with a debugger")) - .last(), + .label_size(LabelSize::Default) + .auto_width() + .selected_index(match self.mode { + NewProcessMode::Task => 0, + NewProcessMode::Debug => 1, + NewProcessMode::Attach => 2, + NewProcessMode::Launch => 3, + }), ), ) .child(v_flex().child(self.render_mode(window, cx))) diff --git a/crates/extensions_ui/src/extensions_ui.rs b/crates/extensions_ui/src/extensions_ui.rs index 3a7e1a80dd348d97a54f1dce21794760a2399740..657a39f09e6465042f5f1a5d113bdfa6e61c43ce 100644 --- a/crates/extensions_ui/src/extensions_ui.rs +++ b/crates/extensions_ui/src/extensions_ui.rs @@ -24,8 +24,9 @@ use settings::{Settings, SettingsContent}; use strum::IntoEnumIterator as _; use theme::ThemeSettings; use ui::{ - Banner, Chip, ContextMenu, Divider, PopoverMenu, ScrollableHandle, Switch, ToggleButton, - Tooltip, WithScrollbar, prelude::*, + Banner, Chip, ContextMenu, Divider, PopoverMenu, ScrollableHandle, Switch, ToggleButtonGroup, + ToggleButtonGroupSize, ToggleButtonGroupStyle, ToggleButtonSimple, Tooltip, WithScrollbar, + prelude::*, }; use vim_mode_setting::VimModeSetting; use workspace::{ @@ -805,37 +806,47 @@ impl ExtensionsPage { ) .child( h_flex() - .gap_1() .justify_between() .child( - Icon::new(IconName::Person) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child( - Label::new(extension.manifest.authors.join(", ")) - .size(LabelSize::Small) - .color(Color::Muted) - .truncate(), + h_flex() + .gap_1() + .child( + Icon::new(IconName::Person) + .size(IconSize::XSmall) + .color(Color::Muted), + ) + .child( + Label::new(extension.manifest.authors.join(", ")) + .size(LabelSize::Small) + .color(Color::Muted) + .truncate(), + ), ) .child( h_flex() - .ml_auto() .gap_1() - .child( + .child({ + let repo_url_for_tooltip = repository_url.clone(); + IconButton::new( SharedString::from(format!("repository-{}", extension.id)), IconName::Github, ) .icon_size(IconSize::Small) - .on_click(cx.listener({ - let repository_url = repository_url.clone(); + 
.tooltip(move |_, cx| { + Tooltip::with_meta( + "Visit Extension Repository", + None, + repo_url_for_tooltip.clone(), + cx, + ) + }) + .on_click(cx.listener( move |_, _, _, cx| { cx.open_url(&repository_url); - } - })) - .tooltip(Tooltip::text(repository_url)), - ) + }, + )) + }) .child( PopoverMenu::new(SharedString::from(format!( "more-{}", @@ -1136,15 +1147,14 @@ impl ExtensionsPage { h_flex() .key_context(key_context) .h_8() - .flex_1() .min_w(rems_from_px(384.)) + .flex_1() .pl_1p5() .pr_2() - .py_1() .gap_2() .border_1() .border_color(editor_border) - .rounded_lg() + .rounded_md() .child(Icon::new(IconName::MagnifyingGlass).color(Color::Muted)) .child(self.render_text_input(&self.query_editor, cx)) } @@ -1544,13 +1554,13 @@ impl Render for ExtensionsPage { .child( h_flex() .w_full() - .gap_2() + .gap_1p5() .justify_between() .child(Headline::new("Extensions").size(HeadlineSize::XLarge)) .child( Button::new("install-dev-extension", "Install Dev Extension") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) + .style(ButtonStyle::Outlined) + .size(ButtonSize::Medium) .on_click(|_event, window, cx| { window.dispatch_action(Box::new(InstallDevExtension), cx) }), @@ -1559,58 +1569,51 @@ impl Render for ExtensionsPage { .child( h_flex() .w_full() - .gap_4() .flex_wrap() + .gap_2() .child(self.render_search(cx)) .child( - h_flex() - .child( - ToggleButton::new("filter-all", "All") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .toggle_state(self.filter == ExtensionFilter::All) - .on_click(cx.listener(|this, _event, _, cx| { - this.filter = ExtensionFilter::All; - this.filter_extension_entries(cx); - this.scroll_to_top(cx); - })) - .tooltip(move |_, cx| { - Tooltip::simple("Show all extensions", cx) - }) - .first(), - ) - .child( - ToggleButton::new("filter-installed", "Installed") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .toggle_state(self.filter == ExtensionFilter::Installed) - .on_click(cx.listener(|this, _event, _, cx| { - this.filter = ExtensionFilter::Installed; - this.filter_extension_entries(cx); - this.scroll_to_top(cx); - })) - .tooltip(move |_, cx| { - Tooltip::simple("Show installed extensions", cx) - }) - .middle(), + div().child( + ToggleButtonGroup::single_row( + "filter-buttons", + [ + ToggleButtonSimple::new( + "All", + cx.listener(|this, _event, _, cx| { + this.filter = ExtensionFilter::All; + this.filter_extension_entries(cx); + this.scroll_to_top(cx); + }), + ), + ToggleButtonSimple::new( + "Installed", + cx.listener(|this, _event, _, cx| { + this.filter = ExtensionFilter::Installed; + this.filter_extension_entries(cx); + this.scroll_to_top(cx); + }), + ), + ToggleButtonSimple::new( + "Not Installed", + cx.listener(|this, _event, _, cx| { + this.filter = ExtensionFilter::NotInstalled; + this.filter_extension_entries(cx); + this.scroll_to_top(cx); + }), + ), + ], ) - .child( - ToggleButton::new("filter-not-installed", "Not Installed") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .toggle_state( - self.filter == ExtensionFilter::NotInstalled, - ) - .on_click(cx.listener(|this, _event, _, cx| { - this.filter = ExtensionFilter::NotInstalled; - this.filter_extension_entries(cx); - this.scroll_to_top(cx); - })) - .tooltip(move |_, cx| { - Tooltip::simple("Show not installed extensions", cx) - }) - .last(), - ), + .style(ToggleButtonGroupStyle::Outlined) + .size(ToggleButtonGroupSize::Custom(rems_from_px(30.))) // Perfectly matches the input + .label_size(LabelSize::Default) + .auto_width() + .selected_index(match self.filter { + 
ExtensionFilter::All => 0, + ExtensionFilter::Installed => 1, + ExtensionFilter::NotInstalled => 2, + }) + .into_any_element(), + ), ), ), ) diff --git a/crates/storybook/src/story_selector.rs b/crates/storybook/src/story_selector.rs index aad3875410e9ac303c4c03469be44c1d11bfb56f..2cf6c0beac90ad5be56af61028c8f8329fb95b7c 100644 --- a/crates/storybook/src/story_selector.rs +++ b/crates/storybook/src/story_selector.rs @@ -28,7 +28,6 @@ pub enum ComponentStory { Tab, TabBar, Text, - ToggleButton, ViewportUnits, WithRemSize, IndentGuides, @@ -58,7 +57,6 @@ impl ComponentStory { Self::Tab => cx.new(|_| ui::TabStory).into(), Self::TabBar => cx.new(|_| ui::TabBarStory).into(), Self::Text => TextStory::model(cx).into(), - Self::ToggleButton => cx.new(|_| ui::ToggleButtonStory).into(), Self::ViewportUnits => cx.new(|_| crate::stories::ViewportUnitsStory).into(), Self::WithRemSize => cx.new(|_| crate::stories::WithRemSizeStory).into(), Self::IndentGuides => crate::stories::IndentGuidesStory::model(window, cx).into(), diff --git a/crates/ui/src/components/button/toggle_button.rs b/crates/ui/src/components/button/toggle_button.rs index 2a3db701d15d12361ebe623d8d56fa35ae0016a7..5cecfef0625648d9fc5cc3a2b143a0ea114b3def 100644 --- a/crates/ui/src/components/button/toggle_button.rs +++ b/crates/ui/src/components/button/toggle_button.rs @@ -2,7 +2,7 @@ use std::rc::Rc; use gpui::{AnyView, ClickEvent, relative}; -use crate::{ButtonLike, ButtonLikeRounding, ElevationIndex, TintColor, Tooltip, prelude::*}; +use crate::{ButtonLike, ButtonLikeRounding, TintColor, Tooltip, prelude::*}; /// The position of a [`ToggleButton`] within a group of buttons. #[derive(Debug, PartialEq, Eq, Clone, Copy)] @@ -43,290 +43,6 @@ impl ToggleButtonPosition { } } -#[derive(IntoElement, RegisterComponent)] -pub struct ToggleButton { - base: ButtonLike, - position_in_group: Option, - label: SharedString, - label_color: Option, -} - -impl ToggleButton { - pub fn new(id: impl Into, label: impl Into) -> Self { - Self { - base: ButtonLike::new(id), - position_in_group: None, - label: label.into(), - label_color: None, - } - } - - pub fn color(mut self, label_color: impl Into>) -> Self { - self.label_color = label_color.into(); - self - } - - pub fn position_in_group(mut self, position: ToggleButtonPosition) -> Self { - self.position_in_group = Some(position); - self - } - - pub fn first(self) -> Self { - self.position_in_group(ToggleButtonPosition::HORIZONTAL_FIRST) - } - - pub fn middle(self) -> Self { - self.position_in_group(ToggleButtonPosition::HORIZONTAL_MIDDLE) - } - - pub fn last(self) -> Self { - self.position_in_group(ToggleButtonPosition::HORIZONTAL_LAST) - } -} - -impl Toggleable for ToggleButton { - fn toggle_state(mut self, selected: bool) -> Self { - self.base = self.base.toggle_state(selected); - self - } -} - -impl SelectableButton for ToggleButton { - fn selected_style(mut self, style: ButtonStyle) -> Self { - self.base.selected_style = Some(style); - self - } -} - -impl FixedWidth for ToggleButton { - fn width(mut self, width: impl Into) -> Self { - self.base.width = Some(width.into()); - self - } - - fn full_width(mut self) -> Self { - self.base.width = Some(relative(1.)); - self - } -} - -impl Disableable for ToggleButton { - fn disabled(mut self, disabled: bool) -> Self { - self.base = self.base.disabled(disabled); - self - } -} - -impl Clickable for ToggleButton { - fn on_click(mut self, handler: impl Fn(&ClickEvent, &mut Window, &mut App) + 'static) -> Self { - self.base = self.base.on_click(handler); - self - } 
- - fn cursor_style(mut self, cursor_style: gpui::CursorStyle) -> Self { - self.base = self.base.cursor_style(cursor_style); - self - } -} - -impl ButtonCommon for ToggleButton { - fn id(&self) -> &ElementId { - self.base.id() - } - - fn style(mut self, style: ButtonStyle) -> Self { - self.base = self.base.style(style); - self - } - - fn size(mut self, size: ButtonSize) -> Self { - self.base = self.base.size(size); - self - } - - fn tooltip(mut self, tooltip: impl Fn(&mut Window, &mut App) -> AnyView + 'static) -> Self { - self.base = self.base.tooltip(tooltip); - self - } - - fn tab_index(mut self, tab_index: impl Into) -> Self { - self.base = self.base.tab_index(tab_index); - self - } - - fn layer(mut self, elevation: ElevationIndex) -> Self { - self.base = self.base.layer(elevation); - self - } - - fn track_focus(mut self, focus_handle: &gpui::FocusHandle) -> Self { - self.base = self.base.track_focus(focus_handle); - self - } -} - -impl RenderOnce for ToggleButton { - fn render(self, _window: &mut Window, _cx: &mut App) -> impl IntoElement { - let is_disabled = self.base.disabled; - let is_selected = self.base.selected; - - let label_color = if is_disabled { - Color::Disabled - } else if is_selected { - Color::Selected - } else { - self.label_color.unwrap_or_default() - }; - - self.base - .when_some(self.position_in_group, |this, position| { - this.rounding(position.to_rounding()) - }) - .child( - Label::new(self.label) - .color(label_color) - .line_height_style(LineHeightStyle::UiLabel), - ) - } -} - -impl Component for ToggleButton { - fn scope() -> ComponentScope { - ComponentScope::Input - } - - fn sort_name() -> &'static str { - "ButtonC" - } - - fn preview(_window: &mut Window, _cx: &mut App) -> Option { - Some( - v_flex() - .gap_6() - .children(vec![ - example_group_with_title( - "Button Styles", - vec![ - single_example( - "Off", - ToggleButton::new("off", "Off") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .into_any_element(), - ), - single_example( - "On", - ToggleButton::new("on", "On") - .layer(ElevationIndex::Background) - .toggle_state(true) - .style(ButtonStyle::Filled) - .into_any_element(), - ), - single_example( - "Off – Disabled", - ToggleButton::new("disabled_off", "Disabled Off") - .layer(ElevationIndex::Background) - .disabled(true) - .style(ButtonStyle::Filled) - .into_any_element(), - ), - single_example( - "On – Disabled", - ToggleButton::new("disabled_on", "Disabled On") - .layer(ElevationIndex::Background) - .disabled(true) - .toggle_state(true) - .style(ButtonStyle::Filled) - .into_any_element(), - ), - ], - ), - example_group_with_title( - "Button Group", - vec![ - single_example( - "Three Buttons", - h_flex() - .child( - ToggleButton::new("three_btn_first", "First") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .first() - .into_any_element(), - ) - .child( - ToggleButton::new("three_btn_middle", "Middle") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .middle() - .toggle_state(true) - .into_any_element(), - ) - .child( - ToggleButton::new("three_btn_last", "Last") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .last() - .into_any_element(), - ) - .into_any_element(), - ), - single_example( - "Two Buttons", - h_flex() - .child( - ToggleButton::new("two_btn_first", "First") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .first() - .into_any_element(), - ) - .child( - ToggleButton::new("two_btn_last", "Last") - .layer(ElevationIndex::Background) - 
.style(ButtonStyle::Filled) - .last() - .into_any_element(), - ) - .into_any_element(), - ), - ], - ), - example_group_with_title( - "Alternate Sizes", - vec![ - single_example( - "None", - ToggleButton::new("none", "None") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .size(ButtonSize::None) - .into_any_element(), - ), - single_example( - "Compact", - ToggleButton::new("compact", "Compact") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .size(ButtonSize::Compact) - .into_any_element(), - ), - single_example( - "Large", - ToggleButton::new("large", "Large") - .layer(ElevationIndex::Background) - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .into_any_element(), - ), - ], - ), - ]) - .into_any_element(), - ) - } -} - pub struct ButtonConfiguration { label: SharedString, icon: Option, @@ -447,6 +163,8 @@ pub enum ToggleButtonGroupStyle { pub enum ToggleButtonGroupSize { Default, Medium, + Large, + Custom(Rems), } #[derive(IntoElement)] @@ -458,7 +176,9 @@ where rows: [[T; COLS]; ROWS], style: ToggleButtonGroupStyle, size: ToggleButtonGroupSize, + label_size: LabelSize, group_width: Option, + auto_width: bool, selected_index: usize, tab_index: Option, } @@ -470,7 +190,9 @@ impl ToggleButtonGroup { rows: [buttons], style: ToggleButtonGroupStyle::Transparent, size: ToggleButtonGroupSize::Default, + label_size: LabelSize::Small, group_width: None, + auto_width: false, selected_index: 0, tab_index: None, } @@ -488,7 +210,9 @@ impl ToggleButtonGroup { rows: [first_row, second_row], style: ToggleButtonGroupStyle::Transparent, size: ToggleButtonGroupSize::Default, + label_size: LabelSize::Small, group_width: None, + auto_width: false, selected_index: 0, tab_index: None, } @@ -511,6 +235,18 @@ impl ToggleButtonGroup Self { + self.auto_width = true; + self + } + + pub fn label_size(mut self, label_size: LabelSize) -> Self { + self.label_size = label_size; + self + } + /// Sets the tab index for the toggle button group. /// The tab index is set to the initial value provided, then the /// value is incremented by the number of buttons in the group. 
@@ -543,6 +279,11 @@ impl RenderOnce for ToggleButtonGroup { fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { + let custom_height = match self.size { + ToggleButtonGroupSize::Custom(height) => Some(height), + _ => None, + }; + let entries = self.rows.into_iter().enumerate().map(|(row_index, row)| { let group_name = self.group_name.clone(); @@ -558,7 +299,7 @@ impl RenderOnce let entry_index = row_index * COLS + col_index; ButtonLike::new((group_name.clone(), entry_index)) - .full_width() + .when(!self.auto_width, |this| this.full_width()) .rounding(Some( ToggleButtonPosition { leftmost: col_index == 0, @@ -581,13 +322,17 @@ impl RenderOnce .when(self.size == ToggleButtonGroupSize::Medium, |button| { button.size(ButtonSize::Medium) }) + .when(self.size == ToggleButtonGroupSize::Large, |button| { + button.size(ButtonSize::Large) + }) + .when_some(custom_height, |button, height| button.height(height.into())) .child( h_flex() .w_full() + .px_2() .gap_1p5() - .px_3() - .py_1() .justify_center() + .flex_none() .when_some(icon, |this, icon| { this.py_2() .child(Icon::new(icon).size(IconSize::XSmall).map(|this| { @@ -598,7 +343,7 @@ impl RenderOnce } })) }) - .child(Label::new(label).size(LabelSize::Small).when( + .child(Label::new(label).size(self.label_size).when( entry_index == self.selected_index || selected, |this| this.color(Color::Accent), )), @@ -620,6 +365,8 @@ impl RenderOnce .map(|this| { if let Some(width) = self.group_width { this.w(width) + } else if self.auto_width { + this } else { this.w_full() } @@ -646,7 +393,7 @@ impl RenderOnce .when(is_outlined_or_filled && !last_item, |this| { this.border_r_1().border_color(border_color) }) - .w(Self::button_width()) + .when(!self.auto_width, |this| this.w(Self::button_width())) .overflow_hidden() .child(item) })) diff --git a/crates/ui/src/components/stories.rs b/crates/ui/src/components/stories.rs index 05e8cd18d51d16ee4bc355bfa455891183d1749b..29d72ad08b783f8d2cfa5b62ed8fef9d845c9315 100644 --- a/crates/ui/src/components/stories.rs +++ b/crates/ui/src/components/stories.rs @@ -6,7 +6,6 @@ mod list_header; mod list_item; mod tab; mod tab_bar; -mod toggle_button; pub use context_menu::*; pub use icon_button::*; @@ -16,4 +15,3 @@ pub use list_header::*; pub use list_item::*; pub use tab::*; pub use tab_bar::*; -pub use toggle_button::*; diff --git a/crates/ui/src/components/stories/toggle_button.rs b/crates/ui/src/components/stories/toggle_button.rs deleted file mode 100644 index 903c7059a872448d7d227340a066ef044a8db100..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/toggle_button.rs +++ /dev/null @@ -1,93 +0,0 @@ -use gpui::Render; -use story::{Story, StoryItem, StorySection}; - -use crate::{ToggleButton, prelude::*}; - -pub struct ToggleButtonStory; - -impl Render for ToggleButtonStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .child(Story::title_for::(cx)) - .child( - StorySection::new().child( - StoryItem::new( - "Default", - ToggleButton::new("default_toggle_button", "Hello"), - ) - .description("Displays a toggle button.") - .usage(""), - ), - ) - .child( - StorySection::new().child( - StoryItem::new( - "Toggle button group", - h_flex() - .child( - ToggleButton::new(1, "Apple") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .first(), - ) - .child( - ToggleButton::new(2, "Banana") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .middle(), - ) - .child( - ToggleButton::new(3, "Cherry") - 
.style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .middle(), - ) - .child( - ToggleButton::new(4, "Dragonfruit") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .last(), - ), - ) - .description("Displays a group of toggle buttons.") - .usage(""), - ), - ) - .child( - StorySection::new().child( - StoryItem::new( - "Toggle button group with selection", - h_flex() - .child( - ToggleButton::new(1, "Apple") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .first(), - ) - .child( - ToggleButton::new(2, "Banana") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .toggle_state(true) - .middle(), - ) - .child( - ToggleButton::new(3, "Cherry") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .middle(), - ) - .child( - ToggleButton::new(4, "Dragonfruit") - .style(ButtonStyle::Filled) - .size(ButtonSize::Large) - .last(), - ), - ) - .description("Displays a group of toggle buttons.") - .usage(""), - ), - ) - .into_element() - } -} From 66382acd52905527e38d7d15d04018febc960d53 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 01:52:13 -0300 Subject: [PATCH 0239/1030] ui: Remove outdated/unused component stories (#43118) This PR removes basically all of the component stories, with the exception of the context menu, which is a bit more intricate to set up. All of the component that won't have a story after this PR will have an entry in the Component Preview, which serves basically the same purpose. Release Notes: - N/A --- crates/storybook/src/story_selector.rs | 14 -- crates/ui/src/components/list/list.rs | 52 +++++- .../src/components/list/list_bullet_item.rs | 45 +++++- crates/ui/src/components/list/list_header.rs | 82 +++++++++- crates/ui/src/components/list/list_item.rs | 115 +++++++++++++- .../ui/src/components/list/list_sub_header.rs | 66 +++++++- crates/ui/src/components/stories.rs | 14 -- crates/ui/src/components/stories/avatar.rs | 0 crates/ui/src/components/stories/button.rs | 0 .../ui/src/components/stories/disclosure.rs | 18 --- .../ui/src/components/stories/icon_button.rs | 148 ------------------ .../ui/src/components/stories/keybinding.rs | 136 ---------------- crates/ui/src/components/stories/list.rs | 36 ----- .../ui/src/components/stories/list_header.rs | 31 ---- crates/ui/src/components/stories/list_item.rs | 131 ---------------- crates/ui/src/components/stories/tab.rs | 114 -------------- crates/ui/src/components/stories/tab_bar.rs | 59 ------- 17 files changed, 352 insertions(+), 709 deletions(-) delete mode 100644 crates/ui/src/components/stories/avatar.rs delete mode 100644 crates/ui/src/components/stories/button.rs delete mode 100644 crates/ui/src/components/stories/disclosure.rs delete mode 100644 crates/ui/src/components/stories/icon_button.rs delete mode 100644 crates/ui/src/components/stories/keybinding.rs delete mode 100644 crates/ui/src/components/stories/list.rs delete mode 100644 crates/ui/src/components/stories/list_header.rs delete mode 100644 crates/ui/src/components/stories/list_item.rs delete mode 100644 crates/ui/src/components/stories/tab.rs delete mode 100644 crates/ui/src/components/stories/tab_bar.rs diff --git a/crates/storybook/src/story_selector.rs b/crates/storybook/src/story_selector.rs index 2cf6c0beac90ad5be56af61028c8f8329fb95b7c..7f70d58b3b4e5cfb7c23b60c84b17a6a941f0804 100644 --- a/crates/storybook/src/story_selector.rs +++ b/crates/storybook/src/story_selector.rs @@ -17,16 +17,9 @@ pub enum ComponentStory { ContextMenu, Cursor, Focus, - 
IconButton, - Keybinding, - List, - ListHeader, - ListItem, OverflowScroll, Picker, Scroll, - Tab, - TabBar, Text, ViewportUnits, WithRemSize, @@ -46,16 +39,9 @@ impl ComponentStory { Self::ContextMenu => cx.new(|_| ui::ContextMenuStory).into(), Self::Cursor => cx.new(|_| crate::stories::CursorStory).into(), Self::Focus => FocusStory::model(window, cx).into(), - Self::IconButton => cx.new(|_| ui::IconButtonStory).into(), - Self::Keybinding => cx.new(|_| ui::KeybindingStory).into(), - Self::List => cx.new(|_| ui::ListStory).into(), - Self::ListHeader => cx.new(|_| ui::ListHeaderStory).into(), - Self::ListItem => cx.new(|_| ui::ListItemStory).into(), Self::OverflowScroll => cx.new(|_| crate::stories::OverflowScrollStory).into(), Self::Picker => PickerStory::new(window, cx).into(), Self::Scroll => ScrollStory::model(cx).into(), - Self::Tab => cx.new(|_| ui::TabStory).into(), - Self::TabBar => cx.new(|_| ui::TabBarStory).into(), Self::Text => TextStory::model(cx).into(), Self::ViewportUnits => cx.new(|_| crate::stories::ViewportUnitsStory).into(), Self::WithRemSize => cx.new(|_| crate::stories::WithRemSizeStory).into(), diff --git a/crates/ui/src/components/list/list.rs b/crates/ui/src/components/list/list.rs index b6950f06a4449265cccd48f9f13590650619a01c..ccae5bed23d9509ea6d4989f84620d444499245b 100644 --- a/crates/ui/src/components/list/list.rs +++ b/crates/ui/src/components/list/list.rs @@ -1,14 +1,15 @@ +use component::{Component, ComponentScope, example_group_with_title, single_example}; use gpui::AnyElement; use smallvec::SmallVec; -use crate::{Label, ListHeader, prelude::*, v_flex}; +use crate::{Label, ListHeader, ListItem, prelude::*}; pub enum EmptyMessage { Text(SharedString), Element(AnyElement), } -#[derive(IntoElement)] +#[derive(IntoElement, RegisterComponent)] pub struct List { /// Message to display when the list is empty /// Defaults to "No items" @@ -92,3 +93,50 @@ impl RenderOnce for List { }) } } + +impl Component for List { + fn scope() -> ComponentScope { + ComponentScope::Layout + } + + fn description() -> Option<&'static str> { + Some( + "A container component for displaying a collection of list items with optional header and empty state.", + ) + } + + fn preview(_window: &mut Window, _cx: &mut App) -> Option { + Some( + v_flex() + .gap_6() + .children(vec![example_group_with_title( + "Basic Lists", + vec![ + single_example( + "Simple List", + List::new() + .child(ListItem::new("item1").child(Label::new("Item 1"))) + .child(ListItem::new("item2").child(Label::new("Item 2"))) + .child(ListItem::new("item3").child(Label::new("Item 3"))) + .into_any_element(), + ), + single_example( + "With Header", + List::new() + .header(ListHeader::new("Section Header")) + .child(ListItem::new("item1").child(Label::new("Item 1"))) + .child(ListItem::new("item2").child(Label::new("Item 2"))) + .into_any_element(), + ), + single_example( + "Empty List", + List::new() + .empty_message("No items to display") + .into_any_element(), + ), + ], + )]) + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/list/list_bullet_item.rs b/crates/ui/src/components/list/list_bullet_item.rs index 9ac2095b5757d90bd22496052b806f41a5f8d163..17731488f7139522bf19aeaab18fb395d1eb68b0 100644 --- a/crates/ui/src/components/list/list_bullet_item.rs +++ b/crates/ui/src/components/list/list_bullet_item.rs @@ -1,7 +1,8 @@ use crate::{ListItem, prelude::*}; +use component::{Component, ComponentScope, example_group_with_title, single_example}; use gpui::{IntoElement, ParentElement, SharedString}; 
-#[derive(IntoElement)] +#[derive(IntoElement, RegisterComponent)] pub struct ListBulletItem { label: SharedString, } @@ -38,3 +39,45 @@ impl RenderOnce for ListBulletItem { .into_any_element() } } + +impl Component for ListBulletItem { + fn scope() -> ComponentScope { + ComponentScope::DataDisplay + } + + fn description() -> Option<&'static str> { + Some("A list item with a bullet point indicator for unordered lists.") + } + + fn preview(_window: &mut Window, _cx: &mut App) -> Option { + Some( + v_flex() + .gap_6() + .child(example_group_with_title( + "Bullet Items", + vec![ + single_example( + "Simple", + ListBulletItem::new("First bullet item").into_any_element(), + ), + single_example( + "Multiple Lines", + v_flex() + .child(ListBulletItem::new("First item")) + .child(ListBulletItem::new("Second item")) + .child(ListBulletItem::new("Third item")) + .into_any_element(), + ), + single_example( + "Long Text", + ListBulletItem::new( + "A longer bullet item that demonstrates text wrapping behavior", + ) + .into_any_element(), + ), + ], + )) + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/list/list_header.rs b/crates/ui/src/components/list/list_header.rs index d59af07fa5271c070fca8433156b94301cc134aa..8726dca50dada193b3051f14b6609a373fc60730 100644 --- a/crates/ui/src/components/list/list_header.rs +++ b/crates/ui/src/components/list/list_header.rs @@ -1,11 +1,12 @@ use std::sync::Arc; -use crate::{Disclosure, Label, h_flex, prelude::*}; +use crate::{Disclosure, prelude::*}; +use component::{Component, ComponentScope, example_group_with_title, single_example}; use gpui::{AnyElement, ClickEvent}; use settings::Settings; use theme::ThemeSettings; -#[derive(IntoElement)] +#[derive(IntoElement, RegisterComponent)] pub struct ListHeader { /// The label of the header. 
label: SharedString, @@ -138,3 +139,80 @@ impl RenderOnce for ListHeader { ) } } + +impl Component for ListHeader { + fn scope() -> ComponentScope { + ComponentScope::DataDisplay + } + + fn description() -> Option<&'static str> { + Some( + "A header component for lists with support for icons, actions, and collapsible sections.", + ) + } + + fn preview(_window: &mut Window, _cx: &mut App) -> Option { + Some( + v_flex() + .gap_6() + .children(vec![ + example_group_with_title( + "Basic Headers", + vec![ + single_example( + "Simple", + ListHeader::new("Section Header").into_any_element(), + ), + single_example( + "With Icon", + ListHeader::new("Files") + .start_slot(Icon::new(IconName::File)) + .into_any_element(), + ), + single_example( + "With End Slot", + ListHeader::new("Recent") + .end_slot(Label::new("5").color(Color::Muted)) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "Collapsible Headers", + vec![ + single_example( + "Expanded", + ListHeader::new("Expanded Section") + .toggle(Some(true)) + .into_any_element(), + ), + single_example( + "Collapsed", + ListHeader::new("Collapsed Section") + .toggle(Some(false)) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "States", + vec![ + single_example( + "Selected", + ListHeader::new("Selected Header") + .toggle_state(true) + .into_any_element(), + ), + single_example( + "Inset", + ListHeader::new("Inset Header") + .inset(true) + .into_any_element(), + ), + ], + ), + ]) + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/list/list_item.rs b/crates/ui/src/components/list/list_item.rs index a58291438a1d10bb1b61149f412151375b6b0a1f..d581fad9453d9812f17b7bc9e0297fb9927c8188 100644 --- a/crates/ui/src/components/list/list_item.rs +++ b/crates/ui/src/components/list/list_item.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use component::{Component, ComponentScope, example_group_with_title, single_example}; use gpui::{AnyElement, AnyView, ClickEvent, MouseButton, MouseDownEvent, Pixels, px}; use smallvec::SmallVec; @@ -13,7 +14,7 @@ pub enum ListItemSpacing { Sparse, } -#[derive(IntoElement)] +#[derive(IntoElement, RegisterComponent)] pub struct ListItem { id: ElementId, group_name: Option, @@ -355,3 +356,115 @@ impl RenderOnce for ListItem { ) } } + +impl Component for ListItem { + fn scope() -> ComponentScope { + ComponentScope::DataDisplay + } + + fn description() -> Option<&'static str> { + Some( + "A flexible list item component with support for icons, actions, disclosure toggles, and hierarchical display.", + ) + } + + fn preview(_window: &mut Window, _cx: &mut App) -> Option { + Some( + v_flex() + .gap_6() + .children(vec![ + example_group_with_title( + "Basic List Items", + vec![ + single_example( + "Simple", + ListItem::new("simple") + .child(Label::new("Simple list item")) + .into_any_element(), + ), + single_example( + "With Icon", + ListItem::new("with_icon") + .start_slot(Icon::new(IconName::File)) + .child(Label::new("List item with icon")) + .into_any_element(), + ), + single_example( + "Selected", + ListItem::new("selected") + .toggle_state(true) + .start_slot(Icon::new(IconName::Check)) + .child(Label::new("Selected item")) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "List Item Spacing", + vec![ + single_example( + "Dense", + ListItem::new("dense") + .spacing(ListItemSpacing::Dense) + .child(Label::new("Dense spacing")) + .into_any_element(), + ), + single_example( + "Extra Dense", + ListItem::new("extra_dense") + .spacing(ListItemSpacing::ExtraDense) + 
.child(Label::new("Extra dense spacing")) + .into_any_element(), + ), + single_example( + "Sparse", + ListItem::new("sparse") + .spacing(ListItemSpacing::Sparse) + .child(Label::new("Sparse spacing")) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "With Slots", + vec![ + single_example( + "End Slot", + ListItem::new("end_slot") + .child(Label::new("Item with end slot")) + .end_slot(Icon::new(IconName::ChevronRight)) + .into_any_element(), + ), + single_example( + "With Toggle", + ListItem::new("with_toggle") + .toggle(Some(true)) + .child(Label::new("Expandable item")) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "States", + vec![ + single_example( + "Disabled", + ListItem::new("disabled") + .disabled(true) + .child(Label::new("Disabled item")) + .into_any_element(), + ), + single_example( + "Non-selectable", + ListItem::new("non_selectable") + .selectable(false) + .child(Label::new("Non-selectable item")) + .into_any_element(), + ), + ], + ), + ]) + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/list/list_sub_header.rs b/crates/ui/src/components/list/list_sub_header.rs index e6f5abfe0ab5ee4f7e10a85c3d2b15402df8fc53..b4a82fb2edf5fcfbe068dee2570884698b8b0663 100644 --- a/crates/ui/src/components/list/list_sub_header.rs +++ b/crates/ui/src/components/list/list_sub_header.rs @@ -1,7 +1,7 @@ use crate::prelude::*; -use crate::{Icon, IconName, IconSize, Label, h_flex}; +use component::{Component, ComponentScope, example_group_with_title, single_example}; -#[derive(IntoElement)] +#[derive(IntoElement, RegisterComponent)] pub struct ListSubHeader { label: SharedString, start_slot: Option, @@ -85,3 +85,65 @@ impl RenderOnce for ListSubHeader { ) } } + +impl Component for ListSubHeader { + fn scope() -> ComponentScope { + ComponentScope::DataDisplay + } + + fn description() -> Option<&'static str> { + Some( + "A sub-header component for organizing list content into subsections with optional icons and end slots.", + ) + } + + fn preview(_window: &mut Window, _cx: &mut App) -> Option { + Some( + v_flex() + .gap_6() + .children(vec![ + example_group_with_title( + "Basic Sub-headers", + vec![ + single_example( + "Simple", + ListSubHeader::new("Subsection").into_any_element(), + ), + single_example( + "With Icon", + ListSubHeader::new("Documents") + .left_icon(Some(IconName::File)) + .into_any_element(), + ), + single_example( + "With End Slot", + ListSubHeader::new("Recent") + .end_slot( + Label::new("3").color(Color::Muted).into_any_element(), + ) + .into_any_element(), + ), + ], + ), + example_group_with_title( + "States", + vec![ + single_example( + "Selected", + ListSubHeader::new("Selected") + .toggle_state(true) + .into_any_element(), + ), + single_example( + "Inset", + ListSubHeader::new("Inset Sub-header") + .inset(true) + .into_any_element(), + ), + ], + ), + ]) + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/stories.rs b/crates/ui/src/components/stories.rs index 29d72ad08b783f8d2cfa5b62ed8fef9d845c9315..bcfcfd04c3176d6a19385101360ab21bffb9cc8e 100644 --- a/crates/ui/src/components/stories.rs +++ b/crates/ui/src/components/stories.rs @@ -1,17 +1,3 @@ mod context_menu; -mod icon_button; -mod keybinding; -mod list; -mod list_header; -mod list_item; -mod tab; -mod tab_bar; pub use context_menu::*; -pub use icon_button::*; -pub use keybinding::*; -pub use list::*; -pub use list_header::*; -pub use list_item::*; -pub use tab::*; -pub use tab_bar::*; diff --git a/crates/ui/src/components/stories/avatar.rs 
b/crates/ui/src/components/stories/avatar.rs deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/crates/ui/src/components/stories/button.rs b/crates/ui/src/components/stories/button.rs deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/crates/ui/src/components/stories/disclosure.rs b/crates/ui/src/components/stories/disclosure.rs deleted file mode 100644 index 5a395388f450a19270426a6df7efa78d490792c2..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/disclosure.rs +++ /dev/null @@ -1,18 +0,0 @@ -use gpui::Render; -use story::Story; - -use crate::Disclosure; -use crate::prelude::*; - -pub struct DisclosureStory; - -impl Render for DisclosureStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Toggled")) - .child(Disclosure::new("toggled", true)) - .child(Story::label("Not Toggled")) - .child(Disclosure::new("not_toggled", false)) - } -} diff --git a/crates/ui/src/components/stories/icon_button.rs b/crates/ui/src/components/stories/icon_button.rs deleted file mode 100644 index 166297eabc389ca5cc4dea5070c21cb9efa00133..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/icon_button.rs +++ /dev/null @@ -1,148 +0,0 @@ -use gpui::Render; -use story::{Story, StoryItem, StorySection}; - -use crate::{IconButton, IconName}; -use crate::{IconButtonShape, Tooltip, prelude::*}; - -pub struct IconButtonStory; - -impl Render for IconButtonStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - let default_button = StoryItem::new( - "Default", - IconButton::new("default_icon_button", IconName::Hash), - ) - .description("Displays an icon button.") - .usage( - r#" - IconButton::new("default_icon_button", Icon::Hash) - "#, - ); - - let selected_button = StoryItem::new( - "Selected", - IconButton::new("selected_icon_button", IconName::Hash).toggle_state(true), - ) - .description("Displays an icon button that is selected.") - .usage( - r#" - IconButton::new("selected_icon_button", Icon::Hash).selected(true) - "#, - ); - - let selected_with_selected_icon = StoryItem::new( - "Selected with `selected_icon`", - IconButton::new("selected_with_selected_icon_button", IconName::AudioOn) - .toggle_state(true) - .selected_icon(IconName::AudioOff), - ) - .description( - "Displays an icon button that is selected and shows a different icon when selected.", - ) - .usage( - r#" - IconButton::new("selected_with_selected_icon_button", Icon::AudioOn) - .selected(true) - .selected_icon(Icon::AudioOff) - "#, - ); - - let disabled_button = StoryItem::new( - "Disabled", - IconButton::new("disabled_icon_button", IconName::Hash).disabled(true), - ) - .description("Displays an icon button that is disabled.") - .usage( - r#" - IconButton::new("disabled_icon_button", Icon::Hash).disabled(true) - "#, - ); - - let with_on_click_button = StoryItem::new( - "With `on_click`", - IconButton::new("with_on_click_button", IconName::Ai).on_click( - |_event, _window, _cx| { - println!("Clicked!"); - }, - ), - ) - .description("Displays an icon button which triggers an event on click.") - .usage( - r#" - IconButton::new("with_on_click_button", Icon::Ai).on_click(|_event, _cx| { - println!("Clicked!"); - }) - "#, - ); - - let with_tooltip_button = StoryItem::new( - "With `tooltip`", - 
IconButton::new("with_tooltip_button", IconName::Chat) - .tooltip(Tooltip::text("Open messages")), - ) - .description("Displays an icon button that has a tooltip when hovered.") - .usage( - r#" - IconButton::new("with_tooltip_button", Icon::MessageBubbles) - .tooltip(Tooltip::text_f("Open messages")) - "#, - ); - - let selected_with_tooltip_button = StoryItem::new( - "Selected with `tooltip`", - IconButton::new("selected_with_tooltip_button", IconName::CaseSensitive) - .toggle_state(true) - .tooltip(Tooltip::text("Toggle inlay hints")), - ) - .description("Displays a selected icon button with tooltip.") - .usage( - r#" - IconButton::new("selected_with_tooltip_button", Icon::InlayHint) - .selected(true) - .tooltip(Tooltip::text_f("Toggle inlay hints")) - "#, - ); - - let buttons = vec![ - default_button, - selected_button, - selected_with_selected_icon, - disabled_button, - with_on_click_button, - with_tooltip_button, - selected_with_tooltip_button, - ]; - - Story::container(cx) - .child(Story::title_for::(cx)) - .child(StorySection::new().children(buttons)) - .child( - StorySection::new().child(StoryItem::new( - "Square", - h_flex() - .gap_2() - .child( - IconButton::new("square-medium", IconName::Close) - .shape(IconButtonShape::Square) - .icon_size(IconSize::Medium), - ) - .child( - IconButton::new("square-small", IconName::Close) - .shape(IconButtonShape::Square) - .icon_size(IconSize::Small), - ) - .child( - IconButton::new("square-xsmall", IconName::Close) - .shape(IconButtonShape::Square) - .icon_size(IconSize::XSmall), - ) - .child( - IconButton::new("square-indicator", IconName::Close) - .shape(IconButtonShape::Square) - .icon_size(IconSize::Indicator), - ), - )), - ) - .into_element() - } -} diff --git a/crates/ui/src/components/stories/keybinding.rs b/crates/ui/src/components/stories/keybinding.rs deleted file mode 100644 index 5840a11cf702f7a47aed06791ab47f12e2418d9c..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/keybinding.rs +++ /dev/null @@ -1,136 +0,0 @@ -use gpui::NoAction; -use gpui::Render; -use itertools::Itertools; -use settings::KeybindSource; -use story::Story; - -use crate::{KeyBinding, prelude::*}; - -pub struct KeybindingStory; - -pub fn binding(key: &str) -> gpui::KeyBinding { - gpui::KeyBinding::new(key, NoAction {}, None) -} - -impl Render for KeybindingStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - let all_modifier_permutations = ["ctrl", "alt", "cmd", "shift"].into_iter().permutations(2); - - const SOURCE: KeybindSource = KeybindSource::Base; - - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Single Key", cx)) - .child(KeyBinding::from_keystrokes( - binding("Z").keystrokes().into(), - SOURCE, - )) - .child(Story::label("Single Key with Modifier", cx)) - .child( - div() - .flex() - .gap_3() - .child(KeyBinding::from_keystrokes( - binding("ctrl-c").keystrokes().into(), - SOURCE, - )) - .child(KeyBinding::from_keystrokes( - binding("alt-c").keystrokes().into(), - SOURCE, - )) - .child(KeyBinding::from_keystrokes( - binding("cmd-c").keystrokes().into(), - SOURCE, - )) - .child(KeyBinding::from_keystrokes( - binding("shift-c").keystrokes().into(), - SOURCE, - )), - ) - .child(Story::label("Single Key with Modifier (Permuted)", cx)) - .child( - div().flex().flex_col().children( - all_modifier_permutations - .chunks(4) - .into_iter() - .map(|chunk| { - div() - .flex() - .gap_4() - .py_3() - .children(chunk.map(|permutation| { - KeyBinding::from_keystrokes( 
- binding(&(permutation.join("-") + "-x")) - .keystrokes() - .into(), - SOURCE, - ) - })) - }), - ), - ) - .child(Story::label("Single Key with All Modifiers", cx)) - .child(KeyBinding::from_keystrokes( - binding("ctrl-alt-cmd-shift-z").keystrokes().into(), - SOURCE, - )) - .child(Story::label("Chord", cx)) - .child(KeyBinding::from_keystrokes( - binding("a z").keystrokes().into(), - SOURCE, - )) - .child(Story::label("Chord with Modifier", cx)) - .child(KeyBinding::from_keystrokes( - binding("ctrl-a shift-z").keystrokes().into(), - SOURCE, - )) - .child(KeyBinding::from_keystrokes( - binding("fn-s").keystrokes().into(), - SOURCE, - )) - .child(Story::label("Single Key with All Modifiers (Linux)", cx)) - .child( - KeyBinding::from_keystrokes( - binding("ctrl-alt-cmd-shift-z").keystrokes().into(), - SOURCE, - ) - .platform_style(PlatformStyle::Linux), - ) - .child(Story::label("Chord (Linux)", cx)) - .child( - KeyBinding::from_keystrokes(binding("a z").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Linux), - ) - .child(Story::label("Chord with Modifier (Linux)", cx)) - .child( - KeyBinding::from_keystrokes(binding("ctrl-a shift-z").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Linux), - ) - .child( - KeyBinding::from_keystrokes(binding("fn-s").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Linux), - ) - .child(Story::label("Single Key with All Modifiers (Windows)", cx)) - .child( - KeyBinding::from_keystrokes( - binding("ctrl-alt-cmd-shift-z").keystrokes().into(), - SOURCE, - ) - .platform_style(PlatformStyle::Windows), - ) - .child(Story::label("Chord (Windows)", cx)) - .child( - KeyBinding::from_keystrokes(binding("a z").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Windows), - ) - .child(Story::label("Chord with Modifier (Windows)", cx)) - .child( - KeyBinding::from_keystrokes(binding("ctrl-a shift-z").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Windows), - ) - .child( - KeyBinding::from_keystrokes(binding("fn-s").keystrokes().into(), SOURCE) - .platform_style(PlatformStyle::Windows), - ) - } -} diff --git a/crates/ui/src/components/stories/list.rs b/crates/ui/src/components/stories/list.rs deleted file mode 100644 index 6a0e672d31771fd2c946e2c207ae052baf77fb01..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/list.rs +++ /dev/null @@ -1,36 +0,0 @@ -use gpui::Render; -use story::Story; - -use crate::{List, ListItem}; -use crate::{ListHeader, ListSeparator, ListSubHeader, prelude::*}; - -pub struct ListStory; - -impl Render for ListStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Default", cx)) - .child( - List::new() - .child(ListItem::new("apple").child("Apple")) - .child(ListItem::new("banana").child("Banana")) - .child(ListItem::new("cherry").child("Cherry")), - ) - .child(Story::label("With sections", cx)) - .child( - List::new() - .header(ListHeader::new("Produce")) - .child(ListSubHeader::new("Fruits")) - .child(ListItem::new("apple").child("Apple")) - .child(ListItem::new("banana").child("Banana")) - .child(ListItem::new("cherry").child("Cherry")) - .child(ListSeparator) - .child(ListSubHeader::new("Root Vegetables")) - .child(ListItem::new("carrot").child("Carrot")) - .child(ListItem::new("potato").child("Potato")) - .child(ListSubHeader::new("Leafy Vegetables")) - .child(ListItem::new("kale").child("Kale")), - ) - } -} diff --git 
a/crates/ui/src/components/stories/list_header.rs b/crates/ui/src/components/stories/list_header.rs deleted file mode 100644 index f7fa068d5a11cb0bd772dc4c10fd19c048ae0181..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/list_header.rs +++ /dev/null @@ -1,31 +0,0 @@ -use gpui::Render; -use story::Story; - -use crate::{IconButton, prelude::*}; -use crate::{IconName, ListHeader}; - -pub struct ListHeaderStory; - -impl Render for ListHeaderStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Default", cx)) - .child(ListHeader::new("Section 1")) - .child(Story::label("With left icon", cx)) - .child(ListHeader::new("Section 2").start_slot(Icon::new(IconName::Bell))) - .child(Story::label("With left icon and meta", cx)) - .child( - ListHeader::new("Section 3") - .start_slot(Icon::new(IconName::BellOff)) - .end_slot(IconButton::new("action_1", IconName::BoltFilled)), - ) - .child(Story::label("With multiple meta", cx)) - .child( - ListHeader::new("Section 4") - .end_slot(IconButton::new("action_1", IconName::BoltFilled)) - .end_slot(IconButton::new("action_2", IconName::Warning)) - .end_slot(IconButton::new("action_3", IconName::Plus)), - ) - } -} diff --git a/crates/ui/src/components/stories/list_item.rs b/crates/ui/src/components/stories/list_item.rs deleted file mode 100644 index ee8f5e6c7280215c81f4bc9e71685a1ffec11c80..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/list_item.rs +++ /dev/null @@ -1,131 +0,0 @@ -use gpui::Render; -use story::Story; - -use crate::{Avatar, prelude::*}; -use crate::{IconName, ListItem}; - -const OVERFLOWING_TEXT: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aenean mauris ligula, luctus vel dignissim eu, vestibulum sed libero. 
Sed at convallis velit."; - -pub struct ListItemStory; - -impl Render for ListItemStory { - fn render(&mut self, _: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .bg(cx.theme().colors().background) - .child(Story::title_for::(cx)) - .child(Story::label("Default", cx)) - .child(ListItem::new("hello_world").child("Hello, world!")) - .child(Story::label("Inset", cx)) - .child( - ListItem::new("inset_list_item") - .inset(true) - .start_slot( - Icon::new(IconName::Bell) - .size(IconSize::Small) - .color(Color::Muted), - ) - .child("Hello, world!") - .end_slot( - Icon::new(IconName::Bell) - .size(IconSize::Small) - .color(Color::Muted), - ), - ) - .child(Story::label("With start slot icon", cx)) - .child( - ListItem::new("with start slot_icon") - .child("Hello, world!") - .start_slot( - Icon::new(IconName::Bell) - .size(IconSize::Small) - .color(Color::Muted), - ), - ) - .child(Story::label("With start slot avatar", cx)) - .child( - ListItem::new("with_start slot avatar") - .child("Hello, world!") - .start_slot(Avatar::new( - "https://avatars.githubusercontent.com/u/1714999?v=4", - )), - ) - .child(Story::label("With end slot", cx)) - .child( - ListItem::new("with_left_avatar") - .child("Hello, world!") - .end_slot(Avatar::new( - "https://avatars.githubusercontent.com/u/1714999?v=4", - )), - ) - .child(Story::label("With end hover slot", cx)) - .child( - ListItem::new("with_end_hover_slot") - .child("Hello, world!") - .end_slot( - h_flex() - .gap_2() - .child(Avatar::new( - "https://avatars.githubusercontent.com/u/1789?v=4", - )) - .child(Avatar::new( - "https://avatars.githubusercontent.com/u/1789?v=4", - )) - .child(Avatar::new( - "https://avatars.githubusercontent.com/u/1789?v=4", - )) - .child(Avatar::new( - "https://avatars.githubusercontent.com/u/1789?v=4", - )) - .child(Avatar::new( - "https://avatars.githubusercontent.com/u/1789?v=4", - )), - ) - .end_hover_slot(Avatar::new( - "https://avatars.githubusercontent.com/u/1714999?v=4", - )), - ) - .child(Story::label("With `on_click`", cx)) - .child(ListItem::new("with_on_click").child("Click me").on_click( - |_event, _window, _cx| { - println!("Clicked!"); - }, - )) - .child(Story::label("With `on_secondary_mouse_down`", cx)) - .child( - ListItem::new("with_on_secondary_mouse_down") - .child("Right click me") - .on_secondary_mouse_down(|_event, _window, _cx| { - println!("Right mouse down!"); - }), - ) - .child(Story::label( - "With overflowing content in the `end_slot`", - cx, - )) - .child( - ListItem::new("with_overflowing_content_in_end_slot") - .child("An excerpt") - .end_slot(Label::new(OVERFLOWING_TEXT).color(Color::Muted)), - ) - .child(Story::label( - "`inset` with overflowing content in the `end_slot`", - cx, - )) - .child( - ListItem::new("inset_with_overflowing_content_in_end_slot") - .inset(true) - .child("An excerpt") - .end_slot(Label::new(OVERFLOWING_TEXT).color(Color::Muted)), - ) - .child(Story::label( - "`inset` with overflowing content in `children` and `end_slot`", - cx, - )) - .child( - ListItem::new("inset_with_overflowing_content_in_children_and_end_slot") - .inset(true) - .child(Label::new(OVERFLOWING_TEXT)) - .end_slot(Label::new(OVERFLOWING_TEXT).color(Color::Muted)), - ) - } -} diff --git a/crates/ui/src/components/stories/tab.rs b/crates/ui/src/components/stories/tab.rs deleted file mode 100644 index e6c80c54e9752ff7ebee68a70f3af6d7023d0c74..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/tab.rs +++ /dev/null @@ -1,114 +0,0 @@ -use 
std::cmp::Ordering; - -use gpui::Render; -use story::Story; - -use crate::{IconButtonShape, TabPosition, prelude::*}; -use crate::{Indicator, Tab}; - -pub struct TabStory; - -impl Render for TabStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Default", cx)) - .child(h_flex().child(Tab::new("tab_1").child("Tab 1"))) - .child(Story::label("With indicator", cx)) - .child( - h_flex().child( - Tab::new("tab_1") - .start_slot(Indicator::dot().color(Color::Warning)) - .child("Tab 1"), - ), - ) - .child(Story::label("With close button", cx)) - .child( - h_flex().child( - Tab::new("tab_1") - .end_slot( - IconButton::new("close_button", IconName::Close) - .visible_on_hover("") - .shape(IconButtonShape::Square) - .icon_color(Color::Muted) - .size(ButtonSize::None) - .icon_size(IconSize::XSmall), - ) - .child("Tab 1"), - ), - ) - .child(Story::label("List of tabs", cx)) - .child( - h_flex() - .child(Tab::new("tab_1").child("Tab 1")) - .child(Tab::new("tab_2").child("Tab 2")), - ) - .child(Story::label("List of tabs with first tab selected", cx)) - .child( - h_flex() - .child( - Tab::new("tab_1") - .toggle_state(true) - .position(TabPosition::First) - .child("Tab 1"), - ) - .child( - Tab::new("tab_2") - .position(TabPosition::Middle(Ordering::Greater)) - .child("Tab 2"), - ) - .child( - Tab::new("tab_3") - .position(TabPosition::Middle(Ordering::Greater)) - .child("Tab 3"), - ) - .child(Tab::new("tab_4").position(TabPosition::Last).child("Tab 4")), - ) - .child(Story::label("List of tabs with last tab selected", cx)) - .child( - h_flex() - .child( - Tab::new("tab_1") - .position(TabPosition::First) - .child("Tab 1"), - ) - .child( - Tab::new("tab_2") - .position(TabPosition::Middle(Ordering::Less)) - .child("Tab 2"), - ) - .child( - Tab::new("tab_3") - .position(TabPosition::Middle(Ordering::Less)) - .child("Tab 3"), - ) - .child( - Tab::new("tab_4") - .position(TabPosition::Last) - .toggle_state(true) - .child("Tab 4"), - ), - ) - .child(Story::label("List of tabs with second tab selected", cx)) - .child( - h_flex() - .child( - Tab::new("tab_1") - .position(TabPosition::First) - .child("Tab 1"), - ) - .child( - Tab::new("tab_2") - .position(TabPosition::Middle(Ordering::Equal)) - .toggle_state(true) - .child("Tab 2"), - ) - .child( - Tab::new("tab_3") - .position(TabPosition::Middle(Ordering::Greater)) - .child("Tab 3"), - ) - .child(Tab::new("tab_4").position(TabPosition::Last).child("Tab 4")), - ) - } -} diff --git a/crates/ui/src/components/stories/tab_bar.rs b/crates/ui/src/components/stories/tab_bar.rs deleted file mode 100644 index fbb6c8c248af49a40c0246b9b249961b0198d880..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/stories/tab_bar.rs +++ /dev/null @@ -1,59 +0,0 @@ -use gpui::Render; -use story::Story; - -use crate::{Tab, TabBar, TabPosition, prelude::*}; - -pub struct TabBarStory; - -impl Render for TabBarStory { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - let tab_count = 20; - let selected_tab_index = 3; - - let tabs = (0..tab_count) - .map(|index| { - Tab::new(index) - .toggle_state(index == selected_tab_index) - .position(if index == 0 { - TabPosition::First - } else if index == tab_count - 1 { - TabPosition::Last - } else { - TabPosition::Middle(index.cmp(&selected_tab_index)) - }) - .child(Label::new(format!("Tab {}", index + 1)).color( - if index == selected_tab_index { - Color::Default - } else { 
- Color::Muted - }, - )) - }) - .collect::>(); - - Story::container(cx) - .child(Story::title_for::(cx)) - .child(Story::label("Default", cx)) - .child( - h_flex().child( - TabBar::new("tab_bar_1") - .start_child( - IconButton::new("navigate_backward", IconName::ArrowLeft) - .icon_size(IconSize::Small), - ) - .start_child( - IconButton::new("navigate_forward", IconName::ArrowRight) - .icon_size(IconSize::Small), - ) - .end_child( - IconButton::new("new", IconName::Plus).icon_size(IconSize::Small), - ) - .end_child( - IconButton::new("split_pane", IconName::Split) - .icon_size(IconSize::Small), - ) - .children(tabs), - ), - ) - } -} From 5052a460b4b36bdc2517bd18470d97534d78ad94 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 20 Nov 2025 08:12:50 +0100 Subject: [PATCH 0240/1030] vim: Fix increment panicking due to invalid utf8 offsets (#43101) Fixes ZED-3ER Release Notes: - Fixed a panic when using vim increment on a multibyte character --- crates/vim/src/normal/increment.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/crates/vim/src/normal/increment.rs b/crates/vim/src/normal/increment.rs index a6a76b22aa16d6fd774dc32a8b4988804ad8e42c..d9ef32deba5a3beb530d9ee42e2a6254df8c253b 100644 --- a/crates/vim/src/normal/increment.rs +++ b/crates/vim/src/normal/increment.rs @@ -210,12 +210,12 @@ fn find_target( .map_or(false, |ch| ch.is_ascii_hexdigit()); let mut pre_char = String::new(); + let next_offset = offset + + snapshot + .chars_at(start_offset) + .next() + .map_or(0, |ch| ch.len_utf8()); // Backward scan to find the start of the number, but stop at start_offset - let next_offset = if offset < snapshot.len() { - offset + 1usize - } else { - offset - }; for ch in snapshot.reversed_chars_at(next_offset) { // Search boundaries if offset.0 == 0 || ch.is_whitespace() || (need_range && offset <= start_offset) { @@ -823,6 +823,14 @@ mod test { cx.set_state("trueˇ 1 2 3", Mode::Normal); cx.simulate_keystrokes("ctrl-a"); cx.assert_state("true ˇ2 2 3", Mode::Normal); + + cx.set_state("falseˇ", Mode::Normal); + cx.simulate_keystrokes("ctrl-a"); + cx.assert_state("truˇe", Mode::Normal); + + cx.set_state("⚡️ˇ⚡️", Mode::Normal); + cx.simulate_keystrokes("ctrl-a"); + cx.assert_state("⚡️ˇ⚡️", Mode::Normal); } #[gpui::test] From 681a56506b2f3a0bc22362afc6a6cda1affc1388 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Raz=20Guzm=C3=A1n=20Macedo?= Date: Thu, 20 Nov 2025 03:00:13 -0600 Subject: [PATCH 0241/1030] project_panel: Add CollapseAllEntries keybinding (#43112) Motivated by user feature requests * https://github.com/zed-industries/zed/issues/6880 * https://discord.com/channels/869392257814519848/1439453067119562793 In analogy with VSCode functionality, we're adding a keybinding to the project panel. This is particularly useful for large monorepos. Release Notes: - Keybinding added for `CollapseAllEntries` when in the `ProjectPanel`.
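As a usage note: if the default bindings added below don't suit your workflow, the action can be rebound in a personal Zed keymap. The `ctrl-shift-left` key in this sketch is purely a hypothetical example and is not part of this patch; only the `ProjectPanel` context and the `project_panel::CollapseAllEntries` action name come from the change itself.

```json
[
  {
    "context": "ProjectPanel",
    "bindings": {
      // Hypothetical user-level rebinding of the new collapse-all action.
      "ctrl-shift-left": "project_panel::CollapseAllEntries"
    }
  }
]
```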
Co-authored-by: mikayla --- assets/keymaps/default-linux.json | 1 + assets/keymaps/default-macos.json | 1 + assets/keymaps/default-windows.json | 1 + 3 files changed, 3 insertions(+) diff --git a/assets/keymaps/default-linux.json b/assets/keymaps/default-linux.json index 253c36f987a0995d51a91ab0eea75f422be83085..c69ce1d40a54fa68ab7a2473827653575e285a4d 100644 --- a/assets/keymaps/default-linux.json +++ b/assets/keymaps/default-linux.json @@ -865,6 +865,7 @@ "context": "ProjectPanel", "bindings": { "left": "project_panel::CollapseSelectedEntry", + "ctrl-left": "project_panel::CollapseAllEntries", "right": "project_panel::ExpandSelectedEntry", "new": "project_panel::NewFile", "ctrl-n": "project_panel::NewFile", diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index 16690a65b1d348a5393c55d4adc808be52e52c99..aa47e2081203e753cf8d14cafa1b01cadaf97327 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -935,6 +935,7 @@ "use_key_equivalents": true, "bindings": { "left": "project_panel::CollapseSelectedEntry", + "cmd-left": "project_panel::CollapseAllEntries", "right": "project_panel::ExpandSelectedEntry", "cmd-n": "project_panel::NewFile", "cmd-d": "project_panel::Duplicate", diff --git a/assets/keymaps/default-windows.json b/assets/keymaps/default-windows.json index 216048d0634a039e3011b908ad20bcf522477885..b37d750a4e9c96d73db5963bf9bc4c5338781f5f 100644 --- a/assets/keymaps/default-windows.json +++ b/assets/keymaps/default-windows.json @@ -879,6 +879,7 @@ "use_key_equivalents": true, "bindings": { "left": "project_panel::CollapseSelectedEntry", + "ctrl-left": "project_panel::CollapseAllEntries", "right": "project_panel::ExpandSelectedEntry", "ctrl-n": "project_panel::NewFile", "alt-n": "project_panel::NewDirectory", From 95cb467cd9d17d424b6784e77b7871ce83469b5d Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 20 Nov 2025 10:31:18 +0100 Subject: [PATCH 0242/1030] multi_buffer: Remove redundant `TypedOffset`/`TypedPoint` (#43139) Release Notes: - N/A *or* Added/Fixed/Improved ... 
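Because the diff below is dense, here is a minimal, self-contained sketch of the marker-newtype pattern that `ExcerptDimension`/`OutputDimension` consolidate on after this change. It is not the crate's actual code: the `Dimension` name and the values in `main` are illustrative assumptions only. The point it demonstrates is that adding a raw delta stays in the same coordinate space, while subtracting two wrapped positions yields a plain delta that can be re-applied to a position in another coordinate space.

```rust
use std::ops::{Add, AddAssign, Sub};

// Illustrative stand-in for a coordinate-space newtype such as `ExcerptDimension<T>`.
// The wrapper keeps offsets from different coordinate spaces from being mixed up.
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
struct Dimension<T>(T);

// Adding a raw delta keeps the result in the same coordinate space.
impl<T: Add<Output = T>> Add<T> for Dimension<T> {
    type Output = Dimension<T>;
    fn add(self, delta: T) -> Dimension<T> {
        Dimension(self.0 + delta)
    }
}

impl<T: AddAssign> AddAssign<T> for Dimension<T> {
    fn add_assign(&mut self, delta: T) {
        self.0 += delta;
    }
}

// Subtracting two wrapped positions yields the raw delta ("overshoot"), which can
// then be re-applied to a position in a different coordinate space.
impl<T, R> Sub for Dimension<T>
where
    T: Sub<T, Output = R>,
{
    type Output = R;
    fn sub(self, other: Dimension<T>) -> R {
        self.0 - other.0
    }
}

fn main() {
    let excerpt_start = Dimension(10usize);
    let excerpt_position = Dimension(14usize);
    let overshoot = excerpt_position - excerpt_start; // plain usize: 4
    let output_start = Dimension(100usize);
    assert_eq!(output_start + overshoot, Dimension(104usize));
}
```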
--- crates/editor/src/items.rs | 2 +- crates/multi_buffer/src/multi_buffer.rs | 468 +++++++++++------------- crates/multi_buffer/src/position.rs | 264 ------------- 3 files changed, 219 insertions(+), 515 deletions(-) delete mode 100644 crates/multi_buffer/src/position.rs diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 0e97b95bb6d2818364ec0fa161f6360c850a6dcc..5fa31f251448a0381501fb475913371170ac495a 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -942,7 +942,7 @@ impl Item for Editor { fn breadcrumbs(&self, variant: &Theme, cx: &App) -> Option> { let cursor = self.selections.newest_anchor().head(); - let multibuffer = &self.buffer().read(cx); + let multibuffer = self.buffer().read(cx); let (buffer_id, symbols) = multibuffer .read(cx) .symbols_containing(cursor, Some(variant.syntax()))?; diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index c1a2fed7e2a253d3469944a7f2c4fa2275c8abd4..1ade63b5d17b9558c6686bc4f95bcd9193938f7d 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -2,13 +2,11 @@ mod anchor; #[cfg(test)] mod multi_buffer_tests; mod path_key; -mod position; mod transaction; use self::transaction::History; pub use anchor::{Anchor, AnchorRangeExt}; -pub use position::{TypedOffset, TypedPoint, TypedRow}; use anyhow::{Result, anyhow}; use buffer_diff::{ @@ -43,7 +41,7 @@ use std::{ io, iter::{self, FromIterator}, mem, - ops::{self, AddAssign, Range, RangeBounds, Sub}, + ops::{self, AddAssign, Range, RangeBounds, Sub, SubAssign}, rc::Rc, str, sync::Arc, @@ -171,8 +169,8 @@ impl MultiBufferDiffHunk { } pub type MultiBufferPoint = Point; -type ExcerptOffset = TypedOffset; -type ExcerptPoint = TypedPoint; +type ExcerptOffset = ExcerptDimension; +type ExcerptPoint = ExcerptDimension; #[derive(Copy, Clone, Debug, Default, Eq, Ord, PartialOrd, PartialEq, Hash, serde::Deserialize)] #[serde(transparent)] @@ -668,9 +666,10 @@ pub struct MultiBufferExcerpt<'a> { excerpt: &'a Excerpt, diff_transforms: sum_tree::Cursor<'a, 'static, DiffTransform, DiffTransforms>, + /// The offset in the multibuffer considering diff transforms. offset: MultiBufferOffset, - // todo unsure about this type - excerpt_offset: MultiBufferOffset, + /// The offset in the multibuffer without diff transforms. 
+ excerpt_offset: ExcerptOffset, buffer_offset: BufferOffset, } @@ -1682,7 +1681,7 @@ impl MultiBuffer { let mut new_excerpts = cursor.slice(&prev_locator, Bias::Right); prev_locator = cursor.start().unwrap_or(Locator::min_ref()).clone(); - let edit_start = ExcerptOffset::new(new_excerpts.summary().text.len.0); + let edit_start = ExcerptDimension(new_excerpts.summary().text.len); new_excerpts.update_last( |excerpt| { excerpt.has_trailing_newline = true; @@ -1726,7 +1725,7 @@ impl MultiBuffer { new_excerpt_ids.push(ExcerptIdMapping { id, locator }, ()); } - let edit_end = ExcerptOffset::new(new_excerpts.summary().text.len.0); + let edit_end = ExcerptDimension(new_excerpts.summary().text.len); let suffix = cursor.suffix(); let changed_trailing_excerpt = suffix.is_empty(); @@ -1783,8 +1782,8 @@ impl MultiBuffer { all_diff_hunks_expanded: _, show_headers: _, } = self.snapshot.get_mut(); - let start = ExcerptOffset::new(0); - let prev_len = ExcerptOffset::new(excerpts.summary().text.len.0); + let start = ExcerptDimension(MultiBufferOffset::ZERO); + let prev_len = ExcerptDimension(excerpts.summary().text.len); *excerpts = Default::default(); *trailing_excerpt_update_count += 1; *is_dirty = false; @@ -1839,10 +1838,10 @@ impl MultiBuffer { let snapshot = self.read(cx); let mut excerpts = snapshot .excerpts - .cursor::, ExcerptDimension>>(()); + .cursor::, ExcerptPoint>>(()); let mut diff_transforms = snapshot .diff_transforms - .cursor::, OutputDimension>>(()); + .cursor::>>(()); diff_transforms.next(); let locators = self .buffers @@ -1856,17 +1855,17 @@ impl MultiBuffer { && excerpt.locator == *locator { let excerpt_start = excerpts.start().1; - let excerpt_end = ExcerptDimension(excerpt_start.0 + excerpt.text_summary.lines); + let excerpt_end = excerpt_start + excerpt.text_summary.lines; diff_transforms.seek_forward(&excerpt_start, Bias::Left); - let overshoot = excerpt_start.0 - diff_transforms.start().0.0; - let start = diff_transforms.start().1.0 + overshoot; + let overshoot = excerpt_start - diff_transforms.start().0; + let start = diff_transforms.start().1 + overshoot; diff_transforms.seek_forward(&excerpt_end, Bias::Right); - let overshoot = excerpt_end.0 - diff_transforms.start().0.0; - let end = diff_transforms.start().1.0 + overshoot; + let overshoot = excerpt_end - diff_transforms.start().0; + let end = diff_transforms.start().1 + overshoot; - result.push(start..end) + result.push(start.0..end.0) } } result @@ -2063,14 +2062,14 @@ impl MultiBuffer { // When removing the last excerpt, remove the trailing newline from // the previous excerpt. - if cursor.item().is_none() && old_start.value > 0 { - old_start.value -= 1; + if cursor.item().is_none() && old_start > MultiBufferOffset::ZERO { + old_start -= 1; new_excerpts.update_last(|e| e.has_trailing_newline = false, ()); } // Push an edit for the removal of this run of excerpts. 
let old_end = cursor.start().1; - let new_start = ExcerptOffset::new(new_excerpts.summary().text.len.0); + let new_start = ExcerptDimension(new_excerpts.summary().text.len); edits.push(Edit { old: old_start..old_end, new: new_start..new_start, @@ -2224,17 +2223,13 @@ impl MultiBuffer { continue; } let excerpt_start = cursor.start().1; - let excerpt_len = ExcerptOffset::new(excerpt.text_summary.len); - let diff_change_start_in_excerpt = ExcerptOffset::new( - diff_change_range - .start - .saturating_sub(excerpt_buffer_range.start), - ); - let diff_change_end_in_excerpt = ExcerptOffset::new( - diff_change_range - .end - .saturating_sub(excerpt_buffer_range.start), - ); + let excerpt_len = excerpt.text_summary.len; + let diff_change_start_in_excerpt = diff_change_range + .start + .saturating_sub(excerpt_buffer_range.start); + let diff_change_end_in_excerpt = diff_change_range + .end + .saturating_sub(excerpt_buffer_range.start); let edit_start = excerpt_start + diff_change_start_in_excerpt.min(excerpt_len); let edit_end = excerpt_start + diff_change_end_in_excerpt.min(excerpt_len); excerpt_edits.push(Edit { @@ -2578,7 +2573,7 @@ impl MultiBuffer { new_excerpts.append(prefix, ()); let mut excerpt = cursor.item().unwrap().clone(); - let old_text_len = ExcerptOffset::new(excerpt.text_summary.len); + let old_text_len = excerpt.text_summary.len; excerpt.range.context.start = range.start; excerpt.range.context.end = range.end; @@ -2588,9 +2583,9 @@ impl MultiBuffer { .buffer .text_summary_for_range(excerpt.range.context.clone()); - let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len.0); + let new_start_offset = ExcerptDimension(new_excerpts.summary().text.len); let old_start_offset = cursor.start().1; - let new_text_len = ExcerptOffset::new(excerpt.text_summary.len); + let new_text_len = excerpt.text_summary.len; let edit = Edit { old: old_start_offset..old_start_offset + old_text_len, new: new_start_offset..new_start_offset + new_text_len, @@ -2657,7 +2652,7 @@ impl MultiBuffer { new_excerpts.append(prefix, ()); let mut excerpt = cursor.item().unwrap().clone(); - let old_text_len = ExcerptOffset::new(excerpt.text_summary.len); + let old_text_len = excerpt.text_summary.len; let up_line_count = if direction.should_expand_up() { line_count @@ -2694,9 +2689,9 @@ impl MultiBuffer { .buffer .text_summary_for_range(excerpt.range.context.clone()); - let new_start_offset = ExcerptOffset::new(new_excerpts.summary().text.len.0); + let new_start_offset = ExcerptDimension(new_excerpts.summary().text.len); let old_start_offset = cursor.start().1; - let new_text_len = ExcerptOffset::new(excerpt.text_summary.len); + let new_text_len = excerpt.text_summary.len; let edit = Edit { old: old_start_offset..old_start_offset + old_text_len, new: new_start_offset..new_start_offset + new_text_len, @@ -2856,11 +2851,11 @@ impl MultiBuffer { .map(|edit| { let excerpt_old_start = cursor.start().1; let excerpt_new_start = - ExcerptOffset::new(new_excerpts.summary().text.len.0); - let old_start = excerpt_old_start + ExcerptOffset::new(edit.old.start); - let old_end = excerpt_old_start + ExcerptOffset::new(edit.old.end); - let new_start = excerpt_new_start + ExcerptOffset::new(edit.new.start); - let new_end = excerpt_new_start + ExcerptOffset::new(edit.new.end); + ExcerptDimension(new_excerpts.summary().text.len); + let old_start = excerpt_old_start + edit.old.start; + let old_end = excerpt_old_start + edit.old.end; + let new_start = excerpt_new_start + edit.new.start; + let new_end = excerpt_new_start + 
edit.new.end; Edit { old: old_start..old_end, new: new_start..new_end, @@ -2933,7 +2928,7 @@ impl MultiBuffer { } // Compute the start of the edit in output coordinates. - let edit_start_overshoot = (edit.old.start - old_diff_transforms.start().0).value; + let edit_start_overshoot = edit.old.start - old_diff_transforms.start().0; let edit_old_start = old_diff_transforms.start().1 + edit_start_overshoot; let edit_new_start = MultiBufferOffset((edit_old_start.0 as isize + output_delta) as usize); @@ -2952,9 +2947,8 @@ impl MultiBuffer { // Compute the end of the edit in output coordinates. let edit_old_end_overshoot = edit.old.end - old_diff_transforms.start().0; let edit_new_end_overshoot = edit.new.end - new_diff_transforms.summary().excerpt_len(); - let edit_old_end = old_diff_transforms.start().1 + edit_old_end_overshoot.value; - let edit_new_end = - new_diff_transforms.summary().output.len + edit_new_end_overshoot.value; + let edit_old_end = old_diff_transforms.start().1 + edit_old_end_overshoot; + let edit_new_end = new_diff_transforms.summary().output.len + edit_new_end_overshoot; let output_edit = Edit { old: edit_old_start..edit_old_end, new: edit_new_start..edit_new_end, @@ -3026,30 +3020,30 @@ impl MultiBuffer { } fn recompute_diff_transforms_for_edit( - edit: &Edit>, - excerpts: &mut Cursor>, + edit: &Edit, + excerpts: &mut Cursor, old_diff_transforms: &mut Cursor< DiffTransform, - Dimensions, MultiBufferOffset>, + Dimensions, >, new_diff_transforms: &mut SumTree, - end_of_current_insert: &mut Option<(TypedOffset, DiffTransformHunkInfo)>, + end_of_current_insert: &mut Option<(ExcerptOffset, DiffTransformHunkInfo)>, old_expanded_hunks: &mut HashSet, snapshot: &MultiBufferSnapshot, change_kind: DiffChangeKind, ) -> bool { log::trace!( "recomputing diff transform for edit {:?} => {:?}", - edit.old.start.value..edit.old.end.value, - edit.new.start.value..edit.new.end.value + edit.old.start..edit.old.end, + edit.new.start..edit.new.end ); // Record which hunks were previously expanded. 
while let Some(item) = old_diff_transforms.item() { if let Some(hunk_info) = item.hunk_info() { log::trace!( - "previously expanded hunk at {}", - old_diff_transforms.start().0 + "previously expanded hunk at {:?}", + old_diff_transforms.start() ); old_expanded_hunks.insert(hunk_info); } @@ -3076,13 +3070,13 @@ impl MultiBuffer { if let Some(diff) = snapshot.diffs.get(&excerpt.buffer_id) { let buffer = &excerpt.buffer; let excerpt_start = *excerpts.start(); - let excerpt_end = excerpt_start + ExcerptOffset::new(excerpt.text_summary.len); + let excerpt_end = excerpt_start + excerpt.text_summary.len; let excerpt_buffer_start = excerpt.range.context.start.to_offset(buffer); let excerpt_buffer_end = excerpt_buffer_start + excerpt.text_summary.len; let edit_buffer_start = - excerpt_buffer_start + edit.new.start.value.saturating_sub(excerpt_start.value); + excerpt_buffer_start + edit.new.start.saturating_sub(excerpt_start); let edit_buffer_end = - excerpt_buffer_start + edit.new.end.value.saturating_sub(excerpt_start.value); + excerpt_buffer_start + edit.new.end.saturating_sub(excerpt_start); let edit_buffer_end = edit_buffer_end.min(excerpt_buffer_end); let edit_anchor_range = buffer.anchor_before(edit_buffer_start)..buffer.anchor_after(edit_buffer_end); @@ -3105,13 +3099,9 @@ impl MultiBuffer { }; let hunk_excerpt_start = excerpt_start - + ExcerptOffset::new( - hunk_buffer_range.start.saturating_sub(excerpt_buffer_start), - ); - let hunk_excerpt_end = excerpt_end.min( - excerpt_start - + ExcerptOffset::new(hunk_buffer_range.end - excerpt_buffer_start), - ); + + hunk_buffer_range.start.saturating_sub(excerpt_buffer_start); + let hunk_excerpt_end = excerpt_end + .min(excerpt_start + (hunk_buffer_range.end - excerpt_buffer_start)); Self::push_buffer_content_transform( snapshot, @@ -3143,7 +3133,7 @@ impl MultiBuffer { did_expand_hunks = true; log::trace!( "expanding hunk {:?}, excerpt:{:?}", - hunk_excerpt_start.value..hunk_excerpt_end.value, + hunk_excerpt_start..hunk_excerpt_end, excerpt.id ); @@ -4152,7 +4142,8 @@ impl MultiBufferSnapshot { language_aware: bool, ) -> MultiBufferChunks<'_> { let mut chunks = MultiBufferChunks { - excerpt_offset_range: ExcerptOffset::new(0)..ExcerptOffset::new(0), + excerpt_offset_range: ExcerptDimension(MultiBufferOffset::ZERO) + ..ExcerptDimension(MultiBufferOffset::ZERO), range: MultiBufferOffset::ZERO..MultiBufferOffset::ZERO, excerpts: self.excerpts.cursor(()), diff_transforms: self.diff_transforms.cursor(()), @@ -4528,8 +4519,8 @@ impl MultiBufferSnapshot { let mut result = match first_transform { DiffTransform::BufferContent { .. } => { - let excerpt_start = cursor.start().1 + ExcerptOffset::new(start_overshoot); - let excerpt_end = cursor.start().1 + ExcerptOffset::new(end_overshoot); + let excerpt_start = cursor.start().1 + start_overshoot; + let excerpt_end = cursor.start().1 + end_overshoot; self.text_summary_for_excerpt_offset_range(excerpt_start..excerpt_end) } DiffTransform::DeletedHunk { @@ -4582,7 +4573,7 @@ impl MultiBufferSnapshot { let overshoot = range.end - cursor.start().0; let suffix = match last_transform { DiffTransform::BufferContent { .. 
} => { - let end = cursor.start().1 + ExcerptOffset::new(overshoot); + let end = cursor.start().1 + overshoot; self.text_summary_for_excerpt_offset_range::(cursor.start().1..end) } DiffTransform::DeletedHunk { @@ -4625,13 +4616,13 @@ impl MultiBufferSnapshot { if let Some(excerpt) = cursor.item() { let mut end_before_newline = cursor.end(); if excerpt.has_trailing_newline { - end_before_newline -= ExcerptOffset::new(1); + end_before_newline -= 1; } let excerpt_start = excerpt.range.context.start.to_offset(&excerpt.buffer); - let start_in_excerpt = excerpt_start + (range.start - *cursor.start()).value; + let start_in_excerpt = excerpt_start + (range.start - *cursor.start()); let end_in_excerpt = - excerpt_start + (cmp::min(end_before_newline, range.end) - *cursor.start()).value; + excerpt_start + (cmp::min(end_before_newline, range.end) - *cursor.start()); summary.add_text_dim( &excerpt .buffer @@ -4655,7 +4646,7 @@ impl MultiBufferSnapshot { range.end = cmp::max(*cursor.start(), range.end); let excerpt_start = excerpt.range.context.start.to_offset(&excerpt.buffer); - let end_in_excerpt = excerpt_start + (range.end - *cursor.start()).value; + let end_in_excerpt = excerpt_start + (range.end - *cursor.start()); summary.add_text_dim( &excerpt .buffer @@ -4683,7 +4674,7 @@ impl MultiBufferSnapshot { fn resolve_summary_for_anchor( &self, anchor: &Anchor, - excerpt_position: MBD, + excerpt_position: ExcerptDimension, diff_transforms: &mut Cursor< DiffTransform, Dimensions, OutputDimension>, @@ -4693,15 +4684,15 @@ impl MultiBufferSnapshot { MBD: MultiBufferDimension + Ord + Sub + AddAssign<::Output>, { loop { - let transform_end_position = diff_transforms.end().0.0; + let transform_end_position = diff_transforms.end().0; let at_transform_end = - excerpt_position == transform_end_position && diff_transforms.item().is_some(); + transform_end_position == excerpt_position && diff_transforms.item().is_some(); if at_transform_end && anchor.text_anchor.bias == Bias::Right { diff_transforms.next(); continue; } - let mut position = diff_transforms.start().1.0; + let mut position = diff_transforms.start().1; match diff_transforms.item() { Some(DiffTransform::DeletedHunk { buffer_id, @@ -4721,7 +4712,7 @@ impl MultiBufferSnapshot { .text_summary_for_range::( base_text_byte_range.start..base_text_offset, ); - position.add_text_dim(&position_in_hunk); + position.0.add_text_dim(&position_in_hunk); } else if at_transform_end { diff_transforms.next(); continue; @@ -4733,12 +4724,12 @@ impl MultiBufferSnapshot { diff_transforms.next(); continue; } - let overshoot = excerpt_position - diff_transforms.start().0.0; + let overshoot = excerpt_position - diff_transforms.start().0; position += overshoot; } } - return position; + return position.0; } } @@ -4766,7 +4757,7 @@ impl MultiBufferSnapshot { excerpt.buffer.offset_for_anchor(&anchor.text_anchor), ); if buffer_position > excerpt_buffer_start { - position.value += buffer_position - excerpt_buffer_start; + position += buffer_position - excerpt_buffer_start; } } position @@ -4809,7 +4800,7 @@ impl MultiBufferSnapshot { cursor.prev(); } - let excerpt_start_position = MBD::from_summary(&cursor.start().text); + let excerpt_start_position = ExcerptDimension(MBD::from_summary(&cursor.start().text)); if let Some(excerpt) = cursor.item() { if excerpt.id != excerpt_id && excerpt_id != ExcerptId::max() { let position = self.resolve_summary_for_anchor( @@ -4842,9 +4833,8 @@ impl MultiBufferSnapshot { position += summary - excerpt_buffer_start; } - if position > 
diff_transforms_cursor.start().0.0 { - diff_transforms_cursor - .seek_forward(&ExcerptDimension(position), Bias::Left); + if diff_transforms_cursor.start().0 < position { + diff_transforms_cursor.seek_forward(&position, Bias::Left); } summaries.push(self.resolve_summary_for_anchor( @@ -4854,8 +4844,7 @@ impl MultiBufferSnapshot { )); } } else { - diff_transforms_cursor - .seek_forward(&ExcerptDimension(excerpt_start_position), Bias::Left); + diff_transforms_cursor.seek_forward(&excerpt_start_position, Bias::Left); let position = self.resolve_summary_for_anchor( &Anchor::max(), excerpt_start_position, @@ -5043,7 +5032,7 @@ impl MultiBufferSnapshot { bias = Bias::Left; } } else { - excerpt_offset += ExcerptOffset::new(offset_in_transform); + excerpt_offset += MultiBufferOffset(offset_in_transform); }; let mut excerpts = self @@ -5054,7 +5043,7 @@ impl MultiBufferSnapshot { excerpts.prev(); } if let Some(excerpt) = excerpts.item() { - let mut overshoot = excerpt_offset.saturating_sub(excerpts.start().0).value; + let mut overshoot = excerpt_offset.saturating_sub(excerpts.start().0); if excerpt.has_trailing_newline && excerpt_offset == excerpts.end().0 { overshoot -= 1; bias = Bias::Right; @@ -5068,7 +5057,8 @@ impl MultiBufferSnapshot { Some(diff_base_anchor) => anchor.with_diff_base_anchor(diff_base_anchor), None => anchor, } - } else if excerpt_offset.is_zero() && bias == Bias::Left { + } else if excerpt_offset == ExcerptDimension(MultiBufferOffset::ZERO) && bias == Bias::Left + { Anchor::min() } else { Anchor::max() @@ -5178,7 +5168,7 @@ impl MultiBufferSnapshot { let start_locator = self.excerpt_locator_for_id(excerpt_id); let mut excerpts = self .excerpts - .cursor::, ExcerptDimension>>(()); + .cursor::, ExcerptOffset>>(()); excerpts.seek(&Some(start_locator), Bias::Left); excerpts.prev(); @@ -5195,7 +5185,7 @@ impl MultiBufferSnapshot { excerpt, offset: diff_transforms.start().output_dimension.0, buffer_offset: BufferOffset(excerpt.range.context.start.to_offset(&excerpt.buffer)), - excerpt_offset: excerpts.start().1.0, + excerpt_offset: excerpts.start().1, diff_transforms, }) } @@ -6098,7 +6088,7 @@ impl MultiBufferSnapshot { pub fn range_for_excerpt(&self, excerpt_id: ExcerptId) -> Option> { let mut cursor = self .excerpts - .cursor::, ExcerptDimension>>(()); + .cursor::, ExcerptPoint>>(()); let locator = self.excerpt_locator_for_id(excerpt_id); let mut sought_exact = cursor.seek(&Some(locator), Bias::Left); if cursor.item().is_none() && excerpt_id == ExcerptId::max() { @@ -6112,14 +6102,14 @@ impl MultiBufferSnapshot { let end = cursor.end().1; let mut diff_transforms = self .diff_transforms - .cursor::, OutputDimension>>(()); + .cursor::>>(()); diff_transforms.seek(&start, Bias::Left); - let overshoot = start.0 - diff_transforms.start().0.0; - let start = diff_transforms.start().1.0 + overshoot; + let overshoot = start - diff_transforms.start().0; + let start = diff_transforms.start().1 + overshoot; diff_transforms.seek(&end, Bias::Right); - let overshoot = end.0 - diff_transforms.start().0.0; - let end = diff_transforms.start().1.0 + overshoot; - Some(start..end) + let overshoot = end - diff_transforms.start().0; + let end = diff_transforms.start().1 + overshoot; + Some(start.0..end.0) } else { None } @@ -6161,7 +6151,7 @@ impl MultiBufferSnapshot { let region = cursor.region()?; let offset = region.range.start; let buffer_offset = start_excerpt.buffer_start_offset(); - let excerpt_offset = cursor.excerpts.start().0; + let excerpt_offset = *cursor.excerpts.start(); 
Some(MultiBufferExcerpt { diff_transforms: cursor.diff_transforms, excerpt: start_excerpt, @@ -6359,19 +6349,19 @@ where BD: TextDimension + AddAssign<::Output>, { fn seek(&mut self, position: &MBD) { + let position = OutputDimension(*position); self.cached_region.take(); - self.diff_transforms - .seek(&OutputDimension(*position), Bias::Right); + self.diff_transforms.seek(&position, Bias::Right); if self.diff_transforms.item().is_none() - && *position == self.diff_transforms.start().output_dimension.0 + && self.diff_transforms.start().output_dimension == position { self.diff_transforms.prev(); } let mut excerpt_position = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { - let overshoot = *position - self.diff_transforms.start().output_dimension.0; - excerpt_position.0 += overshoot; + let overshoot = position - self.diff_transforms.start().output_dimension; + excerpt_position += overshoot; } self.excerpts.seek(&excerpt_position, Bias::Right); @@ -6381,19 +6371,19 @@ where } fn seek_forward(&mut self, position: &MBD) { + let position = OutputDimension(*position); self.cached_region.take(); - self.diff_transforms - .seek_forward(&OutputDimension(*position), Bias::Right); + self.diff_transforms.seek_forward(&position, Bias::Right); if self.diff_transforms.item().is_none() - && *position == self.diff_transforms.start().output_dimension.0 + && self.diff_transforms.start().output_dimension == position { self.diff_transforms.prev(); } - let overshoot = *position - self.diff_transforms.start().output_dimension.0; + let overshoot = position - self.diff_transforms.start().output_dimension; let mut excerpt_position = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { - excerpt_position.0 += overshoot; + excerpt_position += overshoot; } self.excerpts.seek_forward(&excerpt_position, Bias::Right); @@ -6521,7 +6511,7 @@ where let buffer_context_start = excerpt.range.context.start.summary::(buffer); let mut buffer_start = buffer_context_start; let overshoot = self.diff_transforms.end().excerpt_dimension - *self.excerpts.start(); - buffer_start += overshoot.0; + buffer_start += overshoot; Some(buffer_start) } @@ -6567,11 +6557,11 @@ where if self.diff_transforms.start().excerpt_dimension < *self.excerpts.start() { let overshoot = *self.excerpts.start() - self.diff_transforms.start().excerpt_dimension; - start += overshoot.0; + start += overshoot; } else { let overshoot = self.diff_transforms.start().excerpt_dimension - *self.excerpts.start(); - buffer_start += overshoot.0; + buffer_start += overshoot; } let mut end; @@ -6582,13 +6572,13 @@ where self.diff_transforms.end().excerpt_dimension - *self.excerpts.start(); end = self.diff_transforms.end().output_dimension.0; buffer_end = buffer_context_start; - buffer_end += overshoot.0; + buffer_end += overshoot; has_trailing_newline = false; } else { let overshoot = self.excerpts.end() - self.diff_transforms.start().excerpt_dimension; end = self.diff_transforms.start().output_dimension.0; - end += overshoot.0; + end += overshoot; buffer_end = excerpt.range.context.end.summary::(buffer); has_trailing_newline = excerpt.has_trailing_newline; }; @@ -6772,9 +6762,9 @@ impl<'a> MultiBufferExcerpt<'a> { fn map_offset_to_buffer_internal(&self, offset: MultiBufferOffset) -> BufferOffset { let mut excerpt_offset = self.diff_transforms.start().excerpt_dimension; if let Some(DiffTransform::BufferContent { .. 
}) = self.diff_transforms.item() { - excerpt_offset.0 += offset - self.diff_transforms.start().output_dimension.0; + excerpt_offset += offset - self.diff_transforms.start().output_dimension.0; }; - let offset_in_excerpt = excerpt_offset.0.0.saturating_sub(self.excerpt_offset.0); + let offset_in_excerpt = excerpt_offset.saturating_sub(self.excerpt_offset); self.buffer_offset + offset_in_excerpt } @@ -6797,24 +6787,25 @@ impl<'a> MultiBufferExcerpt<'a> { } let overshoot = buffer_range.start - self.buffer_offset; let excerpt_offset = self.excerpt_offset + overshoot; - let excerpt_seek_dim = ExcerptDimension(excerpt_offset); + let excerpt_seek_dim = excerpt_offset; self.diff_transforms.seek(&excerpt_seek_dim, Bias::Right); - if excerpt_offset < self.diff_transforms.start().excerpt_dimension.0 { + if self.diff_transforms.start().excerpt_dimension > excerpt_offset { log::warn!( "Attempting to map a range from a buffer offset that starts before the current buffer offset" ); return self.offset..self.offset; } - let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension.0; + let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension; let start = self.diff_transforms.start().output_dimension.0 + overshoot; - let end = if buffer_range.end > buffer_range.start { + let end = if buffer_range.start < buffer_range.end { let overshoot = buffer_range.end - self.buffer_offset; let excerpt_offset = self.excerpt_offset + overshoot; - let excerpt_seek_dim = ExcerptDimension(excerpt_offset); + let excerpt_seek_dim = excerpt_offset; self.diff_transforms .seek_forward(&excerpt_seek_dim, Bias::Right); - let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension.0; + let overshoot = excerpt_offset - self.diff_transforms.start().excerpt_dimension; + // todo(lw): Clamp end to the excerpt boundaries self.diff_transforms.start().output_dimension.0 + overshoot } else { start @@ -6940,7 +6931,7 @@ impl sum_tree::Item for DiffTransform { impl DiffTransformSummary { fn excerpt_len(&self) -> ExcerptOffset { - ExcerptOffset::new(self.input.len.0) + ExcerptDimension(self.input.len) } } @@ -6981,50 +6972,6 @@ impl sum_tree::ContextLessSummary for ExcerptSummary { } } -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for ExcerptOffset { - fn zero(_cx: ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { - self.value += summary.text.len.0; - } -} -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for MultiBufferOffset { - fn zero((): ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { - *self += summary.text.len; - } -} - -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for MultiBufferOffsetUtf16 { - fn zero((): ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { - self.0 += summary.text.len_utf16; - } -} -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for PointUtf16 { - fn zero((): ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, (): ()) { - TextDimension::add_assign(self, &summary.text.lines_utf16()) - } -} - -impl sum_tree::SeekTarget<'_, ExcerptSummary, ExcerptSummary> for ExcerptOffset { - fn cmp(&self, cursor_location: &ExcerptSummary, _: ()) -> cmp::Ordering { - Ord::cmp(&self.value, &cursor_location.text.len.0) - } -} - impl<'a> sum_tree::SeekTarget<'a, ExcerptSummary, Option<&'a Locator>> for Locator { fn cmp(&self, cursor_location: &Option<&'a 
Locator>, _: ()) -> cmp::Ordering { Ord::cmp(&Some(self), cursor_location) @@ -7037,28 +6984,9 @@ impl sum_tree::SeekTarget<'_, ExcerptSummary, ExcerptSummary> for Locator { } } -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for ExcerptPoint { - fn zero(_cx: ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { - self.value += summary.text.lines; - } -} - -impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for Point { - fn zero(_cx: ()) -> Self { - Default::default() - } - - fn add_summary(&mut self, summary: &'a ExcerptSummary, _: ()) { - TextDimension::add_assign(self, &summary.text.lines); - } -} - -impl<'a, MBD: MultiBufferDimension + Default> sum_tree::Dimension<'a, ExcerptSummary> - for ExcerptDimension +impl<'a, MBD> sum_tree::Dimension<'a, ExcerptSummary> for ExcerptDimension +where + MBD: MultiBufferDimension + Default, { fn zero(_: ()) -> Self { ExcerptDimension(MBD::default()) @@ -7090,68 +7018,128 @@ impl<'a> sum_tree::Dimension<'a, ExcerptSummary> for Option { } #[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] -struct ExcerptDimension(T); +struct OutputDimension(T); + +impl PartialEq for OutputDimension { + fn eq(&self, other: &T) -> bool { + self.0 == *other + } +} -impl ops::Sub for ExcerptDimension +impl PartialOrd for OutputDimension { + fn partial_cmp(&self, other: &T) -> Option { + self.0.partial_cmp(other) + } +} + +impl ops::Sub> for OutputDimension where - T: ops::Sub, + T: ops::Sub, { - type Output = ExcerptDimension; + type Output = R; - fn sub(self, other: Self) -> Self::Output { - ExcerptDimension(self.0 - other.0) + fn sub(self, other: OutputDimension) -> Self::Output { + self.0 - other.0 } } -impl AddAssign for ExcerptDimension +impl ops::Add for OutputDimension where - T: AddAssign, + T: ops::Add, { - fn add_assign(&mut self, other: Self) { - self.0 += other.0; + type Output = OutputDimension; + + fn add(self, other: U) -> Self::Output { + OutputDimension(self.0 + other) } } -#[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] -struct OutputDimension(T); +impl AddAssign for OutputDimension +where + T: AddAssign, +{ + fn add_assign(&mut self, other: U) { + self.0 += other; + } +} -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for MultiBufferOffset { - fn zero(_: ()) -> Self { - MultiBufferOffset::ZERO +impl SubAssign for OutputDimension +where + T: SubAssign, +{ + fn sub_assign(&mut self, other: U) { + self.0 -= other; } +} - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output.len; +#[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Debug)] +struct ExcerptDimension(T); + +impl PartialEq for ExcerptDimension { + fn eq(&self, other: &T) -> bool { + self.0 == *other } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for MultiBufferOffsetUtf16 { - fn zero(_: ()) -> Self { - MultiBufferOffsetUtf16(OffsetUtf16(0)) +impl PartialOrd for ExcerptDimension { + fn partial_cmp(&self, other: &T) -> Option { + self.0.partial_cmp(other) } +} - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.0 += summary.output.len_utf16; +impl ExcerptOffset { + fn saturating_sub(self, other: ExcerptOffset) -> usize { + self.0.saturating_sub(other.0) } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for ExcerptOffset { - fn zero(_: ()) -> Self { - ExcerptOffset::new(0) +impl ops::Sub> for ExcerptDimension +where + T: ops::Sub, +{ + type Output = R; + + fn sub(self, other: ExcerptDimension) -> Self::Output { + self.0 - 
other.0 } +} - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.value += summary.input.len.0; +impl ops::Add for ExcerptDimension +where + T: ops::Add, +{ + type Output = ExcerptDimension; + + fn add(self, other: U) -> Self::Output { + ExcerptDimension(self.0 + other) + } +} + +impl AddAssign for ExcerptDimension +where + T: AddAssign, +{ + fn add_assign(&mut self, other: U) { + self.0 += other; + } +} + +impl SubAssign for ExcerptDimension +where + T: SubAssign, +{ + fn sub_assign(&mut self, other: U) { + self.0 -= other; } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for ExcerptPoint { +impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for MultiBufferOffset { fn zero(_: ()) -> Self { - ExcerptPoint::new(0, 0) + MultiBufferOffset::ZERO } fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - self.value += summary.input.lines; + *self += summary.output.len; } } @@ -7209,26 +7197,6 @@ impl<'a, MBD: MultiBufferDimension> sum_tree::Dimension<'a, DiffTransformSummary } } -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for Point { - fn zero(_: ()) -> Self { - Point::new(0, 0) - } - - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output.lines - } -} - -impl<'a> sum_tree::Dimension<'a, DiffTransformSummary> for PointUtf16 { - fn zero(_: ()) -> Self { - PointUtf16::new(0, 0) - } - - fn add_summary(&mut self, summary: &'a DiffTransformSummary, _: ()) { - *self += summary.output.lines_utf16() - } -} - impl MultiBufferRows<'_> { pub fn seek(&mut self, MultiBufferRow(row): MultiBufferRow) { self.point = Point::new(row, 0); @@ -7368,14 +7336,14 @@ impl<'a> MultiBufferChunks<'a> { let mut excerpt_end = self.diff_transforms.start().1; if let Some(DiffTransform::BufferContent { .. }) = self.diff_transforms.item() { let overshoot = range.end - self.diff_transforms.start().0; - excerpt_end.value += overshoot; + excerpt_end += overshoot; } self.diff_transforms.seek(&range.start, Bias::Right); let mut excerpt_start = self.diff_transforms.start().1; if let Some(DiffTransform::BufferContent { .. 
}) = self.diff_transforms.item() { let overshoot = range.start - self.diff_transforms.start().0; - excerpt_start.value += overshoot; + excerpt_start += overshoot; } self.seek_to_excerpt_offset_range(excerpt_start..excerpt_end); @@ -7395,13 +7363,13 @@ impl<'a> MultiBufferChunks<'a> { { excerpt.seek_chunks( excerpt_chunks, - (self.excerpt_offset_range.start - excerpt_start).value - ..(self.excerpt_offset_range.end - excerpt_start).value, + (self.excerpt_offset_range.start - excerpt_start) + ..(self.excerpt_offset_range.end - excerpt_start), ); } else { self.excerpt_chunks = Some(excerpt.chunks_in_range( - (self.excerpt_offset_range.start - excerpt_start).value - ..(self.excerpt_offset_range.end - excerpt_start).value, + (self.excerpt_offset_range.start - excerpt_start) + ..(self.excerpt_offset_range.end - excerpt_start), self.language_aware, )); } @@ -7415,13 +7383,13 @@ impl<'a> MultiBufferChunks<'a> { if self.excerpt_offset_range.is_empty() { return None; } else if let Some(chunk) = self.excerpt_chunks.as_mut()?.next() { - self.excerpt_offset_range.start.value += chunk.text.len(); + self.excerpt_offset_range.start += chunk.text.len(); return Some(chunk); } else { self.excerpts.next(); let excerpt = self.excerpts.item()?; self.excerpt_chunks = Some(excerpt.chunks_in_range( - 0..(self.excerpt_offset_range.end - *self.excerpts.start()).value, + 0..(self.excerpt_offset_range.end - *self.excerpts.start()), self.language_aware, )); } diff --git a/crates/multi_buffer/src/position.rs b/crates/multi_buffer/src/position.rs deleted file mode 100644 index 8a3ce78d0d9f7a6880dbc3202c002507c800b7b0..0000000000000000000000000000000000000000 --- a/crates/multi_buffer/src/position.rs +++ /dev/null @@ -1,264 +0,0 @@ -use std::{ - fmt::{Debug, Display}, - marker::PhantomData, - ops::{Add, AddAssign, Sub, SubAssign}, -}; -use text::Point; - -#[repr(transparent)] -pub struct TypedOffset { - pub value: usize, - _marker: PhantomData, -} - -#[repr(transparent)] -pub struct TypedPoint { - pub value: Point, - _marker: PhantomData, -} - -#[repr(transparent)] -pub struct TypedRow { - pub value: u32, - _marker: PhantomData, -} - -impl TypedOffset { - pub fn new(offset: usize) -> Self { - Self { - value: offset, - _marker: PhantomData, - } - } - - pub fn saturating_sub(self, n: TypedOffset) -> Self { - Self { - value: self.value.saturating_sub(n.value), - _marker: PhantomData, - } - } - - pub fn zero() -> Self { - Self::new(0) - } - - pub fn is_zero(&self) -> bool { - self.value == 0 - } -} - -impl TypedPoint { - pub fn new(row: u32, column: u32) -> Self { - Self { - value: Point::new(row, column), - _marker: PhantomData, - } - } - - pub fn wrap(point: Point) -> Self { - Self { - value: point, - _marker: PhantomData, - } - } - - pub fn row(&self) -> u32 { - self.value.row - } - - pub fn column(&self) -> u32 { - self.value.column - } - - pub fn zero() -> Self { - Self::wrap(Point::zero()) - } - - pub fn is_zero(&self) -> bool { - self.value.is_zero() - } -} - -impl TypedRow { - pub fn new(row: u32) -> Self { - Self { - value: row, - _marker: PhantomData, - } - } -} - -impl Copy for TypedOffset {} -impl Copy for TypedPoint {} -impl Copy for TypedRow {} - -impl Clone for TypedOffset { - fn clone(&self) -> Self { - *self - } -} -impl Clone for TypedPoint { - fn clone(&self) -> Self { - *self - } -} -impl Clone for TypedRow { - fn clone(&self) -> Self { - *self - } -} - -impl Default for TypedOffset { - fn default() -> Self { - Self::new(0) - } -} -impl Default for TypedPoint { - fn default() -> Self { - 
Self::wrap(Point::default()) - } -} -impl Default for TypedRow { - fn default() -> Self { - Self::new(0) - } -} - -impl PartialOrd for TypedOffset { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for TypedPoint { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} -impl PartialOrd for TypedRow { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for TypedOffset { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.value.cmp(&other.value) - } -} -impl Ord for TypedPoint { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.value.cmp(&other.value) - } -} -impl Ord for TypedRow { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.value.cmp(&other.value) - } -} - -impl PartialEq for TypedOffset { - fn eq(&self, other: &Self) -> bool { - self.value == other.value - } -} -impl PartialEq for TypedPoint { - fn eq(&self, other: &Self) -> bool { - self.value == other.value - } -} -impl PartialEq for TypedRow { - fn eq(&self, other: &Self) -> bool { - self.value == other.value - } -} - -impl Eq for TypedOffset {} -impl Eq for TypedPoint {} -impl Eq for TypedRow {} - -impl Debug for TypedOffset { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}Offset({})", type_name::(), self.value) - } -} -impl Debug for TypedPoint { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}Point({}, {})", - type_name::(), - self.value.row, - self.value.column - ) - } -} -impl Debug for TypedRow { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}Row({})", type_name::(), self.value) - } -} - -impl Display for TypedOffset { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(&self.value, f) - } -} -impl Display for TypedRow { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(&self.value, f) - } -} - -fn type_name() -> &'static str { - std::any::type_name::().split("::").last().unwrap() -} - -impl Add> for TypedOffset { - type Output = Self; - - fn add(self, other: Self) -> Self { - TypedOffset::new(self.value + other.value) - } -} -impl Add> for TypedPoint { - type Output = Self; - - fn add(self, other: Self) -> Self { - TypedPoint::wrap(self.value + other.value) - } -} - -impl Sub> for TypedOffset { - type Output = Self; - fn sub(self, other: Self) -> Self { - TypedOffset::new(self.value - other.value) - } -} -impl Sub> for TypedPoint { - type Output = Self; - fn sub(self, other: Self) -> Self { - TypedPoint::wrap(self.value - other.value) - } -} - -impl AddAssign> for TypedOffset { - fn add_assign(&mut self, other: Self) { - self.value += other.value; - } -} -impl AddAssign> for TypedPoint { - fn add_assign(&mut self, other: Self) { - self.value += other.value; - } -} - -impl SubAssign for TypedOffset { - fn sub_assign(&mut self, other: Self) { - self.value -= other.value; - } -} -impl SubAssign for TypedRow { - fn sub_assign(&mut self, other: Self) { - self.value -= other.value; - } -} From 28f50977cfd12479a0d918887e090e2665d56e7f Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 07:00:01 -0300 Subject: [PATCH 0243/1030] agent_ui: Add support for setting a model as the default for external agents (#43122) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR builds on top of the 
`default_mode` feature where it was possible to set an external agent mode as the default if you held a modifier while clicking on the desired option. Now, if you want to have, for example, Haiku as your default Claude Code model, you can do that. This feature adds parity between external agents and Zed's built-in one, which already supported this feature for a little while. Note: This still doesn't work with external agents installed from extensions. At the moment, this is limited to Claude Code, Codex, and Gemini—the ones we include out of the box. Release Notes: - agent: Added the ability to set a model as the default for a given built-in external agent (Claude Code, Codex CLI, or Gemini CLI). --- crates/agent_servers/src/acp.rs | 54 ++++++++++++++++ crates/agent_servers/src/agent_servers.rs | 12 ++++ crates/agent_servers/src/claude.rs | 23 +++++++ crates/agent_servers/src/codex.rs | 23 +++++++ crates/agent_servers/src/custom.rs | 30 +++++++++ crates/agent_servers/src/e2e_tests.rs | 2 + crates/agent_servers/src/gemini.rs | 2 + crates/agent_ui/src/acp/mode_selector.rs | 31 +-------- crates/agent_ui/src/acp/model_selector.rs | 63 +++++++++++++++---- .../src/acp/model_selector_popover.rs | 7 ++- crates/agent_ui/src/acp/thread_view.rs | 4 ++ crates/agent_ui/src/agent_configuration.rs | 1 + crates/agent_ui/src/ui.rs | 2 + crates/agent_ui/src/ui/hold_for_default.rs | 40 ++++++++++++ crates/project/src/agent_server_store.rs | 11 ++++ crates/settings/src/settings_content/agent.rs | 12 ++++ 16 files changed, 276 insertions(+), 41 deletions(-) create mode 100644 crates/agent_ui/src/ui/hold_for_default.rs diff --git a/crates/agent_servers/src/acp.rs b/crates/agent_servers/src/acp.rs index 2ec9beb71bf08c90ea85b8752410405714d31537..a44bdd1f22478e92ace192c939561f855c2814bd 100644 --- a/crates/agent_servers/src/acp.rs +++ b/crates/agent_servers/src/acp.rs @@ -35,6 +35,7 @@ pub struct AcpConnection { auth_methods: Vec, agent_capabilities: acp::AgentCapabilities, default_mode: Option, + default_model: Option, root_dir: PathBuf, // NB: Don't move this into the wait_task, since we need to ensure the process is // killed on drop (setting kill_on_drop on the command seems to not always work). 
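An aside on the feature being wired up here: once this plumbing lands, the default would presumably be configured from `settings.json` along these lines. The `agent_servers.claude.default_model` path mirrors the settings structs this patch extends, and the model ID shown is only a placeholder; it has to be an ID the agent itself reports, since invalid IDs are logged and ignored by the fallback logic later in this patch.

```
{
  "agent_servers": {
    "claude": {
      // Placeholder ID; use a model ID reported by the agent itself.
      "default_model": "claude-haiku-4-5"
    }
  }
}
```

Holding the secondary modifier while picking a model in the selector writes the same setting, per `set_default_model` below.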
@@ -57,6 +58,7 @@ pub async fn connect( command: AgentServerCommand, root_dir: &Path, default_mode: Option, + default_model: Option, is_remote: bool, cx: &mut AsyncApp, ) -> Result> { @@ -66,6 +68,7 @@ pub async fn connect( command.clone(), root_dir, default_mode, + default_model, is_remote, cx, ) @@ -82,6 +85,7 @@ impl AcpConnection { command: AgentServerCommand, root_dir: &Path, default_mode: Option, + default_model: Option, is_remote: bool, cx: &mut AsyncApp, ) -> Result { @@ -207,6 +211,7 @@ impl AcpConnection { sessions, agent_capabilities: response.agent_capabilities, default_mode, + default_model, _io_task: io_task, _wait_task: wait_task, _stderr_task: stderr_task, @@ -245,6 +250,7 @@ impl AgentConnection for AcpConnection { let conn = self.connection.clone(); let sessions = self.sessions.clone(); let default_mode = self.default_mode.clone(); + let default_model = self.default_model.clone(); let cwd = cwd.to_path_buf(); let context_server_store = project.read(cx).context_server_store().read(cx); let mcp_servers = @@ -333,6 +339,7 @@ impl AgentConnection for AcpConnection { let default_mode = default_mode.clone(); let session_id = response.session_id.clone(); let modes = modes.clone(); + let conn = conn.clone(); async move |_| { let result = conn.set_session_mode(acp::SetSessionModeRequest { session_id, @@ -367,6 +374,53 @@ impl AgentConnection for AcpConnection { } } + if let Some(default_model) = default_model { + if let Some(models) = models.as_ref() { + let mut models_ref = models.borrow_mut(); + let has_model = models_ref.available_models.iter().any(|model| model.model_id == default_model); + + if has_model { + let initial_model_id = models_ref.current_model_id.clone(); + + cx.spawn({ + let default_model = default_model.clone(); + let session_id = response.session_id.clone(); + let models = models.clone(); + let conn = conn.clone(); + async move |_| { + let result = conn.set_session_model(acp::SetSessionModelRequest { + session_id, + model_id: default_model, + meta: None, + }) + .await.log_err(); + + if result.is_none() { + models.borrow_mut().current_model_id = initial_model_id; + } + } + }).detach(); + + models_ref.current_model_id = default_model; + } else { + let available_models = models_ref + .available_models + .iter() + .map(|model| format!("- `{}`: {}", model.model_id, model.name)) + .collect::>() + .join("\n"); + + log::warn!( + "`{default_model}` is not a valid {name} model. 
Available options:\n{available_models}", + ); + } + } else { + log::warn!( + "`{name}` does not support model selection, but `default_model` was set in settings.", + ); + } + } + let session_id = response.session_id; let action_log = cx.new(|_| ActionLog::new(project.clone()))?; let thread = cx.new(|cx| { diff --git a/crates/agent_servers/src/agent_servers.rs b/crates/agent_servers/src/agent_servers.rs index b44c2123fb5052e2487464d813936cd1edf9821a..cf03b71a78b358d7b110c450f769f9645094baaa 100644 --- a/crates/agent_servers/src/agent_servers.rs +++ b/crates/agent_servers/src/agent_servers.rs @@ -68,6 +68,18 @@ pub trait AgentServer: Send { ) { } + fn default_model(&self, _cx: &mut App) -> Option { + None + } + + fn set_default_model( + &self, + _model_id: Option, + _fs: Arc, + _cx: &mut App, + ) { + } + fn connect( &self, root_dir: Option<&Path>, diff --git a/crates/agent_servers/src/claude.rs b/crates/agent_servers/src/claude.rs index cd3207824a7c05ddfaafeca965deea0918ccfb39..ac79ab7484de90a84ce3d6720f54bcec6addc6b5 100644 --- a/crates/agent_servers/src/claude.rs +++ b/crates/agent_servers/src/claude.rs @@ -55,6 +55,27 @@ impl AgentServer for ClaudeCode { }); } + fn default_model(&self, cx: &mut App) -> Option { + let settings = cx.read_global(|settings: &SettingsStore, _| { + settings.get::(None).claude.clone() + }); + + settings + .as_ref() + .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into()))) + } + + fn set_default_model(&self, model_id: Option, fs: Arc, cx: &mut App) { + update_settings_file(fs, cx, |settings, _| { + settings + .agent_servers + .get_or_insert_default() + .claude + .get_or_insert_default() + .default_model = model_id.map(|m| m.to_string()) + }); + } + fn connect( &self, root_dir: Option<&Path>, @@ -68,6 +89,7 @@ impl AgentServer for ClaudeCode { let store = delegate.store.downgrade(); let extra_env = load_proxy_env(cx); let default_mode = self.default_mode(cx); + let default_model = self.default_model(cx); cx.spawn(async move |cx| { let (command, root_dir, login) = store @@ -90,6 +112,7 @@ impl AgentServer for ClaudeCode { command, root_dir.as_ref(), default_mode, + default_model, is_remote, cx, ) diff --git a/crates/agent_servers/src/codex.rs b/crates/agent_servers/src/codex.rs index 95375ad412c31272dbfce9262b4b5fd38fe55c50..ec01cd4e523b5696b2f09b5e51e7137fcfb16c91 100644 --- a/crates/agent_servers/src/codex.rs +++ b/crates/agent_servers/src/codex.rs @@ -56,6 +56,27 @@ impl AgentServer for Codex { }); } + fn default_model(&self, cx: &mut App) -> Option { + let settings = cx.read_global(|settings: &SettingsStore, _| { + settings.get::(None).codex.clone() + }); + + settings + .as_ref() + .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into()))) + } + + fn set_default_model(&self, model_id: Option, fs: Arc, cx: &mut App) { + update_settings_file(fs, cx, |settings, _| { + settings + .agent_servers + .get_or_insert_default() + .codex + .get_or_insert_default() + .default_model = model_id.map(|m| m.to_string()) + }); + } + fn connect( &self, root_dir: Option<&Path>, @@ -69,6 +90,7 @@ impl AgentServer for Codex { let store = delegate.store.downgrade(); let extra_env = load_proxy_env(cx); let default_mode = self.default_mode(cx); + let default_model = self.default_model(cx); cx.spawn(async move |cx| { let (command, root_dir, login) = store @@ -92,6 +114,7 @@ impl AgentServer for Codex { command, root_dir.as_ref(), default_mode, + default_model, is_remote, cx, ) diff --git a/crates/agent_servers/src/custom.rs b/crates/agent_servers/src/custom.rs 
index 7d36cc758389a828b819a822c91c9bb4b3444985..b417e2bdf30a7ed6b9e2ab4baa6211cee2a9a890 100644 --- a/crates/agent_servers/src/custom.rs +++ b/crates/agent_servers/src/custom.rs @@ -61,6 +61,34 @@ impl crate::AgentServer for CustomAgentServer { }); } + fn default_model(&self, cx: &mut App) -> Option { + let settings = cx.read_global(|settings: &SettingsStore, _| { + settings + .get::(None) + .custom + .get(&self.name()) + .cloned() + }); + + settings + .as_ref() + .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into()))) + } + + fn set_default_model(&self, model_id: Option, fs: Arc, cx: &mut App) { + let name = self.name(); + update_settings_file(fs, cx, move |settings, _| { + if let Some(settings) = settings + .agent_servers + .get_or_insert_default() + .custom + .get_mut(&name) + { + settings.default_model = model_id.map(|m| m.to_string()) + } + }); + } + fn connect( &self, root_dir: Option<&Path>, @@ -72,6 +100,7 @@ impl crate::AgentServer for CustomAgentServer { let root_dir = root_dir.map(|root_dir| root_dir.to_string_lossy().into_owned()); let is_remote = delegate.project.read(cx).is_via_remote_server(); let default_mode = self.default_mode(cx); + let default_model = self.default_model(cx); let store = delegate.store.downgrade(); let extra_env = load_proxy_env(cx); @@ -98,6 +127,7 @@ impl crate::AgentServer for CustomAgentServer { command, root_dir.as_ref(), default_mode, + default_model, is_remote, cx, ) diff --git a/crates/agent_servers/src/e2e_tests.rs b/crates/agent_servers/src/e2e_tests.rs index 7618625278121cc1426f06ed8626a68759f34995..824b999bdaff46cf3ad3a570b62fecd596612563 100644 --- a/crates/agent_servers/src/e2e_tests.rs +++ b/crates/agent_servers/src/e2e_tests.rs @@ -476,6 +476,7 @@ pub async fn init_test(cx: &mut TestAppContext) -> Arc { env: None, ignore_system_version: None, default_mode: None, + default_model: None, }), gemini: Some(crate::gemini::tests::local_command().into()), codex: Some(BuiltinAgentServerSettings { @@ -484,6 +485,7 @@ pub async fn init_test(cx: &mut TestAppContext) -> Arc { env: None, ignore_system_version: None, default_mode: None, + default_model: None, }), custom: collections::HashMap::default(), }, diff --git a/crates/agent_servers/src/gemini.rs b/crates/agent_servers/src/gemini.rs index feaa221cbccb789ed3a89bed9f23d544e1d3b5f7..c1b2efb081551f82752dc15a909eec64ff78d94e 100644 --- a/crates/agent_servers/src/gemini.rs +++ b/crates/agent_servers/src/gemini.rs @@ -37,6 +37,7 @@ impl AgentServer for Gemini { let store = delegate.store.downgrade(); let mut extra_env = load_proxy_env(cx); let default_mode = self.default_mode(cx); + let default_model = self.default_model(cx); cx.spawn(async move |cx| { extra_env.insert("SURFACE".to_owned(), "zed".to_owned()); @@ -69,6 +70,7 @@ impl AgentServer for Gemini { command, root_dir.as_ref(), default_mode, + default_model, is_remote, cx, ) diff --git a/crates/agent_ui/src/acp/mode_selector.rs b/crates/agent_ui/src/acp/mode_selector.rs index 83ab9c299976848b973af28192462fda4eb69409..2db031cafeb8a66e43120be9766debe3c16eb2d0 100644 --- a/crates/agent_ui/src/acp/mode_selector.rs +++ b/crates/agent_ui/src/acp/mode_selector.rs @@ -11,7 +11,7 @@ use ui::{ PopoverMenu, PopoverMenuHandle, Tooltip, prelude::*, }; -use crate::{CycleModeSelector, ToggleProfileSelector}; +use crate::{CycleModeSelector, ToggleProfileSelector, ui::HoldForDefault}; pub struct ModeSelector { connection: Rc, @@ -108,36 +108,11 @@ impl ModeSelector { entry.documentation_aside(side, DocumentationEdge::Bottom, { let description = 
description.clone(); - move |cx| { + move |_| { v_flex() .gap_1() .child(Label::new(description.clone())) - .child( - h_flex() - .pt_1() - .border_t_1() - .border_color(cx.theme().colors().border_variant) - .gap_0p5() - .text_sm() - .text_color(Color::Muted.color(cx)) - .child("Hold") - .child(h_flex().flex_shrink_0().children( - ui::render_modifiers( - &gpui::Modifiers::secondary_key(), - PlatformStyle::platform(), - None, - Some(ui::TextSize::Default.rems(cx).into()), - true, - ), - )) - .child(div().map(|this| { - if is_default { - this.child("to also unset as default") - } else { - this.child("to also set as default") - } - })), - ) + .child(HoldForDefault::new(is_default)) .into_any_element() } }) diff --git a/crates/agent_ui/src/acp/model_selector.rs b/crates/agent_ui/src/acp/model_selector.rs index 91aacde2aebcd0a2d4c8098119bbc43342d3ef74..c60a3b6cb61970caba02df82506848b6efa90cc1 100644 --- a/crates/agent_ui/src/acp/model_selector.rs +++ b/crates/agent_ui/src/acp/model_selector.rs @@ -1,8 +1,10 @@ use std::{cmp::Reverse, rc::Rc, sync::Arc}; use acp_thread::{AgentModelInfo, AgentModelList, AgentModelSelector}; +use agent_servers::AgentServer; use anyhow::Result; use collections::IndexMap; +use fs::Fs; use futures::FutureExt; use fuzzy::{StringMatchCandidate, match_strings}; use gpui::{AsyncWindowContext, BackgroundExecutor, DismissEvent, Task, WeakEntity}; @@ -14,14 +16,18 @@ use ui::{ }; use util::ResultExt; +use crate::ui::HoldForDefault; + pub type AcpModelSelector = Picker; pub fn acp_model_selector( selector: Rc, + agent_server: Rc, + fs: Arc, window: &mut Window, cx: &mut Context, ) -> AcpModelSelector { - let delegate = AcpModelPickerDelegate::new(selector, window, cx); + let delegate = AcpModelPickerDelegate::new(selector, agent_server, fs, window, cx); Picker::list(delegate, window, cx) .show_scrollbar(true) .width(rems(20.)) @@ -35,10 +41,12 @@ enum AcpModelPickerEntry { pub struct AcpModelPickerDelegate { selector: Rc, + agent_server: Rc, + fs: Arc, filtered_entries: Vec, models: Option, selected_index: usize, - selected_description: Option<(usize, SharedString)>, + selected_description: Option<(usize, SharedString, bool)>, selected_model: Option, _refresh_models_task: Task<()>, } @@ -46,6 +54,8 @@ pub struct AcpModelPickerDelegate { impl AcpModelPickerDelegate { fn new( selector: Rc, + agent_server: Rc, + fs: Arc, window: &mut Window, cx: &mut Context, ) -> Self { @@ -86,6 +96,8 @@ impl AcpModelPickerDelegate { Self { selector, + agent_server, + fs, filtered_entries: Vec::new(), models: None, selected_model: None, @@ -181,6 +193,21 @@ impl PickerDelegate for AcpModelPickerDelegate { if let Some(AcpModelPickerEntry::Model(model_info)) = self.filtered_entries.get(self.selected_index) { + if window.modifiers().secondary() { + let default_model = self.agent_server.default_model(cx); + let is_default = default_model.as_ref() == Some(&model_info.id); + + self.agent_server.set_default_model( + if is_default { + None + } else { + Some(model_info.id.clone()) + }, + self.fs.clone(), + cx, + ); + } + self.selector .select_model(model_info.id.clone(), cx) .detach_and_log_err(cx); @@ -225,6 +252,8 @@ impl PickerDelegate for AcpModelPickerDelegate { ), AcpModelPickerEntry::Model(model_info) => { let is_selected = Some(model_info) == self.selected_model.as_ref(); + let default_model = self.agent_server.default_model(cx); + let is_default = default_model.as_ref() == Some(&model_info.id); let model_icon_color = if is_selected { Color::Accent @@ -239,8 +268,8 @@ impl PickerDelegate for 
AcpModelPickerDelegate { this .on_hover(cx.listener(move |menu, hovered, _, cx| { if *hovered { - menu.delegate.selected_description = Some((ix, description.clone())); - } else if matches!(menu.delegate.selected_description, Some((id, _)) if id == ix) { + menu.delegate.selected_description = Some((ix, description.clone(), is_default)); + } else if matches!(menu.delegate.selected_description, Some((id, _, _)) if id == ix) { menu.delegate.selected_description = None; } cx.notify(); @@ -283,14 +312,24 @@ impl PickerDelegate for AcpModelPickerDelegate { _window: &mut Window, _cx: &mut Context>, ) -> Option { - self.selected_description.as_ref().map(|(_, description)| { - let description = description.clone(); - DocumentationAside::new( - DocumentationSide::Left, - DocumentationEdge::Top, - Rc::new(move |_| Label::new(description.clone()).into_any_element()), - ) - }) + self.selected_description + .as_ref() + .map(|(_, description, is_default)| { + let description = description.clone(); + let is_default = *is_default; + + DocumentationAside::new( + DocumentationSide::Left, + DocumentationEdge::Top, + Rc::new(move |_| { + v_flex() + .gap_1() + .child(Label::new(description.clone())) + .child(HoldForDefault::new(is_default)) + .into_any_element() + }), + ) + }) } } diff --git a/crates/agent_ui/src/acp/model_selector_popover.rs b/crates/agent_ui/src/acp/model_selector_popover.rs index 2e8ade95ffcb65d8c7742b60fa0facc70358ae1e..04e7e06a85aadf7c7fb1b69bfcaf81ec6ff6bf89 100644 --- a/crates/agent_ui/src/acp/model_selector_popover.rs +++ b/crates/agent_ui/src/acp/model_selector_popover.rs @@ -1,6 +1,9 @@ use std::rc::Rc; +use std::sync::Arc; use acp_thread::{AgentModelInfo, AgentModelSelector}; +use agent_servers::AgentServer; +use fs::Fs; use gpui::{Entity, FocusHandle}; use picker::popover_menu::PickerPopoverMenu; use ui::{ @@ -20,13 +23,15 @@ pub struct AcpModelSelectorPopover { impl AcpModelSelectorPopover { pub(crate) fn new( selector: Rc, + agent_server: Rc, + fs: Arc, menu_handle: PopoverMenuHandle, focus_handle: FocusHandle, window: &mut Window, cx: &mut Context, ) -> Self { Self { - selector: cx.new(move |cx| acp_model_selector(selector, window, cx)), + selector: cx.new(move |cx| acp_model_selector(selector, agent_server, fs, window, cx)), menu_handle, focus_handle, } diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index c2d3e5262354b57ae3c7e6dbd10189dedefebfe6..784fef0b9f3862047c868dddf88a8fcd217c278d 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -591,9 +591,13 @@ impl AcpThreadView { .connection() .model_selector(thread.read(cx).session_id()) .map(|selector| { + let agent_server = this.agent.clone(); + let fs = this.project.read(cx).fs().clone(); cx.new(|cx| { AcpModelSelectorPopover::new( selector, + agent_server, + fs, PopoverMenuHandle::default(), this.focus_handle(cx), window, diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index 8652f5cbd6c750da9260970ddc9ddcaef8337451..45ba29a595b59f4a1c329d46e43030a1b9c7ed14 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -1348,6 +1348,7 @@ async fn open_new_agent_servers_entry_in_settings_editor( args: vec![], env: Some(HashMap::default()), default_mode: None, + default_model: None, }, ); } diff --git a/crates/agent_ui/src/ui.rs b/crates/agent_ui/src/ui.rs index 5363949b904d74d3749c066357e0c60fef19d3b9..f556f8eece8efef77f4a6c286fee032cbfcb42df 
100644 --- a/crates/agent_ui/src/ui.rs +++ b/crates/agent_ui/src/ui.rs @@ -4,6 +4,7 @@ mod burn_mode_tooltip; mod claude_code_onboarding_modal; mod context_pill; mod end_trial_upsell; +mod hold_for_default; mod onboarding_modal; mod unavailable_editing_tooltip; mod usage_callout; @@ -14,6 +15,7 @@ pub use burn_mode_tooltip::*; pub use claude_code_onboarding_modal::*; pub use context_pill::*; pub use end_trial_upsell::*; +pub use hold_for_default::*; pub use onboarding_modal::*; pub use unavailable_editing_tooltip::*; pub use usage_callout::*; diff --git a/crates/agent_ui/src/ui/hold_for_default.rs b/crates/agent_ui/src/ui/hold_for_default.rs new file mode 100644 index 0000000000000000000000000000000000000000..409e5d59707caa3a6bc62bbf470e33cb150183f5 --- /dev/null +++ b/crates/agent_ui/src/ui/hold_for_default.rs @@ -0,0 +1,40 @@ +use gpui::{App, IntoElement, Modifiers, RenderOnce, Window}; +use ui::{prelude::*, render_modifiers}; + +#[derive(IntoElement)] +pub struct HoldForDefault { + is_default: bool, +} + +impl HoldForDefault { + pub fn new(is_default: bool) -> Self { + Self { is_default } + } +} + +impl RenderOnce for HoldForDefault { + fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { + h_flex() + .pt_1() + .border_t_1() + .border_color(cx.theme().colors().border_variant) + .gap_0p5() + .text_sm() + .text_color(Color::Muted.color(cx)) + .child("Hold") + .child(h_flex().flex_shrink_0().children(render_modifiers( + &Modifiers::secondary_key(), + PlatformStyle::platform(), + None, + Some(TextSize::Default.rems(cx).into()), + true, + ))) + .child(div().map(|this| { + if self.is_default { + this.child("to unset as default") + } else { + this.child("to set as default") + } + })) + } +} diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index f1fb210084fb118832f5ca8f5ffa78990c892aa1..944eb593185bd5016e397d1417ed834da3ee73ef 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -1777,6 +1777,7 @@ pub struct BuiltinAgentServerSettings { pub env: Option>, pub ignore_system_version: Option, pub default_mode: Option, + pub default_model: Option, } impl BuiltinAgentServerSettings { @@ -1799,6 +1800,7 @@ impl From for BuiltinAgentServerSettings { env: value.env, ignore_system_version: value.ignore_system_version, default_mode: value.default_mode, + default_model: value.default_model, } } } @@ -1823,6 +1825,12 @@ pub struct CustomAgentServerSettings { /// /// Default: None pub default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + pub default_model: Option, } impl From for CustomAgentServerSettings { @@ -1834,6 +1842,7 @@ impl From for CustomAgentServerSettings { env: value.env, }, default_mode: value.default_mode, + default_model: value.default_model, } } } @@ -2156,6 +2165,7 @@ mod extension_agent_tests { env: None, ignore_system_version: None, default_mode: None, + default_model: None, }; let BuiltinAgentServerSettings { path, .. 
} = settings.into(); @@ -2171,6 +2181,7 @@ mod extension_agent_tests { args: vec!["serve".into()], env: None, default_mode: None, + default_model: None, }; let CustomAgentServerSettings { diff --git a/crates/settings/src/settings_content/agent.rs b/crates/settings/src/settings_content/agent.rs index 425b5f05ff46fa705c073838dceab6c431c74bde..59b5a4e0f516387ce6316cd31376bb45c2c5cb94 100644 --- a/crates/settings/src/settings_content/agent.rs +++ b/crates/settings/src/settings_content/agent.rs @@ -332,6 +332,12 @@ pub struct BuiltinAgentServerSettings { /// /// Default: None pub default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + pub default_model: Option, } #[skip_serializing_none] @@ -348,4 +354,10 @@ pub struct CustomAgentServerSettings { /// /// Default: None pub default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + pub default_model: Option, } From 1e45c99c80a20727fe5b704251dc7a47b002212a Mon Sep 17 00:00:00 2001 From: Aaron Saunders Date: Thu, 20 Nov 2025 17:14:46 +0600 Subject: [PATCH 0244/1030] Improve readability of files in the git changes panel (#41857) Closes _unknown_ image This PR places the file_name before the file_path so that when the panel is slim it is still usable, mirrors the behaviour of the file picker (cmd+P) Release Notes: - Improved readability of files in the git changes panel --- assets/settings/default.json | 5 +- crates/git_ui/src/git_panel.rs | 81 ++++++++++++++----- crates/project/src/project_settings.rs | 21 +++++ .../settings/src/settings_content/project.rs | 26 ++++++ crates/settings_ui/src/page_data.rs | 13 +++ crates/settings_ui/src/settings_ui.rs | 1 + 6 files changed, 127 insertions(+), 20 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index 36c140dce7f8949ea73c163b9786b63ebeed0869..63ef8b51bb84c8f8dc5475dc172b82a78cee8eac 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1316,7 +1316,10 @@ // "hunk_style": "staged_hollow" // 2. Show unstaged hunks hollow and staged hunks filled: // "hunk_style": "unstaged_hollow" - "hunk_style": "staged_hollow" + "hunk_style": "staged_hollow", + // Should the name or path be displayed first in the git view. + // "path_style": "file_name_first" or "file_path_first" + "path_style": "file_name_first" }, // The list of custom Git hosting providers. 
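An aside on the new `path_style` key above: a user who prefers the previous layout (directory path before the file name) would presumably override it in `settings.json` like so.

```
{
  "git": {
    // "file_name_first" is the new default; "file_path_first" restores
    // the old path-before-name layout.
    "path_style": "file_path_first"
  }
}
```

The same option is exposed as a dropdown in the settings UI via the `page_data.rs` and `settings_ui.rs` hunks further down.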
"git_hosting_providers": [ diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index ad77820078d43bc72be12ff358f96b5f4edaea0e..5c86d79eee41e61d7cc904bfe148be6e8c9abdd2 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -51,6 +51,7 @@ use panel::{ use project::{ Fs, Project, ProjectPath, git_store::{GitStoreEvent, Repository, RepositoryEvent, RepositoryId, pending_op}, + project_settings::{GitPathStyle, ProjectSettings}, }; use serde::{Deserialize, Serialize}; use settings::{Settings, SettingsStore, StatusStyle}; @@ -3954,6 +3955,7 @@ impl GitPanel { cx: &Context, ) -> AnyElement { let path_style = self.project.read(cx).path_style(cx); + let git_path_style = ProjectSettings::get_global(cx).git.path_style; let display_name = entry.display_name(path_style); let selected = self.selected_entry == Some(ix); @@ -4053,7 +4055,6 @@ impl GitPanel { } else { cx.theme().colors().ghost_element_active }; - h_flex() .id(id) .h(self.list_item_height()) @@ -4151,28 +4152,70 @@ impl GitPanel { h_flex() .items_center() .flex_1() - // .overflow_hidden() - .when_some(entry.parent_dir(path_style), |this, parent| { - if !parent.is_empty() { - this.child( - self.entry_label( - format!("{parent}{}", path_style.separator()), - path_color, - ) - .when(status.is_deleted(), |this| this.strikethrough()), - ) - } else { - this - } - }) - .child( - self.entry_label(display_name, label_color) - .when(status.is_deleted(), |this| this.strikethrough()), - ), + .child(h_flex().items_center().flex_1().map(|this| { + self.path_formatted( + this, + entry.parent_dir(path_style), + path_color, + display_name, + label_color, + path_style, + git_path_style, + status.is_deleted(), + ) + })), ) .into_any_element() } + fn path_formatted( + &self, + parent: Div, + directory: Option, + path_color: Color, + file_name: String, + label_color: Color, + path_style: PathStyle, + git_path_style: GitPathStyle, + strikethrough: bool, + ) -> Div { + parent + .when(git_path_style == GitPathStyle::FileNameFirst, |this| { + this.child( + self.entry_label( + match directory.as_ref().is_none_or(|d| d.is_empty()) { + true => file_name.clone(), + false => format!("{file_name} "), + }, + label_color, + ) + .when(strikethrough, Label::strikethrough), + ) + }) + .when_some(directory, |this, dir| { + match ( + !dir.is_empty(), + git_path_style == GitPathStyle::FileNameFirst, + ) { + (true, true) => this.child( + self.entry_label(dir, path_color) + .when(strikethrough, Label::strikethrough), + ), + (true, false) => this.child( + self.entry_label(format!("{dir}{}", path_style.separator()), path_color) + .when(strikethrough, Label::strikethrough), + ), + _ => this, + } + }) + .when(git_path_style == GitPathStyle::FilePathFirst, |this| { + this.child( + self.entry_label(file_name, label_color) + .when(strikethrough, Label::strikethrough), + ) + }) + } + fn has_write_access(&self, cx: &App) -> bool { !self.project.read(cx).is_read_only(cx) } diff --git a/crates/project/src/project_settings.rs b/crates/project/src/project_settings.rs index 1bfd44957b2b0d75f8fda2b42a875c92e37d63f4..05d5612f7db5b35e3c2fe6513cc45a05ddaac68c 100644 --- a/crates/project/src/project_settings.rs +++ b/crates/project/src/project_settings.rs @@ -348,6 +348,26 @@ pub struct GitSettings { /// /// Default: staged_hollow pub hunk_style: settings::GitHunkStyleSetting, + /// How file paths are displayed in the git gutter. 
+ /// + /// Default: file_name_first + pub path_style: GitPathStyle, +} + +#[derive(Clone, Copy, Debug, PartialEq, Default)] +pub enum GitPathStyle { + #[default] + FileNameFirst, + FilePathFirst, +} + +impl From for GitPathStyle { + fn from(style: settings::GitPathStyle) -> Self { + match style { + settings::GitPathStyle::FileNameFirst => GitPathStyle::FileNameFirst, + settings::GitPathStyle::FilePathFirst => GitPathStyle::FilePathFirst, + } + } } #[derive(Clone, Copy, Debug)] @@ -501,6 +521,7 @@ impl Settings for ProjectSettings { } }, hunk_style: git.hunk_style.unwrap(), + path_style: git.path_style.unwrap().into(), }; Self { context_servers: project diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index 83e0537940870bd944cb75f20e35cc522059570c..c9021ee22e4c419af544bea8e76387615e2a949d 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -311,6 +311,10 @@ pub struct GitSettings { /// /// Default: staged_hollow pub hunk_style: Option, + /// How file paths are displayed in the git gutter. + /// + /// Default: file_name_first + pub path_style: Option, } #[derive( @@ -406,6 +410,28 @@ pub enum GitHunkStyleSetting { UnstagedHollow, } +#[derive( + Copy, + Clone, + Debug, + PartialEq, + Default, + Serialize, + Deserialize, + JsonSchema, + MergeFrom, + strum::VariantArray, + strum::VariantNames, +)] +#[serde(rename_all = "snake_case")] +pub enum GitPathStyle { + /// Show file name first, then path + #[default] + FileNameFirst, + /// Show full path first + FilePathFirst, +} + #[skip_serializing_none] #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct DiagnosticsSettingsContent { diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index a6baaf94842955a323f348dcbae8130dcfd060c6..d5368e278914044196f55aaf852e2efefed07117 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -5494,6 +5494,19 @@ pub(crate) fn settings_data(cx: &App) -> Vec { metadata: None, files: USER, }), + SettingsPageItem::SettingItem(SettingItem { + title: "Path Style", + description: "Should the name or path be displayed first in the git view.", + field: Box::new(SettingField { + json_path: Some("git.path_style"), + pick: |settings_content| settings_content.git.as_ref()?.path_style.as_ref(), + write: |settings_content, value| { + settings_content.git.get_or_insert_default().path_style = value; + }, + }), + metadata: None, + files: USER, + }), ], }, SettingsPage { diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 7b90464633c47caf7a2b11421fdbc6ac5aefe129..ef8cf4928665113a72d97b804931295d6181dde4 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -452,6 +452,7 @@ fn init_renderers(cx: &mut App) { .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) + .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) From 29f9853978a61b0109e08b1680aea36710fa5ea5 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 20 Nov 2025 12:18:50 +0100 Subject: [PATCH 0245/1030] svg_preview: Remove unnecessary dependency on editor (#43147) Editor is a choke point in our compilation graph while also being a very common crate that is being edited. 
So reducing things that depend on it will generally improve compilation times for us. Release Notes: - N/A *or* Added/Fixed/Improved ... --- Cargo.lock | 2 +- crates/agent_ui/src/agent_diff.rs | 10 ++-- crates/agent_ui/src/text_thread_editor.rs | 18 +++---- crates/breadcrumbs/src/breadcrumbs.rs | 2 +- crates/collab_ui/src/channel_view.rs | 10 ++-- crates/debugger_ui/src/stack_trace_view.rs | 8 +-- crates/diagnostics/src/buffer_diagnostics.rs | 6 +-- crates/diagnostics/src/diagnostics.rs | 8 +-- crates/editor/src/editor_tests.rs | 2 +- crates/editor/src/items.rs | 15 ++++++ crates/git_ui/src/commit_view.rs | 12 ++--- crates/git_ui/src/file_diff_view.rs | 10 ++-- crates/git_ui/src/project_diff.rs | 8 +-- crates/git_ui/src/text_diff_view.rs | 10 ++-- crates/language_tools/src/lsp_log_view.rs | 10 ++-- crates/search/src/project_search.rs | 9 ++-- crates/svg_preview/Cargo.toml | 2 +- crates/svg_preview/src/svg_preview_view.rs | 50 ++++++++----------- crates/workspace/src/item.rs | 26 +++++----- crates/workspace/src/pane.rs | 6 +-- crates/workspace/src/searchable.rs | 4 +- crates/workspace/src/workspace.rs | 8 +-- .../zed/src/zed/quick_action_bar/preview.rs | 2 +- crates/zeta2_tools/src/zeta2_context_view.rs | 8 +-- 24 files changed, 127 insertions(+), 119 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 873fcdbb63fcabee0f722ae27beac486d0ce8670..0dc6a3c99a79ae3b3baad983a4427b710fb22080 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16561,10 +16561,10 @@ checksum = "0193cc4331cfd2f3d2011ef287590868599a2f33c3e69bc22c1a3d3acf9e02fb" name = "svg_preview" version = "0.1.0" dependencies = [ - "editor", "file_icons", "gpui", "language", + "multi_buffer", "ui", "workspace", ] diff --git a/crates/agent_ui/src/agent_diff.rs b/crates/agent_ui/src/agent_diff.rs index 85e3264458364f10d1c90d7c18c3609c9c7a7fd4..53e7a2f46d37e4cd2f0688d5af2a7d4a01174801 100644 --- a/crates/agent_ui/src/agent_diff.rs +++ b/crates/agent_ui/src/agent_diff.rs @@ -13,8 +13,8 @@ use editor::{ scroll::Autoscroll, }; use gpui::{ - Action, AnyElement, AnyView, App, AppContext, Empty, Entity, EventEmitter, FocusHandle, - Focusable, Global, SharedString, Subscription, Task, WeakEntity, Window, prelude::*, + Action, AnyElement, App, AppContext, Empty, Entity, EventEmitter, FocusHandle, Focusable, + Global, SharedString, Subscription, Task, WeakEntity, Window, prelude::*, }; use language::{Buffer, Capability, DiskState, OffsetRangeExt, Point}; @@ -580,11 +580,11 @@ impl Item for AgentDiffPane { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/agent_ui/src/text_thread_editor.rs b/crates/agent_ui/src/text_thread_editor.rs index daf5cc2c3d770f4e9bdad4ea882b1ad2afc93a4e..2a3c7e10318da78729f35476da872a0651c4a145 100644 --- a/crates/agent_ui/src/text_thread_editor.rs +++ b/crates/agent_ui/src/text_thread_editor.rs @@ -22,11 +22,11 @@ use editor::{FoldPlaceholder, display_map::CreaseId}; use fs::Fs; use futures::FutureExt; use gpui::{ - Action, Animation, AnimationExt, AnyElement, AnyView, App, ClipboardEntry, ClipboardItem, - Empty, Entity, EventEmitter, FocusHandle, Focusable, FontWeight, Global, InteractiveElement, - IntoElement, ParentElement, Pixels, Render, RenderImage, SharedString, Size, - StatefulInteractiveElement, Styled, Subscription, Task, WeakEntity, actions, div, 
img, point, - prelude::*, pulsating_between, size, + Action, Animation, AnimationExt, AnyElement, App, ClipboardEntry, ClipboardItem, Empty, Entity, + EventEmitter, FocusHandle, Focusable, FontWeight, Global, InteractiveElement, IntoElement, + ParentElement, Pixels, Render, RenderImage, SharedString, Size, StatefulInteractiveElement, + Styled, Subscription, Task, WeakEntity, actions, div, img, point, prelude::*, + pulsating_between, size, }; use language::{ BufferSnapshot, LspAdapterDelegate, ToOffset, @@ -66,7 +66,7 @@ use workspace::{ }; use workspace::{ Save, Toast, Workspace, - item::{self, FollowableItem, Item, ItemHandle}, + item::{self, FollowableItem, Item}, notifications::NotificationId, pane, searchable::{SearchEvent, SearchableItem}, @@ -2588,11 +2588,11 @@ impl Item for TextThreadEditor { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/breadcrumbs/src/breadcrumbs.rs b/crates/breadcrumbs/src/breadcrumbs.rs index 7664de3c87673a405118911526cb6606a2fecacf..00c1c0939bbfaf18ea6d0550633f2ae05e16ef25 100644 --- a/crates/breadcrumbs/src/breadcrumbs.rs +++ b/crates/breadcrumbs/src/breadcrumbs.rs @@ -123,7 +123,7 @@ impl Render for Breadcrumbs { .upgrade() .zip(zed_actions::outline::TOGGLE_OUTLINE.get()) { - callback(editor.to_any(), window, cx); + callback(editor.to_any_view(), window, cx); } } }) diff --git a/crates/collab_ui/src/channel_view.rs b/crates/collab_ui/src/channel_view.rs index 5db588fdb3aad3f523864b5f90600e49eca9d8b6..483597359e091eb166df51bbe3c9fa9448ee3d4f 100644 --- a/crates/collab_ui/src/channel_view.rs +++ b/crates/collab_ui/src/channel_view.rs @@ -11,7 +11,7 @@ use editor::{ display_map::ToDisplayPoint, scroll::Autoscroll, }; use gpui::{ - AnyView, App, ClipboardItem, Context, Entity, EventEmitter, Focusable, Pixels, Point, Render, + App, ClipboardItem, Context, Entity, EventEmitter, Focusable, Pixels, Point, Render, Subscription, Task, VisualContext as _, WeakEntity, Window, actions, }; use project::Project; @@ -25,7 +25,7 @@ use util::ResultExt; use workspace::{CollaboratorId, item::TabContentParams}; use workspace::{ ItemNavHistory, Pane, SaveIntent, Toast, ViewId, Workspace, WorkspaceId, - item::{FollowableItem, Item, ItemEvent, ItemHandle}, + item::{FollowableItem, Item, ItemEvent}, searchable::SearchableItemHandle, }; use workspace::{item::Dedup, notifications::NotificationId}; @@ -441,11 +441,11 @@ impl Item for ChannelView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/debugger_ui/src/stack_trace_view.rs b/crates/debugger_ui/src/stack_trace_view.rs index 07caabaacaf00d2752a04c5ba68be07a5678c40a..cdbd9aaff0cff250fdc3e5091ffa7dcabc70861a 100644 --- a/crates/debugger_ui/src/stack_trace_view.rs +++ b/crates/debugger_ui/src/stack_trace_view.rs @@ -7,7 +7,7 @@ use editor::{ RowHighlightOptions, SelectionEffects, ToPoint, scroll::Autoscroll, }; use gpui::{ - AnyView, App, AppContext, Entity, EventEmitter, Focusable, IntoElement, Render, SharedString, + App, AppContext, Entity, EventEmitter, Focusable, IntoElement, Render, 
SharedString, Subscription, Task, WeakEntity, Window, }; use language::{BufferSnapshot, Capability, Point, Selection, SelectionGoal, TreeSitterOptions}; @@ -418,11 +418,11 @@ impl Item for StackTraceView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/diagnostics/src/buffer_diagnostics.rs b/crates/diagnostics/src/buffer_diagnostics.rs index ed079c34864100238fd459cb2ec116bf21827fdd..0fd8783dd514f8da3c53d41dcb6f8e9004ae501c 100644 --- a/crates/diagnostics/src/buffer_diagnostics.rs +++ b/crates/diagnostics/src/buffer_diagnostics.rs @@ -680,11 +680,11 @@ impl Item for BufferDiagnosticsEditor { type_id: std::any::TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index eca7ae359a3ebafbbe13316bb757c1fbfc7f72ce..b24a63b830b93cdbe14e2329abe524f6523cbbd6 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -17,7 +17,7 @@ use editor::{ multibuffer_context_lines, }; use gpui::{ - AnyElement, AnyView, App, AsyncApp, Context, Entity, EventEmitter, FocusHandle, FocusOutEvent, + AnyElement, App, AsyncApp, Context, Entity, EventEmitter, FocusHandle, FocusOutEvent, Focusable, Global, InteractiveElement, IntoElement, ParentElement, Render, SharedString, Styled, Subscription, Task, WeakEntity, Window, actions, div, }; @@ -880,11 +880,11 @@ impl Item for ProjectDiagnosticsEditor { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index d46293157ddb1bd6500c1b423279401c8195ea1f..9d567513b2a428a89b5a58ba75a1276411dce639 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -26983,7 +26983,7 @@ async fn test_non_utf_8_opens(cx: &mut TestAppContext) { .unwrap(); assert_eq!( - handle.to_any().entity_type(), + handle.to_any_view().entity_type(), TypeId::of::() ); } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 5fa31f251448a0381501fb475913371170ac495a..e7342f8a5834ded137eaea3ae367442ab99821fd 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -588,6 +588,21 @@ fn deserialize_anchor(buffer: &MultiBufferSnapshot, anchor: proto::EditorAnchor) impl Item for Editor { type Event = EditorEvent; + fn act_as_type<'a>( + &'a self, + type_id: TypeId, + self_handle: &'a Entity, + cx: &'a App, + ) -> Option { + if TypeId::of::() == type_id { + Some(self_handle.clone().into()) + } else if TypeId::of::() == type_id { + Some(self_handle.read(cx).buffer.clone().into()) + } else { + None + } + } + fn navigate( &mut self, data: Box, diff --git a/crates/git_ui/src/commit_view.rs b/crates/git_ui/src/commit_view.rs index 
b0fa80fa7afef96fa48aa80883fb252beeed9629..3251ab43f71292d2d46503ef83f61692f385dc76 100644 --- a/crates/git_ui/src/commit_view.rs +++ b/crates/git_ui/src/commit_view.rs @@ -6,9 +6,9 @@ use editor::{ }; use git::repository::{CommitDetails, CommitDiff, RepoPath}; use gpui::{ - Action, AnyElement, AnyView, App, AppContext as _, AsyncApp, AsyncWindowContext, Context, - Entity, EventEmitter, FocusHandle, Focusable, IntoElement, PromptLevel, Render, Task, - WeakEntity, Window, actions, + Action, AnyElement, App, AppContext as _, AsyncApp, AsyncWindowContext, Context, Entity, + EventEmitter, FocusHandle, Focusable, IntoElement, PromptLevel, Render, Task, WeakEntity, + Window, actions, }; use language::{ Anchor, Buffer, Capability, DiskState, File, LanguageRegistry, LineEnding, OffsetRangeExt as _, @@ -499,11 +499,11 @@ impl Item for CommitView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/git_ui/src/file_diff_view.rs b/crates/git_ui/src/file_diff_view.rs index 815eaf871ef17f055e60df34cff3e9b9741fb3fb..1599ccd12415e366adcf6a42d67a5c5d77a52151 100644 --- a/crates/git_ui/src/file_diff_view.rs +++ b/crates/git_ui/src/file_diff_view.rs @@ -5,8 +5,8 @@ use buffer_diff::{BufferDiff, BufferDiffSnapshot}; use editor::{Editor, EditorEvent, MultiBuffer}; use futures::{FutureExt, select_biased}; use gpui::{ - AnyElement, AnyView, App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, - FocusHandle, Focusable, IntoElement, Render, Task, Window, + AnyElement, App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, FocusHandle, + Focusable, IntoElement, Render, Task, Window, }; use language::Buffer; use project::Project; @@ -268,11 +268,11 @@ impl Item for FileDiffView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/git_ui/src/project_diff.rs b/crates/git_ui/src/project_diff.rs index 041d8381e92d59d9ef572f26cbc380abdf2d30e5..715b74db333e78081a245f2fb362426591db79d9 100644 --- a/crates/git_ui/src/project_diff.rs +++ b/crates/git_ui/src/project_diff.rs @@ -19,7 +19,7 @@ use git::{ status::FileStatus, }; use gpui::{ - Action, AnyElement, AnyView, App, AppContext as _, AsyncWindowContext, Entity, EventEmitter, + Action, AnyElement, App, AppContext as _, AsyncWindowContext, Entity, EventEmitter, FocusHandle, Focusable, Render, Subscription, Task, WeakEntity, actions, }; use language::{Anchor, Buffer, Capability, OffsetRangeExt}; @@ -775,11 +775,11 @@ impl Item for ProjectDiff { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/git_ui/src/text_diff_view.rs b/crates/git_ui/src/text_diff_view.rs index 0975df9402f5b8e8db8bcfa83f2d31272d9983eb..f95c2626f6c45fd50348daea599ee114231e9426 100644 --- a/crates/git_ui/src/text_diff_view.rs +++ b/crates/git_ui/src/text_diff_view.rs @@ -5,8 +5,8 @@ use 
buffer_diff::{BufferDiff, BufferDiffSnapshot}; use editor::{Editor, EditorEvent, MultiBuffer, ToPoint, actions::DiffClipboardWithSelectionData}; use futures::{FutureExt, select_biased}; use gpui::{ - AnyElement, AnyView, App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, - FocusHandle, Focusable, IntoElement, Render, Task, Window, + AnyElement, App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, FocusHandle, + Focusable, IntoElement, Render, Task, Window, }; use language::{self, Buffer, Point}; use project::Project; @@ -329,11 +329,11 @@ impl Item for TextDiffView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.diff_editor.to_any()) + Some(self.diff_editor.clone().into()) } else { None } diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index 9b52912b8ed09ce2dd7a4f2ea26f7106bfd11c31..5f96f8e03048a14f82626ab774a21aab02dc89bf 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -2,8 +2,8 @@ use collections::VecDeque; use copilot::Copilot; use editor::{Editor, EditorEvent, MultiBufferOffset, actions::MoveToEnd, scroll::Autoscroll}; use gpui::{ - AnyView, App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, - ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, + App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, + Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, }; use itertools::Itertools; use language::{LanguageServerId, language_settings::SoftWrap}; @@ -748,11 +748,11 @@ impl Item for LspLogView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.editor.to_any()) + Some(self.editor.clone().into()) } else { None } diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 1768b0f18541fd289126bad77ae46eded1aad326..68e3dca1ce07a1773856a3cacecf553d4c88f7e3 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -17,10 +17,9 @@ use editor::{ }; use futures::{StreamExt, stream::FuturesOrdered}; use gpui::{ - Action, AnyElement, AnyView, App, Axis, Context, Entity, EntityId, EventEmitter, FocusHandle, - Focusable, Global, Hsla, InteractiveElement, IntoElement, KeyContext, ParentElement, Point, - Render, SharedString, Styled, Subscription, Task, UpdateGlobal, WeakEntity, Window, actions, - div, + Action, AnyElement, App, Axis, Context, Entity, EntityId, EventEmitter, FocusHandle, Focusable, + Global, Hsla, InteractiveElement, IntoElement, KeyContext, ParentElement, Point, Render, + SharedString, Styled, Subscription, Task, UpdateGlobal, WeakEntity, Window, actions, div, }; use language::{Buffer, Language}; use menu::Confirm; @@ -497,7 +496,7 @@ impl Item for ProjectSearchView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { diff --git a/crates/svg_preview/Cargo.toml b/crates/svg_preview/Cargo.toml index 
e78a042180a62d31fd74da659df7afe8baa2caa7..18f55e28d5d1364d75492455508bb34a5e24422b 100644 --- a/crates/svg_preview/Cargo.toml +++ b/crates/svg_preview/Cargo.toml @@ -12,7 +12,7 @@ workspace = true path = "src/svg_preview.rs" [dependencies] -editor.workspace = true +multi_buffer.workspace = true file_icons.workspace = true gpui.workspace = true language.workspace = true diff --git a/crates/svg_preview/src/svg_preview_view.rs b/crates/svg_preview/src/svg_preview_view.rs index de27a7237d5f85b8dc18d47e09a6ac7fe22ee89f..a286dba437f746db24648b1c9cc06b30979b7110 100644 --- a/crates/svg_preview/src/svg_preview_view.rs +++ b/crates/svg_preview/src/svg_preview_view.rs @@ -1,13 +1,13 @@ use std::mem; use std::sync::Arc; -use editor::Editor; use file_icons::FileIcons; use gpui::{ App, Context, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, Render, RenderImage, Styled, Subscription, Task, WeakEntity, Window, div, img, }; use language::{Buffer, BufferEvent}; +use multi_buffer::MultiBuffer; use ui::prelude::*; use workspace::item::Item; use workspace::{Pane, Workspace}; @@ -34,7 +34,7 @@ pub enum SvgPreviewMode { impl SvgPreviewView { pub fn new( mode: SvgPreviewMode, - active_editor: Entity, + active_buffer: Entity, workspace_handle: WeakEntity, window: &mut Window, cx: &mut Context, @@ -48,11 +48,7 @@ impl SvgPreviewView { None }; - let buffer = active_editor - .read(cx) - .buffer() - .clone() - .read_with(cx, |buffer, _cx| buffer.as_singleton()); + let buffer = active_buffer.read_with(cx, |buffer, _cx| buffer.as_singleton()); let subscription = buffer .as_ref() @@ -84,10 +80,10 @@ impl SvgPreviewView { if let workspace::Event::ActiveItemChanged = event { let workspace = workspace.read(cx); if let Some(active_item) = workspace.active_item(cx) - && let Some(editor) = active_item.downcast::() - && Self::is_svg_file(&editor, cx) + && let Some(buffer) = active_item.downcast::() + && Self::is_svg_file(&buffer, cx) { - let Some(buffer) = editor.read(cx).buffer().read(cx).as_singleton() else { + let Some(buffer) = buffer.read(cx).as_singleton() else { return; }; if this.buffer.as_ref() != Some(&buffer) { @@ -142,10 +138,10 @@ impl SvgPreviewView { fn find_existing_preview_item_idx( pane: &Pane, - editor: &Entity, + buffer: &Entity, cx: &App, ) -> Option { - let buffer_id = editor.read(cx).buffer().entity_id(); + let buffer_id = buffer.entity_id(); pane.items_of_type::() .find(|view| { view.read(cx) @@ -156,25 +152,25 @@ impl SvgPreviewView { .and_then(|view| pane.index_for_item(&view)) } - pub fn resolve_active_item_as_svg_editor( + pub fn resolve_active_item_as_svg_buffer( workspace: &Workspace, cx: &mut Context, - ) -> Option> { + ) -> Option> { workspace .active_item(cx)? 
- .act_as::(cx) - .filter(|editor| Self::is_svg_file(&editor, cx)) + .act_as::(cx) + .filter(|buffer| Self::is_svg_file(&buffer, cx)) } fn create_svg_view( mode: SvgPreviewMode, workspace: &mut Workspace, - editor: Entity, + buffer: Entity, window: &mut Window, cx: &mut Context, ) -> Entity { let workspace_handle = workspace.weak_handle(); - SvgPreviewView::new(mode, editor, workspace_handle, window, cx) + SvgPreviewView::new(mode, buffer, workspace_handle, window, cx) } fn create_buffer_subscription( @@ -194,10 +190,8 @@ impl SvgPreviewView { ) } - pub fn is_svg_file(editor: &Entity, cx: &App) -> bool { - editor - .read(cx) - .buffer() + pub fn is_svg_file(buffer: &Entity, cx: &App) -> bool { + buffer .read(cx) .as_singleton() .and_then(|buffer| buffer.read(cx).file()) @@ -210,19 +204,19 @@ impl SvgPreviewView { pub fn register(workspace: &mut Workspace, _window: &mut Window, _cx: &mut Context) { workspace.register_action(move |workspace, _: &OpenPreview, window, cx| { - if let Some(editor) = Self::resolve_active_item_as_svg_editor(workspace, cx) - && Self::is_svg_file(&editor, cx) + if let Some(buffer) = Self::resolve_active_item_as_svg_buffer(workspace, cx) + && Self::is_svg_file(&buffer, cx) { let view = Self::create_svg_view( SvgPreviewMode::Default, workspace, - editor.clone(), + buffer.clone(), window, cx, ); workspace.active_pane().update(cx, |pane, cx| { if let Some(existing_view_idx) = - Self::find_existing_preview_item_idx(pane, &editor, cx) + Self::find_existing_preview_item_idx(pane, &buffer, cx) { pane.activate_item(existing_view_idx, true, true, window, cx); } else { @@ -234,7 +228,7 @@ impl SvgPreviewView { }); workspace.register_action(move |workspace, _: &OpenPreviewToTheSide, window, cx| { - if let Some(editor) = Self::resolve_active_item_as_svg_editor(workspace, cx) + if let Some(editor) = Self::resolve_active_item_as_svg_buffer(workspace, cx) && Self::is_svg_file(&editor, cx) { let editor_clone = editor.clone(); @@ -269,7 +263,7 @@ impl SvgPreviewView { }); workspace.register_action(move |workspace, _: &OpenFollowingPreview, window, cx| { - if let Some(editor) = Self::resolve_active_item_as_svg_editor(workspace, cx) + if let Some(editor) = Self::resolve_active_item_as_svg_buffer(workspace, cx) && Self::is_svg_file(&editor, cx) { let view = diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index e68b5a99ee352bb1f5c0242f68e9ab894362497e..1f37c961159b8adeb89c38a4063bc682724fbee5 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -11,9 +11,9 @@ use anyhow::Result; use client::{Client, proto}; use futures::{StreamExt, channel::mpsc}; use gpui::{ - Action, AnyElement, AnyView, App, AppContext, Context, Entity, EntityId, EventEmitter, - FocusHandle, Focusable, Font, HighlightStyle, Pixels, Point, Render, SharedString, Task, - WeakEntity, Window, + Action, AnyElement, AnyEntity, AnyView, App, AppContext, Context, Entity, EntityId, + EventEmitter, FocusHandle, Focusable, Font, HighlightStyle, Pixels, Point, Render, + SharedString, Task, WeakEntity, Window, }; use project::{Project, ProjectEntryId, ProjectPath}; pub use settings::{ @@ -279,7 +279,7 @@ pub trait Item: Focusable + EventEmitter + Render + Sized { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if TypeId::of::() == type_id { Some(self_handle.clone().into()) } else { @@ -454,7 +454,7 @@ pub trait ItemHandle: 'static + Send { fn workspace_deactivated(&self, window: &mut Window, cx: &mut App); fn navigate(&self, data: Box, window: 
&mut Window, cx: &mut App) -> bool; fn item_id(&self) -> EntityId; - fn to_any(&self) -> AnyView; + fn to_any_view(&self) -> AnyView; fn is_dirty(&self, cx: &App) -> bool; fn has_deleted_file(&self, cx: &App) -> bool; fn has_conflict(&self, cx: &App) -> bool; @@ -480,7 +480,7 @@ pub trait ItemHandle: 'static + Send { window: &mut Window, cx: &mut App, ) -> Task>; - fn act_as_type(&self, type_id: TypeId, cx: &App) -> Option; + fn act_as_type(&self, type_id: TypeId, cx: &App) -> Option; fn to_followable_item_handle(&self, cx: &App) -> Option>; fn to_serializable_item_handle(&self, cx: &App) -> Option>; fn on_release( @@ -513,7 +513,7 @@ pub trait WeakItemHandle: Send + Sync { impl dyn ItemHandle { pub fn downcast(&self) -> Option> { - self.to_any().downcast().ok() + self.to_any_view().downcast().ok() } pub fn act_as(&self, cx: &App) -> Option> { @@ -911,7 +911,7 @@ impl ItemHandle for Entity { self.entity_id() } - fn to_any(&self) -> AnyView { + fn to_any_view(&self) -> AnyView { self.clone().into() } @@ -964,7 +964,7 @@ impl ItemHandle for Entity { self.update(cx, |item, cx| item.reload(project, window, cx)) } - fn act_as_type<'a>(&'a self, type_id: TypeId, cx: &'a App) -> Option { + fn act_as_type<'a>(&'a self, type_id: TypeId, cx: &'a App) -> Option { self.read(cx).act_as_type(type_id, self, cx) } @@ -1009,7 +1009,7 @@ impl ItemHandle for Entity { } fn to_serializable_item_handle(&self, cx: &App) -> Option> { - SerializableItemRegistry::view_to_serializable_item_handle(self.to_any(), cx) + SerializableItemRegistry::view_to_serializable_item_handle(self.to_any_view(), cx) } fn preserve_preview(&self, cx: &App) -> bool { @@ -1030,13 +1030,13 @@ impl ItemHandle for Entity { impl From> for AnyView { fn from(val: Box) -> Self { - val.to_any() + val.to_any_view() } } impl From<&Box> for AnyView { fn from(val: &Box) -> Self { - val.to_any() + val.to_any_view() } } @@ -1247,7 +1247,7 @@ impl FollowableItemHandle for Entity { window: &mut Window, cx: &mut App, ) -> Option { - let existing = existing.to_any().downcast::().ok()?; + let existing = existing.to_any_view().downcast::().ok()?; self.read(cx).dedup(existing.read(cx), window, cx) } diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index dcfd634dfd37c7e5a078f9cef862acf333c84a2a..f149c320a1ee2a17c67781828eacdd412affe663 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1203,7 +1203,7 @@ impl Pane { pub fn items_of_type(&self) -> impl '_ + Iterator> { self.items .iter() - .filter_map(|item| item.to_any().downcast().ok()) + .filter_map(|item| item.to_any_view().downcast().ok()) } pub fn active_item(&self) -> Option> { @@ -3869,7 +3869,7 @@ impl Render for Pane { .size_full() .overflow_hidden() .child(self.toolbar.clone()) - .child(item.to_any()) + .child(item.to_any_view()) } else { let placeholder = div .id("pane_placeholder") @@ -6957,7 +6957,7 @@ mod tests { .enumerate() .map(|(ix, item)| { let mut state = item - .to_any() + .to_any_view() .downcast::() .unwrap() .read(cx) diff --git a/crates/workspace/src/searchable.rs b/crates/workspace/src/searchable.rs index 0becddc1641e8abb388837187f47f0a80327a6b5..64dad0345fa323eb724b6b51656b841c8d433688 100644 --- a/crates/workspace/src/searchable.rs +++ b/crates/workspace/src/searchable.rs @@ -399,13 +399,13 @@ impl SearchableItemHandle for Entity { impl From> for AnyView { fn from(this: Box) -> Self { - this.to_any() + this.to_any_view() } } impl From<&Box> for AnyView { fn from(this: &Box) -> Self { - this.to_any() + this.to_any_view() } } diff 
--git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 14b33af6cd1f8778a9bbafeb8e9854cc9fc11247..6e553ac93588ab4a127437adc03bf9323d47014f 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2869,7 +2869,7 @@ impl Workspace { pub fn active_item_as(&self, cx: &App) -> Option> { let item = self.active_item(cx)?; - item.to_any().downcast::().ok() + item.to_any_view().downcast::().ok() } fn active_project_path(&self, cx: &App) -> Option { @@ -11214,7 +11214,7 @@ mod tests { // Now we can check if the handle we got back errored or not assert_eq!( - handle.to_any().entity_type(), + handle.to_any_view().entity_type(), TypeId::of::() ); @@ -11227,7 +11227,7 @@ mod tests { .unwrap(); assert_eq!( - handle.to_any().entity_type(), + handle.to_any_view().entity_type(), TypeId::of::() ); @@ -11276,7 +11276,7 @@ mod tests { // This _must_ be the second item registered assert_eq!( - handle.to_any().entity_type(), + handle.to_any_view().entity_type(), TypeId::of::() ); diff --git a/crates/zed/src/zed/quick_action_bar/preview.rs b/crates/zed/src/zed/quick_action_bar/preview.rs index 630d243cf6971ecebda694091acbfd5ba4c049e4..5d43e79542357977b06fbbd884472f94ad3595c8 100644 --- a/crates/zed/src/zed/quick_action_bar/preview.rs +++ b/crates/zed/src/zed/quick_action_bar/preview.rs @@ -32,7 +32,7 @@ impl QuickActionBar { .is_some() { preview_type = Some(PreviewType::Markdown); - } else if SvgPreviewView::resolve_active_item_as_svg_editor(workspace, cx).is_some() + } else if SvgPreviewView::resolve_active_item_as_svg_buffer(workspace, cx).is_some() { preview_type = Some(PreviewType::Svg); } diff --git a/crates/zeta2_tools/src/zeta2_context_view.rs b/crates/zeta2_tools/src/zeta2_context_view.rs index 1826bd22df6d08ce717ef9bdf0070f88ad63c433..759d0d0a3da1adbd9e61fa05b5d305ca9de1f823 100644 --- a/crates/zeta2_tools/src/zeta2_context_view.rs +++ b/crates/zeta2_tools/src/zeta2_context_view.rs @@ -24,7 +24,7 @@ use ui::{ IconSize, InteractiveElement, IntoElement, ListHeader, ListItem, StyledTypography, div, h_flex, v_flex, }; -use workspace::{Item, ItemHandle as _}; +use workspace::Item; use zeta2::{ Zeta, ZetaContextRetrievalDebugInfo, ZetaContextRetrievalStartedDebugInfo, ZetaDebugInfo, ZetaSearchQueryDebugInfo, @@ -402,11 +402,11 @@ impl Item for Zeta2ContextView { type_id: TypeId, self_handle: &'a Entity, _: &'a App, - ) -> Option { + ) -> Option { if type_id == TypeId::of::() { - Some(self_handle.to_any()) + Some(self_handle.clone().into()) } else if type_id == TypeId::of::() { - Some(self.runs.get(self.current_ix)?.editor.to_any()) + Some(self.runs.get(self.current_ix)?.editor.clone().into()) } else { None } From 9094eb811b6897c2dd327ddc801c4507bce0e1c7 Mon Sep 17 00:00:00 2001 From: Bhuminjay Soni Date: Thu, 20 Nov 2025 17:00:34 +0530 Subject: [PATCH 0246/1030] git: Compress diff for commit message generation (#42835) This PR compresses diff capped at 20000 bytes by: - Truncation of all lines to 256 chars - Iteratively removing last hunks from each file until size <= 20000 bytes. 
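For illustration, here is a minimal sketch of that two-step strategy (cap line length, then greedily drop trailing hunks), assuming the diff has already been split into per-file headers and hunks. The `FilePatch` model and `compress` helper below are placeholder names for this sketch only, not part of this PR; the actual implementation is `GitPanel::compress_commit_diff` in the diff that follows.

```rust
// Sketch only: the real code also records how many hunks were skipped per file.
struct FilePatch {
    header: String,     // e.g. "--- a/foo\n+++ b/foo\n"
    hunks: Vec<String>, // each hunk starts with its "@@ ... @@" line
}

fn compress(files: &mut Vec<FilePatch>, max_bytes: usize) -> String {
    // Step 1: cap every line at 256 characters.
    for file in files.iter_mut() {
        for hunk in file.hunks.iter_mut() {
            *hunk = hunk
                .lines()
                .map(|line| {
                    // Cut on a char boundary so multi-byte characters can't split.
                    let cut = line.char_indices().nth(256).map_or(line.len(), |(i, _)| i);
                    if cut < line.len() {
                        format!("{}...[truncated]\n", &line[..cut])
                    } else {
                        format!("{line}\n")
                    }
                })
                .collect();
        }
    }

    let total = |files: &[FilePatch]| -> usize {
        files
            .iter()
            .map(|f| f.header.len() + f.hunks.iter().map(String::len).sum::<usize>())
            .sum()
    };

    // Step 2: drop the last hunk of whichever file has the most hunks left,
    // until the diff fits (each file keeps at least one hunk).
    while total(files.as_slice()) > max_bytes {
        let Some(file) = files
            .iter_mut()
            .filter(|f| f.hunks.len() > 1)
            .max_by_key(|f| f.hunks.len())
        else {
            break;
        };
        file.hunks.pop();
    }

    files
        .iter()
        .map(|f| format!("{}{}", f.header, f.hunks.concat()))
        .collect::<Vec<_>>()
        .join("\n")
}
```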
Closes #34486 Release Notes: - Improved: Compress large diffs for commit message generation (thanks @11happy) --------- Signed-off-by: 11happy Co-authored-by: Oleksiy Syvokon --- crates/git_ui/src/git_panel.rs | 224 +++++++++++++++++++++++++++++++-- 1 file changed, 217 insertions(+), 7 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 5c86d79eee41e61d7cc904bfe148be6e8c9abdd2..e810672c69b9ed602ddf76c2ca1f1035b958cd26 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -11,6 +11,7 @@ use crate::{ use agent_settings::AgentSettings; use anyhow::Context as _; use askpass::AskPassDelegate; +use cloud_llm_client::CompletionIntent; use db::kvp::KEY_VALUE_STORE; use editor::{ Direction, Editor, EditorElement, EditorMode, MultiBuffer, MultiBufferOffset, @@ -68,14 +69,11 @@ use ui::{ use util::paths::PathStyle; use util::{ResultExt, TryFutureExt, maybe}; use workspace::SERIALIZATION_THROTTLE_TIME; - -use cloud_llm_client::CompletionIntent; use workspace::{ Workspace, dock::{DockPosition, Panel, PanelEvent}, notifications::{DetachAndPromptErr, ErrorMessagePrompt, NotificationId, NotifyResultExt}, }; - actions!( git_panel, [ @@ -275,6 +273,69 @@ impl GitStatusEntry { } } +struct TruncatedPatch { + header: String, + hunks: Vec, + hunks_to_keep: usize, +} + +impl TruncatedPatch { + fn from_unified_diff(patch_str: &str) -> Option { + let lines: Vec<&str> = patch_str.lines().collect(); + if lines.len() < 2 { + return None; + } + let header = format!("{}\n{}\n", lines[0], lines[1]); + let mut hunks = Vec::new(); + let mut current_hunk = String::new(); + for line in &lines[2..] { + if line.starts_with("@@") { + if !current_hunk.is_empty() { + hunks.push(current_hunk); + } + current_hunk = format!("{}\n", line); + } else if !current_hunk.is_empty() { + current_hunk.push_str(line); + current_hunk.push('\n'); + } + } + if !current_hunk.is_empty() { + hunks.push(current_hunk); + } + if hunks.is_empty() { + return None; + } + let hunks_to_keep = hunks.len(); + Some(TruncatedPatch { + header, + hunks, + hunks_to_keep, + }) + } + fn calculate_size(&self) -> usize { + let mut size = self.header.len(); + for (i, hunk) in self.hunks.iter().enumerate() { + if i < self.hunks_to_keep { + size += hunk.len(); + } + } + size + } + fn to_string(&self) -> String { + let mut out = self.header.clone(); + for (i, hunk) in self.hunks.iter().enumerate() { + if i < self.hunks_to_keep { + out.push_str(hunk); + } + } + let skipped_hunks = self.hunks.len() - self.hunks_to_keep; + if skipped_hunks > 0 { + out.push_str(&format!("[...skipped {} hunks...]\n", skipped_hunks)); + } + out + } +} + pub struct GitPanel { pub(crate) active_repository: Option>, pub(crate) commit_editor: Entity, @@ -1816,6 +1877,96 @@ impl GitPanel { self.generate_commit_message(cx); } + fn split_patch(patch: &str) -> Vec { + let mut result = Vec::new(); + let mut current_patch = String::new(); + + for line in patch.lines() { + if line.starts_with("---") && !current_patch.is_empty() { + result.push(current_patch.trim_end_matches('\n').into()); + current_patch = String::new(); + } + current_patch.push_str(line); + current_patch.push('\n'); + } + + if !current_patch.is_empty() { + result.push(current_patch.trim_end_matches('\n').into()); + } + + result + } + fn truncate_iteratively(patch: &str, max_bytes: usize) -> String { + let mut current_size = patch.len(); + if current_size <= max_bytes { + return patch.to_string(); + } + let file_patches = Self::split_patch(patch); + let mut 
file_infos: Vec = file_patches + .iter() + .filter_map(|patch| TruncatedPatch::from_unified_diff(patch)) + .collect(); + + if file_infos.is_empty() { + return patch.to_string(); + } + + current_size = file_infos.iter().map(|f| f.calculate_size()).sum::(); + while current_size > max_bytes { + let file_idx = file_infos + .iter() + .enumerate() + .filter(|(_, f)| f.hunks_to_keep > 1) + .max_by_key(|(_, f)| f.hunks_to_keep) + .map(|(idx, _)| idx); + match file_idx { + Some(idx) => { + let file = &mut file_infos[idx]; + let size_before = file.calculate_size(); + file.hunks_to_keep -= 1; + let size_after = file.calculate_size(); + let saved = size_before.saturating_sub(size_after); + current_size = current_size.saturating_sub(saved); + } + None => { + break; + } + } + } + + file_infos + .iter() + .map(|info| info.to_string()) + .collect::>() + .join("\n") + } + + pub fn compress_commit_diff(diff_text: &str, max_bytes: usize) -> String { + if diff_text.len() <= max_bytes { + return diff_text.to_string(); + } + + let mut compressed = diff_text + .lines() + .map(|line| { + if line.len() > 256 { + format!("{}...[truncated]\n", &line[..256]) + } else { + format!("{}\n", line) + } + }) + .collect::>() + .join(""); + + if compressed.len() <= max_bytes { + return compressed; + } + + compressed = Self::truncate_iteratively(&compressed, max_bytes); + + compressed + } + /// Generates a commit message using an LLM. pub fn generate_commit_message(&mut self, cx: &mut Context) { if !self.can_commit() || !AgentSettings::get_global(cx).enabled(cx) { @@ -1874,10 +2025,8 @@ impl GitPanel { } }; - const ONE_MB: usize = 1_000_000; - if diff_text.len() > ONE_MB { - diff_text = diff_text.chars().take(ONE_MB).collect() - } + const MAX_DIFF_BYTES: usize = 20_000; + diff_text = Self::compress_commit_diff(&diff_text, MAX_DIFF_BYTES); let subject = this.update(cx, |this, cx| { this.commit_editor.read(cx).text(cx).lines().next().map(ToOwned::to_owned).unwrap_or_default() @@ -5032,6 +5181,7 @@ mod tests { status::{StatusCode, UnmergedStatus, UnmergedStatusCode}, }; use gpui::{TestAppContext, UpdateGlobal, VisualTestContext}; + use indoc::indoc; use project::FakeFs; use serde_json::json; use settings::SettingsStore; @@ -5731,4 +5881,64 @@ mod tests { ); } } + + #[test] + fn test_compress_diff_no_truncation() { + let diff = indoc! {" + --- a/file.txt + +++ b/file.txt + @@ -1,2 +1,2 @@ + -old + +new + "}; + let result = GitPanel::compress_commit_diff(diff, 1000); + assert_eq!(result, diff); + } + + #[test] + fn test_compress_diff_truncate_long_lines() { + let long_line = "a".repeat(300); + let diff = indoc::formatdoc! {" + --- a/file.txt + +++ b/file.txt + @@ -1,2 +1,3 @@ + context + +{} + more context + ", long_line}; + let result = GitPanel::compress_commit_diff(&diff, 100); + assert!(result.contains("...[truncated]")); + assert!(result.len() < diff.len()); + } + + #[test] + fn test_compress_diff_truncate_hunks() { + let diff = indoc! {" + --- a/file.txt + +++ b/file.txt + @@ -1,2 +1,2 @@ + context + -old1 + +new1 + @@ -5,2 +5,2 @@ + context 2 + -old2 + +new2 + @@ -10,2 +10,2 @@ + context 3 + -old3 + +new3 + "}; + let result = GitPanel::compress_commit_diff(diff, 100); + let expected = indoc! {" + --- a/file.txt + +++ b/file.txt + @@ -1,2 +1,2 @@ + context + -old1 + +new1 + [...skipped 2 hunks...] 
+ "}; + assert_eq!(result, expected); + } } From dd5482a899c2228dd6bc1278faf99dc5c8da2f66 Mon Sep 17 00:00:00 2001 From: Arun Chapagain <99020948+ArunChapagain@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:18:13 +0545 Subject: [PATCH 0247/1030] docs: Update developing extension docs for updating specific submodule (#42548) Release Notes: - N/A --- docs/src/extensions/developing-extensions.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/src/extensions/developing-extensions.md b/docs/src/extensions/developing-extensions.md index e86533c0c0c357147d291894167b149d99af5d1d..539cbe3d3044afe30b6ced4f3ceb61f537ebde75 100644 --- a/docs/src/extensions/developing-extensions.md +++ b/docs/src/extensions/developing-extensions.md @@ -109,6 +109,18 @@ git submodule init git submodule update ``` +## Update Your Extension + +When developing/updating your extension, you will likely need to update its content from its submodule in the extensions repository. +To quickly fetch the latest code for only specific extension (and avoid updating all others), use the specific path: + +```sh +# From the root of the repository: +git submodule update --remote extensions/your-extension-name +``` + +> Note: If you need to update all submodules (e.g., if multiple extensions have changed, or for a full clean build), you can run `git submodule update` without a path, but this will take longer. + ## Extension License Requirements As of October 1st, 2025, extension repositories must include a license. From 61f512af032f73fcc52dfdc18d9e1b31934b8e15 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Thu, 20 Nov 2025 13:48:39 +0100 Subject: [PATCH 0248/1030] Move protobuf action to default linux runner (#43085) Release Notes: - N/A --------- Co-authored-by: Conrad Irwin --- .github/workflows/run_tests.yml | 5 ++++- tooling/xtask/src/tasks/workflows/run_tests.rs | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 883be0c1905507639664e6d1f35b4c7e48d0928e..12ca53431994ee2090c9f7d83ee19ac92aa313be 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -493,7 +493,10 @@ jobs: needs: - orchestrate if: needs.orchestrate.outputs.run_tests == 'true' - runs-on: self-mini-macos + runs-on: namespace-profile-16x32-ubuntu-2204 + env: + GIT_AUTHOR_NAME: Protobuf Action + GIT_AUTHOR_EMAIL: ci@zed.dev steps: - name: steps::checkout_repo uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 diff --git a/tooling/xtask/src/tasks/workflows/run_tests.rs b/tooling/xtask/src/tasks/workflows/run_tests.rs index 31ebddc8b3dfff2bd93de7be4bce39cbb0a6195c..5894f92df88d8154ddd0eeb49b5cb4825931879a 100644 --- a/tooling/xtask/src/tasks/workflows/run_tests.rs +++ b/tooling/xtask/src/tasks/workflows/run_tests.rs @@ -363,7 +363,9 @@ pub(crate) fn check_postgres_and_protobuf_migrations() -> NamedJob { named::job( release_job(&[]) - .runs_on(runners::MAC_DEFAULT) + .runs_on(runners::LINUX_DEFAULT) + .add_env(("GIT_AUTHOR_NAME", "Protobuf Action")) + .add_env(("GIT_AUTHOR_EMAIL", "ci@zed.dev")) .add_step(steps::checkout_repo().with(("fetch-depth", 0))) // fetch full history .add_step(remove_untracked_files()) .add_step(ensure_fresh_merge()) From e033829ef27a704e6704d51f4915b48767545c63 Mon Sep 17 00:00:00 2001 From: Vinh Tran Date: Thu, 20 Nov 2025 21:52:15 +0700 Subject: [PATCH 0249/1030] Fix diff highlights (#38384) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Per 
https://github.com/zed-industries/zed/discussions/23371#discussioncomment-13533635, the issue is not new, and I don't know how to solve the problem more holistically yet.

None of the native themes have a spec for `@diff.plus` and `@diff.minus`, leaving additions and deletions unhighlighted. For diff files, the most valuable highlighting comes from exactly what we're missing, so I think this is worth fixing.

Perhaps the ideal fix would be standardizing and documenting captures such as `@diff.plus` and `@diff.minus` on https://zed.dev/docs/extensions/languages#syntax-highlighting for theme writers to adopt. But the existing list of captures seems to be language-agnostic, so I'm not sure that's the best way forward.

Per https://github.com/the-mikedavis/tree-sitter-diff/pull/18#issuecomment-2569785346, `tree-sitter-diff`'s author prefers using `@keyword` and `@string` so that `tree-sitter highlight` works out of the box, which seems like an acceptable choice for Zed. Another approach is simply adding `@diff.plus` and `@diff.minus` to the native themes. Let me know if I should pursue this instead.

Before
Screenshot 2025-09-18 at 11 16 14 AM
After
Screenshot 2025-09-18 at 12 11
15 PM Screenshot 2025-09-18 at 12 12 14 PM Closes https://github.com/zed-industries/extensions/issues/490 Release Notes: - Fixed highlighting for addition and deletion for diff language --------- Co-authored-by: MrSubidubi --- crates/languages/src/diff/highlights.scm | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/languages/src/diff/highlights.scm b/crates/languages/src/diff/highlights.scm index 70ec01aa1165b6de0a23040ff3f71a32ba9fc314..4a344389032b9ff12f7c00e42adffb00721737e1 100644 --- a/crates/languages/src/diff/highlights.scm +++ b/crates/languages/src/diff/highlights.scm @@ -3,12 +3,14 @@ [ (addition) (new_file) -] @diff.plus +] @string +;; TODO: This should eventually be `@diff.plus` with a fallback of `@string` [ (deletion) (old_file) -] @diff.minus +] @keyword +;; TODO: This should eventually be `@diff.minus` with a fallback of `@keyword` (commit) @constant @@ -18,8 +20,6 @@ "diff" @function (argument) @variable.parameter) -(filename) @string.special.path - (mode) @number ([ From 56401fc99cddbf02f9c1e9ca5f9f849f909b5f83 Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Thu, 20 Nov 2025 10:12:59 -0500 Subject: [PATCH 0250/1030] debugger: Allow users to include PickProcessId in debug tasks and resolve Pid (#42913) Closes #33286 This PR adds support for Zed's `$ZED_PICK_PID` command in debug configurations, which allows users to select a process to attach to at debug time. When this variable is present in a debug configuration, Zed automatically opens a process picker modal. Follow up for this will be integrating this variable in the task system instead of just the debug configuration system. Release Notes: - Added `$ZED_PICK_PID` variable for debug configurations, allowing users to select which process to attach the debugger to at runtime --------- Co-authored-by: Remco Smits --- crates/debugger_ui/src/attach_modal.rs | 169 +++++++++++-------- crates/debugger_ui/src/new_process_modal.rs | 27 ++- crates/debugger_ui/src/session/running.rs | 73 +++++++- crates/debugger_ui/src/tests/attach_modal.rs | 160 ++++++++++++++++-- crates/task/src/task.rs | 25 ++- crates/task/src/vscode_debug_format.rs | 43 ++++- 6 files changed, 410 insertions(+), 87 deletions(-) diff --git a/crates/debugger_ui/src/attach_modal.rs b/crates/debugger_ui/src/attach_modal.rs index e39a842f63590375898c9870c345574e1932a788..64146169f53cfe44c3bdcb59b93e78d0f9223abd 100644 --- a/crates/debugger_ui/src/attach_modal.rs +++ b/crates/debugger_ui/src/attach_modal.rs @@ -1,4 +1,5 @@ use dap::{DapRegistry, DebugRequest}; +use futures::channel::oneshot; use fuzzy::{StringMatch, StringMatchCandidate}; use gpui::{AppContext, DismissEvent, Entity, EventEmitter, Focusable, Render, Task}; use gpui::{Subscription, WeakEntity}; @@ -9,6 +10,7 @@ use task::ZedDebugConfig; use util::debug_panic; use std::sync::Arc; + use sysinfo::{ProcessRefreshKind, RefreshKind, System, UpdateKind}; use ui::{Context, Tooltip, prelude::*}; use ui::{ListItem, ListItemSpacing}; @@ -23,11 +25,16 @@ pub(super) struct Candidate { pub(super) command: Vec, } +pub(crate) enum ModalIntent { + ResolveProcessId(Option>>), + AttachToProcess(ZedDebugConfig), +} + pub(crate) struct AttachModalDelegate { selected_index: usize, matches: Vec, placeholder_text: Arc, - pub(crate) definition: ZedDebugConfig, + pub(crate) intent: ModalIntent, workspace: WeakEntity, candidates: Arc<[Candidate]>, } @@ -35,13 +42,13 @@ pub(crate) struct AttachModalDelegate { impl AttachModalDelegate { fn new( 
workspace: WeakEntity, - definition: ZedDebugConfig, + intent: ModalIntent, candidates: Arc<[Candidate]>, ) -> Self { Self { workspace, - definition, candidates, + intent, selected_index: 0, matches: Vec::default(), placeholder_text: Arc::from("Select the process you want to attach the debugger to"), @@ -55,8 +62,8 @@ pub struct AttachModal { } impl AttachModal { - pub fn new( - definition: ZedDebugConfig, + pub(crate) fn new( + intent: ModalIntent, workspace: WeakEntity, project: Entity, modal: bool, @@ -65,7 +72,7 @@ impl AttachModal { ) -> Self { let processes_task = get_processes_for_project(&project, cx); - let modal = Self::with_processes(workspace, definition, Arc::new([]), modal, window, cx); + let modal = Self::with_processes(workspace, Arc::new([]), modal, intent, window, cx); cx.spawn_in(window, async move |this, cx| { let processes = processes_task.await; @@ -84,15 +91,15 @@ impl AttachModal { pub(super) fn with_processes( workspace: WeakEntity, - definition: ZedDebugConfig, processes: Arc<[Candidate]>, modal: bool, + intent: ModalIntent, window: &mut Window, cx: &mut Context, ) -> Self { let picker = cx.new(|cx| { Picker::uniform_list( - AttachModalDelegate::new(workspace, definition, processes), + AttachModalDelegate::new(workspace, intent, processes), window, cx, ) @@ -207,7 +214,7 @@ impl PickerDelegate for AttachModalDelegate { }) } - fn confirm(&mut self, secondary: bool, window: &mut Window, cx: &mut Context>) { + fn confirm(&mut self, _secondary: bool, window: &mut Window, cx: &mut Context>) { let candidate = self .matches .get(self.selected_index()) @@ -216,69 +223,86 @@ impl PickerDelegate for AttachModalDelegate { self.candidates.get(ix) }); - let Some(candidate) = candidate else { - return cx.emit(DismissEvent); - }; + match &mut self.intent { + ModalIntent::ResolveProcessId(sender) => { + cx.emit(DismissEvent); - match &mut self.definition.request { - DebugRequest::Attach(config) => { - config.process_id = Some(candidate.pid); - } - DebugRequest::Launch(_) => { - debug_panic!("Debugger attach modal used on launch debug config"); - return; + if let Some(sender) = sender.take() { + sender + .send(candidate.map(|candidate| candidate.pid as i32)) + .ok(); + } } - } - - let workspace = self.workspace.clone(); - let Some(panel) = workspace - .update(cx, |workspace, cx| workspace.panel::(cx)) - .ok() - .flatten() - else { - return; - }; - - if secondary { - // let Some(id) = worktree_id else { return }; - // cx.spawn_in(window, async move |_, cx| { - // panel - // .update_in(cx, |debug_panel, window, cx| { - // debug_panel.save_scenario(&debug_scenario, id, window, cx) - // })? 
- // .await?; - // anyhow::Ok(()) - // }) - // .detach_and_log_err(cx); - } - let Some(adapter) = cx.read_global::(|registry, _| { - registry.adapter(&self.definition.adapter) - }) else { - return; - }; - - let definition = self.definition.clone(); - cx.spawn_in(window, async move |this, cx| { - let Ok(scenario) = adapter.config_from_zed_format(definition).await else { - return; - }; + ModalIntent::AttachToProcess(definition) => { + let Some(candidate) = candidate else { + return cx.emit(DismissEvent); + }; + + match &mut definition.request { + DebugRequest::Attach(config) => { + config.process_id = Some(candidate.pid); + } + DebugRequest::Launch(_) => { + debug_panic!("Debugger attach modal used on launch debug config"); + return; + } + } - panel - .update_in(cx, |panel, window, cx| { - panel.start_session(scenario, Default::default(), None, None, window, cx); + let workspace = self.workspace.clone(); + let Some(panel) = workspace + .update(cx, |workspace, cx| workspace.panel::(cx)) + .ok() + .flatten() + else { + return; + }; + + let Some(adapter) = cx.read_global::(|registry, _| { + registry.adapter(&definition.adapter) + }) else { + return; + }; + + let definition = definition.clone(); + cx.spawn_in(window, async move |this, cx| { + let Ok(scenario) = adapter.config_from_zed_format(definition).await else { + return; + }; + + panel + .update_in(cx, |panel, window, cx| { + panel.start_session( + scenario, + Default::default(), + None, + None, + window, + cx, + ); + }) + .ok(); + this.update(cx, |_, cx| { + cx.emit(DismissEvent); + }) + .ok(); }) - .ok(); - this.update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - }) - .detach(); + .detach(); + } + } } fn dismissed(&mut self, _window: &mut Window, cx: &mut Context>) { self.selected_index = 0; + match &mut self.intent { + ModalIntent::ResolveProcessId(sender) => { + if let Some(sender) = sender.take() { + sender.send(None).ok(); + } + } + ModalIntent::AttachToProcess(_) => {} + } + cx.emit(DismissEvent); } @@ -338,7 +362,7 @@ fn get_processes_for_project(project: &Entity, cx: &mut App) -> Task, cx: &mut App) -> Task) -> Vec { +#[cfg(test)] +pub(crate) fn set_candidates( + modal: &AttachModal, + candidates: Arc<[Candidate]>, + window: &mut Window, + cx: &mut Context, +) { + modal.picker.update(cx, |picker, cx| { + picker.delegate.candidates = candidates; + picker.refresh(window, cx); + }); +} + +#[cfg(test)] +pub(crate) fn process_names(modal: &AttachModal, cx: &mut Context) -> Vec { modal.picker.read_with(cx, |picker, _| { picker .delegate diff --git a/crates/debugger_ui/src/new_process_modal.rs b/crates/debugger_ui/src/new_process_modal.rs index 174b8759b418946e1426d6194351a8212888c1d6..c343110b47527adc0f8d4e3e097a5f769b80682c 100644 --- a/crates/debugger_ui/src/new_process_modal.rs +++ b/crates/debugger_ui/src/new_process_modal.rs @@ -29,10 +29,13 @@ use ui::{ KeyBinding, ListItem, ListItemSpacing, ToggleButtonGroup, ToggleButtonSimple, ToggleState, Tooltip, prelude::*, }; -use util::{ResultExt, rel_path::RelPath, shell::ShellKind}; +use util::{ResultExt, debug_panic, rel_path::RelPath, shell::ShellKind}; use workspace::{ModalView, Workspace, notifications::DetachAndPromptErr, pane}; -use crate::{attach_modal::AttachModal, debugger_panel::DebugPanel}; +use crate::{ + attach_modal::{AttachModal, ModalIntent}, + debugger_panel::DebugPanel, +}; pub(super) struct NewProcessModal { workspace: WeakEntity, @@ -395,8 +398,15 @@ impl NewProcessModal { this.attach_picker.update(cx, |this, cx| { this.picker.update(cx, |this, cx| { - 
this.delegate.definition.adapter = adapter.0.clone(); - this.focus(window, cx); + match &mut this.delegate.intent { + ModalIntent::AttachToProcess(definition) => { + definition.adapter = adapter.0.clone(); + this.focus(window, cx); + }, + ModalIntent::ResolveProcessId(_) => { + debug_panic!("Attach picker attempted to update config when in resolve Process ID mode"); + } + } }) }); } @@ -942,7 +952,14 @@ impl AttachMode { stop_on_entry: Some(false), }; let attach_picker = cx.new(|cx| { - let modal = AttachModal::new(definition.clone(), workspace, project, false, window, cx); + let modal = AttachModal::new( + ModalIntent::AttachToProcess(definition.clone()), + workspace, + project, + false, + window, + cx, + ); window.focus(&modal.focus_handle(cx)); modal diff --git a/crates/debugger_ui/src/session/running.rs b/crates/debugger_ui/src/session/running.rs index 0e21ef1268412418c381fc14617a917f9529834d..b82f839edee82f884c1419d44a2344c39c8abd0d 100644 --- a/crates/debugger_ui/src/session/running.rs +++ b/crates/debugger_ui/src/session/running.rs @@ -5,16 +5,23 @@ pub(crate) mod memory_view; pub(crate) mod module_list; pub mod stack_frame_list; pub mod variable_list; -use std::{any::Any, ops::ControlFlow, path::PathBuf, sync::Arc, time::Duration}; +use std::{ + any::Any, + ops::ControlFlow, + path::PathBuf, + sync::{Arc, LazyLock}, + time::Duration, +}; use crate::{ ToggleExpandItem, + attach_modal::{AttachModal, ModalIntent}, new_process_modal::resolve_path, persistence::{self, DebuggerPaneItem, SerializedLayout}, session::running::memory_view::MemoryView, }; -use anyhow::{Context as _, Result, anyhow}; +use anyhow::{Context as _, Result, anyhow, bail}; use breakpoint_list::BreakpointList; use collections::{HashMap, IndexMap}; use console::Console; @@ -56,6 +63,9 @@ use workspace::{ Workspace, item::TabContentParams, move_item, pane::Event, }; +static PROCESS_ID_PLACEHOLDER: LazyLock = + LazyLock::new(|| task::VariableName::PickProcessId.template_value()); + pub struct RunningState { session: Entity, thread_id: Option, @@ -653,6 +663,40 @@ impl RunningState { } } + pub(crate) fn contains_substring(config: &serde_json::Value, substring: &str) -> bool { + match config { + serde_json::Value::Object(obj) => obj + .values() + .any(|value| Self::contains_substring(value, substring)), + serde_json::Value::Array(array) => array + .iter() + .any(|value| Self::contains_substring(value, substring)), + serde_json::Value::String(s) => s.contains(substring), + _ => false, + } + } + + pub(crate) fn substitute_process_id_in_config(config: &mut serde_json::Value, process_id: i32) { + match config { + serde_json::Value::Object(obj) => { + obj.values_mut().for_each(|value| { + Self::substitute_process_id_in_config(value, process_id); + }); + } + serde_json::Value::Array(array) => { + array.iter_mut().for_each(|value| { + Self::substitute_process_id_in_config(value, process_id); + }); + } + serde_json::Value::String(s) => { + if s.contains(PROCESS_ID_PLACEHOLDER.as_str()) { + *s = s.replace(PROCESS_ID_PLACEHOLDER.as_str(), &process_id.to_string()); + } + } + _ => {} + } + } + pub(crate) fn relativize_paths( key: Option<&str>, config: &mut serde_json::Value, @@ -955,6 +999,31 @@ impl RunningState { Self::relativize_paths(None, &mut config, &task_context); Self::substitute_variables_in_config(&mut config, &task_context); + if Self::contains_substring(&config, PROCESS_ID_PLACEHOLDER.as_str()) || label.as_ref().contains(PROCESS_ID_PLACEHOLDER.as_str()) { + let (tx, rx) = futures::channel::oneshot::channel::>(); + + let 
weak_workspace_clone = weak_workspace.clone(); + weak_workspace.update_in(cx, |workspace, window, cx| { + let project = workspace.project().clone(); + workspace.toggle_modal(window, cx, |window, cx| { + AttachModal::new( + ModalIntent::ResolveProcessId(Some(tx)), + weak_workspace_clone, + project, + true, + window, + cx, + ) + }); + }).ok(); + + let Some(process_id) = rx.await.ok().flatten() else { + bail!("No process selected with config that contains {}", PROCESS_ID_PLACEHOLDER.as_str()) + }; + + Self::substitute_process_id_in_config(&mut config, process_id); + } + let request_type = match dap_registry .adapter(&adapter) .with_context(|| format!("{}: is not a valid adapter name", &adapter)) { diff --git a/crates/debugger_ui/src/tests/attach_modal.rs b/crates/debugger_ui/src/tests/attach_modal.rs index 80e2b73d5a100bbd21462f0ad80def1997e184de..4df3ebf5196dea266287041e51dd65363d5f685c 100644 --- a/crates/debugger_ui/src/tests/attach_modal.rs +++ b/crates/debugger_ui/src/tests/attach_modal.rs @@ -1,4 +1,8 @@ -use crate::{attach_modal::Candidate, tests::start_debug_session_with, *}; +use crate::{ + attach_modal::{Candidate, ModalIntent}, + tests::start_debug_session_with, + *, +}; use attach_modal::AttachModal; use dap::{FakeAdapter, adapters::DebugTaskDefinition}; use gpui::{BackgroundExecutor, TestAppContext, VisualTestContext}; @@ -98,12 +102,6 @@ async fn test_show_attach_modal_and_select_process( workspace.toggle_modal(window, cx, |window, cx| { AttachModal::with_processes( workspace_handle, - task::ZedDebugConfig { - adapter: FakeAdapter::ADAPTER_NAME.into(), - request: dap::DebugRequest::Attach(AttachRequest::default()), - label: "attach example".into(), - stop_on_entry: None, - }, vec![ Candidate { pid: 0, @@ -124,6 +122,12 @@ async fn test_show_attach_modal_and_select_process( .into_iter() .collect(), true, + ModalIntent::AttachToProcess(task::ZedDebugConfig { + adapter: FakeAdapter::ADAPTER_NAME.into(), + request: dap::DebugRequest::Attach(AttachRequest::default()), + label: "attach example".into(), + stop_on_entry: None, + }), window, cx, ) @@ -138,8 +142,7 @@ async fn test_show_attach_modal_and_select_process( // assert we got the expected processes workspace .update(cx, |_, window, cx| { - let names = - attach_modal.update(cx, |modal, cx| attach_modal::_process_names(modal, cx)); + let names = attach_modal.update(cx, |modal, cx| attach_modal::process_names(modal, cx)); // Initially all processes are visible. assert_eq!(3, names.len()); attach_modal.update(cx, |this, cx| { @@ -153,8 +156,7 @@ async fn test_show_attach_modal_and_select_process( // assert we got the expected processes workspace .update(cx, |_, _, cx| { - let names = - attach_modal.update(cx, |modal, cx| attach_modal::_process_names(modal, cx)); + let names = attach_modal.update(cx, |modal, cx| attach_modal::process_names(modal, cx)); // Initially all processes are visible. 
assert_eq!(2, names.len()); }) @@ -171,3 +173,139 @@ async fn test_show_attach_modal_and_select_process( }) .unwrap(); } + +#[gpui::test] +async fn test_attach_with_pick_pid_variable(executor: BackgroundExecutor, cx: &mut TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(executor.clone()); + + fs.insert_tree( + path!("/project"), + json!({ + "main.rs": "First line\nSecond line\nThird line\nFourth line", + }), + ) + .await; + + let project = Project::test(fs, [path!("/project").as_ref()], cx).await; + let workspace = init_test_workspace(&project, cx).await; + let cx = &mut VisualTestContext::from_window(*workspace, cx); + + let _initialize_subscription = + project::debugger::test::intercept_debug_sessions(cx, |client| { + client.on_request::(move |_, args| { + let raw = &args.raw; + assert_eq!(raw["request"], "attach"); + assert_eq!( + raw["process_id"], "42", + "verify process id has been replaced" + ); + + Ok(()) + }); + }); + + let pick_pid_placeholder = task::VariableName::PickProcessId.template_value(); + workspace + .update(cx, |workspace, window, cx| { + workspace.start_debug_session( + DebugTaskDefinition { + adapter: FakeAdapter::ADAPTER_NAME.into(), + label: "attach with picker".into(), + config: json!({ + "request": "attach", + "process_id": pick_pid_placeholder, + }), + tcp_connection: None, + } + .to_scenario(), + task::TaskContext::default(), + None, + None, + window, + cx, + ) + }) + .unwrap(); + + cx.run_until_parked(); + + let attach_modal = workspace + .update(cx, |workspace, _window, cx| { + workspace.active_modal::(cx) + }) + .unwrap(); + + assert!( + attach_modal.is_some(), + "Attach modal should open when config contains ZED_PICK_PID" + ); + + let attach_modal = attach_modal.unwrap(); + + workspace + .update(cx, |_, window, cx| { + attach_modal.update(cx, |modal, cx| { + attach_modal::set_candidates( + modal, + vec![ + Candidate { + pid: 10, + name: "process-1".into(), + command: vec![], + }, + Candidate { + pid: 42, + name: "target-process".into(), + command: vec![], + }, + Candidate { + pid: 99, + name: "process-3".into(), + command: vec![], + }, + ] + .into_iter() + .collect(), + window, + cx, + ) + }) + }) + .unwrap(); + + cx.run_until_parked(); + + workspace + .update(cx, |_, window, cx| { + attach_modal.update(cx, |modal, cx| { + modal.picker.update(cx, |picker, cx| { + picker.set_query("target", window, cx); + }) + }) + }) + .unwrap(); + + cx.run_until_parked(); + + workspace + .update(cx, |_, _, cx| { + let names = attach_modal.update(cx, |modal, cx| attach_modal::process_names(modal, cx)); + assert_eq!(names.len(), 1); + assert_eq!(names[0], " 42 target-process"); + }) + .unwrap(); + + cx.dispatch_action(Confirm); + cx.run_until_parked(); + + workspace + .update(cx, |workspace, _window, cx| { + assert!( + workspace.active_modal::(cx).is_none(), + "Attach modal should be dismissed after selection" + ); + }) + .unwrap(); +} diff --git a/crates/task/src/task.rs b/crates/task/src/task.rs index 280bf5ccdb91271d7ff738654d507573c9d667d4..92d59094190de9924327722bb659058569869269 100644 --- a/crates/task/src/task.rs +++ b/crates/task/src/task.rs @@ -173,6 +173,9 @@ pub enum VariableName { SelectedText, /// The symbol selected by the symbol tagging system, specifically the @run capture in a runnables.scm RunnableSymbol, + /// Open a Picker to select a process ID to use in place + /// Can only be used to debug configurations + PickProcessId, /// Custom variable, provided by the plugin or other external source. 
/// Will be printed with `CUSTOM_` prefix to avoid potential conflicts with other variables. Custom(Cow<'static, str>), @@ -240,6 +243,7 @@ impl std::fmt::Display for VariableName { Self::Column => write!(f, "{ZED_VARIABLE_NAME_PREFIX}COLUMN"), Self::SelectedText => write!(f, "{ZED_VARIABLE_NAME_PREFIX}SELECTED_TEXT"), Self::RunnableSymbol => write!(f, "{ZED_VARIABLE_NAME_PREFIX}RUNNABLE_SYMBOL"), + Self::PickProcessId => write!(f, "{ZED_VARIABLE_NAME_PREFIX}PICK_PID"), Self::Custom(s) => write!( f, "{ZED_VARIABLE_NAME_PREFIX}{ZED_CUSTOM_VARIABLE_NAME_PREFIX}{s}" @@ -346,15 +350,28 @@ pub fn shell_to_proto(shell: Shell) -> proto::Shell { } type VsCodeEnvVariable = String; +type VsCodeCommand = String; type ZedEnvVariable = String; struct EnvVariableReplacer { variables: HashMap, + commands: HashMap, } impl EnvVariableReplacer { fn new(variables: HashMap) -> Self { - Self { variables } + Self { + variables, + commands: HashMap::default(), + } + } + + fn with_commands( + mut self, + commands: impl IntoIterator, + ) -> Self { + self.commands = commands.into_iter().collect(); + self } fn replace_value(&self, input: serde_json::Value) -> serde_json::Value { @@ -380,7 +397,13 @@ impl EnvVariableReplacer { if left == "env" && !right.is_empty() { let variable_name = &right[1..]; return Some(format!("${{{variable_name}}}")); + } else if left == "command" && !right.is_empty() { + let command_name = &right[1..]; + if let Some(replacement_command) = self.commands.get(command_name) { + return Some(format!("${{{replacement_command}}}")); + } } + let (variable_name, default) = (left, right); let append_previous_default = |ret: &mut String| { if !default.is_empty() { diff --git a/crates/task/src/vscode_debug_format.rs b/crates/task/src/vscode_debug_format.rs index 9b3f2e808b750e60d494c0b92abf78bb8c698227..bef64c8d409ce0b3ee2c062ee97196af7ea22311 100644 --- a/crates/task/src/vscode_debug_format.rs +++ b/crates/task/src/vscode_debug_format.rs @@ -68,7 +68,11 @@ impl TryFrom for DebugTaskFile { VariableName::RelativeFile.to_string(), ), ("file".to_owned(), VariableName::File.to_string()), - ])); + ])) + .with_commands([( + "pickMyProcess".to_owned(), + VariableName::PickProcessId.to_string(), + )]); let templates = file .configurations .into_iter() @@ -96,7 +100,7 @@ fn task_type_to_adapter_name(task_type: &str) -> String { mod tests { use serde_json::json; - use crate::{DebugScenario, DebugTaskFile}; + use crate::{DebugScenario, DebugTaskFile, VariableName}; use super::VsCodeDebugTaskFile; @@ -152,4 +156,39 @@ mod tests { }]) ); } + + #[test] + fn test_command_pickmyprocess_replacement() { + let raw = r#" + { + "version": "0.2.0", + "configurations": [ + { + "name": "Attach to Process", + "request": "attach", + "type": "cppdbg", + "processId": "${command:pickMyProcess}" + } + ] + } + "#; + let parsed: VsCodeDebugTaskFile = + serde_json_lenient::from_str(raw).expect("deserializing launch.json"); + let zed = DebugTaskFile::try_from(parsed).expect("converting to Zed debug templates"); + + let expected_placeholder = format!("${{{}}}", VariableName::PickProcessId); + pretty_assertions::assert_eq!( + zed, + DebugTaskFile(vec![DebugScenario { + label: "Attach to Process".into(), + adapter: "CodeLLDB".into(), + config: json!({ + "request": "attach", + "processId": expected_placeholder, + }), + tcp_connection: None, + build: None + }]) + ); + } } From 73568fc45460c102fd76772e61363e2b4962b472 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 12:45:26 
-0300 Subject: [PATCH 0251/1030] ui: Add `ThreadItem` component (#43167) Release Notes: - N/A --- crates/ui/src/components.rs | 2 + crates/ui/src/components/thread_item.rs | 214 ++++++++++++++++++++++++ crates/zed/src/zed/component_preview.rs | 9 +- 3 files changed, 221 insertions(+), 4 deletions(-) create mode 100644 crates/ui/src/components/thread_item.rs diff --git a/crates/ui/src/components.rs b/crates/ui/src/components.rs index fae444c0ef81d0f7b631769112f4286f8e75ea23..0b02b4315ac04dceed170890f86b336a8d2a27c4 100644 --- a/crates/ui/src/components.rs +++ b/crates/ui/src/components.rs @@ -35,6 +35,7 @@ mod stack; mod sticky_items; mod tab; mod tab_bar; +mod thread_item; mod toggle; mod tooltip; mod tree_view_item; @@ -79,6 +80,7 @@ pub use stack::*; pub use sticky_items::*; pub use tab::*; pub use tab_bar::*; +pub use thread_item::*; pub use toggle::*; pub use tooltip::*; pub use tree_view_item::*; diff --git a/crates/ui/src/components/thread_item.rs b/crates/ui/src/components/thread_item.rs new file mode 100644 index 0000000000000000000000000000000000000000..0cb6a42ad11d16eddd3a2afb3d8a9dc9475b6165 --- /dev/null +++ b/crates/ui/src/components/thread_item.rs @@ -0,0 +1,214 @@ +use crate::{Chip, Indicator, SpinnerLabel, prelude::*}; +use gpui::{ClickEvent, SharedString}; + +#[derive(IntoElement, RegisterComponent)] +pub struct ThreadItem { + id: ElementId, + icon: IconName, + title: SharedString, + timestamp: SharedString, + running: bool, + generation_done: bool, + selected: bool, + has_changes: bool, + worktree: Option, + on_click: Option>, +} + +impl ThreadItem { + pub fn new(id: impl Into, title: impl Into) -> Self { + Self { + id: id.into(), + icon: IconName::ZedAgent, + title: title.into(), + timestamp: "".into(), + running: false, + generation_done: false, + selected: false, + has_changes: false, + worktree: None, + on_click: None, + } + } + + pub fn timestamp(mut self, timestamp: impl Into) -> Self { + self.timestamp = timestamp.into(); + self + } + + pub fn icon(mut self, icon: IconName) -> Self { + self.icon = icon; + self + } + + pub fn running(mut self, running: bool) -> Self { + self.running = running; + self + } + + pub fn generation_done(mut self, generation_done: bool) -> Self { + self.generation_done = generation_done; + self + } + + pub fn selected(mut self, selected: bool) -> Self { + self.selected = selected; + self + } + + pub fn has_changes(mut self, has_changes: bool) -> Self { + self.has_changes = has_changes; + self + } + + pub fn worktree(mut self, worktree: impl Into) -> Self { + self.worktree = Some(worktree.into()); + self + } + + pub fn on_click( + mut self, + handler: impl Fn(&ClickEvent, &mut Window, &mut App) + 'static, + ) -> Self { + self.on_click = Some(Box::new(handler)); + self + } +} + +impl RenderOnce for ThreadItem { + fn render(self, _: &mut Window, cx: &mut App) -> impl IntoElement { + let icon_container = || h_flex().size_4().justify_center(); + let icon = if self.generation_done { + icon_container().child(Indicator::dot().color(Color::Accent)) + } else if self.running { + icon_container().child(SpinnerLabel::new().color(Color::Accent)) + } else { + icon_container().child( + Icon::new(self.icon) + .color(Color::Muted) + .size(IconSize::Small), + ) + }; + + v_flex() + .id(self.id) + .cursor_pointer() + .p_2() + .when(self.selected, |this| { + this.bg(cx.theme().colors().element_active) + }) + .hover(|s| s.bg(cx.theme().colors().element_hover)) + .child( + h_flex() + .w_full() + .gap_1p5() + .child(icon) + 
.child(Label::new(self.title).truncate()), + ) + .child( + h_flex() + .gap_1p5() + .child(icon_container()) // Icon Spacing + .when_some(self.worktree, |this, name| { + this.child(Chip::new(name).label_size(LabelSize::XSmall)) + }) + .child( + Label::new(self.timestamp) + .size(LabelSize::Small) + .color(Color::Muted), + ) + .child( + Label::new("•") + .size(LabelSize::Small) + .color(Color::Muted) + .alpha(0.5), + ) + .when(!self.has_changes, |this| { + this.child( + Label::new("No Changes") + .size(LabelSize::Small) + .color(Color::Muted), + ) + }), + ) + .when_some(self.on_click, |this, on_click| this.on_click(on_click)) + } +} + +impl Component for ThreadItem { + fn scope() -> ComponentScope { + ComponentScope::Agent + } + + fn preview(_window: &mut Window, cx: &mut App) -> Option { + let container = || { + v_flex() + .w_72() + .border_1() + .border_color(cx.theme().colors().border_variant) + .bg(cx.theme().colors().panel_background) + }; + + let thread_item_examples = vec![ + single_example( + "Default", + container() + .child( + ThreadItem::new("ti-1", "Linking to the Agent Panel Depending on Settings") + .icon(IconName::AiOpenAi) + .timestamp("1:33 AM"), + ) + .into_any_element(), + ), + single_example( + "Generation Done", + container() + .child( + ThreadItem::new("ti-2", "Refine thread view scrolling behavior") + .timestamp("12:12 AM") + .generation_done(true), + ) + .into_any_element(), + ), + single_example( + "Running Agent", + container() + .child( + ThreadItem::new("ti-3", "Add line numbers option to FileEditBlock") + .icon(IconName::AiClaude) + .timestamp("7:30 PM") + .running(true), + ) + .into_any_element(), + ), + single_example( + "In Worktree", + container() + .child( + ThreadItem::new("ti-4", "Add line numbers option to FileEditBlock") + .icon(IconName::AiClaude) + .timestamp("7:37 PM") + .worktree("link-agent-panel"), + ) + .into_any_element(), + ), + single_example( + "Selected Item", + container() + .child( + ThreadItem::new("ti-5", "Refine textarea interaction behavior") + .icon(IconName::AiGemini) + .timestamp("3:00 PM") + .selected(true), + ) + .into_any_element(), + ), + ]; + + Some( + example_group(thread_item_examples) + .vertical() + .into_any_element(), + ) + } +} diff --git a/crates/zed/src/zed/component_preview.rs b/crates/zed/src/zed/component_preview.rs index d62f39ef6306593eba4b5fe6bff427db036e82dc..18279d8ee88821d44166fb5aedebca2e51ae9491 100644 --- a/crates/zed/src/zed/component_preview.rs +++ b/crates/zed/src/zed/component_preview.rs @@ -934,15 +934,16 @@ impl ComponentPreviewPage { fn render_header(&self, _: &Window, cx: &App) -> impl IntoElement { v_flex() - .py_12() - .px_16() + .min_w_0() + .w_full() + .p_12() .gap_6() .bg(cx.theme().colors().surface_background) .border_b_1() .border_color(cx.theme().colors().border) .child( v_flex() - .gap_0p5() + .gap_1() .child( Label::new(self.component.scope().to_string()) .size(LabelSize::Small) @@ -959,7 +960,7 @@ impl ComponentPreviewPage { ), ) .when_some(self.component.description(), |this, description| { - this.child(div().text_sm().child(description)) + this.child(Label::new(description).size(LabelSize::Small)) }) } From ba93a5d62f87f40fa87fb69b45f8c1dda65ad845 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 12:45:49 -0300 Subject: [PATCH 0252/1030] ui: Remove `Badge` component (#43168) We're not using it anywhere anymore, so I think we can clean it up now. 
This was a somewhat specific component we did for the sake of Onboarding, but it ended up being removed. Release Notes: - N/A --- crates/ui/src/components.rs | 2 - crates/ui/src/components/badge.rs | 94 ------------------------------- 2 files changed, 96 deletions(-) delete mode 100644 crates/ui/src/components/badge.rs diff --git a/crates/ui/src/components.rs b/crates/ui/src/components.rs index 0b02b4315ac04dceed170890f86b336a8d2a27c4..712a07d3bdddb1d0c2300f6d256fa5634b16e764 100644 --- a/crates/ui/src/components.rs +++ b/crates/ui/src/components.rs @@ -1,5 +1,4 @@ mod avatar; -mod badge; mod banner; mod button; mod callout; @@ -44,7 +43,6 @@ mod tree_view_item; mod stories; pub use avatar::*; -pub use badge::*; pub use banner::*; pub use button::*; pub use callout::*; diff --git a/crates/ui/src/components/badge.rs b/crates/ui/src/components/badge.rs deleted file mode 100644 index 9db6fd616f56769b03d1856cfda3fdeef66e446f..0000000000000000000000000000000000000000 --- a/crates/ui/src/components/badge.rs +++ /dev/null @@ -1,94 +0,0 @@ -use std::rc::Rc; - -use crate::Divider; -use crate::DividerColor; -use crate::Tooltip; -use crate::component_prelude::*; -use crate::prelude::*; -use gpui::AnyView; -use gpui::{AnyElement, IntoElement, SharedString, Window}; - -#[derive(IntoElement, RegisterComponent)] -pub struct Badge { - label: SharedString, - icon: IconName, - tooltip: Option AnyView>>, -} - -impl Badge { - pub fn new(label: impl Into) -> Self { - Self { - label: label.into(), - icon: IconName::Check, - tooltip: None, - } - } - - pub fn icon(mut self, icon: IconName) -> Self { - self.icon = icon; - self - } - - pub fn tooltip(mut self, tooltip: impl Fn(&mut Window, &mut App) -> AnyView + 'static) -> Self { - self.tooltip = Some(Rc::new(tooltip)); - self - } -} - -impl RenderOnce for Badge { - fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { - let tooltip = self.tooltip; - - h_flex() - .id(self.label.clone()) - .h_full() - .gap_1() - .pl_1() - .pr_2() - .border_1() - .border_color(cx.theme().colors().border.opacity(0.6)) - .bg(cx.theme().colors().element_background) - .rounded_sm() - .overflow_hidden() - .child( - Icon::new(self.icon) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child(Divider::vertical().color(DividerColor::Border)) - .child(Label::new(self.label.clone()).size(LabelSize::Small).ml_1()) - .when_some(tooltip, |this, tooltip| { - this.hoverable_tooltip(move |window, cx| tooltip(window, cx)) - }) - } -} - -impl Component for Badge { - fn scope() -> ComponentScope { - ComponentScope::DataDisplay - } - - fn description() -> Option<&'static str> { - Some( - "A compact, labeled component with optional icon for displaying status, categories, or metadata.", - ) - } - - fn preview(_window: &mut Window, _cx: &mut App) -> Option { - Some( - v_flex() - .gap_6() - .child(single_example( - "Basic Badge", - Badge::new("Default").into_any_element(), - )) - .child(single_example( - "With Tooltip", - Badge::new("Tooltip") - .tooltip(Tooltip::text("This is a tooltip.")) - .into_any_element(), - )) - .into_any_element(), - ) - } -} From 5ef6402d645b40993e58704a2ba735015cc7f668 Mon Sep 17 00:00:00 2001 From: Dino Date: Thu, 20 Nov 2025 16:25:09 +0000 Subject: [PATCH 0253/1030] editor: Ensure all menus and popups are dismissed (#43169) While investigating a bug report that, in Helix mode, pressing the `escape` key would only hide the signature help popup and not the completions menu, when `auto_signature_help` was enabled, it was noted that the 
`editor::Editor.dismiss_menus_and_popups` method was not dismissing all possible menus and popups and was, instead, stopping as soon as a single menu or popup was dismissed. From the name of the method it appears that we actually want to dismiss all so this commit updates it as such, ensuring that the bug reported is also fixed. Closes #42499 Release Notes: - Fixed issue with popups and menus not being dismissed while using `auto_signature_help` in Helix Mode --- crates/editor/src/editor.rs | 43 ++++++++++--------------------------- 1 file changed, 11 insertions(+), 32 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 9401d5a1e2f36f7b0c00c4c3dec3fc90597290e5..5d1c24bcad76333beee8941ee729b9578bb7ad65 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -4122,44 +4122,23 @@ impl Editor { window: &mut Window, cx: &mut Context, ) -> bool { - if self.take_rename(false, window, cx).is_some() { - return true; - } - - if self.hide_blame_popover(true, cx) { - return true; - } - - if hide_hover(self, cx) { - return true; - } - - if self.hide_signature_help(cx, SignatureHelpHiddenBy::Escape) { - return true; - } - - if self.hide_context_menu(window, cx).is_some() { - return true; - } - - if self.mouse_context_menu.take().is_some() { - return true; - } - - if is_user_requested && self.discard_edit_prediction(true, cx) { - return true; - } + let mut dismissed = false; - if self.snippet_stack.pop().is_some() { - return true; - } + dismissed |= self.take_rename(false, window, cx).is_some(); + dismissed |= self.hide_blame_popover(true, cx); + dismissed |= hide_hover(self, cx); + dismissed |= self.hide_signature_help(cx, SignatureHelpHiddenBy::Escape); + dismissed |= self.hide_context_menu(window, cx).is_some(); + dismissed |= self.mouse_context_menu.take().is_some(); + dismissed |= is_user_requested && self.discard_edit_prediction(true, cx); + dismissed |= self.snippet_stack.pop().is_some(); if self.mode.is_full() && matches!(self.active_diagnostics, ActiveDiagnostic::Group(_)) { self.dismiss_diagnostics(cx); - return true; + dismissed = true; } - false + dismissed } fn linked_editing_ranges_for( From a5c3267b3e995cbd0270532683e669bc93b269bf Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Thu, 20 Nov 2025 22:35:24 +0530 Subject: [PATCH 0254/1030] extensions: Add `-` as linked edit character for HTML (#43179) Closes https://github.com/zed-industries/zed/issues/43060 Release Notes: - Fixed issue where typing in custom HTML tag would not complete subsequent end tag for `-` character. 
Co-authored-by: Kunall Banerjee --- extensions/html/languages/html/config.toml | 3 +++ extensions/html/languages/html/overrides.scm | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/extensions/html/languages/html/config.toml b/extensions/html/languages/html/config.toml index 388949d95caf56803690b5533c871978a3f0d100..fc7d5571981e99fdb8bde68441821f07a1a94889 100644 --- a/extensions/html/languages/html/config.toml +++ b/extensions/html/languages/html/config.toml @@ -14,3 +14,6 @@ brackets = [ ] completion_query_characters = ["-"] prettier_parser_name = "html" + +[overrides.default] +linked_edit_characters = ["-"] diff --git a/extensions/html/languages/html/overrides.scm b/extensions/html/languages/html/overrides.scm index 7108d48fbdcd50d18dbd6bbc6f5f403090664736..434f610e70242be8589a9f58cc7fd4704d5d9296 100644 --- a/extensions/html/languages/html/overrides.scm +++ b/extensions/html/languages/html/overrides.scm @@ -1,2 +1,7 @@ (comment) @comment (quoted_attribute_value) @string + +[ + (start_tag) + (end_tag) +] @default From 2a40dcfd770302bbb8483a21f0e757efcd981911 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Thu, 20 Nov 2025 18:12:00 +0100 Subject: [PATCH 0255/1030] acp: Support specifying settings for extensions (#43177) This allows you to specify default_model and default_mode for ACP extensions, e.g. ``` "auggie": { "default_model": "gpt-5", "default_mode": "default", "type": "extension" }, ``` Release Notes: - Added support for specifying settings for ACP extensions (`default_mode`, `default_model`) --- crates/agent_servers/src/custom.rs | 36 +++-- crates/agent_ui/src/agent_configuration.rs | 2 +- crates/agent_ui/src/agent_panel.rs | 41 +----- crates/agent_ui/src/agent_ui.rs | 18 +-- crates/migrator/src/migrations.rs | 6 + .../src/migrations/m_2025_11_20/settings.rs | 76 ++++++++++ crates/migrator/src/migrator.rs | 65 +++++++++ crates/project/src/agent_server_store.rs | 132 +++++++++++++----- .../remote_server/src/remote_editing_tests.rs | 1 + crates/settings/src/settings_content/agent.rs | 53 ++++--- 10 files changed, 317 insertions(+), 113 deletions(-) create mode 100644 crates/migrator/src/migrations/m_2025_11_20/settings.rs diff --git a/crates/agent_servers/src/custom.rs b/crates/agent_servers/src/custom.rs index b417e2bdf30a7ed6b9e2ab4baa6211cee2a9a890..e7625c2cc06095c9a24a2537e4e83bced26d73f3 100644 --- a/crates/agent_servers/src/custom.rs +++ b/crates/agent_servers/src/custom.rs @@ -44,19 +44,27 @@ impl crate::AgentServer for CustomAgentServer { settings .as_ref() - .and_then(|s| s.default_mode.clone().map(|m| acp::SessionModeId(m.into()))) + .and_then(|s| s.default_mode().map(|m| acp::SessionModeId(m.into()))) } fn set_default_mode(&self, mode_id: Option, fs: Arc, cx: &mut App) { let name = self.name(); update_settings_file(fs, cx, move |settings, _| { - if let Some(settings) = settings + let settings = settings .agent_servers .get_or_insert_default() .custom - .get_mut(&name) - { - settings.default_mode = mode_id.map(|m| m.to_string()) + .entry(name.clone()) + .or_insert_with(|| settings::CustomAgentServerSettings::Extension { + default_model: None, + default_mode: None, + }); + + match settings { + settings::CustomAgentServerSettings::Custom { default_mode, .. } + | settings::CustomAgentServerSettings::Extension { default_mode, .. 
} => { + *default_mode = mode_id.map(|m| m.to_string()); + } } }); } @@ -72,19 +80,27 @@ impl crate::AgentServer for CustomAgentServer { settings .as_ref() - .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into()))) + .and_then(|s| s.default_model().map(|m| acp::ModelId(m.into()))) } fn set_default_model(&self, model_id: Option, fs: Arc, cx: &mut App) { let name = self.name(); update_settings_file(fs, cx, move |settings, _| { - if let Some(settings) = settings + let settings = settings .agent_servers .get_or_insert_default() .custom - .get_mut(&name) - { - settings.default_model = model_id.map(|m| m.to_string()) + .entry(name.clone()) + .or_insert_with(|| settings::CustomAgentServerSettings::Extension { + default_model: None, + default_mode: None, + }); + + match settings { + settings::CustomAgentServerSettings::Custom { default_model, .. } + | settings::CustomAgentServerSettings::Extension { default_model, .. } => { + *default_model = model_id.map(|m| m.to_string()); + } } }); } diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index 45ba29a595b59f4a1c329d46e43030a1b9c7ed14..ef6b90ad89e2e038e96d8864d4c2ce0ecf333d6e 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -1343,7 +1343,7 @@ async fn open_new_agent_servers_entry_in_settings_editor( .custom .insert( server_name, - settings::CustomAgentServerSettings { + settings::CustomAgentServerSettings::Custom { path: "path_to_executable".into(), args: vec![], env: Some(HashMap::default()), diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index dfc4d27e1153c61dc07c00a807c958f69db77b5a..3cbedfbe198cf826a2e82e1f42f1a0d794da49e6 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -8,9 +8,7 @@ use agent::{ContextServerRegistry, DbThreadMetadata, HistoryEntry, HistoryStore} use db::kvp::{Dismissable, KEY_VALUE_STORE}; use project::{ ExternalAgentServerName, - agent_server_store::{ - AgentServerCommand, AllAgentServersSettings, CLAUDE_CODE_NAME, CODEX_NAME, GEMINI_NAME, - }, + agent_server_store::{CLAUDE_CODE_NAME, CODEX_NAME, GEMINI_NAME}, }; use serde::{Deserialize, Serialize}; use settings::{ @@ -35,9 +33,7 @@ use crate::{ ExpandMessageEditor, acp::{AcpThreadHistory, ThreadHistoryEvent}, }; -use crate::{ - ExternalAgent, NewExternalAgentThread, NewNativeAgentThreadFromSummary, placeholder_command, -}; +use crate::{ExternalAgent, NewExternalAgentThread, NewNativeAgentThreadFromSummary}; use crate::{ManageProfiles, context_store::ContextStore}; use agent_settings::AgentSettings; use ai_onboarding::AgentPanelOnboarding; @@ -61,7 +57,7 @@ use project::{Project, ProjectPath, Worktree}; use prompt_store::{PromptBuilder, PromptStore, UserPromptId}; use rules_library::{RulesLibrary, open_rules_library}; use search::{BufferSearchBar, buffer_search}; -use settings::{Settings, SettingsStore, update_settings_file}; +use settings::{Settings, update_settings_file}; use theme::ThemeSettings; use ui::utils::WithRemSize; use ui::{ @@ -248,7 +244,6 @@ pub enum AgentType { Codex, Custom { name: SharedString, - command: AgentServerCommand, }, } @@ -280,7 +275,7 @@ impl From for AgentType { ExternalAgent::Gemini => Self::Gemini, ExternalAgent::ClaudeCode => Self::ClaudeCode, ExternalAgent::Codex => Self::Codex, - ExternalAgent::Custom { name, command } => Self::Custom { name, command }, + ExternalAgent::Custom { name } => Self::Custom { name }, ExternalAgent::NativeAgent => 
Self::NativeAgent, } } @@ -1459,8 +1454,8 @@ impl AgentPanel { self.serialize(cx); self.external_thread(Some(crate::ExternalAgent::Codex), None, None, window, cx) } - AgentType::Custom { name, command } => self.external_thread( - Some(crate::ExternalAgent::Custom { name, command }), + AgentType::Custom { name } => self.external_thread( + Some(crate::ExternalAgent::Custom { name }), None, None, window, @@ -2085,22 +2080,11 @@ impl AgentPanel { .cloned() .collect::>(); - let custom_settings = cx - .global::() - .get::(None) - .custom - .clone(); - for agent_name in agent_names { let icon_path = agent_server_store.agent_icon(&agent_name); let mut entry = ContextMenuEntry::new(agent_name.clone()); - let command = custom_settings - .get(&agent_name.0) - .map(|settings| settings.command.clone()) - .unwrap_or(placeholder_command()); - if let Some(icon_path) = icon_path { entry = entry.custom_icon_svg(icon_path); } else { @@ -2110,7 +2094,6 @@ impl AgentPanel { .when( is_agent_selected(AgentType::Custom { name: agent_name.0.clone(), - command: command.clone(), }), |this| { this.action(Box::new(NewExternalAgentThread { agent: None })) @@ -2121,7 +2104,6 @@ impl AgentPanel { .handler({ let workspace = workspace.clone(); let agent_name = agent_name.clone(); - let custom_settings = custom_settings.clone(); move |window, cx| { if let Some(workspace) = workspace.upgrade() { workspace.update(cx, |workspace, cx| { @@ -2134,17 +2116,6 @@ impl AgentPanel { name: agent_name .clone() .into(), - command: custom_settings - .get(&agent_name.0) - .map(|settings| { - settings - .command - .clone() - }) - .unwrap_or( - placeholder_command( - ), - ), }, window, cx, diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index 6396b68cbc5f805466618bd460f9ed46ce05d086..e06364988c1b49ab8877e40571393e02c252b47b 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -38,7 +38,6 @@ use language_model::{ ConfiguredModel, LanguageModel, LanguageModelId, LanguageModelProviderId, LanguageModelRegistry, }; use project::DisableAiSettings; -use project::agent_server_store::AgentServerCommand; use prompt_store::PromptBuilder; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -162,18 +161,7 @@ pub enum ExternalAgent { ClaudeCode, Codex, NativeAgent, - Custom { - name: SharedString, - command: AgentServerCommand, - }, -} - -fn placeholder_command() -> AgentServerCommand { - AgentServerCommand { - path: "/placeholder".into(), - args: vec![], - env: None, - } + Custom { name: SharedString }, } impl ExternalAgent { @@ -197,9 +185,7 @@ impl ExternalAgent { Self::ClaudeCode => Rc::new(agent_servers::ClaudeCode), Self::Codex => Rc::new(agent_servers::Codex), Self::NativeAgent => Rc::new(agent::NativeAgentServer::new(fs, history)), - Self::Custom { name, command: _ } => { - Rc::new(agent_servers::CustomAgentServer::new(name.clone())) - } + Self::Custom { name } => Rc::new(agent_servers::CustomAgentServer::new(name.clone())), } } } diff --git a/crates/migrator/src/migrations.rs b/crates/migrator/src/migrations.rs index 2587e7a30829d4fa0e0832b91ab0294a86abc97e..8a481c734f9efcce4f6342789df6ff1d7fc01562 100644 --- a/crates/migrator/src/migrations.rs +++ b/crates/migrator/src/migrations.rs @@ -141,3 +141,9 @@ pub(crate) mod m_2025_11_12 { pub(crate) use settings::SETTINGS_PATTERNS; } + +pub(crate) mod m_2025_11_20 { + mod settings; + + pub(crate) use settings::SETTINGS_PATTERNS; +} diff --git a/crates/migrator/src/migrations/m_2025_11_20/settings.rs 
b/crates/migrator/src/migrations/m_2025_11_20/settings.rs new file mode 100644 index 0000000000000000000000000000000000000000..db56fb04d476a3557b224d362c99ced388dacdf0 --- /dev/null +++ b/crates/migrator/src/migrations/m_2025_11_20/settings.rs @@ -0,0 +1,76 @@ +use std::ops::Range; + +use tree_sitter::{Query, QueryMatch}; + +use crate::MigrationPatterns; + +pub const SETTINGS_PATTERNS: MigrationPatterns = &[( + SETTINGS_AGENT_SERVERS_CUSTOM_PATTERN, + migrate_custom_agent_settings, +)]; + +const SETTINGS_AGENT_SERVERS_CUSTOM_PATTERN: &str = r#"(document + (object + (pair + key: (string (string_content) @agent-servers) + value: (object + (pair + key: (string (string_content) @server-name) + value: (object) @server-settings + ) + ) + ) + ) + (#eq? @agent-servers "agent_servers") +)"#; + +fn migrate_custom_agent_settings( + contents: &str, + mat: &QueryMatch, + query: &Query, +) -> Option<(Range, String)> { + let server_name_index = query.capture_index_for_name("server-name")?; + let server_name = mat.nodes_for_capture_index(server_name_index).next()?; + let server_name_text = &contents[server_name.byte_range()]; + + if matches!(server_name_text, "gemini" | "claude" | "codex") { + return None; + } + + let server_settings_index = query.capture_index_for_name("server-settings")?; + let server_settings = mat.nodes_for_capture_index(server_settings_index).next()?; + + let mut column = None; + + // Parse the server settings to check what keys it contains + let mut cursor = server_settings.walk(); + for child in server_settings.children(&mut cursor) { + if child.kind() == "pair" { + if let Some(key_node) = child.child_by_field_name("key") { + if let (None, Some(quote_content)) = (column, key_node.child(0)) { + column = Some(quote_content.start_position().column); + } + if let Some(string_content) = key_node.child(1) { + let key = &contents[string_content.byte_range()]; + match key { + // If it already has a type key, don't modify it + "type" => return None, + _ => {} + } + } + } + } + } + + // Insert the type key at the beginning of the object + let start = server_settings.start_byte() + 1; + let indent = " ".repeat(column.unwrap_or(12)); + + Some(( + start..start, + format!( + r#" +{indent}"type": "custom","# + ), + )) +} diff --git a/crates/migrator/src/migrator.rs b/crates/migrator/src/migrator.rs index 74b73114cae81b57e5d0dc4227bafcd2cca31d10..fd30bf24982d2625e4f40669aa2e0142b8634186 100644 --- a/crates/migrator/src/migrator.rs +++ b/crates/migrator/src/migrator.rs @@ -219,6 +219,10 @@ pub fn migrate_settings(text: &str) -> Result> { migrations::m_2025_11_12::SETTINGS_PATTERNS, &SETTINGS_QUERY_2025_11_12, ), + MigrationType::TreeSitter( + migrations::m_2025_11_20::SETTINGS_PATTERNS, + &SETTINGS_QUERY_2025_11_20, + ), ]; run_migrations(text, migrations) } @@ -341,6 +345,10 @@ define_query!( SETTINGS_QUERY_2025_11_12, migrations::m_2025_11_12::SETTINGS_PATTERNS ); +define_query!( + SETTINGS_QUERY_2025_11_20, + migrations::m_2025_11_20::SETTINGS_PATTERNS +); // custom query static EDIT_PREDICTION_SETTINGS_MIGRATION_QUERY: LazyLock = LazyLock::new(|| { @@ -1192,6 +1200,63 @@ mod tests { ); } + #[test] + fn test_custom_agent_server_settings_migration() { + assert_migrate_settings_with_migrations( + &[MigrationType::TreeSitter( + migrations::m_2025_11_20::SETTINGS_PATTERNS, + &SETTINGS_QUERY_2025_11_20, + )], + r#"{ + "agent_servers": { + "gemini": { + "default_model": "gemini-1.5-pro" + }, + "claude": {}, + "codex": {}, + "my-custom-agent": { + "command": "/path/to/agent", + "args": ["--foo"], + 
"default_model": "my-model" + }, + "already-migrated-agent": { + "type": "custom", + "command": "/path/to/agent" + }, + "future-extension-agent": { + "type": "extension", + "default_model": "ext-model" + } + } +}"#, + Some( + r#"{ + "agent_servers": { + "gemini": { + "default_model": "gemini-1.5-pro" + }, + "claude": {}, + "codex": {}, + "my-custom-agent": { + "type": "custom", + "command": "/path/to/agent", + "args": ["--foo"], + "default_model": "my-model" + }, + "already-migrated-agent": { + "type": "custom", + "command": "/path/to/agent" + }, + "future-extension-agent": { + "type": "extension", + "default_model": "ext-model" + } + } +}"#, + ), + ); + } + #[test] fn test_remove_version_fields() { assert_migrate_settings( diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index 944eb593185bd5016e397d1417ed834da3ee73ef..d6bd83531eda515e6c2841c65d51619da82e9ae4 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -469,15 +469,21 @@ impl AgentServerStore { }), ); self.external_agents - .extend(new_settings.custom.iter().map(|(name, settings)| { - ( - ExternalAgentServerName(name.clone()), - Box::new(LocalCustomAgent { - command: settings.command.clone(), - project_environment: project_environment.clone(), - }) as Box, - ) - })); + .extend( + new_settings + .custom + .iter() + .filter_map(|(name, settings)| match settings { + CustomAgentServerSettings::Custom { command, .. } => Some(( + ExternalAgentServerName(name.clone()), + Box::new(LocalCustomAgent { + command: command.clone(), + project_environment: project_environment.clone(), + }) as Box, + )), + CustomAgentServerSettings::Extension { .. } => None, + }), + ); self.external_agents.extend(extension_agents.iter().map( |(agent_name, ext_id, targets, env, icon_path)| { let name = ExternalAgentServerName(agent_name.clone().into()); @@ -1817,32 +1823,88 @@ impl From for BuiltinAgentServerSettings { } #[derive(Clone, JsonSchema, Debug, PartialEq)] -pub struct CustomAgentServerSettings { - pub command: AgentServerCommand, - /// The default mode to use for this agent. - /// - /// Note: Not only all agents support modes. - /// - /// Default: None - pub default_mode: Option, - /// The default model to use for this agent. - /// - /// This should be the model ID as reported by the agent. - /// - /// Default: None - pub default_model: Option, +pub enum CustomAgentServerSettings { + Custom { + command: AgentServerCommand, + /// The default mode to use for this agent. + /// + /// Note: Not only all agents support modes. + /// + /// Default: None + default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + default_model: Option, + }, + Extension { + /// The default mode to use for this agent. + /// + /// Note: Not only all agents support modes. + /// + /// Default: None + default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + default_model: Option, + }, +} + +impl CustomAgentServerSettings { + pub fn command(&self) -> Option<&AgentServerCommand> { + match self { + CustomAgentServerSettings::Custom { command, .. } => Some(command), + CustomAgentServerSettings::Extension { .. } => None, + } + } + + pub fn default_mode(&self) -> Option<&str> { + match self { + CustomAgentServerSettings::Custom { default_mode, .. 
} + | CustomAgentServerSettings::Extension { default_mode, .. } => default_mode.as_deref(), + } + } + + pub fn default_model(&self) -> Option<&str> { + match self { + CustomAgentServerSettings::Custom { default_model, .. } + | CustomAgentServerSettings::Extension { default_model, .. } => { + default_model.as_deref() + } + } + } } impl From for CustomAgentServerSettings { fn from(value: settings::CustomAgentServerSettings) -> Self { - CustomAgentServerSettings { - command: AgentServerCommand { - path: PathBuf::from(shellexpand::tilde(&value.path.to_string_lossy()).as_ref()), - args: value.args, - env: value.env, + match value { + settings::CustomAgentServerSettings::Custom { + path, + args, + env, + default_mode, + default_model, + } => CustomAgentServerSettings::Custom { + command: AgentServerCommand { + path: PathBuf::from(shellexpand::tilde(&path.to_string_lossy()).as_ref()), + args, + env, + }, + default_mode, + default_model, + }, + settings::CustomAgentServerSettings::Extension { + default_mode, + default_model, + } => CustomAgentServerSettings::Extension { + default_mode, + default_model, }, - default_mode: value.default_mode, - default_model: value.default_model, } } } @@ -2176,7 +2238,7 @@ mod extension_agent_tests { "Tilde should be expanded for builtin agent path" ); - let settings = settings::CustomAgentServerSettings { + let settings = settings::CustomAgentServerSettings::Custom { path: PathBuf::from("~/custom/agent"), args: vec!["serve".into()], env: None, @@ -2184,10 +2246,14 @@ mod extension_agent_tests { default_model: None, }; - let CustomAgentServerSettings { + let converted: CustomAgentServerSettings = settings.into(); + let CustomAgentServerSettings::Custom { command: AgentServerCommand { path, .. }, .. - } = settings.into(); + } = converted + else { + panic!("Expected Custom variant"); + }; assert!( !path.to_string_lossy().starts_with("~"), diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index 4ff0c57d5571c5fb9e16df18078514e16ea12867..1cb63b8cd01e201c5fb2a212a2643cfdf481642a 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -1838,6 +1838,7 @@ async fn test_remote_external_agent_server( &json!({ "agent_servers": { "foo": { + "type": "custom", "command": "foo-cli", "args": ["--flag"], "env": { diff --git a/crates/settings/src/settings_content/agent.rs b/crates/settings/src/settings_content/agent.rs index 59b5a4e0f516387ce6316cd31376bb45c2c5cb94..c6ed2fc4a8980ff56153c6da69c03f3c3b7bf9c7 100644 --- a/crates/settings/src/settings_content/agent.rs +++ b/crates/settings/src/settings_content/agent.rs @@ -342,22 +342,39 @@ pub struct BuiltinAgentServerSettings { #[skip_serializing_none] #[derive(Deserialize, Serialize, Clone, JsonSchema, MergeFrom, Debug, PartialEq)] -pub struct CustomAgentServerSettings { - #[serde(rename = "command")] - pub path: PathBuf, - #[serde(default)] - pub args: Vec, - pub env: Option>, - /// The default mode to use for this agent. - /// - /// Note: Not only all agents support modes. - /// - /// Default: None - pub default_mode: Option, - /// The default model to use for this agent. - /// - /// This should be the model ID as reported by the agent. 
- /// - /// Default: None - pub default_model: Option, +#[serde(tag = "type", rename_all = "snake_case")] +pub enum CustomAgentServerSettings { + Custom { + #[serde(rename = "command")] + path: PathBuf, + #[serde(default)] + args: Vec, + env: Option>, + /// The default mode to use for this agent. + /// + /// Note: Not only all agents support modes. + /// + /// Default: None + default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + default_model: Option, + }, + Extension { + /// The default mode to use for this agent. + /// + /// Note: Not only all agents support modes. + /// + /// Default: None + default_mode: Option, + /// The default model to use for this agent. + /// + /// This should be the model ID as reported by the agent. + /// + /// Default: None + default_model: Option, + }, } From 58fe19d55ea339270cd01d3398dc8f9e7f4c04bf Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:44:55 +0100 Subject: [PATCH 0256/1030] project search: Skip loading of gitignored paths when their descendants will never match an inclusion/exclusion query (#42968) Co-authored-by: dino Related-to: #38799 Release Notes: - Improved project search performance with "Also search files ignored by configuration" combined with file inclusion/exclusion queries. --------- Co-authored-by: dino --- Cargo.lock | 45 +++++ Cargo.toml | 1 + crates/project/Cargo.toml | 1 + crates/project/src/project_search.rs | 258 +++++++++++++++++++++++++-- 4 files changed, 292 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0dc6a3c99a79ae3b3baad983a4427b710fb22080..a21c80c8b279206a791020231100abe6468ece6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3668,6 +3668,26 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "const_format" +version = "0.2.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7faa7469a93a566e9ccc1c73fe783b4a65c274c5ace346038dca9c39fe0030ad" +dependencies = [ + "const_format_proc_macros", +] + +[[package]] +name = "const_format_proc_macros" +version = "0.2.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d57c2eccfb16dbac1f4e61e206105db5820c9d26c3c472bc17c774259ef7744" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -12752,6 +12772,15 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5da3b0203fd7ee5720aa0b5e790b591aa5d3f41c3ed2c34a3a393382198af2f7" +[[package]] +name = "pori" +version = "0.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a63d338dec139f56dacc692ca63ad35a6be6a797442479b55acd611d79e906" +dependencies = [ + "nom 7.1.3", +] + [[package]] name = "portable-atomic" version = "1.11.1" @@ -13068,6 +13097,7 @@ dependencies = [ "url", "util", "watch", + "wax", "which 6.0.3", "worktree", "zeroize", @@ -19492,6 +19522,21 @@ dependencies = [ "zlog", ] +[[package]] +name = "wax" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d12a78aa0bab22d2f26ed1a96df7ab58e8a93506a3e20adb47c51a93b4e1357" +dependencies = [ + "const_format", + "itertools 0.11.0", + "nom 7.1.3", + "pori", + "regex", + "thiserror 1.0.69", + "walkdir", +] + [[package]] name = "wayland-backend" version = "0.3.11" diff --git a/Cargo.toml b/Cargo.toml index 
a4c9caccd9539ffde7d57d36dcfaf4cf162c7e92..fa85879698521a70686e9d96c6a108e8d1cbe28d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -719,6 +719,7 @@ wasmtime = { version = "29", default-features = false, features = [ "parallel-compilation", ] } wasmtime-wasi = "29" +wax = "0.6" which = "6.0.0" windows-core = "0.61" wit-component = "0.221" diff --git a/crates/project/Cargo.toml b/crates/project/Cargo.toml index 9b67fde1e0bd31856bfa19d01818c1a5c6564218..a33efb9896959cc12fd828986c881f73e84e0ec7 100644 --- a/crates/project/Cargo.toml +++ b/crates/project/Cargo.toml @@ -86,6 +86,7 @@ toml.workspace = true url.workspace = true util.workspace = true watch.workspace = true +wax.workspace = true which.workspace = true worktree.workspace = true zeroize.workspace = true diff --git a/crates/project/src/project_search.rs b/crates/project/src/project_search.rs index e638240ebba2be8fbc35b04a0653ab32519497dd..fa8279e4506c6bbcd44856756b6d6521af809281 100644 --- a/crates/project/src/project_search.rs +++ b/crates/project/src/project_search.rs @@ -1,7 +1,9 @@ use std::{ + cell::LazyCell, + collections::BTreeSet, io::{BufRead, BufReader}, ops::Range, - path::Path, + path::{Path, PathBuf}, pin::pin, sync::Arc, }; @@ -22,7 +24,7 @@ use smol::{ use text::BufferId; use util::{ResultExt, maybe, paths::compare_rel_paths}; -use worktree::{Entry, ProjectEntryId, Snapshot, Worktree}; +use worktree::{Entry, ProjectEntryId, Snapshot, Worktree, WorktreeSettings}; use crate::{ Project, ProjectItem, ProjectPath, RemotelyCreatedModels, @@ -178,7 +180,7 @@ impl Search { let (find_all_matches_tx, find_all_matches_rx) = bounded(MAX_CONCURRENT_BUFFER_OPENS); - + let query = Arc::new(query); let (candidate_searcher, tasks) = match self.kind { SearchKind::OpenBuffersOnly => { let Ok(open_buffers) = cx.update(|cx| self.all_loaded_buffers(&query, cx)) @@ -207,11 +209,10 @@ impl Search { let (sorted_search_results_tx, sorted_search_results_rx) = unbounded(); let (input_paths_tx, input_paths_rx) = unbounded(); - let tasks = vec![ cx.spawn(Self::provide_search_paths( std::mem::take(worktrees), - query.include_ignored(), + query.clone(), input_paths_tx, sorted_search_results_tx, )) @@ -366,26 +367,30 @@ impl Search { fn provide_search_paths( worktrees: Vec>, - include_ignored: bool, + query: Arc, tx: Sender, results: Sender>, ) -> impl AsyncFnOnce(&mut AsyncApp) { async move |cx| { _ = maybe!(async move { + let gitignored_tracker = PathInclusionMatcher::new(query.clone()); for worktree in worktrees { let (mut snapshot, worktree_settings) = worktree .read_with(cx, |this, _| { Some((this.snapshot(), this.as_local()?.settings())) })? .context("The worktree is not local")?; - if include_ignored { + if query.include_ignored() { // Pre-fetch all of the ignored directories as they're going to be searched. 
let mut entries_to_refresh = vec![]; - for entry in snapshot.entries(include_ignored, 0) { - if entry.is_ignored && entry.kind.is_unloaded() { - if !worktree_settings.is_path_excluded(&entry.path) { - entries_to_refresh.push(entry.path.clone()); - } + + for entry in snapshot.entries(query.include_ignored(), 0) { + if gitignored_tracker.should_scan_gitignored_dir( + entry, + &snapshot, + &worktree_settings, + ) { + entries_to_refresh.push(entry.path.clone()); } } let barrier = worktree.update(cx, |this, _| { @@ -404,8 +409,9 @@ impl Search { cx.background_executor() .scoped(|scope| { scope.spawn(async { - for entry in snapshot.files(include_ignored, 0) { + for entry in snapshot.files(query.include_ignored(), 0) { let (should_scan_tx, should_scan_rx) = oneshot::channel(); + let Ok(_) = tx .send(InputPath { entry: entry.clone(), @@ -788,3 +794,229 @@ struct MatchingEntry { path: ProjectPath, should_scan_tx: oneshot::Sender, } + +/// This struct encapsulates the logic to decide whether a given gitignored directory should be +/// scanned based on include/exclude patterns of a search query (as include/exclude parameters may match paths inside it). +/// It is kind-of doing an inverse of glob. Given a glob pattern like `src/**/` and a parent path like `src`, we need to decide whether the parent +/// may contain glob hits. +struct PathInclusionMatcher { + included: BTreeSet, + query: Arc, +} + +impl PathInclusionMatcher { + fn new(query: Arc) -> Self { + let mut included = BTreeSet::new(); + // To do an inverse glob match, we split each glob into it's prefix and the glob part. + // For example, `src/**/*.rs` becomes `src/` and `**/*.rs`. The glob part gets dropped. + // Then, when checking whether a given directory should be scanned, we check whether it is a non-empty substring of any glob prefix. + if query.filters_path() { + included.extend( + query + .files_to_include() + .sources() + .iter() + .flat_map(|glob| Some(wax::Glob::new(glob).ok()?.partition().0)), + ); + } + Self { included, query } + } + + fn should_scan_gitignored_dir( + &self, + entry: &Entry, + snapshot: &Snapshot, + worktree_settings: &WorktreeSettings, + ) -> bool { + if !entry.is_ignored || !entry.kind.is_unloaded() { + return false; + } + if !self.query.include_ignored() { + return false; + } + if worktree_settings.is_path_excluded(&entry.path) { + return false; + } + if !self.query.filters_path() { + return true; + } + + let as_abs_path = LazyCell::new(move || snapshot.absolutize(&entry.path)); + let entry_path = entry.path.as_std_path(); + // 3. Check Exclusions (Pruning) + // If the current path is a child of an excluded path, we stop. + let is_excluded = self.path_is_definitely_excluded(entry_path, snapshot); + + if is_excluded { + return false; + } + + // 4. Check Inclusions (Traversal) + if self.included.is_empty() { + return true; + } + + // We scan if the current path is a descendant of an include prefix + // OR if the current path is an ancestor of an include prefix (we need to go deeper to find it). + let is_included = self.included.iter().any(|prefix| { + let (prefix_matches_entry, entry_matches_prefix) = if prefix.is_absolute() { + ( + prefix.starts_with(&**as_abs_path), + as_abs_path.starts_with(prefix), + ) + } else { + ( + prefix.starts_with(entry_path), + entry_path.starts_with(prefix), + ) + }; + + // Logic: + // 1. entry_matches_prefix: We are inside the target zone (e.g. glob: src/, current: src/lib/). Keep scanning. + // 2. prefix_matches_entry: We are above the target zone (e.g. 
glob: src/foo/, current: src/). Keep scanning to reach foo. + prefix_matches_entry || entry_matches_prefix + }); + + is_included + } + fn path_is_definitely_excluded(&self, path: &Path, snapshot: &Snapshot) -> bool { + if !self.query.files_to_exclude().sources().is_empty() { + let mut path = if self.query.match_full_paths() { + let mut full_path = snapshot.root_name().as_std_path().to_owned(); + full_path.push(path); + full_path + } else { + path.to_owned() + }; + loop { + if self.query.files_to_exclude().is_match(&path) { + return true; + } else if !path.pop() { + return false; + } + } + } else { + false + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use fs::FakeFs; + use serde_json::json; + use settings::Settings; + use util::{ + path, + paths::{PathMatcher, PathStyle}, + rel_path::RelPath, + }; + use worktree::{Entry, EntryKind, WorktreeSettings}; + + use crate::{ + Project, project_search::PathInclusionMatcher, project_tests::init_test, + search::SearchQuery, + }; + + #[gpui::test] + async fn test_path_inclusion_matcher(cx: &mut gpui::TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.background_executor.clone()); + fs.insert_tree( + "/root", + json!({ + ".gitignore": "src/data/\n", + "src": { + "data": { + "main.csv": "field_1,field_2,field_3", + }, + "lib": { + "main.txt": "Are you familiar with fields?", + }, + }, + }), + ) + .await; + + let project = Project::test(fs.clone(), [path!("/root").as_ref()], cx).await; + let worktree = project.update(cx, |project, cx| project.worktrees(cx).next().unwrap()); + let (worktree_settings, worktree_snapshot) = worktree.update(cx, |worktree, cx| { + let settings_location = worktree.settings_location(cx); + return ( + WorktreeSettings::get(Some(settings_location), cx).clone(), + worktree.snapshot(), + ); + }); + + // Manually create a test entry for the gitignored directory since it won't + // be loaded by the worktree + let entry = Entry { + id: ProjectEntryId::from_proto(1), + kind: EntryKind::UnloadedDir, + path: Arc::from(RelPath::unix(Path::new("src/data")).unwrap()), + inode: 0, + mtime: None, + canonical_path: None, + is_ignored: true, + is_hidden: false, + is_always_included: false, + is_external: false, + is_private: false, + size: 0, + char_bag: Default::default(), + is_fifo: false, + }; + + // 1. Test searching for `field`, including ignored files without any + // inclusion and exclusion filters. + let include_ignored = true; + let files_to_include = PathMatcher::default(); + let files_to_exclude = PathMatcher::default(); + let match_full_paths = false; + let search_query = SearchQuery::text( + "field", + false, + false, + include_ignored, + files_to_include, + files_to_exclude, + match_full_paths, + None, + ) + .unwrap(); + + let path_matcher = PathInclusionMatcher::new(Arc::new(search_query)); + assert!(path_matcher.should_scan_gitignored_dir( + &entry, + &worktree_snapshot, + &worktree_settings + )); + + // 2. Test searching for `field`, including ignored files but updating + // `files_to_include` to only include files under `src/lib`. 
+ let include_ignored = true; + let files_to_include = PathMatcher::new(vec!["src/lib"], PathStyle::Posix).unwrap(); + let files_to_exclude = PathMatcher::default(); + let match_full_paths = false; + let search_query = SearchQuery::text( + "field", + false, + false, + include_ignored, + files_to_include, + files_to_exclude, + match_full_paths, + None, + ) + .unwrap(); + + let path_matcher = PathInclusionMatcher::new(Arc::new(search_query)); + assert!(!path_matcher.should_scan_gitignored_dir( + &entry, + &worktree_snapshot, + &worktree_settings + )); + } +} From 18f14a6ebf6251cd54cd56a5e2dd2a5222de01d3 Mon Sep 17 00:00:00 2001 From: Adrian Date: Thu, 20 Nov 2025 10:12:57 -0800 Subject: [PATCH 0257/1030] vim: Fix paste action for visual modes (#43031) Closes #41810 Release Notes: - Fixed paste not working correctly in vim visual modes --- crates/vim/src/normal.rs | 2 +- crates/vim/src/normal/paste.rs | 2 +- crates/vim/src/vim.rs | 4 ++++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/vim/src/normal.rs b/crates/vim/src/normal.rs index e53e2306d6f14b1c230ea383103bdf67fe8196c0..aee0b424f04d49cc634048bb64f95805beef8455 100644 --- a/crates/vim/src/normal.rs +++ b/crates/vim/src/normal.rs @@ -3,7 +3,7 @@ mod convert; mod delete; mod increment; pub(crate) mod mark; -mod paste; +pub(crate) mod paste; pub(crate) mod repeat; mod scroll; pub(crate) mod search; diff --git a/crates/vim/src/normal/paste.rs b/crates/vim/src/normal/paste.rs index 77305ea783c34e340a7ed840658088f3e6191abb..dffabde8fd84081762d7cb62bfa14a2f4ab4a59d 100644 --- a/crates/vim/src/normal/paste.rs +++ b/crates/vim/src/normal/paste.rs @@ -18,7 +18,7 @@ use crate::{ }; /// Pastes text from the specified register at the cursor position. -#[derive(Clone, Deserialize, JsonSchema, PartialEq, Action)] +#[derive(Clone, Default, Deserialize, JsonSchema, PartialEq, Action)] #[action(namespace = vim)] #[serde(deny_unknown_fields)] pub struct Paste { diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index b633c9ef3c5aa13286277d602ec08efc3ab03373..9f31b3d3ac0c23457d585990de3a0b201f08b795 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -19,6 +19,7 @@ mod state; mod surrounds; mod visual; +use crate::normal::paste::Paste as VimPaste; use collections::HashMap; use editor::{ Anchor, Bias, Editor, EditorEvent, EditorSettings, HideMouseCursorOrigin, MultiBufferOffset, @@ -922,6 +923,9 @@ impl Vim { cx, |vim, _: &editor::actions::Paste, window, cx| match vim.mode { Mode::Replace => vim.paste_replace(window, cx), + Mode::Visual | Mode::VisualLine | Mode::VisualBlock => { + vim.paste(&VimPaste::default(), window, cx); + } _ => { vim.update_editor(cx, |_, editor, cx| editor.paste(&Paste, window, cx)); } From d6d967f4431aebb2e3489217aadb0c49d2fe306a Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Thu, 20 Nov 2025 13:40:27 -0500 Subject: [PATCH 0258/1030] Re-resolve anchor before applying AI inline assist edits (#43103) Closes #39088 Release Notes: - Fixed AI assistant edits being scrambled when file was modified while it was open -- Co-authored-by: Conrad Irwin --- crates/agent_ui/src/buffer_codegen.rs | 8 ++++++++ crates/language/src/buffer_tests.rs | 19 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/crates/agent_ui/src/buffer_codegen.rs b/crates/agent_ui/src/buffer_codegen.rs index 0b7c1f61988979565aa9b55d4bbd245682b680df..3877bede3370589d1b6f74529cfa3c6ca1f34f0a 100644 --- a/crates/agent_ui/src/buffer_codegen.rs +++ 
b/crates/agent_ui/src/buffer_codegen.rs @@ -491,6 +491,14 @@ impl CodegenAlternative { cx: &mut Context, ) { let start_time = Instant::now(); + + // Make a new snapshot and re-resolve anchor in case the document was modified. + // This can happen often if the editor loses focus and is saved + reformatted, + // as in https://github.com/zed-industries/zed/issues/39088 + self.snapshot = self.buffer.read(cx).snapshot(cx); + self.range = self.snapshot.anchor_after(self.range.start) + ..self.snapshot.anchor_after(self.range.end); + let snapshot = self.snapshot.clone(); let selected_text = snapshot .text_for_range(self.range.start..self.range.end) diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index 4f3f760ba8b841c45031cd1a811f6bb1e8fd2534..14475af5984d75de9e166dd1d8a0379c6a66f3fd 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -3455,6 +3455,25 @@ fn test_contiguous_ranges() { ); } +#[gpui::test] +fn test_insertion_after_deletion(cx: &mut gpui::App) { + let buffer = cx.new(|cx| Buffer::local("struct Foo {\n \n}", cx)); + buffer.update(cx, |buffer, cx| { + let mut anchor = buffer.anchor_after(17); + buffer.edit([(12..18, "")], None, cx); + let snapshot = buffer.snapshot(); + assert_eq!(snapshot.text(), "struct Foo {}"); + if !anchor.is_valid(&snapshot) { + anchor = snapshot.anchor_after(snapshot.offset_for_anchor(&anchor)); + } + buffer.edit([(anchor..anchor, "\n")], None, cx); + buffer.edit([(anchor..anchor, "field1:")], None, cx); + buffer.edit([(anchor..anchor, " i32,")], None, cx); + let snapshot = buffer.snapshot(); + assert_eq!(snapshot.text(), "struct Foo {\nfield1: i32,}"); + }) +} + #[gpui::test(iterations = 500)] fn test_trailing_whitespace_ranges(mut rng: StdRng) { // Generate a random multi-line string containing From e6e5ccbf10c82678b14723807ba456b8ae70b5f5 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 20 Nov 2025 20:30:34 +0100 Subject: [PATCH 0259/1030] ui: Render fallback icon for avatars that failed to load (#43183) Before we were simply not rendering anything which could lead to some very surprising situations when joining channels ... Now it will look like so image Release Notes: - Improved rendering of avatars that failed to load by rendering a fallback icon --- crates/ui/src/components/avatar.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/ui/src/components/avatar.rs b/crates/ui/src/components/avatar.rs index 19f7c4660bc64e756950df6f5ab0e19192f4096b..551ab4e3edff90f3b987ddd5ef19cfdeae1fa214 100644 --- a/crates/ui/src/components/avatar.rs +++ b/crates/ui/src/components/avatar.rs @@ -91,7 +91,12 @@ impl RenderOnce for Avatar { self.image .size(image_size) .rounded_full() - .bg(cx.theme().colors().ghost_element_background), + .bg(cx.theme().colors().ghost_element_background) + .with_fallback(|| { + Icon::new(IconName::Person) + .color(Color::Muted) + .into_any_element() + }), ) .children(self.indicator.map(|indicator| div().child(indicator))) } From 7e341bcf949d3838ca0e0cf0b51be3669e36015b Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 20 Nov 2025 21:47:39 +0200 Subject: [PATCH 0260/1030] Support bracket colorization (rainbow brackets) (#43172) Deals with https://github.com/zed-industries/zed/issues/5259 Highlights brackets with different colors based on their depth. Uses existing tree-sitter queries from brackets.scm to find brackets, uses theme's accents to color them. 
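To make the setup concrete, here is a minimal user-settings sketch. The top-level `colorize_brackets` key is the one added to `default.json` in this patch; the per-language `languages`/`Rust` override block is the usual per-language settings shape and is assumed here purely for illustration:

```jsonc
// Minimal sketch: enable bracket colorization globally,
// or only for selected languages via the per-language override block (assumed shape).
{
  "colorize_brackets": true,
  "languages": {
    "Rust": {
      "colorize_brackets": true
    }
  }
}
```

The colors themselves come from the active theme's `accents` list and cycle once the nesting depth exceeds the number of accents, which is why overriding `accents` (as in the `theme_overrides` example below) directly changes the bracket palette.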
https://github.com/user-attachments/assets/cc5f3aba-22fa-446d-9af7-ba6e772029da 1. Adds `colorize_brackets` language setting that allows, per language or globally for all languages, to configure whether Zed should color the brackets for a particular language. Disabled for all languages by default. 2. Any given language can opt-out a certain bracket pair by amending the brackets.scm like `("\"" @open "\"" @close) ` -> `(("\"" @open "\"" @close) (#set! rainbow.exclude))` 3. Brackets are using colors from theme accents, which can be overridden as ```jsonc "theme_overrides": { "One Dark": { "accents": ["#ff69b4", "#7fff00", "#ff1493", "#00ffff", "#ff8c00", "#9400d3"] } }, ``` Release Notes: - Added bracket colorization (rainbow brackets) support. Use `colorize_brackets` language setting to enable. --------- Co-authored-by: MrSubidubi Co-authored-by: Lukas Wirth Co-authored-by: MrSubidubi Co-authored-by: Lukas Wirth Co-authored-by: Smit Barmase --- Cargo.lock | 2 + assets/settings/default.json | 6 + .../src/session/running/console.rs | 1 + crates/editor/src/bracket_colorization.rs | 1287 +++++++++++++++++ crates/editor/src/display_map.rs | 62 +- .../src/display_map/custom_highlights.rs | 14 +- crates/editor/src/editor.rs | 108 +- crates/editor/src/editor_tests.rs | 22 +- crates/editor/src/scroll.rs | 1 + crates/language/src/buffer.rs | 257 +++- crates/language/src/buffer/row_chunk.rs | 121 ++ crates/language/src/buffer_tests.rs | 15 +- crates/language/src/language.rs | 12 +- crates/language/src/language_settings.rs | 13 +- crates/languages/src/bash/brackets.scm | 18 +- crates/languages/src/c/brackets.scm | 4 +- crates/languages/src/cpp/brackets.scm | 4 +- crates/languages/src/css/brackets.scm | 4 +- crates/languages/src/go/brackets.scm | 6 +- crates/languages/src/javascript/brackets.scm | 6 +- crates/languages/src/json/brackets.scm | 2 +- crates/languages/src/jsonc/brackets.scm | 2 +- crates/languages/src/markdown/brackets.scm | 8 +- crates/languages/src/python/brackets.scm | 2 +- crates/languages/src/rust/brackets.scm | 4 +- crates/languages/src/tsx/brackets.scm | 8 +- crates/languages/src/typescript/brackets.scm | 6 +- crates/languages/src/yaml/brackets.scm | 4 +- crates/multi_buffer/src/multi_buffer.rs | 29 +- crates/project/src/lsp_store.rs | 33 +- .../project/src/lsp_store/inlay_hint_cache.rs | 88 +- crates/remote_server/Cargo.toml | 1 + .../remote_server/src/remote_editing_tests.rs | 8 + crates/search/Cargo.toml | 1 + crates/search/src/project_search.rs | 5 +- .../settings/src/settings_content/language.rs | 4 + crates/settings/src/settings_content/theme.rs | 2 +- crates/settings/src/vscode_import.rs | 1 + crates/settings_ui/src/page_data.rs | 19 + docs/src/ai/external-agents.md | 1 + docs/src/configuring-languages.md | 1 + docs/src/configuring-zed.md | 12 + docs/src/extensions/languages.md | 8 + docs/src/themes.md | 10 +- docs/src/visual-customization.md | 2 + extensions/html/languages/html/brackets.scm | 4 +- 46 files changed, 1982 insertions(+), 246 deletions(-) create mode 100644 crates/editor/src/bracket_colorization.rs create mode 100644 crates/language/src/buffer/row_chunk.rs diff --git a/Cargo.lock b/Cargo.lock index a21c80c8b279206a791020231100abe6468ece6f..9cace971e3d9248361c7e97def9481206fa3cc1b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14082,6 +14082,7 @@ dependencies = [ "smol", "sysinfo 0.37.2", "task", + "theme", "thiserror 2.0.17", "toml 0.8.23", "unindent", @@ -15135,6 +15136,7 @@ dependencies = [ "language", "lsp", "menu", + "pretty_assertions", "project", "schemars 
1.0.4", "serde", diff --git a/assets/settings/default.json b/assets/settings/default.json index 63ef8b51bb84c8f8dc5475dc172b82a78cee8eac..bd8a6e96dd3929c63f47b20f54d3422051363511 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -255,6 +255,12 @@ // Whether to display inline and alongside documentation for items in the // completions menu "show_completion_documentation": true, + // Whether to colorize brackets in the editor. + // (also known as "rainbow brackets") + // + // The colors that are used for different indentation levels are defined in the theme (theme key: `accents`). + // They can be customized by using theme overrides. + "colorize_brackets": false, // When to show the scrollbar in the completion menu. // This setting can take four values: // diff --git a/crates/debugger_ui/src/session/running/console.rs b/crates/debugger_ui/src/session/running/console.rs index f72d92e038ce234327e29776f923c27d6592cf16..2939079f256d4c2742e514f002a4c9fe5e58b49a 100644 --- a/crates/debugger_ui/src/session/running/console.rs +++ b/crates/debugger_ui/src/session/running/console.rs @@ -242,6 +242,7 @@ impl Console { start_offset, vec![range], style, + false, cx, ); } diff --git a/crates/editor/src/bracket_colorization.rs b/crates/editor/src/bracket_colorization.rs new file mode 100644 index 0000000000000000000000000000000000000000..902ec2b7702b945bb482e4e4700cf37b36ae907b --- /dev/null +++ b/crates/editor/src/bracket_colorization.rs @@ -0,0 +1,1287 @@ +//! Bracket highlights, also known as "rainbow brackets". +//! Uses tree-sitter queries from brackets.scm to capture bracket pairs, +//! and theme accents to colorize those. + +use std::ops::Range; + +use crate::Editor; +use collections::HashMap; +use gpui::{Context, HighlightStyle}; +use language::language_settings; +use multi_buffer::{Anchor, ExcerptId}; +use ui::{ActiveTheme, utils::ensure_minimum_contrast}; + +struct ColorizedBracketsHighlight; + +impl Editor { + pub(crate) fn colorize_brackets(&mut self, invalidate: bool, cx: &mut Context) { + if !self.mode.is_full() { + return; + } + + if invalidate { + self.fetched_tree_sitter_chunks.clear(); + } + + let accents_count = cx.theme().accents().0.len(); + let multi_buffer_snapshot = self.buffer().read(cx).snapshot(cx); + let all_excerpts = self.buffer().read(cx).excerpt_ids(); + let anchor_in_multi_buffer = |current_excerpt: ExcerptId, text_anchor: text::Anchor| { + multi_buffer_snapshot + .anchor_in_excerpt(current_excerpt, text_anchor) + .or_else(|| { + all_excerpts + .iter() + .filter(|&&excerpt_id| excerpt_id != current_excerpt) + .find_map(|&excerpt_id| { + multi_buffer_snapshot.anchor_in_excerpt(excerpt_id, text_anchor) + }) + }) + }; + + let bracket_matches_by_accent = self.visible_excerpts(cx).into_iter().fold( + HashMap::default(), + |mut acc, (excerpt_id, (buffer, buffer_version, buffer_range))| { + let buffer_snapshot = buffer.read(cx).snapshot(); + if language_settings::language_settings( + buffer_snapshot.language().map(|language| language.name()), + buffer_snapshot.file(), + cx, + ) + .colorize_brackets + { + let fetched_chunks = self + .fetched_tree_sitter_chunks + .entry(excerpt_id) + .or_default(); + + let brackets_by_accent = buffer_snapshot + .fetch_bracket_ranges( + buffer_range.start..buffer_range.end, + Some((&buffer_version, fetched_chunks)), + ) + .into_iter() + .flat_map(|(chunk_range, pairs)| { + if fetched_chunks.insert(chunk_range) { + pairs + } else { + Vec::new() + } + }) + .filter_map(|pair| { + let color_index = pair.color_index?; + + let 
buffer_open_range = buffer_snapshot + .anchor_before(pair.open_range.start) + ..buffer_snapshot.anchor_after(pair.open_range.end); + let buffer_close_range = buffer_snapshot + .anchor_before(pair.close_range.start) + ..buffer_snapshot.anchor_after(pair.close_range.end); + let multi_buffer_open_range = + anchor_in_multi_buffer(excerpt_id, buffer_open_range.start) + .zip(anchor_in_multi_buffer(excerpt_id, buffer_open_range.end)); + let multi_buffer_close_range = + anchor_in_multi_buffer(excerpt_id, buffer_close_range.start).zip( + anchor_in_multi_buffer(excerpt_id, buffer_close_range.end), + ); + + let mut ranges = Vec::with_capacity(2); + if let Some((open_start, open_end)) = multi_buffer_open_range { + ranges.push(open_start..open_end); + } + if let Some((close_start, close_end)) = multi_buffer_close_range { + ranges.push(close_start..close_end); + } + if ranges.is_empty() { + None + } else { + Some((color_index % accents_count, ranges)) + } + }); + + for (accent_number, new_ranges) in brackets_by_accent { + let ranges = acc + .entry(accent_number) + .or_insert_with(Vec::>::new); + + for new_range in new_ranges { + let i = ranges + .binary_search_by(|probe| { + probe.start.cmp(&new_range.start, &multi_buffer_snapshot) + }) + .unwrap_or_else(|i| i); + ranges.insert(i, new_range); + } + } + } + + acc + }, + ); + + if invalidate { + self.clear_highlights::(cx); + } + + let editor_background = cx.theme().colors().editor_background; + for (accent_number, bracket_highlights) in bracket_matches_by_accent { + let bracket_color = cx.theme().accents().color_for_index(accent_number as u32); + let adjusted_color = ensure_minimum_contrast(bracket_color, editor_background, 55.0); + let style = HighlightStyle { + color: Some(adjusted_color), + ..HighlightStyle::default() + }; + + self.highlight_text_key::( + accent_number, + bracket_highlights, + style, + true, + cx, + ); + } + } +} + +#[cfg(test)] +mod tests { + use std::{cmp, sync::Arc, time::Duration}; + + use super::*; + use crate::{ + DisplayPoint, EditorSnapshot, MoveToBeginning, MoveToEnd, MoveUp, + display_map::{DisplayRow, ToDisplayPoint}, + editor_tests::init_test, + test::{ + editor_lsp_test_context::EditorLspTestContext, editor_test_context::EditorTestContext, + }, + }; + use collections::HashSet; + use fs::FakeFs; + use gpui::{AppContext as _, UpdateGlobal as _}; + use indoc::indoc; + use itertools::Itertools; + use language::Capability; + use languages::rust_lang; + use multi_buffer::{ExcerptRange, MultiBuffer}; + use pretty_assertions::assert_eq; + use project::Project; + use rope::Point; + use serde_json::json; + use settings::{AccentContent, SettingsStore}; + use text::{Bias, OffsetRangeExt, ToOffset}; + use theme::ThemeStyleContent; + use ui::SharedString; + use util::{path, post_inc}; + + #[gpui::test] + async fn test_basic_bracket_colorization(cx: &mut gpui::TestAppContext) { + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(rust_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + cx.set_state(indoc! 
{r#"ˇuse std::{collections::HashMap, future::Future}; + +fn main() { + let a = one((), { () }, ()); + println!("{a}"); + println!("{a}"); + for i in 0..a { + println!("{i}"); + } + + let b = { + { + { + [([([([([([([([([([((), ())])])])])])])])])])] + } + } + }; +} + +#[rustfmt::skip] +fn one(a: (), (): (), c: ()) -> usize { 1 } + +fn two(a: HashMap>>) -> usize +where + T: Future>>>>, +{ + 2 +} +"#}); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + assert_eq!( + r#"use std::«1{collections::HashMap, future::Future}1»; + +fn main«1()1» «1{ + let a = one«2(«3()3», «3{ «4()4» }3», «3()3»)2»; + println!«2("{a}")2»; + println!«2("{a}")2»; + for i in 0..a «2{ + println!«3("{i}")3»; + }2» + + let b = «2{ + «3{ + «4{ + «5[«6(«7[«1(«2[«3(«4[«5(«6[«7(«1[«2(«3[«4(«5[«6(«7[«1(«2[«3(«4()4», «4()4»)3»]2»)1»]7»)6»]5»)4»]3»)2»]1»)7»]6»)5»]4»)3»]2»)1»]7»)6»]5» + }4» + }3» + }2»; +}1» + +#«1[rustfmt::skip]1» +fn one«1(a: «2()2», «2()2»: «2()2», c: «2()2»)1» -> usize «1{ 1 }1» + +fn two«11»«1(a: HashMap«24»>3»>2»)1» -> usize +where + T: Future«15»>4»>3»>2»>1», +«1{ + 2 +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +6 hsla(95.00, 38.00%, 62.00%, 1.00) +7 hsla(39.00, 67.00%, 69.00%, 1.00) +"#, + &bracket_colors_markup(&mut cx), + "All brackets should be colored based on their depth" + ); + } + + #[gpui::test] + async fn test_bracket_colorization_when_editing(cx: &mut gpui::TestAppContext) { + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(rust_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + cx.set_state(indoc! {r#" +struct Foo<'a, T> { + data: Vec>, +} + +fn process_data() { + let map:ˇ +} +"#}); + + cx.update_editor(|editor, window, cx| { + editor.handle_input(" Result<", window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + indoc! {r#" +struct Foo«1<'a, T>1» «1{ + data: Vec«23»>2», +}1» + +fn process_data«1()1» «1{ + let map: Result< +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +"#}, + &bracket_colors_markup(&mut cx), + "Brackets without pairs should be ignored and not colored" + ); + + cx.update_editor(|editor, window, cx| { + editor.handle_input("Option1» «1{ + data: Vec«23»>2», +}1» + +fn process_data«1()1» «1{ + let map: Result", window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + indoc! {r#" +struct Foo«1<'a, T>1» «1{ + data: Vec«23»>2», +}1» + +fn process_data«1()1» «1{ + let map: Result2» +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +"#}, + &bracket_colors_markup(&mut cx), + "When brackets start to get closed, inner brackets are re-colored based on their depth" + ); + + cx.update_editor(|editor, window, cx| { + editor.handle_input(">", window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + indoc! 
{r#" +struct Foo«1<'a, T>1» «1{ + data: Vec«23»>2», +}1» + +fn process_data«1()1» «1{ + let map: Result3»>2» +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +"#}, + &bracket_colors_markup(&mut cx), + ); + + cx.update_editor(|editor, window, cx| { + editor.handle_input(", ()> = unimplemented!();", window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + indoc! {r#" +struct Foo«1<'a, T>1» «1{ + data: Vec«23»>2», +}1» + +fn process_data«1()1» «1{ + let map: Result«24»>3», «3()3»>2» = unimplemented!«2()2»; +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#}, + &bracket_colors_markup(&mut cx), + ); + } + + #[gpui::test] + async fn test_bracket_colorization_chunks(cx: &mut gpui::TestAppContext) { + let comment_lines = 100; + + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(rust_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + cx.set_state(&separate_with_comment_lines( + indoc! {r#" +mod foo { + ˇfn process_data_1() { + let map: Option> = None; + } +"#}, + indoc! {r#" + fn process_data_2() { + let map: Option> = None; + } +} +"#}, + comment_lines, + )); + + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + }2» +"#}, + indoc! {r#" + fn process_data_2() { + let map: Option> = None; + } +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#}, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "First, the only visible chunk is getting the bracket highlights" + ); + + cx.update_editor(|editor, window, cx| { + editor.move_to_end(&MoveToEnd, window, cx); + editor.move_up(&MoveUp, window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + }2» +"#}, + indoc! {r#" + fn process_data_2«2()2» «2{ + let map: Option«34»>3» = None; + }2» +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#}, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "After scrolling to the bottom, both chunks should have the highlights" + ); + + cx.update_editor(|editor, window, cx| { + editor.handle_input("{{}}}", window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo «1{ + fn process_data_1() { + let map: Option> = None; + } +"#}, + indoc! 
{r#" + fn process_data_2«2()2» «2{ + let map: Option«34»>3» = None; + } + «3{«4{}4»}3»}2»}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#}, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "First chunk's brackets are invalidated after an edit, and only 2nd (visible) chunk is re-colorized" + ); + + cx.update_editor(|editor, window, cx| { + editor.move_to_beginning(&MoveToBeginning, window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + }2» +"#}, + indoc! {r#" + fn process_data_2«2()2» «2{ + let map: Option«34»>3» = None; + } + «3{«4{}4»}3»}2»}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#}, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "Scrolling back to top should re-colorize all chunks' brackets" + ); + + cx.update(|_, cx| { + SettingsStore::update_global(cx, |store, cx| { + store.update_user_settings(cx, |settings| { + settings.project.all_languages.defaults.colorize_brackets = Some(false); + }); + }); + }); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo { + fn process_data_1() { + let map: Option> = None; + } +"#}, + r#" fn process_data_2() { + let map: Option> = None; + } + {{}}}} + +"#, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "Turning bracket colorization off should remove all bracket colors" + ); + + cx.update(|_, cx| { + SettingsStore::update_global(cx, |store, cx| { + store.update_user_settings(cx, |settings| { + settings.project.all_languages.defaults.colorize_brackets = Some(true); + }); + }); + }); + assert_eq!( + &separate_with_comment_lines( + indoc! {r#" +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + }2» +"#}, + r#" fn process_data_2() { + let map: Option> = None; + } + {{}}}}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#, + comment_lines, + ), + &bracket_colors_markup(&mut cx), + "Turning bracket colorization back on refreshes the visible excerpts' bracket colors" + ); + } + + #[gpui::test] + async fn test_rainbow_bracket_highlights(cx: &mut gpui::TestAppContext) { + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(rust_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + // taken from r-a https://github.com/rust-lang/rust-analyzer/blob/d733c07552a2dc0ec0cc8f4df3f0ca969a93fd90/crates/ide/src/inlay_hints.rs#L81-L297 + cx.set_state(indoc! 
{r#"ˇ + pub(crate) fn inlay_hints( + db: &RootDatabase, + file_id: FileId, + range_limit: Option, + config: &InlayHintsConfig, + ) -> Vec { + let _p = tracing::info_span!("inlay_hints").entered(); + let sema = Semantics::new(db); + let file_id = sema + .attach_first_edition(file_id) + .unwrap_or_else(|| EditionedFileId::current_edition(db, file_id)); + let file = sema.parse(file_id); + let file = file.syntax(); + + let mut acc = Vec::new(); + + let Some(scope) = sema.scope(file) else { + return acc; + }; + let famous_defs = FamousDefs(&sema, scope.krate()); + let display_target = famous_defs.1.to_display_target(sema.db); + + let ctx = &mut InlayHintCtx::default(); + let mut hints = |event| { + if let Some(node) = handle_event(ctx, event) { + hints(&mut acc, ctx, &famous_defs, config, file_id, display_target, node); + } + }; + let mut preorder = file.preorder(); + salsa::attach(sema.db, || { + while let Some(event) = preorder.next() { + if matches!((&event, range_limit), (WalkEvent::Enter(node), Some(range)) if range.intersect(node.text_range()).is_none()) + { + preorder.skip_subtree(); + continue; + } + hints(event); + } + }); + if let Some(range_limit) = range_limit { + acc.retain(|hint| range_limit.contains_range(hint.range)); + } + acc + } + + #[derive(Default)] + struct InlayHintCtx { + lifetime_stacks: Vec>, + extern_block_parent: Option, + } + + pub(crate) fn inlay_hints_resolve( + db: &RootDatabase, + file_id: FileId, + resolve_range: TextRange, + hash: u64, + config: &InlayHintsConfig, + hasher: impl Fn(&InlayHint) -> u64, + ) -> Option { + let _p = tracing::info_span!("inlay_hints_resolve").entered(); + let sema = Semantics::new(db); + let file_id = sema + .attach_first_edition(file_id) + .unwrap_or_else(|| EditionedFileId::current_edition(db, file_id)); + let file = sema.parse(file_id); + let file = file.syntax(); + + let scope = sema.scope(file)?; + let famous_defs = FamousDefs(&sema, scope.krate()); + let mut acc = Vec::new(); + + let display_target = famous_defs.1.to_display_target(sema.db); + + let ctx = &mut InlayHintCtx::default(); + let mut hints = |event| { + if let Some(node) = handle_event(ctx, event) { + hints(&mut acc, ctx, &famous_defs, config, file_id, display_target, node); + } + }; + + let mut preorder = file.preorder(); + while let Some(event) = preorder.next() { + // This can miss some hints that require the parent of the range to calculate + if matches!(&event, WalkEvent::Enter(node) if resolve_range.intersect(node.text_range()).is_none()) + { + preorder.skip_subtree(); + continue; + } + hints(event); + } + acc.into_iter().find(|hint| hasher(hint) == hash) + } + + fn handle_event(ctx: &mut InlayHintCtx, node: WalkEvent) -> Option { + match node { + WalkEvent::Enter(node) => { + if let Some(node) = ast::AnyHasGenericParams::cast(node.clone()) { + let params = node + .generic_param_list() + .map(|it| { + it.lifetime_params() + .filter_map(|it| { + it.lifetime().map(|it| format_smolstr!("{}", &it.text()[1..])) + }) + .collect() + }) + .unwrap_or_default(); + ctx.lifetime_stacks.push(params); + } + if let Some(node) = ast::ExternBlock::cast(node.clone()) { + ctx.extern_block_parent = Some(node); + } + Some(node) + } + WalkEvent::Leave(n) => { + if ast::AnyHasGenericParams::can_cast(n.kind()) { + ctx.lifetime_stacks.pop(); + } + if ast::ExternBlock::can_cast(n.kind()) { + ctx.extern_block_parent = None; + } + None + } + } + } + + // At some point when our hir infra is fleshed out enough we should flip this and traverse the + // HIR instead of the syntax tree. 
+ fn hints( + hints: &mut Vec, + ctx: &mut InlayHintCtx, + famous_defs @ FamousDefs(sema, _krate): &FamousDefs<'_, '_>, + config: &InlayHintsConfig, + file_id: EditionedFileId, + display_target: DisplayTarget, + node: SyntaxNode, + ) { + closing_brace::hints( + hints, + sema, + config, + display_target, + InRealFile { file_id, value: node.clone() }, + ); + if let Some(any_has_generic_args) = ast::AnyHasGenericArgs::cast(node.clone()) { + generic_param::hints(hints, famous_defs, config, any_has_generic_args); + } + + match_ast! { + match node { + ast::Expr(expr) => { + chaining::hints(hints, famous_defs, config, display_target, &expr); + adjustment::hints(hints, famous_defs, config, display_target, &expr); + match expr { + ast::Expr::CallExpr(it) => param_name::hints(hints, famous_defs, config, file_id, ast::Expr::from(it)), + ast::Expr::MethodCallExpr(it) => { + param_name::hints(hints, famous_defs, config, file_id, ast::Expr::from(it)) + } + ast::Expr::ClosureExpr(it) => { + closure_captures::hints(hints, famous_defs, config, it.clone()); + closure_ret::hints(hints, famous_defs, config, display_target, it) + }, + ast::Expr::RangeExpr(it) => range_exclusive::hints(hints, famous_defs, config, it), + _ => Some(()), + } + }, + ast::Pat(it) => { + binding_mode::hints(hints, famous_defs, config, &it); + match it { + ast::Pat::IdentPat(it) => { + bind_pat::hints(hints, famous_defs, config, display_target, &it); + } + ast::Pat::RangePat(it) => { + range_exclusive::hints(hints, famous_defs, config, it); + } + _ => {} + } + Some(()) + }, + ast::Item(it) => match it { + ast::Item::Fn(it) => { + implicit_drop::hints(hints, famous_defs, config, display_target, &it); + if let Some(extern_block) = &ctx.extern_block_parent { + extern_block::fn_hints(hints, famous_defs, config, &it, extern_block); + } + lifetime::fn_hints(hints, ctx, famous_defs, config, it) + }, + ast::Item::Static(it) => { + if let Some(extern_block) = &ctx.extern_block_parent { + extern_block::static_hints(hints, famous_defs, config, &it, extern_block); + } + implicit_static::hints(hints, famous_defs, config, Either::Left(it)) + }, + ast::Item::Const(it) => implicit_static::hints(hints, famous_defs, config, Either::Right(it)), + ast::Item::Enum(it) => discriminant::enum_hints(hints, famous_defs, config, it), + ast::Item::ExternBlock(it) => extern_block::extern_block_hints(hints, famous_defs, config, it), + _ => None, + }, + // trait object type elisions + ast::Type(ty) => match ty { + ast::Type::FnPtrType(ptr) => lifetime::fn_ptr_hints(hints, ctx, famous_defs, config, ptr), + ast::Type::PathType(path) => { + lifetime::fn_path_hints(hints, ctx, famous_defs, config, &path); + implied_dyn_trait::hints(hints, famous_defs, config, Either::Left(path)); + Some(()) + }, + ast::Type::DynTraitType(dyn_) => { + implied_dyn_trait::hints(hints, famous_defs, config, Either::Right(dyn_)); + Some(()) + }, + _ => Some(()), + }, + ast::GenericParamList(it) => bounds::hints(hints, famous_defs, config, it), + _ => Some(()), + } + }; + } + "#}); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + let actual_ranges = cx.update_editor(|editor, window, cx| { + editor + .snapshot(window, cx) + .all_text_highlight_ranges::() + }); + + let mut highlighted_brackets = HashMap::default(); + for (color, range) in actual_ranges.iter().cloned() { + highlighted_brackets.insert(range, color); + } + + let last_bracket = actual_ranges + .iter() + .max_by_key(|(_, p)| p.end.row) + .unwrap() + .clone(); + + 
cx.update_editor(|editor, window, cx| { + let was_scrolled = editor.set_scroll_position( + gpui::Point::new(0.0, last_bracket.1.end.row as f64 * 2.0), + window, + cx, + ); + assert!(was_scrolled.0); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + let ranges_after_scrolling = cx.update_editor(|editor, window, cx| { + editor + .snapshot(window, cx) + .all_text_highlight_ranges::() + }); + let new_last_bracket = ranges_after_scrolling + .iter() + .max_by_key(|(_, p)| p.end.row) + .unwrap() + .clone(); + + assert_ne!( + last_bracket, new_last_bracket, + "After scrolling down, we should have highlighted more brackets" + ); + + cx.update_editor(|editor, window, cx| { + let was_scrolled = editor.set_scroll_position(gpui::Point::default(), window, cx); + assert!(was_scrolled.0); + }); + + for _ in 0..200 { + cx.update_editor(|editor, window, cx| { + editor.apply_scroll_delta(gpui::Point::new(0.0, 0.25), window, cx); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + let colored_brackets = cx.update_editor(|editor, window, cx| { + editor + .snapshot(window, cx) + .all_text_highlight_ranges::() + }); + for (color, range) in colored_brackets.clone() { + assert!( + highlighted_brackets.entry(range).or_insert(color) == &color, + "Colors should stay consistent while scrolling!" + ); + } + + let snapshot = cx.update_editor(|editor, window, cx| editor.snapshot(window, cx)); + let scroll_position = snapshot.scroll_position(); + let visible_lines = + cx.update_editor(|editor, _, _| editor.visible_line_count().unwrap()); + let visible_range = DisplayRow(scroll_position.y as u32) + ..DisplayRow((scroll_position.y + visible_lines) as u32); + + let current_highlighted_bracket_set: HashSet = HashSet::from_iter( + colored_brackets + .iter() + .flat_map(|(_, range)| [range.start, range.end]), + ); + + for highlight_range in highlighted_brackets.keys().filter(|bracket_range| { + visible_range.contains(&bracket_range.start.to_display_point(&snapshot).row()) + || visible_range.contains(&bracket_range.end.to_display_point(&snapshot).row()) + }) { + assert!( + current_highlighted_bracket_set.contains(&highlight_range.start) + || current_highlighted_bracket_set.contains(&highlight_range.end), + "Should not lose highlights while scrolling in the visible range!" + ); + } + + let buffer_snapshot = snapshot.buffer().as_singleton().unwrap().2; + for bracket_match in buffer_snapshot + .fetch_bracket_ranges( + snapshot + .display_point_to_point( + DisplayPoint::new(visible_range.start, 0), + Bias::Left, + ) + .to_offset(&buffer_snapshot) + ..snapshot + .display_point_to_point( + DisplayPoint::new( + visible_range.end, + snapshot.line_len(visible_range.end), + ), + Bias::Right, + ) + .to_offset(&buffer_snapshot), + None, + ) + .iter() + .flat_map(|entry| entry.1) + .filter(|bracket_match| bracket_match.color_index.is_some()) + { + let start = bracket_match.open_range.to_point(buffer_snapshot); + let end = bracket_match.close_range.to_point(buffer_snapshot); + let start_bracket = colored_brackets.iter().find(|(_, range)| *range == start); + assert!( + start_bracket.is_some(), + "Existing bracket start in the visible range should be highlighted. 
Missing color for match: \"{}\" at position {:?}", + buffer_snapshot + .text_for_range(start.start..end.end) + .collect::(), + start + ); + + let end_bracket = colored_brackets.iter().find(|(_, range)| *range == end); + assert!( + end_bracket.is_some(), + "Existing bracket end in the visible range should be highlighted. Missing color for match: \"{}\" at position {:?}", + buffer_snapshot + .text_for_range(start.start..end.end) + .collect::(), + start + ); + + assert_eq!( + start_bracket.unwrap().0, + end_bracket.unwrap().0, + "Bracket pair should be highlighted the same color!" + ) + } + } + } + + #[gpui::test] + async fn test_multi_buffer(cx: &mut gpui::TestAppContext) { + let comment_lines = 100; + + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let fs = FakeFs::new(cx.background_executor.clone()); + fs.insert_tree( + path!("/a"), + json!({ + "main.rs": "fn main() {{()}}", + "lib.rs": separate_with_comment_lines( + indoc! {r#" + mod foo { + fn process_data_1() { + let map: Option> = None; + // a + // b + // c + } + "#}, + indoc! {r#" + fn process_data_2() { + let other_map: Option> = None; + } + } + "#}, + comment_lines, + ) + }), + ) + .await; + + let project = Project::test(fs, [path!("/a").as_ref()], cx).await; + let language_registry = project.read_with(cx, |project, _| project.languages().clone()); + language_registry.add(rust_lang()); + + let buffer_1 = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/a/lib.rs"), cx) + }) + .await + .unwrap(); + let buffer_2 = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/a/main.rs"), cx) + }) + .await + .unwrap(); + + let multi_buffer = cx.new(|cx| { + let mut multi_buffer = MultiBuffer::new(Capability::ReadWrite); + multi_buffer.push_excerpts( + buffer_2.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))], + cx, + ); + + let excerpt_rows = 5; + let rest_of_first_except_rows = 3; + multi_buffer.push_excerpts( + buffer_1.clone(), + [ + ExcerptRange::new(Point::new(0, 0)..Point::new(excerpt_rows, 0)), + ExcerptRange::new( + Point::new( + comment_lines as u32 + excerpt_rows + rest_of_first_except_rows, + 0, + ) + ..Point::new( + comment_lines as u32 + + excerpt_rows + + rest_of_first_except_rows + + excerpt_rows, + 0, + ), + ), + ], + cx, + ); + multi_buffer + }); + + let editor = cx.add_window(|window, cx| { + Editor::for_multibuffer(multi_buffer, Some(project.clone()), window, cx) + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + let editor_snapshot = editor + .update(cx, |editor, window, cx| editor.snapshot(window, cx)) + .unwrap(); + assert_eq!( + indoc! 
{r#" + + +fn main«1()1» «1{«2{«3()3»}2»}1» + + +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + // a + // b + + + fn process_data_2«2()2» «2{ + let other_map: Option«34»>3» = None; + }2» +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#,}, + &editor_bracket_colors_markup(&editor_snapshot), + "Multi buffers should have their brackets colored even if no excerpts contain the bracket counterpart (after fn `process_data_2()`) \ +or if the buffer pair spans across multiple excerpts (the one after `mod foo`)" + ); + + editor + .update(cx, |editor, window, cx| { + editor.handle_input("{[]", window, cx); + }) + .unwrap(); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + let editor_snapshot = editor + .update(cx, |editor, window, cx| editor.snapshot(window, cx)) + .unwrap(); + assert_eq!( + indoc! {r#" + + +{«1[]1»fn main«1()1» «1{«2{«3()3»}2»}1» + + +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«34»>3» = None; + // a + // b + + + fn process_data_2«2()2» «2{ + let other_map: Option«34»>3» = None; + }2» +}1» + +1 hsla(207.80, 16.20%, 69.19%, 1.00) +2 hsla(29.00, 54.00%, 65.88%, 1.00) +3 hsla(286.00, 51.00%, 75.25%, 1.00) +4 hsla(187.00, 47.00%, 59.22%, 1.00) +5 hsla(355.00, 65.00%, 75.94%, 1.00) +"#,}, + &editor_bracket_colors_markup(&editor_snapshot), + ); + + cx.update(|cx| { + let theme = cx.theme().name.clone(); + SettingsStore::update_global(cx, |store, cx| { + store.update_user_settings(cx, |settings| { + settings.theme.theme_overrides = HashMap::from_iter([( + theme.to_string(), + ThemeStyleContent { + accents: vec![ + AccentContent(Some(SharedString::new("#ff0000"))), + AccentContent(Some(SharedString::new("#0000ff"))), + ], + ..ThemeStyleContent::default() + }, + )]); + }); + }); + }); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + let editor_snapshot = editor + .update(cx, |editor, window, cx| editor.snapshot(window, cx)) + .unwrap(); + assert_eq!( + indoc! 
{r#" + + +{«1[]1»fn main«1()1» «1{«2{«1()1»}2»}1» + + +mod foo «1{ + fn process_data_1«2()2» «2{ + let map: Option«12»>1» = None; + // a + // b + + + fn process_data_2«2()2» «2{ + let other_map: Option«12»>1» = None; + }2» +}1» + +1 hsla(0.00, 100.00%, 78.12%, 1.00) +2 hsla(240.00, 100.00%, 82.81%, 1.00) +"#,}, + &editor_bracket_colors_markup(&editor_snapshot), + "After updating theme accents, the editor should update the bracket coloring" + ); + } + + fn separate_with_comment_lines(head: &str, tail: &str, comment_lines: usize) -> String { + let mut result = head.to_string(); + result.push_str("\n"); + result.push_str(&"//\n".repeat(comment_lines)); + result.push_str(tail); + result + } + + fn bracket_colors_markup(cx: &mut EditorTestContext) -> String { + cx.update_editor(|editor, window, cx| { + editor_bracket_colors_markup(&editor.snapshot(window, cx)) + }) + } + + fn editor_bracket_colors_markup(snapshot: &EditorSnapshot) -> String { + fn display_point_to_offset(text: &str, point: DisplayPoint) -> usize { + let mut offset = 0; + for (row_idx, line) in text.lines().enumerate() { + if row_idx < point.row().0 as usize { + offset += line.len() + 1; // +1 for newline + } else { + offset += point.column() as usize; + break; + } + } + offset + } + + let actual_ranges = snapshot.all_text_highlight_ranges::(); + let editor_text = snapshot.text(); + + let mut next_index = 1; + let mut color_to_index = HashMap::default(); + let mut annotations = Vec::new(); + for (color, range) in &actual_ranges { + let color_index = *color_to_index + .entry(*color) + .or_insert_with(|| post_inc(&mut next_index)); + let start = snapshot.point_to_display_point(range.start, Bias::Left); + let end = snapshot.point_to_display_point(range.end, Bias::Right); + let start_offset = display_point_to_offset(&editor_text, start); + let end_offset = display_point_to_offset(&editor_text, end); + let bracket_text = &editor_text[start_offset..end_offset]; + let bracket_char = bracket_text.chars().next().unwrap(); + + if matches!(bracket_char, '{' | '[' | '(' | '<') { + annotations.push((start_offset, format!("«{color_index}"))); + } else { + annotations.push((end_offset, format!("{color_index}»"))); + } + } + + annotations.sort_by(|(pos_a, text_a), (pos_b, text_b)| { + pos_a.cmp(pos_b).reverse().then_with(|| { + let a_is_opening = text_a.starts_with('«'); + let b_is_opening = text_b.starts_with('«'); + match (a_is_opening, b_is_opening) { + (true, false) => cmp::Ordering::Less, + (false, true) => cmp::Ordering::Greater, + _ => cmp::Ordering::Equal, + } + }) + }); + annotations.dedup(); + + let mut markup = editor_text; + for (offset, text) in annotations { + markup.insert_str(offset, &text); + } + + markup.push_str("\n"); + for (index, color) in color_to_index + .iter() + .map(|(color, index)| (*index, *color)) + .sorted_by_key(|(index, _)| *index) + { + markup.push_str(&format!("{index} {color}\n")); + } + + markup + } +} diff --git a/crates/editor/src/display_map.rs b/crates/editor/src/display_map.rs index 86aad01a2ea946057fe08876937853f5b84f00bf..0d051507b2582bb13b5d14f6ad5c693ffdb321a2 100644 --- a/crates/editor/src/display_map.rs +++ b/crates/editor/src/display_map.rs @@ -483,8 +483,26 @@ impl DisplayMap { key: HighlightKey, ranges: Vec>, style: HighlightStyle, + merge: bool, + cx: &App, ) { - self.text_highlights.insert(key, Arc::new((style, ranges))); + let multi_buffer_snapshot = self.buffer.read(cx).snapshot(cx); + let to_insert = match self.text_highlights.remove(&key).filter(|_| merge) { + Some(previous) => { + let 
mut merged_ranges = previous.1.clone(); + for new_range in ranges { + let i = merged_ranges + .binary_search_by(|probe| { + probe.start.cmp(&new_range.start, &multi_buffer_snapshot) + }) + .unwrap_or_else(|i| i); + merged_ranges.insert(i, new_range); + } + Arc::new((style, merged_ranges)) + } + None => Arc::new((style, ranges)), + }; + self.text_highlights.insert(key, to_insert); } pub(crate) fn highlight_inlays( @@ -523,6 +541,15 @@ impl DisplayMap { .text_highlights .remove(&HighlightKey::Type(type_id)) .is_some(); + self.text_highlights.retain(|key, _| { + let retain = if let HighlightKey::TypePlus(key_type_id, _) = key { + key_type_id != &type_id + } else { + true + }; + cleared |= !retain; + retain + }); cleared |= self.inlay_highlights.remove(&type_id).is_some(); cleared } @@ -1382,6 +1409,33 @@ impl DisplaySnapshot { .cloned() } + #[cfg(any(test, feature = "test-support"))] + pub fn all_text_highlight_ranges( + &self, + ) -> Vec<(gpui::Hsla, Range)> { + use itertools::Itertools; + + let required_type_id = TypeId::of::(); + self.text_highlights + .iter() + .filter(|(key, _)| match key { + HighlightKey::Type(type_id) => type_id == &required_type_id, + HighlightKey::TypePlus(type_id, _) => type_id == &required_type_id, + }) + .map(|(_, value)| value.clone()) + .flat_map(|ranges| { + ranges + .1 + .iter() + .flat_map(|range| { + Some((ranges.0.color?, range.to_point(self.buffer_snapshot()))) + }) + .collect::>() + }) + .sorted_by_key(|(_, range)| range.start) + .collect() + } + #[allow(unused)] #[cfg(any(test, feature = "test-support"))] pub(crate) fn inlay_highlights( @@ -2387,6 +2441,8 @@ pub mod tests { ..buffer_snapshot.anchor_after(Point::new(3, 18)), ], red.into(), + false, + cx, ); map.insert_blocks( [BlockProperties { @@ -2698,7 +2754,7 @@ pub mod tests { ..Default::default() }; - map.update(cx, |map, _cx| { + map.update(cx, |map, cx| { map.highlight_text( HighlightKey::Type(TypeId::of::()), highlighted_ranges @@ -2710,6 +2766,8 @@ pub mod tests { }) .collect(), style, + false, + cx, ); }); diff --git a/crates/editor/src/display_map/custom_highlights.rs b/crates/editor/src/display_map/custom_highlights.rs index e3ae7c99208cb4549a7538ac7f2abcc601c6e6d0..a40d1adc82f4bc79308eaec901586232e9e2e5c2 100644 --- a/crates/editor/src/display_map/custom_highlights.rs +++ b/crates/editor/src/display_map/custom_highlights.rs @@ -40,7 +40,6 @@ impl<'a> CustomHighlightsChunks<'a> { buffer_chunks: multibuffer_snapshot.chunks(range.clone(), language_aware), buffer_chunk: None, offset: range.start, - text_highlights, highlight_endpoints: create_highlight_endpoints( &range, @@ -75,16 +74,9 @@ fn create_highlight_endpoints( let style = text_highlights.0; let ranges = &text_highlights.1; - let start_ix = match ranges.binary_search_by(|probe| { - let cmp = probe.end.cmp(&start, buffer); - if cmp.is_gt() { - cmp::Ordering::Greater - } else { - cmp::Ordering::Less - } - }) { - Ok(i) | Err(i) => i, - }; + let start_ix = ranges + .binary_search_by(|probe| probe.end.cmp(&start, buffer).then(cmp::Ordering::Less)) + .unwrap_or_else(|i| i); for range in &ranges[start_ix..] { if range.start.cmp(&end, buffer).is_ge() { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 5d1c24bcad76333beee8941ee729b9578bb7ad65..e06ba62ce94c69828b3ab08465b4375b4c862343 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -13,6 +13,7 @@ //! If you're looking to improve Vim mode, you should check out Vim crate that wraps Editor and overrides its behavior. 
pub mod actions; mod blink_manager; +mod bracket_colorization; mod clangd_ext; pub mod code_context_menus; pub mod display_map; @@ -118,11 +119,11 @@ use language::{ AutoindentMode, BlockCommentConfig, BracketMatch, BracketPair, Buffer, BufferRow, BufferSnapshot, Capability, CharClassifier, CharKind, CharScopeContext, CodeLabel, CursorShape, DiagnosticEntryRef, DiffOptions, EditPredictionsMode, EditPreview, HighlightedText, IndentKind, - IndentSize, Language, LanguageRegistry, OffsetRangeExt, OutlineItem, Point, Runnable, - Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions, WordsQuery, + IndentSize, Language, LanguageName, LanguageRegistry, OffsetRangeExt, OutlineItem, Point, + Runnable, Selection, SelectionGoal, TextObject, TransactionId, TreeSitterOptions, WordsQuery, language_settings::{ - self, LspInsertMode, RewrapBehavior, WordsCompletionMode, all_language_settings, - language_settings, + self, LanguageSettings, LspInsertMode, RewrapBehavior, WordsCompletionMode, + all_language_settings, language_settings, }, point_from_lsp, point_to_lsp, text_diff_with_options, }; @@ -175,6 +176,7 @@ use std::{ borrow::Cow, cell::{OnceCell, RefCell}, cmp::{self, Ordering, Reverse}, + collections::hash_map, iter::{self, Peekable}, mem, num::NonZeroU32, @@ -1193,6 +1195,9 @@ pub struct Editor { folding_newlines: Task<()>, select_next_is_case_sensitive: Option, pub lookup_key: Option>, + applicable_language_settings: HashMap, LanguageSettings>, + accent_overrides: Vec, + fetched_tree_sitter_chunks: HashMap>>, } fn debounce_value(debounce_ms: u64) -> Option { @@ -2333,12 +2338,18 @@ impl Editor { folding_newlines: Task::ready(()), lookup_key: None, select_next_is_case_sensitive: None, + applicable_language_settings: HashMap::default(), + accent_overrides: Vec::new(), + fetched_tree_sitter_chunks: HashMap::default(), }; if is_minimap { return editor; } + editor.applicable_language_settings = editor.fetch_applicable_language_settings(cx); + editor.accent_overrides = editor.fetch_accent_overrides(cx); + if let Some(breakpoints) = editor.breakpoint_store.as_ref() { editor ._subscriptions @@ -2378,6 +2389,7 @@ impl Editor { InlayHintRefreshReason::NewLinesShown, cx, ); + editor.colorize_brackets(false, cx); }) .ok(); }); @@ -21141,13 +21153,16 @@ impl Editor { key: usize, ranges: Vec>, style: HighlightStyle, + merge: bool, cx: &mut Context, ) { - self.display_map.update(cx, |map, _| { + self.display_map.update(cx, |map, cx| { map.highlight_text( HighlightKey::TypePlus(TypeId::of::(), key), ranges, style, + merge, + cx, ); }); cx.notify(); @@ -21159,8 +21174,14 @@ impl Editor { style: HighlightStyle, cx: &mut Context, ) { - self.display_map.update(cx, |map, _| { - map.highlight_text(HighlightKey::Type(TypeId::of::()), ranges, style) + self.display_map.update(cx, |map, cx| { + map.highlight_text( + HighlightKey::Type(TypeId::of::()), + ranges, + style, + false, + cx, + ) }); cx.notify(); } @@ -21308,7 +21329,6 @@ impl Editor { self.active_indent_guides_state.dirty = true; self.refresh_active_diagnostics(cx); self.refresh_code_actions(window, cx); - self.refresh_selected_text_highlights(true, window, cx); self.refresh_single_line_folds(window, cx); self.refresh_matching_bracket_highlights(window, cx); if self.has_active_edit_prediction() { @@ -21364,6 +21384,7 @@ impl Editor { } self.update_lsp_data(Some(buffer_id), window, cx); self.refresh_inlay_hints(InlayHintRefreshReason::NewLinesShown, cx); + self.colorize_brackets(false, cx); cx.emit(EditorEvent::ExcerptsAdded { buffer: 
buffer.clone(), predecessor: *predecessor, @@ -21401,10 +21422,16 @@ impl Editor { multi_buffer::Event::ExcerptsExpanded { ids } => { self.refresh_inlay_hints(InlayHintRefreshReason::NewLinesShown, cx); self.refresh_document_highlights(cx); + for id in ids { + self.fetched_tree_sitter_chunks.remove(id); + } + self.colorize_brackets(false, cx); cx.emit(EditorEvent::ExcerptsExpanded { ids: ids.clone() }) } multi_buffer::Event::Reparsed(buffer_id) => { self.tasks_update_task = Some(self.refresh_runnables(window, cx)); + self.refresh_selected_text_highlights(true, window, cx); + self.colorize_brackets(true, cx); jsx_tag_auto_close::refresh_enabled_in_any_buffer(self, multibuffer, cx); cx.emit(EditorEvent::Reparsed(*buffer_id)); @@ -21475,7 +21502,52 @@ impl Editor { cx.notify(); } + fn fetch_accent_overrides(&self, cx: &App) -> Vec { + if !self.mode.is_full() { + return Vec::new(); + } + + theme::ThemeSettings::get_global(cx) + .theme_overrides + .get(cx.theme().name.as_ref()) + .map(|theme_style| &theme_style.accents) + .into_iter() + .flatten() + .flat_map(|accent| accent.0.clone()) + .collect() + } + + fn fetch_applicable_language_settings( + &self, + cx: &App, + ) -> HashMap, LanguageSettings> { + if !self.mode.is_full() { + return HashMap::default(); + } + + self.buffer().read(cx).all_buffers().into_iter().fold( + HashMap::default(), + |mut acc, buffer| { + let buffer = buffer.read(cx); + let language = buffer.language().map(|language| language.name()); + if let hash_map::Entry::Vacant(v) = acc.entry(language.clone()) { + let file = buffer.file(); + v.insert(language_settings(language, file, cx).into_owned()); + } + acc + }, + ) + } + fn settings_changed(&mut self, window: &mut Window, cx: &mut Context) { + let new_language_settings = self.fetch_applicable_language_settings(cx); + let language_settings_changed = new_language_settings != self.applicable_language_settings; + self.applicable_language_settings = new_language_settings; + + let new_accent_overrides = self.fetch_accent_overrides(cx); + let accent_overrides_changed = new_accent_overrides != self.accent_overrides; + self.accent_overrides = new_accent_overrides; + if self.diagnostics_enabled() { let new_severity = EditorSettings::get_global(cx) .diagnostics_max_severity @@ -21547,15 +21619,19 @@ impl Editor { }) } } - } - if let Some(inlay_splice) = self.colors.as_mut().and_then(|colors| { - colors.render_mode_updated(EditorSettings::get_global(cx).lsp_document_colors) - }) { - if !inlay_splice.is_empty() { - self.splice_inlays(&inlay_splice.to_remove, inlay_splice.to_insert, cx); + if language_settings_changed || accent_overrides_changed { + self.colorize_brackets(true, cx); + } + + if let Some(inlay_splice) = self.colors.as_mut().and_then(|colors| { + colors.render_mode_updated(EditorSettings::get_global(cx).lsp_document_colors) + }) { + if !inlay_splice.is_empty() { + self.splice_inlays(&inlay_splice.to_remove, inlay_splice.to_insert, cx); + } + self.refresh_colors_for_visible_range(None, window, cx); } - self.refresh_colors_for_visible_range(None, window, cx); } cx.notify(); @@ -22668,7 +22744,7 @@ fn insert_extra_newline_tree_sitter( _ => return false, }; let pair = { - let mut result: Option = None; + let mut result: Option> = None; for pair in buffer .all_bracket_ranges(range.start.0..range.end.0) diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 9d567513b2a428a89b5a58ba75a1276411dce639..2bd1316371449d8f4b7e4c428e5e6c7c27f43457 100644 --- a/crates/editor/src/editor_tests.rs +++ 
b/crates/editor/src/editor_tests.rs @@ -17565,7 +17565,9 @@ async fn test_move_to_enclosing_bracket(cx: &mut TestAppContext) { init_test(cx, |_| {}); let mut cx = EditorLspTestContext::new_typescript(Default::default(), cx).await; - let mut assert = |before, after| { + + #[track_caller] + fn assert(before: &str, after: &str, cx: &mut EditorLspTestContext) { let _state_context = cx.set_state(before); cx.run_until_parked(); cx.update_editor(|editor, window, cx| { @@ -17573,30 +17575,33 @@ async fn test_move_to_enclosing_bracket(cx: &mut TestAppContext) { }); cx.run_until_parked(); cx.assert_editor_state(after); - }; + } // Outside bracket jumps to outside of matching bracket - assert("console.logˇ(var);", "console.log(var)ˇ;"); - assert("console.log(var)ˇ;", "console.logˇ(var);"); + assert("console.logˇ(var);", "console.log(var)ˇ;", &mut cx); + assert("console.log(var)ˇ;", "console.logˇ(var);", &mut cx); // Inside bracket jumps to inside of matching bracket - assert("console.log(ˇvar);", "console.log(varˇ);"); - assert("console.log(varˇ);", "console.log(ˇvar);"); + assert("console.log(ˇvar);", "console.log(varˇ);", &mut cx); + assert("console.log(varˇ);", "console.log(ˇvar);", &mut cx); // When outside a bracket and inside, favor jumping to the inside bracket assert( "console.log('foo', [1, 2, 3]ˇ);", - "console.log(ˇ'foo', [1, 2, 3]);", + "console.log('foo', ˇ[1, 2, 3]);", + &mut cx, ); assert( "console.log(ˇ'foo', [1, 2, 3]);", - "console.log('foo', [1, 2, 3]ˇ);", + "console.log('foo'ˇ, [1, 2, 3]);", + &mut cx, ); // Bias forward if two options are equally likely assert( "let result = curried_fun()ˇ();", "let result = curried_fun()()ˇ;", + &mut cx, ); // If directly adjacent to a smaller pair but inside a larger (not adjacent), pick the smaller @@ -17609,6 +17614,7 @@ async fn test_move_to_enclosing_bracket(cx: &mut TestAppContext) { function test() { console.logˇ('test') }"}, + &mut cx, ); } diff --git a/crates/editor/src/scroll.rs b/crates/editor/src/scroll.rs index d98dc89b6b0f1a5ab0ebd9a910db0fcb0db1f18c..a92735d18617057ddd10f049e5a22525827e1874 100644 --- a/crates/editor/src/scroll.rs +++ b/crates/editor/src/scroll.rs @@ -500,6 +500,7 @@ impl Editor { editor.register_visible_buffers(cx); editor.refresh_inlay_hints(InlayHintRefreshReason::NewLinesShown, cx); editor.update_lsp_data(None, window, cx); + editor.colorize_brackets(false, cx); }) .ok(); }); diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 95db651350a2e1c703ce0ab52c77f075a83a0500..fd5e6fcaf6435a2836ab1ad828933a9d0763f5b9 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -1,9 +1,12 @@ +pub mod row_chunk; + use crate::{ DebuggerTextObject, LanguageScope, Outline, OutlineConfig, RunnableCapture, RunnableTag, TextObject, TreeSitterOptions, diagnostic_set::{DiagnosticEntry, DiagnosticEntryRef, DiagnosticGroup}, language_settings::{LanguageSettings, language_settings}, outline::OutlineItem, + row_chunk::RowChunks, syntax_map::{ SyntaxLayer, SyntaxMap, SyntaxMapCapture, SyntaxMapCaptures, SyntaxMapMatch, SyntaxMapMatches, SyntaxSnapshot, ToTreeSitterPoint, @@ -18,9 +21,9 @@ pub use crate::{ proto, }; use anyhow::{Context as _, Result}; -use clock::Lamport; pub use clock::ReplicaId; -use collections::HashMap; +use clock::{Global, Lamport}; +use collections::{HashMap, HashSet}; use fs::MTime; use futures::channel::oneshot; use gpui::{ @@ -28,8 +31,9 @@ use gpui::{ Task, TaskLabel, TextStyle, }; +use itertools::Itertools; use lsp::{LanguageServerId, NumberOrString}; -use 
parking_lot::Mutex; +use parking_lot::{Mutex, RawMutex, lock_api::MutexGuard}; use serde::{Deserialize, Serialize}; use serde_json::Value; use settings::WorktreeId; @@ -45,7 +49,7 @@ use std::{ iter::{self, Iterator, Peekable}, mem, num::NonZeroU32, - ops::{Deref, Range}, + ops::{Deref, Not, Range}, path::PathBuf, rc, sync::{Arc, LazyLock}, @@ -126,6 +130,29 @@ pub struct Buffer { has_unsaved_edits: Cell<(clock::Global, bool)>, change_bits: Vec>>, _subscriptions: Vec, + tree_sitter_data: Arc>, +} + +#[derive(Debug, Clone)] +pub struct TreeSitterData { + chunks: RowChunks, + brackets_by_chunks: Vec>>>, +} + +const MAX_ROWS_IN_A_CHUNK: u32 = 50; + +impl TreeSitterData { + fn clear(&mut self) { + self.brackets_by_chunks = vec![None; self.chunks.len()]; + } + + fn new(snapshot: text::BufferSnapshot) -> Self { + let chunks = RowChunks::new(snapshot, MAX_ROWS_IN_A_CHUNK); + Self { + brackets_by_chunks: vec![None; chunks.len()], + chunks, + } + } } #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -149,6 +176,7 @@ pub struct BufferSnapshot { remote_selections: TreeMap, language: Option>, non_text_state_update_count: usize, + tree_sitter_data: Arc>, } /// The kind and amount of indentation in a particular line. For now, @@ -819,11 +847,18 @@ impl EditPreview { } #[derive(Clone, Debug, PartialEq, Eq)] -pub struct BracketMatch { - pub open_range: Range, - pub close_range: Range, +pub struct BracketMatch { + pub open_range: Range, + pub close_range: Range, pub newline_only: bool, - pub depth: usize, + pub syntax_layer_depth: usize, + pub color_index: Option, +} + +impl BracketMatch { + pub fn bracket_ranges(self) -> (Range, Range) { + (self.open_range, self.close_range) + } } impl Buffer { @@ -974,8 +1009,10 @@ impl Buffer { let saved_mtime = file.as_ref().and_then(|file| file.disk_state().mtime()); let snapshot = buffer.snapshot(); let syntax_map = Mutex::new(SyntaxMap::new(&snapshot)); + let tree_sitter_data = TreeSitterData::new(snapshot); Self { saved_mtime, + tree_sitter_data: Arc::new(Mutex::new(tree_sitter_data)), saved_version: buffer.version(), preview_version: buffer.version(), reload_task: None, @@ -1025,12 +1062,14 @@ impl Buffer { let language_registry = language_registry.clone(); syntax.reparse(&text, language_registry, language); } + let tree_sitter_data = TreeSitterData::new(text.clone()); BufferSnapshot { text, syntax, file: None, diagnostics: Default::default(), remote_selections: Default::default(), + tree_sitter_data: Arc::new(Mutex::new(tree_sitter_data)), language, non_text_state_update_count: 0, } @@ -1048,9 +1087,11 @@ impl Buffer { ) .snapshot(); let syntax = SyntaxMap::new(&text).snapshot(); + let tree_sitter_data = TreeSitterData::new(text.clone()); BufferSnapshot { text, syntax, + tree_sitter_data: Arc::new(Mutex::new(tree_sitter_data)), file: None, diagnostics: Default::default(), remote_selections: Default::default(), @@ -1075,9 +1116,11 @@ impl Buffer { if let Some(language) = language.clone() { syntax.reparse(&text, language_registry, language); } + let tree_sitter_data = TreeSitterData::new(text.clone()); BufferSnapshot { text, syntax, + tree_sitter_data: Arc::new(Mutex::new(tree_sitter_data)), file: None, diagnostics: Default::default(), remote_selections: Default::default(), @@ -1097,6 +1140,7 @@ impl Buffer { BufferSnapshot { text, syntax, + tree_sitter_data: self.tree_sitter_data.clone(), file: self.file.clone(), remote_selections: self.remote_selections.clone(), diagnostics: self.diagnostics.clone(), @@ -1611,6 +1655,7 @@ impl Buffer { 
self.syntax_map.lock().did_parse(syntax_snapshot); self.request_autoindent(cx); self.parse_status.0.send(ParseStatus::Idle).unwrap(); + self.tree_sitter_data.lock().clear(); cx.emit(BufferEvent::Reparsed); cx.notify(); } @@ -4120,61 +4165,166 @@ impl BufferSnapshot { self.syntax.matches(range, self, query) } - pub fn all_bracket_ranges( + /// Finds all [`RowChunks`] applicable to the given range, then returns all bracket pairs that intersect with those chunks. + /// Hence, may return more bracket pairs than the range contains. + /// + /// Will omit known chunks. + /// The resulting bracket match collections are not ordered. + pub fn fetch_bracket_ranges( &self, range: Range, - ) -> impl Iterator + '_ { - let mut matches = self.syntax.matches(range.clone(), &self.text, |grammar| { - grammar.brackets_config.as_ref().map(|c| &c.query) - }); - let configs = matches - .grammars() - .iter() - .map(|grammar| grammar.brackets_config.as_ref().unwrap()) - .collect::>(); - - iter::from_fn(move || { - while let Some(mat) = matches.peek() { - let mut open = None; - let mut close = None; - let depth = mat.depth; - let config = &configs[mat.grammar_index]; - let pattern = &config.patterns[mat.pattern_index]; - for capture in mat.captures { - if capture.index == config.open_capture_ix { - open = Some(capture.node.byte_range()); - } else if capture.index == config.close_capture_ix { - close = Some(capture.node.byte_range()); - } + known_chunks: Option<(&Global, &HashSet>)>, + ) -> HashMap, Vec>> { + let mut tree_sitter_data = self.latest_tree_sitter_data().clone(); + + let known_chunks = match known_chunks { + Some((known_version, known_chunks)) => { + if !tree_sitter_data + .chunks + .version() + .changed_since(known_version) + { + known_chunks.clone() + } else { + HashSet::default() } + } + None => HashSet::default(), + }; - matches.advance(); + let mut new_bracket_matches = HashMap::default(); + let mut all_bracket_matches = HashMap::default(); - let Some((open_range, close_range)) = open.zip(close) else { - continue; - }; + for chunk in tree_sitter_data + .chunks + .applicable_chunks(&[self.anchor_before(range.start)..self.anchor_after(range.end)]) + { + if known_chunks.contains(&chunk.row_range()) { + continue; + } + let Some(chunk_range) = tree_sitter_data.chunks.chunk_range(chunk) else { + continue; + }; + let chunk_range = chunk_range.to_offset(&tree_sitter_data.chunks.snapshot); + + let bracket_matches = match tree_sitter_data.brackets_by_chunks[chunk.id].take() { + Some(cached_brackets) => cached_brackets, + None => { + let mut bracket_pairs_ends = Vec::new(); + let mut matches = + self.syntax + .matches(chunk_range.clone(), &self.text, |grammar| { + grammar.brackets_config.as_ref().map(|c| &c.query) + }); + let configs = matches + .grammars() + .iter() + .map(|grammar| grammar.brackets_config.as_ref().unwrap()) + .collect::>(); + + let chunk_range = chunk_range.clone(); + let new_matches = iter::from_fn(move || { + while let Some(mat) = matches.peek() { + let mut open = None; + let mut close = None; + let depth = mat.depth; + let config = configs[mat.grammar_index]; + let pattern = &config.patterns[mat.pattern_index]; + for capture in mat.captures { + if capture.index == config.open_capture_ix { + open = Some(capture.node.byte_range()); + } else if capture.index == config.close_capture_ix { + close = Some(capture.node.byte_range()); + } + } - let bracket_range = open_range.start..=close_range.end; - if !bracket_range.overlaps(&range) { - continue; + matches.advance(); + + let Some((open_range, 
close_range)) = open.zip(close) else { + continue; + }; + + let bracket_range = open_range.start..=close_range.end; + if !bracket_range.overlaps(&chunk_range) { + continue; + } + + return Some((open_range, close_range, pattern, depth)); + } + None + }) + .sorted_by_key(|(open_range, _, _, _)| open_range.start) + .map(|(open_range, close_range, pattern, syntax_layer_depth)| { + while let Some(&last_bracket_end) = bracket_pairs_ends.last() { + if last_bracket_end <= open_range.start { + bracket_pairs_ends.pop(); + } else { + break; + } + } + + let bracket_depth = bracket_pairs_ends.len(); + bracket_pairs_ends.push(close_range.end); + + BracketMatch { + open_range, + close_range, + syntax_layer_depth, + newline_only: pattern.newline_only, + color_index: pattern.rainbow_exclude.not().then_some(bracket_depth), + } + }) + .collect::>(); + + new_bracket_matches.insert(chunk.id, new_matches.clone()); + new_matches } + }; + all_bracket_matches.insert(chunk.row_range(), bracket_matches); + } - return Some(BracketMatch { - open_range, - close_range, - newline_only: pattern.newline_only, - depth, - }); + let mut latest_tree_sitter_data = self.latest_tree_sitter_data(); + if latest_tree_sitter_data.chunks.version() == &self.version { + for (chunk_id, new_matches) in new_bracket_matches { + let old_chunks = &mut latest_tree_sitter_data.brackets_by_chunks[chunk_id]; + if old_chunks.is_none() { + *old_chunks = Some(new_matches); + } } - None - }) + } + + all_bracket_matches + } + + fn latest_tree_sitter_data(&self) -> MutexGuard<'_, RawMutex, TreeSitterData> { + let mut tree_sitter_data = self.tree_sitter_data.lock(); + if self + .version + .changed_since(tree_sitter_data.chunks.version()) + { + *tree_sitter_data = TreeSitterData::new(self.text.clone()); + } + tree_sitter_data + } + + pub fn all_bracket_ranges( + &self, + range: Range, + ) -> impl Iterator> { + self.fetch_bracket_ranges(range.clone(), None) + .into_values() + .flatten() + .filter(move |bracket_match| { + let bracket_range = bracket_match.open_range.start..bracket_match.close_range.end; + bracket_range.overlaps(&range) + }) } /// Returns bracket range pairs overlapping or adjacent to `range` pub fn bracket_ranges( &self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator> + '_ { // Find bracket pairs that *inclusively* contain the given range. 
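
The `color_index` assignment in `fetch_bracket_ranges` above derives nesting depth purely from byte offsets: matches are walked in order of their opening offset, and a stack of recorded close offsets is popped whenever the next opening bracket starts at or past the most recent close. A minimal, self-contained sketch of that stack idea, using plain `usize` offsets instead of the buffer and syntax-map machinery, could look like this:

```
/// Assigns a nesting depth to each bracket pair. `pairs` must be sorted by
/// opening offset; each entry is `(open_start, close_end)` in byte offsets.
fn nesting_depths(pairs: &[(usize, usize)]) -> Vec<usize> {
    let mut close_ends: Vec<usize> = Vec::new();
    let mut depths = Vec::with_capacity(pairs.len());
    for &(open_start, close_end) in pairs {
        // Pop every pair that has already closed before this one opens.
        while let Some(&last_end) = close_ends.last() {
            if last_end <= open_start {
                close_ends.pop();
            } else {
                break;
            }
        }
        depths.push(close_ends.len());
        close_ends.push(close_end);
    }
    depths
}

fn main() {
    // "{ ( ) [ ] }": the braces enclose both inner pairs, so the inner
    // pairs are siblings at depth 1 while the braces sit at depth 0.
    let pairs = [(0, 11), (2, 5), (6, 9)];
    assert_eq!(nesting_depths(&pairs), vec![0, 1, 1]);
}
```

Sibling pairs at the same level receive the same depth, which is why, in the test markup above, a function's `()` and `{}` share a single color index.
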
let range = range.start.to_previous_offset(self)..range.end.to_next_offset(self); self.all_bracket_ranges(range) @@ -4320,15 +4470,19 @@ impl BufferSnapshot { pub fn enclosing_bracket_ranges( &self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator> + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); let result: Vec<_> = self.bracket_ranges(range.clone()).collect(); - let max_depth = result.iter().map(|mat| mat.depth).max().unwrap_or(0); + let max_depth = result + .iter() + .map(|mat| mat.syntax_layer_depth) + .max() + .unwrap_or(0); result.into_iter().filter(move |pair| { pair.open_range.start <= range.start && pair.close_range.end >= range.end - && pair.depth == max_depth + && pair.syntax_layer_depth == max_depth }) } @@ -4815,6 +4969,7 @@ impl Clone for BufferSnapshot { remote_selections: self.remote_selections.clone(), diagnostics: self.diagnostics.clone(), language: self.language.clone(), + tree_sitter_data: self.tree_sitter_data.clone(), non_text_state_update_count: self.non_text_state_update_count, } } diff --git a/crates/language/src/buffer/row_chunk.rs b/crates/language/src/buffer/row_chunk.rs new file mode 100644 index 0000000000000000000000000000000000000000..7589c5ac078b9443c3dfd501abb0e6d79cb74581 --- /dev/null +++ b/crates/language/src/buffer/row_chunk.rs @@ -0,0 +1,121 @@ +//! A row chunk is an exclusive range of rows, [`BufferRow`] within a buffer of a certain version, [`Global`]. +//! All but the last chunk are of a constant, given size. + +use std::{ops::Range, sync::Arc}; + +use clock::Global; +use text::{Anchor, OffsetRangeExt as _, Point}; +use util::RangeExt; + +use crate::BufferRow; + +/// An range of rows, exclusive as [`lsp::Range`] and +/// +/// denote. +/// +/// Represents an area in a text editor, adjacent to other ones. +/// Together, chunks form entire document at a particular version [`Global`]. +/// Each chunk is queried for inlays as `(start_row, 0)..(end_exclusive, 0)` via +/// +#[derive(Clone)] +pub struct RowChunks { + pub(crate) snapshot: text::BufferSnapshot, + chunks: Arc<[RowChunk]>, +} + +impl std::fmt::Debug for RowChunks { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RowChunks") + .field("version", self.snapshot.version()) + .field("chunks", &self.chunks) + .finish() + } +} + +impl RowChunks { + pub fn new(snapshot: text::BufferSnapshot, max_rows_per_chunk: u32) -> Self { + let buffer_point_range = (0..snapshot.len()).to_point(&snapshot); + let last_row = buffer_point_range.end.row; + let chunks = (buffer_point_range.start.row..=last_row) + .step_by(max_rows_per_chunk as usize) + .enumerate() + .map(|(id, chunk_start)| RowChunk { + id, + start: chunk_start, + end_exclusive: (chunk_start + max_rows_per_chunk).min(last_row), + }) + .collect::>(); + Self { + snapshot, + chunks: Arc::from(chunks), + } + } + + pub fn version(&self) -> &Global { + self.snapshot.version() + } + + pub fn len(&self) -> usize { + self.chunks.len() + } + + pub fn applicable_chunks( + &self, + ranges: &[Range], + ) -> impl Iterator { + let row_ranges = ranges + .iter() + .map(|range| range.to_point(&self.snapshot)) + // Be lenient and yield multiple chunks if they "touch" the exclusive part of the range. + // This will result in LSP hints [re-]queried for more ranges, but also more hints already visible when scrolling around. 
+ .map(|point_range| point_range.start.row..point_range.end.row + 1) + .collect::>(); + self.chunks + .iter() + .filter(move |chunk| -> bool { + let chunk_range = chunk.row_range().to_inclusive(); + row_ranges + .iter() + .any(|row_range| chunk_range.overlaps(&row_range)) + }) + .copied() + } + + pub fn chunk_range(&self, chunk: RowChunk) -> Option> { + if !self.chunks.contains(&chunk) { + return None; + } + + let start = Point::new(chunk.start, 0); + let end = if self.chunks.last() == Some(&chunk) { + Point::new( + chunk.end_exclusive, + self.snapshot.line_len(chunk.end_exclusive), + ) + } else { + Point::new(chunk.end_exclusive, 0) + }; + Some(self.snapshot.anchor_before(start)..self.snapshot.anchor_after(end)) + } + + pub fn previous_chunk(&self, chunk: RowChunk) -> Option { + if chunk.id == 0 { + None + } else { + self.chunks.get(chunk.id - 1).copied() + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct RowChunk { + pub id: usize, + pub start: BufferRow, + pub end_exclusive: BufferRow, +} + +impl RowChunk { + pub fn row_range(&self) -> Range { + self.start..self.end_exclusive + } +} diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index 14475af5984d75de9e166dd1d8a0379c6a66f3fd..05402abcad478e2eedb17d31853ab0bc2bd3702c 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -1111,9 +1111,10 @@ fn test_text_objects(cx: &mut App) { #[gpui::test] fn test_enclosing_bracket_ranges(cx: &mut App) { - let mut assert = |selection_text, range_markers| { + #[track_caller] + fn assert(selection_text: &'static str, range_markers: Vec<&'static str>, cx: &mut App) { assert_bracket_pairs(selection_text, range_markers, rust_lang(), cx) - }; + } assert( indoc! {" @@ -1130,6 +1131,7 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } «}» let foo = 1;"}], + cx, ); assert( @@ -1156,6 +1158,7 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } let foo = 1;"}, ], + cx, ); assert( @@ -1182,6 +1185,7 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } let foo = 1;"}, ], + cx, ); assert( @@ -1199,6 +1203,7 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } «}» let foo = 1;"}], + cx, ); assert( @@ -1209,7 +1214,8 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } } let fˇoo = 1;"}, - vec![], + Vec::new(), + cx, ); // Regression test: avoid crash when querying at the end of the buffer. 
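
Stripped of the `text::BufferSnapshot` plumbing, the chunking in `row_chunk.rs` above is plain row arithmetic: rows are split into fixed-size chunks, and a range of visible rows selects every chunk it touches. A rough, self-contained sketch of that arithmetic follows; it uses plain integers and a simplified overlap test, not the real `RowChunks` API:

```
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct RowChunk {
    id: usize,
    start: u32,
    end_exclusive: u32,
}

/// Splits rows 0..=last_row into fixed-size chunks, mirroring the
/// construction in `RowChunks::new`.
fn build_chunks(last_row: u32, max_rows_per_chunk: u32) -> Vec<RowChunk> {
    (0..=last_row)
        .step_by(max_rows_per_chunk as usize)
        .enumerate()
        .map(|(id, start)| RowChunk {
            id,
            start,
            end_exclusive: (start + max_rows_per_chunk).min(last_row),
        })
        .collect()
}

/// Simplified analogue of `applicable_chunks`: every chunk that touches the
/// inclusive row range `start_row..=end_row`.
fn applicable_chunks(chunks: &[RowChunk], start_row: u32, end_row: u32) -> Vec<RowChunk> {
    chunks
        .iter()
        .filter(|chunk| chunk.start <= end_row && start_row < chunk.end_exclusive)
        .copied()
        .collect()
}

fn main() {
    // With the patch's 50-row chunk size, a buffer whose last row is 175
    // produces chunks starting at rows 0, 50, 100, and 150.
    let chunks = build_chunks(175, 50);
    assert_eq!(chunks.len(), 4);
    assert_eq!(chunks[3].id, 3);
    // A viewport spanning rows 40..=60 straddles the first two chunks.
    assert_eq!(applicable_chunks(&chunks, 40, 60).len(), 2);
}
```

This per-chunk granularity, set by `MAX_ROWS_IN_A_CHUNK` above, is the unit at which bracket matches are cached in `TreeSitterData` and re-queried after edits.
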
@@ -1221,7 +1227,8 @@ fn test_enclosing_bracket_ranges(cx: &mut App) { } } let foo = 1;ˇ"}, - vec![], + Vec::new(), + cx, ); } diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index 82e0d69cefa94cc0e03a694eea0f29031d8fe156..7ce3986736cc0a1e8b8d21124ebe8c29ddc9214c 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -1323,6 +1323,7 @@ struct BracketsConfig { #[derive(Clone, Debug, Default)] struct BracketsPatternConfig { newline_only: bool, + rainbow_exclude: bool, } pub struct DebugVariablesConfig { @@ -1685,9 +1686,13 @@ impl Language { .map(|ix| { let mut config = BracketsPatternConfig::default(); for setting in query.property_settings(ix) { - if setting.key.as_ref() == "newline.only" { + let setting_key = setting.key.as_ref(); + if setting_key == "newline.only" { config.newline_only = true } + if setting_key == "rainbow.exclude" { + config.rainbow_exclude = true + } } config }) @@ -2640,8 +2645,9 @@ pub fn rust_lang() -> Arc { ("[" @open "]" @close) ("{" @open "}" @close) ("<" @open ">" @close) -("\"" @open "\"" @close) -(closure_parameters "|" @open "|" @close)"#, +(closure_parameters "|" @open "|" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude))"#, )), text_objects: Some(Cow::from( r#" diff --git a/crates/language/src/language_settings.rs b/crates/language/src/language_settings.rs index f0235cb51b5fcbf3cdcc4a4bf46bd12adbfd674e..c5b2dc45e163e55c2427badd8d3f4a24dab64916 100644 --- a/crates/language/src/language_settings.rs +++ b/crates/language/src/language_settings.rs @@ -54,14 +54,14 @@ pub struct AllLanguageSettings { pub(crate) file_types: FxHashMap, GlobSet>, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct WhitespaceMap { pub space: SharedString, pub tab: SharedString, } /// The settings for a particular language. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct LanguageSettings { /// How many columns a tab should occupy. pub tab_size: NonZeroU32, @@ -153,9 +153,11 @@ pub struct LanguageSettings { pub completions: CompletionSettings, /// Preferred debuggers for this language. pub debuggers: Vec, + /// Whether to use tree-sitter bracket queries to detect and colorize the brackets in the editor. + pub colorize_brackets: bool, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct CompletionSettings { /// Controls how words are completed. /// For large documents, not all words may be fetched for completion. @@ -207,7 +209,7 @@ pub struct IndentGuideSettings { pub background_coloring: settings::IndentGuideBackgroundColoring, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct LanguageTaskSettings { /// Extra task variables to set for a particular language. pub variables: HashMap, @@ -225,7 +227,7 @@ pub struct LanguageTaskSettings { /// Allows to enable/disable formatting with Prettier /// and configure default Prettier, used when no project-level Prettier installation is found. /// Prettier formatting is disabled by default. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct PrettierSettings { /// Enables or disables formatting with Prettier for a given language. 
pub allowed: bool, @@ -584,6 +586,7 @@ impl settings::Settings for AllLanguageSettings { }, show_completions_on_input: settings.show_completions_on_input.unwrap(), show_completion_documentation: settings.show_completion_documentation.unwrap(), + colorize_brackets: settings.colorize_brackets.unwrap(), completions: CompletionSettings { words: completions.words.unwrap(), words_min_length: completions.words_min_length.unwrap() as usize, diff --git a/crates/languages/src/bash/brackets.scm b/crates/languages/src/bash/brackets.scm index 5ae73cdda76c3d0775ddb124c7b7343e5f2004de..88a2a1b67f602afb4e7de21a0ec0a523d33e37ee 100644 --- a/crates/languages/src/bash/brackets.scm +++ b/crates/languages/src/bash/brackets.scm @@ -1,12 +1,12 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("`" @open "`" @close) -(("do" @open "done" @close) (#set! newline.only)) -((case_statement ("in" @open "esac" @close)) (#set! newline.only)) -((if_statement (elif_clause ("then" @open)) (else_clause ("else" @close))) (#set! newline.only)) -((if_statement (else_clause ("else" @open)) "fi" @close) (#set! newline.only)) -((if_statement ("then" @open) (elif_clause ("elif" @close))) (#set! newline.only)) -((if_statement ("then" @open) (else_clause ("else" @close))) (#set! newline.only)) -((if_statement ("then" @open "fi" @close)) (#set! newline.only)) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) +(("do" @open "done" @close) (#set! newline.only) (#set! rainbow.exclude)) +((case_statement ("in" @open "esac" @close)) (#set! newline.only) (#set! rainbow.exclude)) +((if_statement (elif_clause ("then" @open)) (else_clause ("else" @close))) (#set! newline.only) (#set! rainbow.exclude)) +((if_statement (else_clause ("else" @open)) "fi" @close) (#set! newline.only) (#set! rainbow.exclude)) +((if_statement ("then" @open) (elif_clause ("elif" @close))) (#set! newline.only) (#set! rainbow.exclude)) +((if_statement ("then" @open) (else_clause ("else" @close))) (#set! newline.only) (#set! rainbow.exclude)) +((if_statement ("then" @open "fi" @close)) (#set! newline.only) (#set! rainbow.exclude)) diff --git a/crates/languages/src/c/brackets.scm b/crates/languages/src/c/brackets.scm index 2f886c424022875118951191b381a203593183ad..2149bddc6c9a7ec04667d03da75580b676e12a28 100644 --- a/crates/languages/src/c/brackets.scm +++ b/crates/languages/src/c/brackets.scm @@ -1,5 +1,5 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/cpp/brackets.scm b/crates/languages/src/cpp/brackets.scm index 2f886c424022875118951191b381a203593183ad..2149bddc6c9a7ec04667d03da75580b676e12a28 100644 --- a/crates/languages/src/cpp/brackets.scm +++ b/crates/languages/src/cpp/brackets.scm @@ -1,5 +1,5 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! 
rainbow.exclude)) diff --git a/crates/languages/src/css/brackets.scm b/crates/languages/src/css/brackets.scm index 2f886c424022875118951191b381a203593183ad..2149bddc6c9a7ec04667d03da75580b676e12a28 100644 --- a/crates/languages/src/css/brackets.scm +++ b/crates/languages/src/css/brackets.scm @@ -1,5 +1,5 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/go/brackets.scm b/crates/languages/src/go/brackets.scm index 0ced37682d4f08f705cd9a3665682b307e84af73..05fb1d7f9219889d652bbdbb294ca45e72cc9c05 100644 --- a/crates/languages/src/go/brackets.scm +++ b/crates/languages/src/go/brackets.scm @@ -1,6 +1,6 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("`" @open "`" @close) -((rune_literal) @open @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) +((rune_literal) @open @close (#set! rainbow.exclude)) diff --git a/crates/languages/src/javascript/brackets.scm b/crates/languages/src/javascript/brackets.scm index 66bf14f137794b8a620b203c102ca3e3390fea20..a16a6432692ec7b9e0e3d24151cb814fc11bd83d 100644 --- a/crates/languages/src/javascript/brackets.scm +++ b/crates/languages/src/javascript/brackets.scm @@ -4,6 +4,6 @@ ("<" @open ">" @close) ("<" @open "/>" @close) ("" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) -("`" @open "`" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/json/brackets.scm b/crates/languages/src/json/brackets.scm index 9e8c9cd93c30f7697ead2161295b4583ffdfb93b..cd5cdf328b3a04730d56ec0cb06c3802fe07c978 100644 --- a/crates/languages/src/json/brackets.scm +++ b/crates/languages/src/json/brackets.scm @@ -1,3 +1,3 @@ ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/jsonc/brackets.scm b/crates/languages/src/jsonc/brackets.scm index 9e8c9cd93c30f7697ead2161295b4583ffdfb93b..cd5cdf328b3a04730d56ec0cb06c3802fe07c978 100644 --- a/crates/languages/src/jsonc/brackets.scm +++ b/crates/languages/src/jsonc/brackets.scm @@ -1,3 +1,3 @@ ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/markdown/brackets.scm b/crates/languages/src/markdown/brackets.scm index 23f3e4d3d0155e1c68aa5c3f0ada4764fb693049..172a2e7f723e3a170d80d19fa2f78fa334258105 100644 --- a/crates/languages/src/markdown/brackets.scm +++ b/crates/languages/src/markdown/brackets.scm @@ -1,7 +1,7 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("`" @open "`" @close) -("'" @open "'" @close) -((fenced_code_block_delimiter) @open (fenced_code_block_delimiter) @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) +(((fenced_code_block_delimiter) @open (fenced_code_block_delimiter) @close) (#set! 
rainbow.exclude)) diff --git a/crates/languages/src/python/brackets.scm b/crates/languages/src/python/brackets.scm index be6803358701ae6b43eb2aecb59a5a34f76d71b6..9e5b59788fc88fcb0830325417de50a9414828b8 100644 --- a/crates/languages/src/python/brackets.scm +++ b/crates/languages/src/python/brackets.scm @@ -1,4 +1,4 @@ ("(" @open ")" @close) ("[" @open "]" @close) ("{" @open "}" @close) -((string_start) @open (string_end) @close) +(((string_start) @open (string_end) @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/rust/brackets.scm b/crates/languages/src/rust/brackets.scm index 0bf19b8085fb035b28c8b19dc07ff0df191c9c26..7a35adb10021c83b8e08e888187ab133c5313ad9 100644 --- a/crates/languages/src/rust/brackets.scm +++ b/crates/languages/src/rust/brackets.scm @@ -2,6 +2,6 @@ ("[" @open "]" @close) ("{" @open "}" @close) ("<" @open ">" @close) -("\"" @open "\"" @close) (closure_parameters "|" @open "|" @close) -("'" @open "'" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/tsx/brackets.scm b/crates/languages/src/tsx/brackets.scm index 359ae87aa2d45a2241cb8f1579de14b312465baf..0e98b78036b4b19fd63d812fa92d2416788764f4 100644 --- a/crates/languages/src/tsx/brackets.scm +++ b/crates/languages/src/tsx/brackets.scm @@ -4,8 +4,8 @@ ("<" @open ">" @close) ("<" @open "/>" @close) ("" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) -("`" @open "`" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) -((jsx_element (jsx_opening_element) @open (jsx_closing_element) @close) (#set! newline.only)) +((jsx_element (jsx_opening_element) @open (jsx_closing_element) @close) (#set! newline.only) (#set! rainbow.exclude)) diff --git a/crates/languages/src/typescript/brackets.scm b/crates/languages/src/typescript/brackets.scm index 48afefeef07e9950cf6c8eba40b79def50c09c71..635233849142d8951edeca02ca0c79253aa91e80 100644 --- a/crates/languages/src/typescript/brackets.scm +++ b/crates/languages/src/typescript/brackets.scm @@ -2,6 +2,6 @@ ("[" @open "]" @close) ("{" @open "}" @close) ("<" @open ">" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) -("`" @open "`" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! rainbow.exclude)) +(("`" @open "`" @close) (#set! rainbow.exclude)) diff --git a/crates/languages/src/yaml/brackets.scm b/crates/languages/src/yaml/brackets.scm index 59cf45205f6819ac1e5076ba9b9d952c9b447b08..0cfc5072d4eeda19d75ce943481670a3ee8938b0 100644 --- a/crates/languages/src/yaml/brackets.scm +++ b/crates/languages/src/yaml/brackets.scm @@ -1,4 +1,4 @@ ("[" @open "]" @close) ("{" @open "}" @close) -("\"" @open "\"" @close) -("'" @open "'" @close) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +(("'" @open "'" @close) (#set! 
rainbow.exclude)) diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 1ade63b5d17b9558c6686bc4f95bcd9193938f7d..7ecc09255b17ebbf2e68e21ab4c8d88f93d08d75 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -18,10 +18,10 @@ use collections::{BTreeMap, Bound, HashMap, HashSet}; use gpui::{App, Context, Entity, EntityId, EventEmitter}; use itertools::Itertools; use language::{ - AutoindentMode, Buffer, BufferChunks, BufferRow, BufferSnapshot, Capability, CharClassifier, - CharKind, CharScopeContext, Chunk, CursorShape, DiagnosticEntryRef, DiskState, File, - IndentGuideSettings, IndentSize, Language, LanguageScope, OffsetRangeExt, OffsetUtf16, Outline, - OutlineItem, Point, PointUtf16, Selection, TextDimension, TextObject, ToOffset as _, + AutoindentMode, BracketMatch, Buffer, BufferChunks, BufferRow, BufferSnapshot, Capability, + CharClassifier, CharKind, CharScopeContext, Chunk, CursorShape, DiagnosticEntryRef, DiskState, + File, IndentGuideSettings, IndentSize, Language, LanguageScope, OffsetRangeExt, OffsetUtf16, + Outline, OutlineItem, Point, PointUtf16, Selection, TextDimension, TextObject, ToOffset as _, ToPoint as _, TransactionId, TreeSitterOptions, Unclipped, language_settings::{LanguageSettings, language_settings}, }; @@ -5400,7 +5400,6 @@ impl MultiBufferSnapshot { { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut excerpt = self.excerpt_containing(range.clone())?; - Some( excerpt .buffer() @@ -5410,15 +5409,17 @@ impl MultiBufferSnapshot { BufferOffset(pair.open_range.start)..BufferOffset(pair.open_range.end); let close_range = BufferOffset(pair.close_range.start)..BufferOffset(pair.close_range.end); - if excerpt.contains_buffer_range(open_range.start..close_range.end) { - Some(( - excerpt.map_range_from_buffer(open_range), - excerpt.map_range_from_buffer(close_range), - )) - } else { - None - } - }), + excerpt + .contains_buffer_range(open_range.start..close_range.end) + .then(|| BracketMatch { + open_range: excerpt.map_range_from_buffer(open_range), + close_range: excerpt.map_range_from_buffer(close_range), + color_index: pair.color_index, + newline_only: pair.newline_only, + syntax_layer_depth: pair.syntax_layer_depth, + }) + }) + .map(BracketMatch::bracket_ranges), ) } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 4c6ed0b7c535504de7ea63f8196e35553bd7d829..17f558d72d4854bb99676472100c442ad164f0a5 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -29,7 +29,6 @@ use crate::{ lsp_command::{self, *}, lsp_store::{ self, - inlay_hint_cache::BufferChunk, log_store::{GlobalLogStore, LanguageServerKind}, }, manifest_tree::{ @@ -73,6 +72,7 @@ use language::{ serialize_lsp_edit, serialize_version, }, range_from_lsp, range_to_lsp, + row_chunk::RowChunk, }; use lsp::{ AdapterServerCapabilities, CodeActionKind, CompletionContext, CompletionOptions, @@ -117,7 +117,7 @@ use std::{ time::{Duration, Instant}, }; use sum_tree::Dimensions; -use text::{Anchor, BufferId, LineEnding, OffsetRangeExt, Point, ToPoint as _}; +use text::{Anchor, BufferId, LineEnding, OffsetRangeExt, ToPoint as _}; use util::{ ConnectionResult, ResultExt as _, debug_panic, defer, maybe, merge_json_value_into, @@ -3590,7 +3590,7 @@ pub struct BufferLspData { code_lens: Option, inlay_hints: BufferInlayHints, lsp_requests: HashMap>>, - chunk_lsp_requests: HashMap>, + chunk_lsp_requests: HashMap>, } #[derive(Debug, Clone, Copy, 
PartialEq, Eq, Hash)] @@ -6706,7 +6706,7 @@ impl LspStore { self.latest_lsp_data(buffer, cx) .inlay_hints .applicable_chunks(ranges) - .map(|chunk| chunk.start..chunk.end) + .map(|chunk| chunk.row_range()) .collect() } @@ -6729,7 +6729,6 @@ impl LspStore { known_chunks: Option<(clock::Global, HashSet>)>, cx: &mut Context, ) -> HashMap, Task>> { - let buffer_snapshot = buffer.read(cx).snapshot(); let next_hint_id = self.next_hint_id.clone(); let lsp_data = self.latest_lsp_data(&buffer, cx); let query_version = lsp_data.buffer_version.clone(); @@ -6758,14 +6757,12 @@ impl LspStore { let mut ranges_to_query = None; let applicable_chunks = existing_inlay_hints .applicable_chunks(ranges.as_slice()) - .filter(|chunk| !known_chunks.contains(&(chunk.start..chunk.end))) + .filter(|chunk| !known_chunks.contains(&chunk.row_range())) .collect::>(); if applicable_chunks.is_empty() { return HashMap::default(); } - let last_chunk_number = existing_inlay_hints.buffer_chunks_len() - 1; - for row_chunk in applicable_chunks { match ( existing_inlay_hints @@ -6779,16 +6776,12 @@ impl LspStore { .cloned(), ) { (None, None) => { - let end = if last_chunk_number == row_chunk.id { - Point::new(row_chunk.end, buffer_snapshot.line_len(row_chunk.end)) - } else { - Point::new(row_chunk.end, 0) + let Some(chunk_range) = existing_inlay_hints.chunk_range(row_chunk) else { + continue; }; - ranges_to_query.get_or_insert_with(Vec::new).push(( - row_chunk, - buffer_snapshot.anchor_before(Point::new(row_chunk.start, 0)) - ..buffer_snapshot.anchor_after(end), - )); + ranges_to_query + .get_or_insert_with(Vec::new) + .push((row_chunk, chunk_range)); } (None, Some(fetched_hints)) => hint_fetch_tasks.push((row_chunk, fetched_hints)), (Some(cached_hints), None) => { @@ -6796,7 +6789,7 @@ impl LspStore { if for_server.is_none_or(|for_server| for_server == server_id) { cached_inlay_hints .get_or_insert_with(HashMap::default) - .entry(row_chunk.start..row_chunk.end) + .entry(row_chunk.row_range()) .or_insert_with(HashMap::default) .entry(server_id) .or_insert_with(Vec::new) @@ -6810,7 +6803,7 @@ impl LspStore { if for_server.is_none_or(|for_server| for_server == server_id) { cached_inlay_hints .get_or_insert_with(HashMap::default) - .entry(row_chunk.start..row_chunk.end) + .entry(row_chunk.row_range()) .or_insert_with(HashMap::default) .entry(server_id) .or_insert_with(Vec::new) @@ -6896,7 +6889,7 @@ impl LspStore { .map(|(row_chunk, hints)| (row_chunk, Task::ready(Ok(hints)))) .chain(hint_fetch_tasks.into_iter().map(|(chunk, hints_fetch)| { ( - chunk.start..chunk.end, + chunk.row_range(), cx.spawn(async move |_, _| { hints_fetch.await.map_err(|e| { if e.error_code() != ErrorCode::Internal { diff --git a/crates/project/src/lsp_store/inlay_hint_cache.rs b/crates/project/src/lsp_store/inlay_hint_cache.rs index cca9d66e8c330f1a4c723a84c4fb418b976f7c03..804552b52cee9f31799e12f3c42e0614291eeab9 100644 --- a/crates/project/src/lsp_store/inlay_hint_cache.rs +++ b/crates/project/src/lsp_store/inlay_hint_cache.rs @@ -3,10 +3,12 @@ use std::{collections::hash_map, ops::Range, sync::Arc}; use collections::HashMap; use futures::future::Shared; use gpui::{App, Entity, Task}; -use language::{Buffer, BufferRow, BufferSnapshot}; +use language::{ + Buffer, + row_chunk::{RowChunk, RowChunks}, +}; use lsp::LanguageServerId; -use text::OffsetRangeExt; -use util::RangeExt as _; +use text::Anchor; use crate::{InlayHint, InlayId}; @@ -46,8 +48,7 @@ impl InvalidationStrategy { } pub struct BufferInlayHints { - snapshot: BufferSnapshot, - buffer_chunks: 
Vec, + chunks: RowChunks, hints_by_chunks: Vec>, fetches_by_chunks: Vec>, hints_by_id: HashMap, @@ -62,25 +63,10 @@ struct HintForId { position: usize, } -/// An range of rows, exclusive as [`lsp::Range`] and -/// -/// denote. -/// -/// Represents an area in a text editor, adjacent to other ones. -/// Together, chunks form entire document at a particular version [`clock::Global`]. -/// Each chunk is queried for inlays as `(start_row, 0)..(end_exclusive, 0)` via -/// -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct BufferChunk { - pub id: usize, - pub start: BufferRow, - pub end: BufferRow, -} - impl std::fmt::Debug for BufferInlayHints { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BufferInlayHints") - .field("buffer_chunks", &self.buffer_chunks) + .field("buffer_chunks", &self.chunks) .field("hints_by_chunks", &self.hints_by_chunks) .field("fetches_by_chunks", &self.fetches_by_chunks) .field("hints_by_id", &self.hints_by_id) @@ -92,58 +78,30 @@ const MAX_ROWS_IN_A_CHUNK: u32 = 50; impl BufferInlayHints { pub fn new(buffer: &Entity, cx: &mut App) -> Self { - let buffer = buffer.read(cx); - let snapshot = buffer.snapshot(); - let buffer_point_range = (0..buffer.len()).to_point(&snapshot); - let last_row = buffer_point_range.end.row; - let buffer_chunks = (buffer_point_range.start.row..=last_row) - .step_by(MAX_ROWS_IN_A_CHUNK as usize) - .enumerate() - .map(|(id, chunk_start)| BufferChunk { - id, - start: chunk_start, - end: (chunk_start + MAX_ROWS_IN_A_CHUNK).min(last_row), - }) - .collect::>(); + let chunks = RowChunks::new(buffer.read(cx).text_snapshot(), MAX_ROWS_IN_A_CHUNK); Self { - hints_by_chunks: vec![None; buffer_chunks.len()], - fetches_by_chunks: vec![None; buffer_chunks.len()], + hints_by_chunks: vec![None; chunks.len()], + fetches_by_chunks: vec![None; chunks.len()], latest_invalidation_requests: HashMap::default(), hints_by_id: HashMap::default(), hint_resolves: HashMap::default(), - snapshot, - buffer_chunks, + chunks, } } pub fn applicable_chunks( &self, ranges: &[Range], - ) -> impl Iterator { - let row_ranges = ranges - .iter() - .map(|range| range.to_point(&self.snapshot)) - // Be lenient and yield multiple chunks if they "touch" the exclusive part of the range. - // This will result in LSP hints [re-]queried for more ranges, but also more hints already visible when scrolling around. 
- .map(|point_range| point_range.start.row..point_range.end.row + 1) - .collect::>(); - self.buffer_chunks - .iter() - .filter(move |chunk| { - let chunk_range = chunk.start..=chunk.end; - row_ranges - .iter() - .any(|row_range| chunk_range.overlaps(&row_range)) - }) - .copied() + ) -> impl Iterator { + self.chunks.applicable_chunks(ranges) } - pub fn cached_hints(&mut self, chunk: &BufferChunk) -> Option<&CacheInlayHints> { + pub fn cached_hints(&mut self, chunk: &RowChunk) -> Option<&CacheInlayHints> { self.hints_by_chunks[chunk.id].as_ref() } - pub fn fetched_hints(&mut self, chunk: &BufferChunk) -> &mut Option { + pub fn fetched_hints(&mut self, chunk: &RowChunk) -> &mut Option { &mut self.fetches_by_chunks[chunk.id] } @@ -177,8 +135,8 @@ impl BufferInlayHints { } pub fn clear(&mut self) { - self.hints_by_chunks = vec![None; self.buffer_chunks.len()]; - self.fetches_by_chunks = vec![None; self.buffer_chunks.len()]; + self.hints_by_chunks = vec![None; self.chunks.len()]; + self.fetches_by_chunks = vec![None; self.chunks.len()]; self.hints_by_id.clear(); self.hint_resolves.clear(); self.latest_invalidation_requests.clear(); @@ -186,7 +144,7 @@ impl BufferInlayHints { pub fn insert_new_hints( &mut self, - chunk: BufferChunk, + chunk: RowChunk, server_id: LanguageServerId, new_hints: Vec<(InlayId, InlayHint)>, ) { @@ -225,10 +183,6 @@ impl BufferInlayHints { Some(hint) } - pub fn buffer_chunks_len(&self) -> usize { - self.buffer_chunks.len() - } - pub(crate) fn invalidate_for_server_refresh( &mut self, for_server: LanguageServerId, @@ -263,7 +217,7 @@ impl BufferInlayHints { true } - pub(crate) fn invalidate_for_chunk(&mut self, chunk: BufferChunk) { + pub(crate) fn invalidate_for_chunk(&mut self, chunk: RowChunk) { self.fetches_by_chunks[chunk.id] = None; if let Some(hints_by_server) = self.hints_by_chunks[chunk.id].take() { for (hint_id, _) in hints_by_server.into_values().flatten() { @@ -272,4 +226,8 @@ impl BufferInlayHints { } } } + + pub fn chunk_range(&self, chunk: RowChunk) -> Option> { + self.chunks.chunk_range(chunk) + } } diff --git a/crates/remote_server/Cargo.toml b/crates/remote_server/Cargo.toml index e4c7932973741015066efbcd07d0d0c71212acb0..ba64f7aec9ee0a3759c2943e42b0f19742d905c1 100644 --- a/crates/remote_server/Cargo.toml +++ b/crates/remote_server/Cargo.toml @@ -92,6 +92,7 @@ node_runtime = { workspace = true, features = ["test-support"] } pretty_assertions.workspace = true project = { workspace = true, features = ["test-support"] } remote = { workspace = true, features = ["test-support"] } +theme = { workspace = true, features = ["test-support"] } language_model = { workspace = true, features = ["test-support"] } lsp = { workspace = true, features = ["test-support"] } prompt_store.workspace = true diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index 1cb63b8cd01e201c5fb2a212a2643cfdf481642a..4b931edb9e63443c6cf23756e737e015c291741c 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -1499,6 +1499,14 @@ async fn test_remote_git_diffs_when_recv_update_repository_delay( cx: &mut TestAppContext, server_cx: &mut TestAppContext, ) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + theme::init(theme::LoadThemes::JustBase, cx); + release_channel::init(SemanticVersion::default(), cx); + editor::init(cx); + }); + use editor::Editor; use gpui::VisualContext; let text_2 = " diff --git 
a/crates/search/Cargo.toml b/crates/search/Cargo.toml index 7d8efbb11a5f1461da5b63152e2277a38ad272b4..291257e74258359356203955cec3a0a6d065b3fa 100644 --- a/crates/search/Cargo.toml +++ b/crates/search/Cargo.toml @@ -49,5 +49,6 @@ editor = { workspace = true, features = ["test-support"] } gpui = { workspace = true, features = ["test-support"] } language = { workspace = true, features = ["test-support"] } lsp.workspace = true +pretty_assertions.workspace = true unindent.workspace = true workspace = { workspace = true, features = ["test-support"] } diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 68e3dca1ce07a1773856a3cacecf553d4c88f7e3..85656f179946393c3b15d8d57921ffa3847365cf 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -2449,6 +2449,7 @@ pub mod tests { use editor::{DisplayPoint, display_map::DisplayRow}; use gpui::{Action, TestAppContext, VisualTestContext, WindowHandle}; use language::{FakeLspAdapter, rust_lang}; + use pretty_assertions::assert_eq; use project::FakeFs; use serde_json::json; use settings::{InlayHintSettingsContent, SettingsStore}; @@ -2507,10 +2508,6 @@ pub mod tests { DisplayPoint::new(DisplayRow(2), 37)..DisplayPoint::new(DisplayRow(2), 40), match_background_color ), - ( - DisplayPoint::new(DisplayRow(5), 6)..DisplayPoint::new(DisplayRow(5), 9), - selection_background_color - ), ( DisplayPoint::new(DisplayRow(5), 6)..DisplayPoint::new(DisplayRow(5), 9), match_background_color diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index ed70116862bbda6af22d4027a406535ae0c19d67..11eb87817d12517ecb2ef333eacc60d1c2f48330 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -412,6 +412,10 @@ pub struct LanguageSettingsContent { /// /// Default: [] pub debuggers: Option>, + /// Whether to use tree-sitter bracket queries to detect and colorize the brackets in the editor. + /// + /// Default: false + pub colorize_brackets: Option, } /// Controls how whitespace should be displayedin the editor. 
diff --git a/crates/settings/src/settings_content/theme.rs b/crates/settings/src/settings_content/theme.rs index 8b87cc15196b7a562a794eb4a1effeb5cb102ef6..4cd1313633a1c32eaf2c0066b23ac3bd3e5bbe79 100644 --- a/crates/settings/src/settings_content/theme.rs +++ b/crates/settings/src/settings_content/theme.rs @@ -370,7 +370,7 @@ pub struct ThemeStyleContent { } #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] -pub struct AccentContent(pub Option); +pub struct AccentContent(pub Option); #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct PlayerColorContent { diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 5644cd7a1a8463a1f072d838a0a1b16bd7ad991b..f5df817dcd0f4ae02bea3934eaaaf042a02bdbc1 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -450,6 +450,7 @@ impl VsCodeSettings { prettier: None, remove_trailing_whitespace_on_save: self.read_bool("editor.trimAutoWhitespace"), show_completion_documentation: None, + colorize_brackets: self.read_bool("editor.bracketPairColorization.enabled"), show_completions_on_input: self.read_bool("editor.suggestOnTriggerCharacters"), show_edit_predictions: self.read_bool("editor.inlineSuggest.enabled"), show_whitespaces: self.read_enum("editor.renderWhitespace", |s| { diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index d5368e278914044196f55aaf852e2efefed07117..76874c2ad9594cd9955cbe759c458fe9cf007c2e 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -6991,6 +6991,25 @@ fn language_settings_data() -> Vec { metadata: None, files: USER | PROJECT, }), + SettingsPageItem::SettingItem(SettingItem { + title: "Colorize brackets", + description: "Whether to colorize brackets in the editor.", + field: Box::new(SettingField { + json_path: Some("languages.$(language).colorize_brackets"), + pick: |settings_content| { + language_settings_field(settings_content, |language| { + language.colorize_brackets.as_ref() + }) + }, + write: |settings_content, value| { + language_settings_field_mut(settings_content, value, |language, value| { + language.colorize_brackets = value; + }) + }, + }), + metadata: None, + files: USER | PROJECT, + }), ]); if current_language().is_none() { diff --git a/docs/src/ai/external-agents.md b/docs/src/ai/external-agents.md index d396a9b72bf1b51e4fd3994805c7b0d5268a0cd0..696a60709cc3b6120af0c63fc01a79bd58134402 100644 --- a/docs/src/ai/external-agents.md +++ b/docs/src/ai/external-agents.md @@ -196,6 +196,7 @@ You can also add agents through your `settings.json`, by specifying certain fiel { "agent_servers": { "My Custom Agent": { + "type": "custom", "command": "node", "args": ["~/projects/agent/index.js", "--acp"], "env": {} diff --git a/docs/src/configuring-languages.md b/docs/src/configuring-languages.md index e04d63f5d16a83c84b933d9f59db901c276b7a6d..7b3456986e2766d134f3c1f15f94632feb067fb0 100644 --- a/docs/src/configuring-languages.md +++ b/docs/src/configuring-languages.md @@ -58,6 +58,7 @@ You can customize a wide range of settings for each language, including: - [`soft_wrap`](./configuring-zed.md#soft-wrap): How to wrap long lines of code - [`show_completions_on_input`](./configuring-zed.md#show-completions-on-input): Whether or not to show completions as you type - [`show_completion_documentation`](./configuring-zed.md#show-completion-documentation): Whether to display inline and alongside documentation 
for items in the completions menu +- [`colorize_brackets`](./configuring-zed.md#colorize-brackets): Whether to use tree-sitter bracket queries to detect and colorize the brackets in the editor (also known as "rainbow brackets") These settings allow you to maintain specific coding styles across different languages and projects. diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 13d42a5c4c99f3a4aba3709d829f289e9e9826f8..a3e24506c46054940dc13a52a4ba82cb233c6604 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4687,6 +4687,18 @@ See the [debugger page](./debugger.md) for more information about debugging supp }, ``` +## Colorize Brackets + +- Description: Whether to use tree-sitter bracket queries to detect and colorize the brackets in the editor (also known as "rainbow brackets"). +- Setting: `colorize_brackets` +- Default: `false` + +**Options** + +`boolean` values + +The colors that are used for different indentation levels are defined in the theme (theme key: `accents`). They can be customized by using theme overrides. + ## Unnecessary Code Fade - Description: How much to fade out unused code. diff --git a/docs/src/extensions/languages.md b/docs/src/extensions/languages.md index 7eb6a355dbfcafaa01ca885789d41e28c474d2f4..f3ffcd71ba8122956636cd1d228f885383cb83e6 100644 --- a/docs/src/extensions/languages.md +++ b/docs/src/extensions/languages.md @@ -154,6 +154,14 @@ This query identifies opening and closing brackets, braces, and quotation marks. | @open | Captures opening brackets, braces, and quotes | | @close | Captures closing brackets, braces, and quotes | +Zed uses these to highlight matching brackets: painting each bracket pair with a different color ("rainbow brackets") and highlighting the brackets if the cursor is inside the bracket pair. + +To opt out of rainbow brackets colorization, add the following to the corresponding `brackets.scm` entry: + +```scheme +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +``` + ### Code outline/structure The `outline.scm` file defines the structure for the code outline. diff --git a/docs/src/themes.md b/docs/src/themes.md index 0bbea57ebfd7c9d55031c2ca9ff31b67b360bcdd..615cd2c7b38a734af071ef373b75350231f4a5fb 100644 --- a/docs/src/themes.md +++ b/docs/src/themes.md @@ -51,7 +51,15 @@ For example, add the following to your `settings.json` if you wish to override t "comment.doc": { "font_style": "italic" } - } + }, + "accents": [ + "#ff0000", + "#ff7f00", + "#ffff00", + "#00ff00", + "#0000ff", + "#8b00ff" + ] } } } diff --git a/docs/src/visual-customization.md b/docs/src/visual-customization.md index 3e4ff377f3cd54676f0b32f3f4853c9be6de706d..e5185719279dde488c40573d94fd842c06860f4d 100644 --- a/docs/src/visual-customization.md +++ b/docs/src/visual-customization.md @@ -374,6 +374,8 @@ TBD: Centered layout related settings "lsp_document_colors": "inlay", // none, inlay, border, background // When to show the scrollbar in the completion menu. 
"completion_menu_scrollbar": "never", // auto, system, always, never + // Turn on colorization of brackets in editors (configurable per language) + "colorize_brackets": true, ``` ### Edit Predictions {#editor-ai} diff --git a/extensions/html/languages/html/brackets.scm b/extensions/html/languages/html/brackets.scm index f9be89a2639d54c08fc2e4e9ce3f6ca3f93ba403..53d6a6bb234e28db21581906ea42e6384f872c9a 100644 --- a/extensions/html/languages/html/brackets.scm +++ b/extensions/html/languages/html/brackets.scm @@ -1,5 +1,5 @@ ("<" @open "/>" @close) ("" @close) ("<" @open ">" @close) -("\"" @open "\"" @close) -((element (start_tag) @open (end_tag) @close) (#set! newline.only)) +(("\"" @open "\"" @close) (#set! rainbow.exclude)) +((element (start_tag) @open (end_tag) @close) (#set! newline.only) (#set! rainbow.exclude)) From 28ef7455f00554424f2acee0af00b58ac6695ffc Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 20 Nov 2025 20:54:47 +0100 Subject: [PATCH 0261/1030] gpui: #[inline] some trivial functions (#43189) These appear in a lot of stacktraces (especially on windows) despite them being plain forwarding calls. Also removes some intermediate calls within gpui that will only turn into more unnecessary compiler work. Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/gpui/src/app/async_context.rs | 43 ++++++++++++++++------------ crates/gpui/src/app/context.rs | 10 +++++++ crates/gpui/src/app/entity_map.rs | 34 ++++++++++++++++++++++ crates/text/src/text.rs | 21 +++++++++++++- 4 files changed, 88 insertions(+), 20 deletions(-) diff --git a/crates/gpui/src/app/async_context.rs b/crates/gpui/src/app/async_context.rs index 381541d4b11377b988dd30e03155855c7ba25aed..f5dcd30ae943954cbc042e1ce02edad39370a04a 100644 --- a/crates/gpui/src/app/async_context.rs +++ b/crates/gpui/src/app/async_context.rs @@ -296,8 +296,8 @@ impl AsyncWindowContext { /// A convenience method for [`Window::on_next_frame`]. pub fn on_next_frame(&mut self, f: impl FnOnce(&mut Window, &mut App) + 'static) { - self.window - .update(self, |_, window, _| window.on_next_frame(f)) + self.app + .update_window(self.window, |_, window, _| window.on_next_frame(f)) .ok(); } @@ -306,8 +306,8 @@ impl AsyncWindowContext { &mut self, read: impl FnOnce(&G, &Window, &App) -> R, ) -> Result { - self.window - .update(self, |_, window, cx| read(cx.global(), window, cx)) + self.app + .update_window(self.window, |_, window, cx| read(cx.global(), window, cx)) } /// A convenience method for [`App::update_global`](BorrowAppContext::update_global). 
@@ -319,7 +319,7 @@ impl AsyncWindowContext { where G: Global, { - self.window.update(self, |_, window, cx| { + self.app.update_window(self.window, |_, window, cx| { cx.update_global(|global, cx| update(global, window, cx)) }) } @@ -350,8 +350,8 @@ impl AsyncWindowContext { where T: Clone + Into, { - self.window - .update(self, |_, window, cx| { + self.app + .update_window(self.window, |_, window, cx| { window.prompt(level, message, detail, answers, cx) }) .unwrap_or_else(|_| oneshot::channel().1) @@ -365,11 +365,13 @@ impl AppContext for AsyncWindowContext { where T: 'static, { - self.window.update(self, |_, _, cx| cx.new(build_entity)) + self.app + .update_window(self.window, |_, _, cx| cx.new(build_entity)) } fn reserve_entity(&mut self) -> Result> { - self.window.update(self, |_, _, cx| cx.reserve_entity()) + self.app + .update_window(self.window, |_, _, cx| cx.reserve_entity()) } fn insert_entity( @@ -377,8 +379,9 @@ impl AppContext for AsyncWindowContext { reservation: Reservation, build_entity: impl FnOnce(&mut Context) -> T, ) -> Self::Result> { - self.window - .update(self, |_, _, cx| cx.insert_entity(reservation, build_entity)) + self.app.update_window(self.window, |_, _, cx| { + cx.insert_entity(reservation, build_entity) + }) } fn update_entity( @@ -386,8 +389,8 @@ impl AppContext for AsyncWindowContext { handle: &Entity, update: impl FnOnce(&mut T, &mut Context) -> R, ) -> Result { - self.window - .update(self, |_, _, cx| cx.update_entity(handle, update)) + self.app + .update_window(self.window, |_, _, cx| cx.update_entity(handle, update)) } fn as_mut<'a, T>(&'a mut self, _: &Entity) -> Self::Result> @@ -452,8 +455,9 @@ impl VisualContext for AsyncWindowContext { &mut self, build_entity: impl FnOnce(&mut Window, &mut Context) -> T, ) -> Self::Result> { - self.window - .update(self, |_, window, cx| cx.new(|cx| build_entity(window, cx))) + self.app.update_window(self.window, |_, window, cx| { + cx.new(|cx| build_entity(window, cx)) + }) } fn update_window_entity( @@ -461,7 +465,7 @@ impl VisualContext for AsyncWindowContext { view: &Entity, update: impl FnOnce(&mut T, &mut Window, &mut Context) -> R, ) -> Self::Result { - self.window.update(self, |_, window, cx| { + self.app.update_window(self.window, |_, window, cx| { view.update(cx, |entity, cx| update(entity, window, cx)) }) } @@ -473,15 +477,16 @@ impl VisualContext for AsyncWindowContext { where V: 'static + Render, { - self.window - .update(self, |_, window, cx| window.replace_root(cx, build_view)) + self.app.update_window(self.window, |_, window, cx| { + window.replace_root(cx, build_view) + }) } fn focus(&mut self, view: &Entity) -> Self::Result<()> where V: Focusable, { - self.window.update(self, |_, window, cx| { + self.app.update_window(self.window, |_, window, cx| { view.read(cx).focus_handle(cx).focus(window); }) } diff --git a/crates/gpui/src/app/context.rs b/crates/gpui/src/app/context.rs index 41d6cac82b7c179040d61ddfd22b003c143a5fb9..65bb5521e32bb6fcfac2bcd95009949499589df1 100644 --- a/crates/gpui/src/app/context.rs +++ b/crates/gpui/src/app/context.rs @@ -736,14 +736,17 @@ impl Context<'_, T> { impl AppContext for Context<'_, T> { type Result = U; + #[inline] fn new(&mut self, build_entity: impl FnOnce(&mut Context) -> U) -> Entity { self.app.new(build_entity) } + #[inline] fn reserve_entity(&mut self) -> Reservation { self.app.reserve_entity() } + #[inline] fn insert_entity( &mut self, reservation: Reservation, @@ -752,6 +755,7 @@ impl AppContext for Context<'_, T> { self.app.insert_entity(reservation, 
build_entity) } + #[inline] fn update_entity( &mut self, handle: &Entity, @@ -760,6 +764,7 @@ impl AppContext for Context<'_, T> { self.app.update_entity(handle, update) } + #[inline] fn as_mut<'a, E>(&'a mut self, handle: &Entity) -> Self::Result> where E: 'static, @@ -767,6 +772,7 @@ impl AppContext for Context<'_, T> { self.app.as_mut(handle) } + #[inline] fn read_entity( &self, handle: &Entity, @@ -778,6 +784,7 @@ impl AppContext for Context<'_, T> { self.app.read_entity(handle, read) } + #[inline] fn update_window(&mut self, window: AnyWindowHandle, update: F) -> Result where F: FnOnce(AnyView, &mut Window, &mut App) -> R, @@ -785,6 +792,7 @@ impl AppContext for Context<'_, T> { self.app.update_window(window, update) } + #[inline] fn read_window( &self, window: &WindowHandle, @@ -796,6 +804,7 @@ impl AppContext for Context<'_, T> { self.app.read_window(window, read) } + #[inline] fn background_spawn(&self, future: impl Future + Send + 'static) -> Task where R: Send + 'static, @@ -803,6 +812,7 @@ impl AppContext for Context<'_, T> { self.app.background_executor.spawn(future) } + #[inline] fn read_global(&self, callback: impl FnOnce(&G, &App) -> R) -> Self::Result where G: Global, diff --git a/crates/gpui/src/app/entity_map.rs b/crates/gpui/src/app/entity_map.rs index bea98cb06a5f80fc8141a52bc47f48e8734b40c9..81dbfdbf5733eed92a77fc2dc18fb971bd9bd4a7 100644 --- a/crates/gpui/src/app/entity_map.rs +++ b/crates/gpui/src/app/entity_map.rs @@ -244,11 +244,13 @@ impl AnyEntity { } /// Returns the id associated with this entity. + #[inline] pub fn entity_id(&self) -> EntityId { self.entity_id } /// Returns the [TypeId] associated with this entity. + #[inline] pub fn entity_type(&self) -> TypeId { self.entity_type } @@ -332,18 +334,21 @@ impl Drop for AnyEntity { } impl From> for AnyEntity { + #[inline] fn from(entity: Entity) -> Self { entity.any_entity } } impl Hash for AnyEntity { + #[inline] fn hash(&self, state: &mut H) { self.entity_id.hash(state); } } impl PartialEq for AnyEntity { + #[inline] fn eq(&self, other: &Self) -> bool { self.entity_id == other.entity_id } @@ -352,12 +357,14 @@ impl PartialEq for AnyEntity { impl Eq for AnyEntity {} impl Ord for AnyEntity { + #[inline] fn cmp(&self, other: &Self) -> Ordering { self.entity_id.cmp(&other.entity_id) } } impl PartialOrd for AnyEntity { + #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } @@ -384,6 +391,7 @@ pub struct Entity { impl Sealed for Entity {} impl Entity { + #[inline] fn new(id: EntityId, entity_map: Weak>) -> Self where T: 'static, @@ -395,11 +403,13 @@ impl Entity { } /// Get the entity ID associated with this entity + #[inline] pub fn entity_id(&self) -> EntityId { self.any_entity.entity_id } /// Downgrade this entity pointer to a non-retaining weak pointer + #[inline] pub fn downgrade(&self) -> WeakEntity { WeakEntity { any_entity: self.any_entity.downgrade(), @@ -408,16 +418,19 @@ impl Entity { } /// Convert this into a dynamically typed entity. + #[inline] pub fn into_any(self) -> AnyEntity { self.any_entity } /// Grab a reference to this entity from the context. + #[inline] pub fn read<'a>(&self, cx: &'a App) -> &'a T { cx.entities.read(self) } /// Read the entity referenced by this handle with the given function. + #[inline] pub fn read_with( &self, cx: &C, @@ -427,6 +440,7 @@ impl Entity { } /// Updates the entity referenced by this handle with the given function. 
+ #[inline] pub fn update( &self, cx: &mut C, @@ -436,6 +450,7 @@ impl Entity { } /// Updates the entity referenced by this handle with the given function. + #[inline] pub fn as_mut<'a, C: AppContext>(&self, cx: &'a mut C) -> C::Result> { cx.as_mut(self) } @@ -451,6 +466,7 @@ impl Entity { /// Updates the entity referenced by this handle with the given function if /// the referenced entity still exists, within a visual context that has a window. /// Returns an error if the entity has been released. + #[inline] pub fn update_in( &self, cx: &mut C, @@ -461,6 +477,7 @@ impl Entity { } impl Clone for Entity { + #[inline] fn clone(&self) -> Self { Self { any_entity: self.any_entity.clone(), @@ -479,12 +496,14 @@ impl std::fmt::Debug for Entity { } impl Hash for Entity { + #[inline] fn hash(&self, state: &mut H) { self.any_entity.hash(state); } } impl PartialEq for Entity { + #[inline] fn eq(&self, other: &Self) -> bool { self.any_entity == other.any_entity } @@ -493,18 +512,21 @@ impl PartialEq for Entity { impl Eq for Entity {} impl PartialEq> for Entity { + #[inline] fn eq(&self, other: &WeakEntity) -> bool { self.any_entity.entity_id() == other.entity_id() } } impl Ord for Entity { + #[inline] fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.entity_id().cmp(&other.entity_id()) } } impl PartialOrd for Entity { + #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } @@ -520,6 +542,7 @@ pub struct AnyWeakEntity { impl AnyWeakEntity { /// Get the entity ID associated with this weak reference. + #[inline] pub fn entity_id(&self) -> EntityId { self.entity_id } @@ -618,18 +641,21 @@ impl std::fmt::Debug for AnyWeakEntity { } impl From> for AnyWeakEntity { + #[inline] fn from(entity: WeakEntity) -> Self { entity.any_entity } } impl Hash for AnyWeakEntity { + #[inline] fn hash(&self, state: &mut H) { self.entity_id.hash(state); } } impl PartialEq for AnyWeakEntity { + #[inline] fn eq(&self, other: &Self) -> bool { self.entity_id == other.entity_id } @@ -638,12 +664,14 @@ impl PartialEq for AnyWeakEntity { impl Eq for AnyWeakEntity {} impl Ord for AnyWeakEntity { + #[inline] fn cmp(&self, other: &Self) -> Ordering { self.entity_id.cmp(&other.entity_id) } } impl PartialOrd for AnyWeakEntity { + #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } @@ -740,6 +768,7 @@ impl WeakEntity { } /// Create a new weak entity that can never be upgraded. 
+ #[inline] pub fn new_invalid() -> Self { Self { any_entity: AnyWeakEntity::new_invalid(), @@ -749,12 +778,14 @@ impl WeakEntity { } impl Hash for WeakEntity { + #[inline] fn hash(&self, state: &mut H) { self.any_entity.hash(state); } } impl PartialEq for WeakEntity { + #[inline] fn eq(&self, other: &Self) -> bool { self.any_entity == other.any_entity } @@ -763,18 +794,21 @@ impl PartialEq for WeakEntity { impl Eq for WeakEntity {} impl PartialEq> for WeakEntity { + #[inline] fn eq(&self, other: &Entity) -> bool { self.entity_id() == other.any_entity.entity_id() } } impl Ord for WeakEntity { + #[inline] fn cmp(&self, other: &Self) -> Ordering { self.entity_id().cmp(&other.entity_id()) } } impl PartialOrd for WeakEntity { + #[inline] fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index e476103879d700dc6121882055bc7e2cabf3ed5a..fe9fe26f1bcc89b66753703e03f0a8bfeec628bd 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -3129,13 +3129,13 @@ pub trait ToOffset { } impl ToOffset for Point { + #[inline] fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { snapshot.point_to_offset(*self) } } impl ToOffset for usize { - #[track_caller] fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { assert!( *self <= snapshot.len(), @@ -3148,24 +3148,28 @@ impl ToOffset for usize { } impl ToOffset for Anchor { + #[inline] fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { snapshot.summary_for_anchor(self) } } impl ToOffset for &T { + #[inline] fn to_offset(&self, content: &BufferSnapshot) -> usize { (*self).to_offset(content) } } impl ToOffset for PointUtf16 { + #[inline] fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { snapshot.point_utf16_to_offset(*self) } } impl ToOffset for Unclipped { + #[inline] fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { snapshot.unclipped_point_utf16_to_offset(*self) } @@ -3176,24 +3180,28 @@ pub trait ToPoint { } impl ToPoint for Anchor { + #[inline] fn to_point(&self, snapshot: &BufferSnapshot) -> Point { snapshot.summary_for_anchor(self) } } impl ToPoint for usize { + #[inline] fn to_point(&self, snapshot: &BufferSnapshot) -> Point { snapshot.offset_to_point(*self) } } impl ToPoint for Point { + #[inline] fn to_point(&self, _: &BufferSnapshot) -> Point { *self } } impl ToPoint for Unclipped { + #[inline] fn to_point(&self, snapshot: &BufferSnapshot) -> Point { snapshot.unclipped_point_utf16_to_point(*self) } @@ -3204,24 +3212,28 @@ pub trait ToPointUtf16 { } impl ToPointUtf16 for Anchor { + #[inline] fn to_point_utf16(&self, snapshot: &BufferSnapshot) -> PointUtf16 { snapshot.summary_for_anchor(self) } } impl ToPointUtf16 for usize { + #[inline] fn to_point_utf16(&self, snapshot: &BufferSnapshot) -> PointUtf16 { snapshot.offset_to_point_utf16(*self) } } impl ToPointUtf16 for PointUtf16 { + #[inline] fn to_point_utf16(&self, _: &BufferSnapshot) -> PointUtf16 { *self } } impl ToPointUtf16 for Point { + #[inline] fn to_point_utf16(&self, snapshot: &BufferSnapshot) -> PointUtf16 { snapshot.point_to_point_utf16(*self) } @@ -3232,18 +3244,21 @@ pub trait ToOffsetUtf16 { } impl ToOffsetUtf16 for Anchor { + #[inline] fn to_offset_utf16(&self, snapshot: &BufferSnapshot) -> OffsetUtf16 { snapshot.summary_for_anchor(self) } } impl ToOffsetUtf16 for usize { + #[inline] fn to_offset_utf16(&self, snapshot: &BufferSnapshot) -> OffsetUtf16 { snapshot.offset_to_offset_utf16(*self) } } impl ToOffsetUtf16 for OffsetUtf16 { + #[inline] fn 
to_offset_utf16(&self, _snapshot: &BufferSnapshot) -> OffsetUtf16 { *self } @@ -3254,24 +3269,28 @@ pub trait FromAnchor { } impl FromAnchor for Anchor { + #[inline] fn from_anchor(anchor: &Anchor, _snapshot: &BufferSnapshot) -> Self { *anchor } } impl FromAnchor for Point { + #[inline] fn from_anchor(anchor: &Anchor, snapshot: &BufferSnapshot) -> Self { snapshot.summary_for_anchor(anchor) } } impl FromAnchor for PointUtf16 { + #[inline] fn from_anchor(anchor: &Anchor, snapshot: &BufferSnapshot) -> Self { snapshot.summary_for_anchor(anchor) } } impl FromAnchor for usize { + #[inline] fn from_anchor(anchor: &Anchor, snapshot: &BufferSnapshot) -> Self { snapshot.summary_for_anchor(anchor) } From 68994488121abadb9991ad45911e67613d30ea99 Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Thu, 20 Nov 2025 15:16:09 -0500 Subject: [PATCH 0262/1030] Remove `prompt-caching-2024-07-31` beta header for Anthropic AI (#43185) Closes #42715 Release Notes: - Remove `prompt-caching-2024-07-31` beta header for Anthropic AI Co-authored-by: Cole Miller --- crates/anthropic/src/anthropic.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/crates/anthropic/src/anthropic.rs b/crates/anthropic/src/anthropic.rs index cd2077cdeb1370a9753df83f9b239ef776bab149..fd665e07dd7515198ee6d65cbb2b0ee69e75dce5 100644 --- a/crates/anthropic/src/anthropic.rs +++ b/crates/anthropic/src/anthropic.rs @@ -393,13 +393,8 @@ impl Model { } } - pub const DEFAULT_BETA_HEADERS: &[&str] = &["prompt-caching-2024-07-31"]; - pub fn beta_headers(&self) -> String { - let mut headers = Self::DEFAULT_BETA_HEADERS - .iter() - .map(|header| header.to_string()) - .collect::>(); + let mut headers = vec![]; match self { Self::Claude3_7Sonnet | Self::Claude3_7SonnetThinking => { From b41eb3cdaf492f990834a7ca78b5195eb169f68e Mon Sep 17 00:00:00 2001 From: Xiaobo Liu Date: Fri, 21 Nov 2025 04:34:17 +0800 Subject: [PATCH 0263/1030] windows: Fix maximized window size when DPI scale changes (#40053) The WM_DPICHANGED suggested RECT is calculated for non-maximized windows. When a maximized window's DPI changes, we now query the monitor's work area directly to ensure the window correctly fills the entire screen. For non-maximized windows, the original behavior using the system-suggested RECT is preserved. 
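Roughly, the handler now branches like this (a simplified sketch condensed from the change below; `resize_window_to` is a hypothetical stand-in for the `SetWindowPos` call, and error handling/logging is omitted):

```rust
// Maximized windows: ignore the suggested RECT and fill the monitor's work area.
if is_maximized {
    let monitor = unsafe { MonitorFromWindow(handle, MONITOR_DEFAULTTONEAREST) };
    let mut monitor_info: MONITORINFO = unsafe { std::mem::zeroed() };
    monitor_info.cbSize = std::mem::size_of::<MONITORINFO>() as u32;
    if unsafe { GetMonitorInfoW(monitor, &mut monitor_info) }.as_bool() {
        // Size the window to the work area at the new DPI, then report the new size.
        resize_window_to(handle, monitor_info.rcWork);
    }
} else {
    // Non-maximized windows keep the previous behavior and apply the
    // system-suggested RECT delivered with WM_DPICHANGED.
    let suggested = unsafe { &*(lparam.0 as *const RECT) };
    resize_window_to(handle, *suggested);
}
```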
Release Notes: - windows: Fixed maximized window size when DPI scale changes Signed-off-by: Xiaobo Liu --- crates/gpui/src/platform/windows/events.rs | 75 +++++++++++++++------- 1 file changed, 51 insertions(+), 24 deletions(-) diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index cc39d3bcedd370fb4dc2fdb4c1d8304ad6b99b79..7b1d0efd1821c0651cc2f092f51041c0de84cc14 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -740,31 +740,58 @@ impl WindowsWindowInner { lock.border_offset.update(handle).log_err(); drop(lock); - let rect = unsafe { &*(lparam.0 as *const RECT) }; - let width = rect.right - rect.left; - let height = rect.bottom - rect.top; - // this will emit `WM_SIZE` and `WM_MOVE` right here - // even before this function returns - // the new size is handled in `WM_SIZE` - unsafe { - SetWindowPos( - handle, - None, - rect.left, - rect.top, - width, - height, - SWP_NOZORDER | SWP_NOACTIVATE, - ) - .context("unable to set window position after dpi has changed") - .log_err(); - } - - // When maximized, SetWindowPos doesn't send WM_SIZE, so we need to manually - // update the size and call the resize callback if is_maximized { - let device_size = size(DevicePixels(width), DevicePixels(height)); - self.handle_size_change(device_size, new_scale_factor, true); + // Get the monitor and its work area at the new DPI + let monitor = unsafe { MonitorFromWindow(handle, MONITOR_DEFAULTTONEAREST) }; + let mut monitor_info: MONITORINFO = unsafe { std::mem::zeroed() }; + monitor_info.cbSize = std::mem::size_of::() as u32; + if unsafe { GetMonitorInfoW(monitor, &mut monitor_info) }.as_bool() { + let work_area = monitor_info.rcWork; + let width = work_area.right - work_area.left; + let height = work_area.bottom - work_area.top; + + // Update the window size to match the new monitor work area + // This will trigger WM_SIZE which will handle the size change + unsafe { + SetWindowPos( + handle, + None, + work_area.left, + work_area.top, + width, + height, + SWP_NOZORDER | SWP_NOACTIVATE | SWP_FRAMECHANGED, + ) + .context("unable to set maximized window position after dpi has changed") + .log_err(); + } + + // SetWindowPos may not send WM_SIZE for maximized windows in some cases, + // so we manually update the size to ensure proper rendering + let device_size = size(DevicePixels(width), DevicePixels(height)); + self.handle_size_change(device_size, new_scale_factor, true); + } + } else { + // For non-maximized windows, use the suggested RECT from the system + let rect = unsafe { &*(lparam.0 as *const RECT) }; + let width = rect.right - rect.left; + let height = rect.bottom - rect.top; + // this will emit `WM_SIZE` and `WM_MOVE` right here + // even before this function returns + // the new size is handled in `WM_SIZE` + unsafe { + SetWindowPos( + handle, + None, + rect.left, + rect.top, + width, + height, + SWP_NOZORDER | SWP_NOACTIVATE, + ) + .context("unable to set window position after dpi has changed") + .log_err(); + } } Some(0) From a332b791899d90bc6bfaca3d153cb37254139036 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:18:08 -0300 Subject: [PATCH 0264/1030] ui: Add `DiffStat` component (#43192) Release Notes: - N/A --- crates/ui/src/components.rs | 2 + crates/ui/src/components/diff_stat.rs | 85 +++++++++++++++++++++++++ crates/ui/src/components/thread_item.rs | 44 ++++++++++--- 3 files changed, 123 insertions(+), 8 deletions(-) 
create mode 100644 crates/ui/src/components/diff_stat.rs diff --git a/crates/ui/src/components.rs b/crates/ui/src/components.rs index 712a07d3bdddb1d0c2300f6d256fa5634b16e764..b6318f18c973ca5ca7eefa1ba39517ef65cad6df 100644 --- a/crates/ui/src/components.rs +++ b/crates/ui/src/components.rs @@ -6,6 +6,7 @@ mod chip; mod content_group; mod context_menu; mod data_table; +mod diff_stat; mod disclosure; mod divider; mod dropdown_menu; @@ -50,6 +51,7 @@ pub use chip::*; pub use content_group::*; pub use context_menu::*; pub use data_table::*; +pub use diff_stat::*; pub use disclosure::*; pub use divider::*; pub use dropdown_menu::*; diff --git a/crates/ui/src/components/diff_stat.rs b/crates/ui/src/components/diff_stat.rs new file mode 100644 index 0000000000000000000000000000000000000000..2606963555c682d9d949d19d57471e02c53351d7 --- /dev/null +++ b/crates/ui/src/components/diff_stat.rs @@ -0,0 +1,85 @@ +use crate::prelude::*; + +#[derive(IntoElement, RegisterComponent)] +pub struct DiffStat { + id: ElementId, + added: usize, + removed: usize, +} + +impl DiffStat { + pub fn new(id: impl Into, added: usize, removed: usize) -> Self { + Self { + id: id.into(), + added, + removed, + } + } +} + +impl RenderOnce for DiffStat { + fn render(self, _: &mut Window, _cx: &mut App) -> impl IntoElement { + h_flex() + .id(self.id) + .gap_1() + .child( + h_flex() + .gap_0p5() + .child( + Icon::new(IconName::Plus) + .size(IconSize::XSmall) + .color(Color::Success), + ) + .child( + Label::new(self.added.to_string()) + .color(Color::Success) + .size(LabelSize::Small), + ), + ) + .child( + h_flex() + .gap_0p5() + .child( + Icon::new(IconName::Dash) + .size(IconSize::XSmall) + .color(Color::Error), + ) + .child( + Label::new(self.removed.to_string()) + .color(Color::Error) + .size(LabelSize::Small), + ), + ) + } +} + +impl Component for DiffStat { + fn scope() -> ComponentScope { + ComponentScope::VersionControl + } + + fn preview(_window: &mut Window, cx: &mut App) -> Option { + let container = || { + h_flex() + .py_4() + .w_72() + .justify_center() + .border_1() + .border_color(cx.theme().colors().border_variant) + .bg(cx.theme().colors().panel_background) + }; + + let diff_stat_example = vec![single_example( + "Default", + container() + .child(DiffStat::new("id", 1, 2)) + .into_any_element(), + )]; + + Some( + example_group(diff_stat_example) + .vertical() + .into_any_element(), + ) + } +} diff --git a/crates/ui/src/components/thread_item.rs b/crates/ui/src/components/thread_item.rs index 0cb6a42ad11d16eddd3a2afb3d8a9dc9475b6165..dcf159f502e2d3c67576f9c6eefaff10585992eb 100644 --- a/crates/ui/src/components/thread_item.rs +++ b/crates/ui/src/components/thread_item.rs @@ -1,4 +1,4 @@ -use crate::{Chip, Indicator, SpinnerLabel, prelude::*}; +use crate::{Chip, DiffStat, Indicator, SpinnerLabel, prelude::*}; use gpui::{ClickEvent, SharedString}; #[derive(IntoElement, RegisterComponent)] @@ -10,7 +10,8 @@ pub struct ThreadItem { running: bool, generation_done: bool, selected: bool, - has_changes: bool, + added: Option, + removed: Option, worktree: Option, on_click: Option>, } @@ -25,7 +26,8 @@ impl ThreadItem { running: false, generation_done: false, selected: false, - has_changes: false, + added: None, + removed: None, worktree: None, on_click: None, } @@ -56,8 +58,13 @@ impl ThreadItem { self } - pub fn has_changes(mut self, has_changes: bool) -> Self { - self.has_changes = has_changes; + pub fn added(mut self, added: usize) -> Self { + self.added = Some(added); + self + } + + pub fn removed(mut self, removed: 
usize) -> Self { + self.removed = Some(removed); self } @@ -90,8 +97,10 @@ impl RenderOnce for ThreadItem { ) }; + let has_no_changes = self.added.is_none() && self.removed.is_none(); + v_flex() - .id(self.id) + .id(self.id.clone()) .cursor_pointer() .p_2() .when(self.selected, |this| { @@ -123,12 +132,19 @@ impl RenderOnce for ThreadItem { .color(Color::Muted) .alpha(0.5), ) - .when(!self.has_changes, |this| { + .when(has_no_changes, |this| { this.child( Label::new("No Changes") .size(LabelSize::Small) .color(Color::Muted), ) + }) + .when(self.added.is_some() || self.removed.is_some(), |this| { + this.child(DiffStat::new( + self.id, + self.added.unwrap_or(0), + self.removed.unwrap_or(0), + )) }), ) .when_some(self.on_click, |this, on_click| this.on_click(on_click)) @@ -192,11 +208,23 @@ impl Component for ThreadItem { ) .into_any_element(), ), + single_example( + "With Changes", + container() + .child( + ThreadItem::new("ti-5", "Managing user and project settings interactions") + .icon(IconName::AiClaude) + .timestamp("7:37 PM") + .added(10) + .removed(3), + ) + .into_any_element(), + ), single_example( "Selected Item", container() .child( - ThreadItem::new("ti-5", "Refine textarea interaction behavior") + ThreadItem::new("ti-6", "Refine textarea interaction behavior") .icon(IconName::AiGemini) .timestamp("3:00 PM") .selected(true), From 6adb0f4d037237486450cfe184755fbf5354a845 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:18:30 -0300 Subject: [PATCH 0265/1030] agent_ui: Improve UI for the feedback container (#43195) Improves a previously weird wrapping and simplify the UI by adding the meta text inside the tooltip itself. https://github.com/user-attachments/assets/9896d4a2-6954-4e61-9b77-864db8f2542a Release Notes: - N/A --- crates/agent_ui/src/acp/thread_view.rs | 45 +++++++++++++------------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 784fef0b9f3862047c868dddf88a8fcd217c278d..f3565356cf5b0501d529ab89e35b955689ef040a 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -5024,15 +5024,12 @@ impl AcpThreadView { })); let mut container = h_flex() - .id("thread-controls-container") - .group("thread-controls-container") .w_full() .py_2() .px_5() .gap_px() .opacity(0.6) - .hover(|style| style.opacity(1.)) - .flex_wrap() + .hover(|s| s.opacity(1.)) .justify_end(); if AgentSettings::get_global(cx).enable_feedback @@ -5042,23 +5039,13 @@ impl AcpThreadView { { let feedback = self.thread_feedback.feedback; - container = container - .child( - div().visible_on_hover("thread-controls-container").child( - Label::new(match feedback { - Some(ThreadFeedback::Positive) => "Thanks for your feedback!", - Some(ThreadFeedback::Negative) => { - "We appreciate your feedback and will use it to improve." - } - None => { - "Rating the thread sends all of your current conversation to the Zed team." 
- } - }) - .color(Color::Muted) - .size(LabelSize::XSmall) - .truncate(), - ), + let tooltip_meta = || { + SharedString::new( + "Rating the thread sends all of your current conversation to the Zed team.", ) + }; + + container = container .child( IconButton::new("feedback-thumbs-up", IconName::ThumbsUp) .shape(ui::IconButtonShape::Square) @@ -5067,7 +5054,12 @@ impl AcpThreadView { Some(ThreadFeedback::Positive) => Color::Accent, _ => Color::Ignored, }) - .tooltip(Tooltip::text("Helpful Response")) + .tooltip(move |window, cx| match feedback { + Some(ThreadFeedback::Positive) => { + Tooltip::text("Thanks for your feedback!")(window, cx) + } + _ => Tooltip::with_meta("Helpful Response", None, tooltip_meta(), cx), + }) .on_click(cx.listener(move |this, _, window, cx| { this.handle_feedback_click(ThreadFeedback::Positive, window, cx); })), @@ -5080,7 +5072,16 @@ impl AcpThreadView { Some(ThreadFeedback::Negative) => Color::Accent, _ => Color::Ignored, }) - .tooltip(Tooltip::text("Not Helpful")) + .tooltip(move |window, cx| match feedback { + Some(ThreadFeedback::Negative) => { + Tooltip::text( + "We appreciate your feedback and will use it to improve in the future.", + )(window, cx) + } + _ => { + Tooltip::with_meta("Not Helpful Response", None, tooltip_meta(), cx) + } + }) .on_click(cx.listener(move |this, _, window, cx| { this.handle_feedback_click(ThreadFeedback::Negative, window, cx); })), From 9667d7882a7da1a0f5d3aa7b12c5ecec8a34250e Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:28:11 -0300 Subject: [PATCH 0266/1030] extensions_ui: Improve error message when extensions fail to load (#43197) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Screenshot 2025-11-20 at 6  12@2x Release Notes: - extensions UI: Improved the feedback message for when extensions are not being displayed due to a fetch error caused by lack of connection. 
--- crates/extensions_ui/src/extensions_ui.rs | 51 +++++++++++++++++------ 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/crates/extensions_ui/src/extensions_ui.rs b/crates/extensions_ui/src/extensions_ui.rs index 657a39f09e6465042f5f1a5d113bdfa6e61c43ce..e35c90b6104b44bd6dbf3fe86aeaf84f122c04ca 100644 --- a/crates/extensions_ui/src/extensions_ui.rs +++ b/crates/extensions_ui/src/extensions_ui.rs @@ -293,6 +293,7 @@ pub struct ExtensionsPage { workspace: WeakEntity, list: UniformListScrollHandle, is_fetching_extensions: bool, + fetch_failed: bool, filter: ExtensionFilter, remote_extension_entries: Vec, dev_extension_entries: Vec>, @@ -353,6 +354,7 @@ impl ExtensionsPage { workspace: workspace.weak_handle(), list: scroll_handle, is_fetching_extensions: false, + fetch_failed: false, filter: ExtensionFilter::All, dev_extension_entries: Vec::new(), filtered_remote_extension_indices: Vec::new(), @@ -479,6 +481,7 @@ impl ExtensionsPage { cx: &mut Context, ) { self.is_fetching_extensions = true; + self.fetch_failed = false; cx.notify(); let extension_store = ExtensionStore::global(cx); @@ -534,17 +537,31 @@ impl ExtensionsPage { }; let fetch_result = remote_extensions.await; - this.update(cx, |this, cx| { + + let result = this.update(cx, |this, cx| { cx.notify(); this.dev_extension_entries = dev_extensions; this.is_fetching_extensions = false; - this.remote_extension_entries = fetch_result?; - this.filter_extension_entries(cx); - if let Some(callback) = on_complete { - callback(this, cx); + + match fetch_result { + Ok(extensions) => { + this.fetch_failed = false; + this.remote_extension_entries = extensions; + this.filter_extension_entries(cx); + if let Some(callback) = on_complete { + callback(this, cx); + } + Ok(()) + } + Err(err) => { + this.fetch_failed = true; + this.filter_extension_entries(cx); + Err(err) + } } - anyhow::Ok(()) - })? + }); + + result? }) .detach_and_log_err(cx); } @@ -1277,7 +1294,9 @@ impl ExtensionsPage { let has_search = self.search_query(cx).is_some(); let message = if self.is_fetching_extensions { - "Loading extensions..." + "Loading extensions…" + } else if self.fetch_failed { + "Failed to load extensions. Please check your connection and try again." } else { match self.filter { ExtensionFilter::All => { @@ -1304,7 +1323,17 @@ impl ExtensionsPage { } }; - Label::new(message) + h_flex() + .py_4() + .gap_1p5() + .when(self.fetch_failed, |this| { + this.child( + Icon::new(IconName::Warning) + .size(IconSize::Small) + .color(Color::Warning), + ) + }) + .child(Label::new(message)) } fn update_settings( @@ -1673,9 +1702,7 @@ impl Render for ExtensionsPage { } if count == 0 { - this.py_4() - .child(self.render_empty_state(cx)) - .into_any_element() + this.child(self.render_empty_state(cx)).into_any_element() } else { let scroll_handle = self.list.clone(); this.child( From 361fcc5c90a4d7d7cf9e7475e1cf7e0f1369f12a Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 20 Nov 2025 18:57:22 -0300 Subject: [PATCH 0267/1030] Make search field in panels be at the top (#43200) This mostly affects the collab and outline panels for now. It has always been a bit weird that the search field was at the bottom of the panel, even more so because in both cases, you can _arrow down_ to start navigating the list with your keyboard. So, with the search at the bottom, you'd arrow down and get to the top of the list, which was very strange. 
Now, with it at the top, it not only looks better but it is also more generally consistent with other surfaces in the app, like pickers, the settings UI, rules library, etc. Most search fields are always at the top. image Release Notes: - N/A --- crates/collab_ui/src/collab_panel.rs | 28 +++-- crates/outline_panel/src/outline_panel.rs | 131 +++++++++++----------- 2 files changed, 81 insertions(+), 78 deletions(-) diff --git a/crates/collab_ui/src/collab_panel.rs b/crates/collab_ui/src/collab_panel.rs index 8f2e959b17293af66279793e73fefceea413ca49..618348307f1270e180faf4b1d061b9a942e39fa5 100644 --- a/crates/collab_ui/src/collab_panel.rs +++ b/crates/collab_ui/src/collab_panel.rs @@ -32,7 +32,7 @@ use std::{mem, sync::Arc}; use theme::{ActiveTheme, ThemeSettings}; use ui::{ Avatar, AvatarAvailabilityIndicator, Button, Color, ContextMenu, Facepile, HighlightedLabel, - Icon, IconButton, IconName, IconSize, Indicator, Label, ListHeader, ListItem, Tooltip, + Icon, IconButton, IconName, IconSize, Indicator, Label, ListHeader, ListItem, Tab, Tooltip, prelude::*, tooltip_container, }; use util::{ResultExt, TryFutureExt, maybe}; @@ -287,7 +287,7 @@ impl CollabPanel { cx.new(|cx| { let filter_editor = cx.new(|cx| { let mut editor = Editor::single_line(window, cx); - editor.set_placeholder_text("Filter...", window, cx); + editor.set_placeholder_text("Search channels…", window, cx); editor }); @@ -2412,6 +2412,21 @@ impl CollabPanel { }); v_flex() .size_full() + .gap_1() + .child( + h_flex() + .p_2() + .h(Tab::container_height(cx)) + .gap_1p5() + .border_b_1() + .border_color(cx.theme().colors().border) + .child( + Icon::new(IconName::MagnifyingGlass) + .size(IconSize::Small) + .color(Color::Muted), + ) + .child(self.render_filter_input(&self.filter_editor, cx)), + ) .child( list( self.list_state.clone(), @@ -2419,15 +2434,6 @@ impl CollabPanel { ) .size_full(), ) - .child( - v_flex() - .child(div().mx_2().border_primary(cx).border_t_1()) - .child( - v_flex() - .p_2() - .child(self.render_filter_input(&self.filter_editor, cx)), - ), - ) } fn render_filter_input( diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index f44c6438ebd454d343a8ac49b0f6db11c11b469d..36cd9d076bb428f37c898a142fa7f3d1da887918 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -46,10 +46,8 @@ use settings::{Settings, SettingsStore}; use smol::channel; use theme::{SyntaxTheme, ThemeSettings}; use ui::{ - ActiveTheme, ButtonCommon, Clickable, Color, ContextMenu, DynamicSpacing, FluentBuilder, - HighlightedLabel, Icon, IconButton, IconButtonShape, IconName, IconSize, IndentGuideColors, - IndentGuideLayout, Label, LabelCommon, ListItem, ScrollAxes, Scrollbars, StyledExt, - StyledTypography, Toggleable, Tooltip, WithScrollbar, h_flex, v_flex, + ContextMenu, FluentBuilder, HighlightedLabel, IconButton, IconButtonShape, IndentGuideColors, + IndentGuideLayout, ListItem, ScrollAxes, Scrollbars, Tab, Tooltip, WithScrollbar, prelude::*, }; use util::{RangeExt, ResultExt, TryFutureExt, debug_panic, rel_path::RelPath}; use workspace::{ @@ -714,7 +712,7 @@ impl OutlinePanel { cx.new(|cx| { let filter_editor = cx.new(|cx| { let mut editor = Editor::single_line(window, cx); - editor.set_placeholder_text("Filter...", window, cx); + editor.set_placeholder_text("Search buffer symbols…", window, cx); editor }); let filter_update_subscription = cx.subscribe_in( @@ -4553,6 +4551,7 @@ impl OutlinePanel { v_flex() .id("empty-outline-state") + 
.gap_0p5() .flex_1() .justify_center() .size_full() @@ -4560,25 +4559,28 @@ impl OutlinePanel { panel .child(h_flex().justify_center().child(Label::new(header))) .when_some(query.clone(), |panel, query| { - panel.child(h_flex().justify_center().child(Label::new(query))) + panel.child( + h_flex() + .px_0p5() + .justify_center() + .bg(cx.theme().colors().element_selected.opacity(0.2)) + .child(Label::new(query)), + ) }) - .child( - h_flex() - .pt(DynamicSpacing::Base04.rems(cx)) - .justify_center() - .child({ - let keystroke = - match self.position(window, cx) { - DockPosition::Left => window - .keystroke_text_for(&workspace::ToggleLeftDock), - DockPosition::Bottom => window - .keystroke_text_for(&workspace::ToggleBottomDock), - DockPosition::Right => window - .keystroke_text_for(&workspace::ToggleRightDock), - }; - Label::new(format!("Toggle this panel with {keystroke}")) - }), - ) + .child(h_flex().justify_center().child({ + let keystroke = match self.position(window, cx) { + DockPosition::Left => { + window.keystroke_text_for(&workspace::ToggleLeftDock) + } + DockPosition::Bottom => { + window.keystroke_text_for(&workspace::ToggleBottomDock) + } + DockPosition::Right => { + window.keystroke_text_for(&workspace::ToggleRightDock) + } + }; + Label::new(format!("Toggle Panel With {keystroke}")).color(Color::Muted) + })) }) } else { let list_contents = { @@ -4728,39 +4730,37 @@ impl OutlinePanel { } fn render_filter_footer(&mut self, pinned: bool, cx: &mut Context) -> Div { - v_flex().flex_none().child(horizontal_separator(cx)).child( - h_flex() - .p_2() - .w_full() - .child(self.filter_editor.clone()) - .child( - div().child( - IconButton::new( - "outline-panel-menu", - if pinned { - IconName::Unpin - } else { - IconName::Pin - }, - ) - .tooltip(Tooltip::text(if pinned { - "Unpin Outline" - } else { - "Pin Active Outline" - })) - .shape(IconButtonShape::Square) - .on_click(cx.listener( - |outline_panel, _, window, cx| { - outline_panel.toggle_active_editor_pin( - &ToggleActiveEditorPin, - window, - cx, - ); - }, - )), - ), - ), - ) + let (icon, icon_tooltip) = if pinned { + (IconName::Unpin, "Unpin Outline") + } else { + (IconName::Pin, "Pin Active Outline") + }; + + h_flex() + .p_2() + .h(Tab::container_height(cx)) + .justify_between() + .border_b_1() + .border_color(cx.theme().colors().border) + .child( + h_flex() + .w_full() + .gap_1p5() + .child( + Icon::new(IconName::MagnifyingGlass) + .size(IconSize::Small) + .color(Color::Muted), + ) + .child(self.filter_editor.clone()), + ) + .child( + IconButton::new("pin_button", icon) + .tooltip(Tooltip::text(icon_tooltip)) + .shape(IconButtonShape::Square) + .on_click(cx.listener(|outline_panel, _, window, cx| { + outline_panel.toggle_active_editor_pin(&ToggleActiveEditorPin, window, cx); + })), + ) } fn buffers_inside_directory( @@ -4974,6 +4974,8 @@ impl Render for OutlinePanel { _ => None, }; + let search_query_text = search_query.map(|sq| sq.query.to_string()); + v_flex() .id("outline-panel") .size_full() @@ -5020,22 +5022,21 @@ impl Render for OutlinePanel { }), ) .track_focus(&self.focus_handle) - .when_some(search_query, |outline_panel, search_state| { + .child(self.render_filter_footer(pinned, cx)) + .when_some(search_query_text, |outline_panel, query_text| { outline_panel.child( h_flex() .py_1p5() .px_2() - .h(DynamicSpacing::Base32.px(cx)) - .flex_shrink_0() - .border_b_1() - .border_color(cx.theme().colors().border) + .h(Tab::container_height(cx)) .gap_0p5() + .border_b_1() + .border_color(cx.theme().colors().border_variant) 
.child(Label::new("Searching:").color(Color::Muted)) - .child(Label::new(search_state.query.to_string())), + .child(Label::new(query_text)), ) }) .child(self.render_main_contents(query, show_indent_guides, indent_size, window, cx)) - .child(self.render_filter_footer(pinned, cx)) } } @@ -5214,10 +5215,6 @@ fn empty_icon() -> AnyElement { .into_any_element() } -fn horizontal_separator(cx: &mut App) -> Div { - div().mx_2().border_primary(cx).border_t_1() -} - #[derive(Debug, Default)] struct GenerationState { entries: Vec, From 659169f06dba68e0dbcd14e1c1cd9f5d5efb2b5e Mon Sep 17 00:00:00 2001 From: Michael Benfield Date: Thu, 20 Nov 2025 13:57:43 -0800 Subject: [PATCH 0268/1030] Add codegen_ranges function in inline_assistant.rs (#43186) Just a simple refactor. Release Notes: - N/A --------- Co-authored-by: Mikayla Maki Co-authored-by: Richard Feldman --- crates/agent_ui/src/inline_assistant.rs | 181 +++++++++++++----------- 1 file changed, 99 insertions(+), 82 deletions(-) diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index 05cdd42544419969ce76ec168e671d5b2ac2402e..17a6a8e022f322575cabab728bd512d68754f4df 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -16,6 +16,7 @@ use agent_settings::AgentSettings; use anyhow::{Context as _, Result}; use client::telemetry::Telemetry; use collections::{HashMap, HashSet, VecDeque, hash_map}; +use editor::EditorSnapshot; use editor::MultiBufferOffset; use editor::RowExt; use editor::SelectionEffects; @@ -351,25 +352,20 @@ impl InlineAssistant { } } - pub fn assist( + fn codegen_ranges( &mut self, editor: &Entity, - workspace: WeakEntity, - context_store: Entity, - project: WeakEntity, - prompt_store: Option>, - thread_store: Option>, - initial_prompt: Option, + snapshot: &EditorSnapshot, window: &mut Window, cx: &mut App, - ) { - let (snapshot, initial_selections, newest_selection) = editor.update(cx, |editor, cx| { - let snapshot = editor.snapshot(window, cx); - let selections = editor.selections.all::(&snapshot.display_snapshot); - let newest_selection = editor - .selections - .newest::(&snapshot.display_snapshot); - (snapshot, selections, newest_selection) + ) -> Option<(Vec>, Selection)> { + let (initial_selections, newest_selection) = editor.update(cx, |editor, _| { + ( + editor.selections.all::(&snapshot.display_snapshot), + editor + .selections + .newest::(&snapshot.display_snapshot), + ) }); // Check if there is already an inline assistant that contains the @@ -382,7 +378,7 @@ impl InlineAssistant { && newest_selection.end.row <= range.end.row { self.focus_assist(*assist_id, window, cx); - return; + return None; } } } @@ -474,6 +470,26 @@ impl InlineAssistant { } } + Some((codegen_ranges, newest_selection)) + } + + fn batch_assist( + &mut self, + editor: &Entity, + workspace: WeakEntity, + context_store: Entity, + project: WeakEntity, + prompt_store: Option>, + thread_store: Option>, + initial_prompt: Option, + window: &mut Window, + codegen_ranges: &[Range], + newest_selection: Option>, + initial_transaction_id: Option, + cx: &mut App, + ) -> Option { + let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); + let assist_group_id = self.next_assist_group_id.post_inc(); let prompt_buffer = cx.new(|cx| { MultiBuffer::singleton( @@ -484,13 +500,14 @@ impl InlineAssistant { let mut assists = Vec::new(); let mut assist_to_focus = None; + for range in codegen_ranges { let assist_id = self.next_assist_id.post_inc(); let codegen = 
cx.new(|cx| { BufferCodegen::new( editor.read(cx).buffer().clone(), range.clone(), - None, + initial_transaction_id, context_store.clone(), project.clone(), prompt_store.clone(), @@ -518,11 +535,13 @@ impl InlineAssistant { ) }); - if assist_to_focus.is_none() { + if let Some(newest_selection) = newest_selection.as_ref() + && assist_to_focus.is_none() + { let focus_assist = if newest_selection.reversed { - range.start.to_point(snapshot) == newest_selection.start + range.start.to_point(&snapshot) == newest_selection.start } else { - range.end.to_point(snapshot) == newest_selection.end + range.end.to_point(&snapshot) == newest_selection.end }; if focus_assist { assist_to_focus = Some(assist_id); @@ -534,7 +553,7 @@ impl InlineAssistant { assists.push(( assist_id, - range, + range.clone(), prompt_editor, prompt_block_id, end_block_id, @@ -545,6 +564,15 @@ impl InlineAssistant { .assists_by_editor .entry(editor.downgrade()) .or_insert_with(|| EditorInlineAssists::new(editor, window, cx)); + + let assist_to_focus = if let Some(focus_id) = assist_to_focus { + Some(focus_id) + } else if assists.len() >= 1 { + Some(assists[0].0) + } else { + None + }; + let mut assist_group = InlineAssistGroup::new(); for (assist_id, range, prompt_editor, prompt_block_id, end_block_id) in assists { let codegen = prompt_editor.read(cx).codegen().clone(); @@ -568,8 +596,47 @@ impl InlineAssistant { assist_group.assist_ids.push(assist_id); editor_assists.assist_ids.push(assist_id); } + self.assist_groups.insert(assist_group_id, assist_group); + assist_to_focus + } + + pub fn assist( + &mut self, + editor: &Entity, + workspace: WeakEntity, + context_store: Entity, + project: WeakEntity, + prompt_store: Option>, + thread_store: Option>, + initial_prompt: Option, + window: &mut Window, + cx: &mut App, + ) { + let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); + + let Some((codegen_ranges, newest_selection)) = + self.codegen_ranges(editor, &snapshot, window, cx) + else { + return; + }; + + let assist_to_focus = self.batch_assist( + editor, + workspace, + context_store, + project, + prompt_store, + thread_store, + initial_prompt, + window, + &codegen_ranges, + Some(newest_selection), + None, + cx, + ); + if let Some(assist_id) = assist_to_focus { self.focus_assist(assist_id, window, cx); } @@ -588,12 +655,6 @@ impl InlineAssistant { window: &mut Window, cx: &mut App, ) -> InlineAssistId { - let assist_group_id = self.next_assist_group_id.post_inc(); - let prompt_buffer = cx.new(|cx| Buffer::local(&initial_prompt, cx)); - let prompt_buffer = cx.new(|cx| MultiBuffer::singleton(prompt_buffer, cx)); - - let assist_id = self.next_assist_id.post_inc(); - let buffer = editor.read(cx).buffer().clone(); { let snapshot = buffer.read(cx).read(cx); @@ -604,66 +665,22 @@ impl InlineAssistant { let project = workspace.read(cx).project().downgrade(); let context_store = cx.new(|_cx| ContextStore::new(project.clone())); - let codegen = cx.new(|cx| { - BufferCodegen::new( - editor.read(cx).buffer().clone(), - range.clone(), - initial_transaction_id, - context_store.clone(), - project, - prompt_store.clone(), - self.telemetry.clone(), - self.prompt_builder.clone(), - cx, - ) - }); - - let editor_margins = Arc::new(Mutex::new(EditorMargins::default())); - let prompt_editor = cx.new(|cx| { - PromptEditor::new_buffer( - assist_id, - editor_margins, - self.prompt_history.clone(), - prompt_buffer.clone(), - codegen.clone(), - self.fs.clone(), - context_store, + let assist_id = self + .batch_assist( + editor, 
workspace.downgrade(), + context_store, + project, + prompt_store, thread_store, - prompt_store.map(|s| s.downgrade()), + Some(initial_prompt), window, + &[range], + None, + initial_transaction_id, cx, ) - }); - - let [prompt_block_id, end_block_id] = - self.insert_assist_blocks(editor, &range, &prompt_editor, cx); - - let editor_assists = self - .assists_by_editor - .entry(editor.downgrade()) - .or_insert_with(|| EditorInlineAssists::new(editor, window, cx)); - - let mut assist_group = InlineAssistGroup::new(); - self.assists.insert( - assist_id, - InlineAssist::new( - assist_id, - assist_group_id, - editor, - &prompt_editor, - prompt_block_id, - end_block_id, - range, - codegen.clone(), - workspace.downgrade(), - window, - cx, - ), - ); - assist_group.assist_ids.push(assist_id); - editor_assists.assist_ids.push(assist_id); - self.assist_groups.insert(assist_group_id, assist_group); + .expect("batch_assist returns an id if there's only one range"); if focus { self.focus_assist(assist_id, window, cx); From 898c133906f6904f8f1d99650a2e23e300a4dd1d Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 20 Nov 2025 14:16:07 -0800 Subject: [PATCH 0269/1030] Simplify error management in stream_completion (#43035) This PR simplifies error and event handling by removing the `Ok(LanguageModelCompletionEvent::Status(CompletionRequestStatus::Failed)))` state from the stream returned by `LanguageModel::stream_completion()`, by changing it into an `Err(LanguageModelCompletionError)`. This was done by collapsing the valid `CompletionRequestStatus` values into `LanguageModelCompletionEvent`. Release Notes: - N/A --------- Co-authored-by: Michael Benfield --- crates/agent/src/tests/mod.rs | 8 +--- crates/agent/src/thread.rs | 20 +++----- .../assistant_text_thread/src/text_thread.rs | 20 ++++---- crates/eval/src/instance.rs | 12 +++-- crates/language_model/src/language_model.rs | 48 +++++++++++++++++-- crates/language_models/src/provider/cloud.rs | 27 ++++++++++- 6 files changed, 98 insertions(+), 37 deletions(-) diff --git a/crates/agent/src/tests/mod.rs b/crates/agent/src/tests/mod.rs index ffc5dbc6d30e58b5d819c3778b063951b0ed0861..f43cbed952afd434c4262da486ce11dffa40a5c8 100644 --- a/crates/agent/src/tests/mod.rs +++ b/crates/agent/src/tests/mod.rs @@ -664,9 +664,7 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) { ); // Simulate reaching tool use limit. 
- fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::StatusUpdate( - cloud_llm_client::CompletionRequestStatus::ToolUseLimitReached, - )); + fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUseLimitReached); fake_model.end_last_completion_stream(); let last_event = events.collect::>().await.pop().unwrap(); assert!( @@ -749,9 +747,7 @@ async fn test_send_after_tool_use_limit(cx: &mut TestAppContext) { }; fake_model .send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUse(tool_use.clone())); - fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::StatusUpdate( - cloud_llm_client::CompletionRequestStatus::ToolUseLimitReached, - )); + fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::ToolUseLimitReached); fake_model.end_last_completion_stream(); let last_event = events.collect::>().await.pop().unwrap(); assert!( diff --git a/crates/agent/src/thread.rs b/crates/agent/src/thread.rs index 45c09675b2470bc399e7ad38fbf976fb2b06eea6..928b60eee4bc3ccdf296e8ba7f4f0bdc49cb9fa3 100644 --- a/crates/agent/src/thread.rs +++ b/crates/agent/src/thread.rs @@ -15,7 +15,7 @@ use agent_settings::{ use anyhow::{Context as _, Result, anyhow}; use chrono::{DateTime, Utc}; use client::{ModelRequestUsage, RequestUsage, UserStore}; -use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, Plan, UsageLimit}; +use cloud_llm_client::{CompletionIntent, Plan, UsageLimit}; use collections::{HashMap, HashSet, IndexMap}; use fs::Fs; use futures::stream; @@ -1430,20 +1430,16 @@ impl Thread { ); self.update_token_usage(usage, cx); } - StatusUpdate(CompletionRequestStatus::UsageUpdated { amount, limit }) => { + UsageUpdated { amount, limit } => { self.update_model_request_usage(amount, limit, cx); } - StatusUpdate( - CompletionRequestStatus::Started - | CompletionRequestStatus::Queued { .. } - | CompletionRequestStatus::Failed { .. }, - ) => {} - StatusUpdate(CompletionRequestStatus::ToolUseLimitReached) => { + ToolUseLimitReached => { self.tool_use_limit_reached = true; } Stop(StopReason::Refusal) => return Err(CompletionError::Refusal.into()), Stop(StopReason::MaxTokens) => return Err(CompletionError::MaxTokens.into()), Stop(StopReason::ToolUse | StopReason::EndTurn) => {} + Started | Queued { .. 
} => {} } Ok(None) @@ -1687,9 +1683,7 @@ impl Thread { let event = event.log_err()?; let text = match event { LanguageModelCompletionEvent::Text(text) => text, - LanguageModelCompletionEvent::StatusUpdate( - CompletionRequestStatus::UsageUpdated { amount, limit }, - ) => { + LanguageModelCompletionEvent::UsageUpdated { amount, limit } => { this.update(cx, |thread, cx| { thread.update_model_request_usage(amount, limit, cx); }) @@ -1753,9 +1747,7 @@ impl Thread { let event = event?; let text = match event { LanguageModelCompletionEvent::Text(text) => text, - LanguageModelCompletionEvent::StatusUpdate( - CompletionRequestStatus::UsageUpdated { amount, limit }, - ) => { + LanguageModelCompletionEvent::UsageUpdated { amount, limit } => { this.update(cx, |thread, cx| { thread.update_model_request_usage(amount, limit, cx); })?; diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index ae5fe25d430e80b7be68000162b6f0b21807e2a2..9f065e9ca7a1daf933c1313dd1d5f092cbed2771 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -7,9 +7,10 @@ use assistant_slash_command::{ use assistant_slash_commands::FileCommandMetadata; use client::{self, ModelRequestUsage, RequestUsage, proto, telemetry::Telemetry}; use clock::ReplicaId; -use cloud_llm_client::{CompletionIntent, CompletionRequestStatus, UsageLimit}; +use cloud_llm_client::{CompletionIntent, UsageLimit}; use collections::{HashMap, HashSet}; use fs::{Fs, RenameOptions}; + use futures::{FutureExt, StreamExt, future::Shared}; use gpui::{ App, AppContext as _, Context, Entity, EventEmitter, RenderImage, SharedString, Subscription, @@ -2073,14 +2074,15 @@ impl TextThread { }); match event { - LanguageModelCompletionEvent::StatusUpdate(status_update) => { - if let CompletionRequestStatus::UsageUpdated { amount, limit } = status_update { - this.update_model_request_usage( - amount as u32, - limit, - cx, - ); - } + LanguageModelCompletionEvent::Started | + LanguageModelCompletionEvent::Queued {..} | + LanguageModelCompletionEvent::ToolUseLimitReached { .. } => {} + LanguageModelCompletionEvent::UsageUpdated { amount, limit } => { + this.update_model_request_usage( + amount as u32, + limit, + cx, + ); } LanguageModelCompletionEvent::StartMessage { .. } => {} LanguageModelCompletionEvent::Stop(reason) => { diff --git a/crates/eval/src/instance.rs b/crates/eval/src/instance.rs index 035f1ec0ac8d0c6490dc39637e03e377ee3d194b..075a1a5cea1da782d40778befeb04bf2e6bac316 100644 --- a/crates/eval/src/instance.rs +++ b/crates/eval/src/instance.rs @@ -1251,8 +1251,11 @@ pub fn response_events_to_markdown( } Ok( LanguageModelCompletionEvent::UsageUpdate(_) + | LanguageModelCompletionEvent::ToolUseLimitReached | LanguageModelCompletionEvent::StartMessage { .. } - | LanguageModelCompletionEvent::StatusUpdate { .. }, + | LanguageModelCompletionEvent::UsageUpdated { .. } + | LanguageModelCompletionEvent::Queued { .. } + | LanguageModelCompletionEvent::Started, ) => {} Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { json_parse_error, .. @@ -1337,9 +1340,12 @@ impl ThreadDialog { // Skip these Ok(LanguageModelCompletionEvent::UsageUpdate(_)) | Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) - | Ok(LanguageModelCompletionEvent::StatusUpdate { .. }) | Ok(LanguageModelCompletionEvent::StartMessage { .. 
}) - | Ok(LanguageModelCompletionEvent::Stop(_)) => {} + | Ok(LanguageModelCompletionEvent::Stop(_)) + | Ok(LanguageModelCompletionEvent::Queued { .. }) + | Ok(LanguageModelCompletionEvent::Started) + | Ok(LanguageModelCompletionEvent::UsageUpdated { .. }) + | Ok(LanguageModelCompletionEvent::ToolUseLimitReached) => {} Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { json_parse_error, diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index 785bb0dbdc7b6bb82d052cce16eb1c4b2fd66a48..3322409c09399b3ec957d8288b45e1833b77c106 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -12,7 +12,7 @@ pub mod fake_provider; use anthropic::{AnthropicError, parse_prompt_too_long}; use anyhow::{Result, anyhow}; use client::Client; -use cloud_llm_client::{CompletionMode, CompletionRequestStatus}; +use cloud_llm_client::{CompletionMode, CompletionRequestStatus, UsageLimit}; use futures::FutureExt; use futures::{StreamExt, future::BoxFuture, stream::BoxStream}; use gpui::{AnyView, App, AsyncApp, SharedString, Task, Window}; @@ -70,7 +70,15 @@ pub fn init_settings(cx: &mut App) { /// A completion event from a language model. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub enum LanguageModelCompletionEvent { - StatusUpdate(CompletionRequestStatus), + Queued { + position: usize, + }, + Started, + UsageUpdated { + amount: usize, + limit: UsageLimit, + }, + ToolUseLimitReached, Stop(StopReason), Text(String), Thinking { @@ -93,6 +101,37 @@ pub enum LanguageModelCompletionEvent { UsageUpdate(TokenUsage), } +impl LanguageModelCompletionEvent { + pub fn from_completion_request_status( + status: CompletionRequestStatus, + upstream_provider: LanguageModelProviderName, + ) -> Result { + match status { + CompletionRequestStatus::Queued { position } => { + Ok(LanguageModelCompletionEvent::Queued { position }) + } + CompletionRequestStatus::Started => Ok(LanguageModelCompletionEvent::Started), + CompletionRequestStatus::UsageUpdated { amount, limit } => { + Ok(LanguageModelCompletionEvent::UsageUpdated { amount, limit }) + } + CompletionRequestStatus::ToolUseLimitReached => { + Ok(LanguageModelCompletionEvent::ToolUseLimitReached) + } + CompletionRequestStatus::Failed { + code, + message, + request_id: _, + retry_after, + } => Err(LanguageModelCompletionError::from_cloud_failure( + upstream_provider, + code, + message, + retry_after.map(Duration::from_secs_f64), + )), + } + } +} + #[derive(Error, Debug)] pub enum LanguageModelCompletionError { #[error("prompt too large for context window")] @@ -633,7 +672,10 @@ pub trait LanguageModel: Send + Sync { let last_token_usage = last_token_usage.clone(); async move { match result { - Ok(LanguageModelCompletionEvent::StatusUpdate { .. }) => None, + Ok(LanguageModelCompletionEvent::Queued { .. }) => None, + Ok(LanguageModelCompletionEvent::Started) => None, + Ok(LanguageModelCompletionEvent::UsageUpdated { .. }) => None, + Ok(LanguageModelCompletionEvent::ToolUseLimitReached) => None, Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None, Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)), Ok(LanguageModelCompletionEvent::Thinking { .. 
}) => None, diff --git a/crates/language_models/src/provider/cloud.rs index d85533ecce63441fe5aaa7a382bf04af79992f63..a9ff767146287db25fb0b42685525fd56d29d71e 100644 --- a/crates/language_models/src/provider/cloud.rs +++ b/crates/language_models/src/provider/cloud.rs @@ -752,6 +752,7 @@ impl LanguageModel for CloudLanguageModel { let mode = request.mode; let app_version = cx.update(|cx| AppVersion::global(cx)).ok(); let thinking_allowed = request.thinking_allowed; + let provider_name = provider_name(&self.model.provider); match self.model.provider { cloud_llm_client::LanguageModelProvider::Anthropic => { let request = into_anthropic( @@ -801,8 +802,9 @@ impl LanguageModel for CloudLanguageModel { Box::pin( response_lines(response, includes_status_messages) .chain(usage_updated_event(usage)) - .chain(tool_use_limit_reached_event(tool_use_limit_reached)), + .chain(tool_use_limit_reached_event(tool_use_limit_reached)), // .map(|_| {}), ), + &provider_name, move |event| mapper.map_event(event), )) }); @@ -849,6 +851,7 @@ impl LanguageModel for CloudLanguageModel { .chain(usage_updated_event(usage)) .chain(tool_use_limit_reached_event(tool_use_limit_reached)), ), + &provider_name, move |event| mapper.map_event(event), )) }); @@ -895,6 +898,7 @@ impl LanguageModel for CloudLanguageModel { .chain(usage_updated_event(usage)) .chain(tool_use_limit_reached_event(tool_use_limit_reached)), ), + &provider_name, move |event| mapper.map_event(event), )) }); @@ -935,6 +939,7 @@ impl LanguageModel for CloudLanguageModel { .chain(usage_updated_event(usage)) .chain(tool_use_limit_reached_event(tool_use_limit_reached)), ), + &provider_name, move |event| mapper.map_event(event), )) }); @@ -946,6 +951,7 @@ impl LanguageModel for CloudLanguageModel { fn map_cloud_completion_events( stream: Pin>> + Send>>, + provider: &LanguageModelProviderName, mut map_callback: F, ) -> BoxStream<'static, Result> where @@ -954,6 +960,7 @@ where + Send + 'static, { + let provider = provider.clone(); stream .flat_map(move |event| { futures::stream::iter(match event { @@ -961,7 +968,12 @@ where vec![Err(LanguageModelCompletionError::from(error))] } Ok(CompletionEvent::Status(event)) => { - vec![Ok(LanguageModelCompletionEvent::StatusUpdate(event))] + vec![ + LanguageModelCompletionEvent::from_completion_request_status( + event, + provider.clone(), + ), + ] } Ok(CompletionEvent::Event(event)) => map_callback(event), }) @@ -969,6 +981,17 @@ where .boxed() } +fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName { + match provider { + cloud_llm_client::LanguageModelProvider::Anthropic => { + language_model::ANTHROPIC_PROVIDER_NAME + } + cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME, + cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME, + cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME, + } +} + fn usage_updated_event( usage: Option, ) -> impl Stream>> { From dbdc501c899f3c24970a6d2c0725bee8784e0a91 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Thu, 20 Nov 2025 17:17:52 -0500 Subject: [PATCH 0270/1030] Fix casing in comments in `default.json` (#43201) This PR fixes the casing of the operating system names in the language-specific sections of `default.json`. This file serves as documentation for users (since it can be viewed through `zed: open default settings`), so we should make sure it is tidy.
Release Notes: - N/A --- assets/settings/default.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index bd8a6e96dd3929c63f47b20f54d3422051363511..9a6146f75b6a3b1d16d64bd1d7b1e7aab9e992dd 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -2068,11 +2068,11 @@ "dev": { // "theme": "Andromeda" }, - // Settings overrides to use when using linux + // Settings overrides to use when using Linux. "linux": {}, - // Settings overrides to use when using macos + // Settings overrides to use when using macOS. "macos": {}, - // Settings overrides to use when using windows + // Settings overrides to use when using Windows. "windows": { "languages": { "PHP": { From 8bbd101dcdf68c89d6ce13a88d34f7e9e0bad610 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Thu, 20 Nov 2025 14:30:21 -0800 Subject: [PATCH 0271/1030] ci: Run `check_docs` when code changes (#43188) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... --- .github/workflows/run_tests.yml | 2 +- tooling/xtask/src/tasks/workflows.rs | 3 +++ tooling/xtask/src/tasks/workflows/run_tests.rs | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 12ca53431994ee2090c9f7d83ee19ac92aa313be..323dd7fd1b52eb43400658470ee7d7c986f219fa 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -47,7 +47,7 @@ jobs: } check_pattern "run_action_checks" '^\.github/(workflows/|actions/|actionlint.yml)|tooling/xtask|script/' -qP - check_pattern "run_docs" '^docs/' -qP + check_pattern "run_docs" '^(docs/|crates/.*\.rs)' -qP check_pattern "run_licenses" '^(Cargo.lock|script/.*licenses)' -qP check_pattern "run_nix" '^(nix/|flake\.|Cargo\.|rust-toolchain.toml|\.cargo/config.toml)' -qP check_pattern "run_tests" '^(docs/|script/update_top_ranking_issues/|\.github/(ISSUE_TEMPLATE|workflows/(?!run_tests)))' -qvP diff --git a/tooling/xtask/src/tasks/workflows.rs b/tooling/xtask/src/tasks/workflows.rs index 31ca9590f1d6c8c935da4056930db470913656c9..c18eca52be8cf7fa369f46427c58b1d6b70e8bd0 100644 --- a/tooling/xtask/src/tasks/workflows.rs +++ b/tooling/xtask/src/tasks/workflows.rs @@ -23,6 +23,9 @@ mod vars; pub struct GenerateWorkflowArgs {} pub fn run_workflows(_: GenerateWorkflowArgs) -> Result<()> { + if !Path::new("crates/zed/").is_dir() { + anyhow::bail!("xtask workflows must be ran from the project root"); + } let dir = Path::new(".github/workflows"); let workflows = vec![ diff --git a/tooling/xtask/src/tasks/workflows/run_tests.rs b/tooling/xtask/src/tasks/workflows/run_tests.rs index 5894f92df88d8154ddd0eeb49b5cb4825931879a..5fff9bc8ec40941ca8e32faeaca416aeea2c7a4b 100644 --- a/tooling/xtask/src/tasks/workflows/run_tests.rs +++ b/tooling/xtask/src/tasks/workflows/run_tests.rs @@ -25,7 +25,7 @@ pub(crate) fn run_tests() -> Workflow { "run_tests", r"^(docs/|script/update_top_ranking_issues/|\.github/(ISSUE_TEMPLATE|workflows/(?!run_tests)))", ); - let should_check_docs = PathCondition::new("run_docs", r"^docs/"); + let should_check_docs = PathCondition::new("run_docs", r"^(docs/|crates/.*\.rs)"); let should_check_scripts = PathCondition::new( "run_action_checks", r"^\.github/(workflows/|actions/|actionlint.yml)|tooling/xtask|script/", From 07d98981e8d7f162a182b990047584f3840fa98f Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Thu, 20 Nov 2025 15:59:02 -0800 Subject: [PATCH 0272/1030] Make the edit prediction status bar menu work 
correctly when using sweep (#43203) Release Notes: - N/A --------- Co-authored-by: Ben Kunkle --- .../src/edit_prediction_button.rs | 89 +++++++++++++------ .../zed/src/zed/edit_prediction_registry.rs | 3 +- crates/zeta2/src/zeta2.rs | 23 +++-- 3 files changed, 80 insertions(+), 35 deletions(-) diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index ba00e95c488dc8e8704274638087c8334f96e1a3..051ca6e85ccb985ba6b325cda725f83029aa3193 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -83,9 +83,7 @@ impl Render for EditPredictionButton { let all_language_settings = all_language_settings(None, cx); - match &all_language_settings.edit_predictions.provider { - EditPredictionProvider::None => div().hidden(), - + match all_language_settings.edit_predictions.provider { EditPredictionProvider::Copilot => { let Some(copilot) = Copilot::global(cx) else { return div().hidden(); @@ -302,23 +300,23 @@ impl Render for EditPredictionButton { .with_handle(self.popover_menu_handle.clone()), ) } - EditPredictionProvider::Experimental(provider_name) => { - if *provider_name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME - && cx.has_flag::() - { - div().child(Icon::new(IconName::SweepAi)) - } else { - div() - } - } - - EditPredictionProvider::Zed => { + provider @ (EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + ) + | EditPredictionProvider::Zed) => { let enabled = self.editor_enabled.unwrap_or(true); - let zeta_icon = if enabled { - IconName::ZedPredict - } else { - IconName::ZedPredictDisabled + let is_sweep = matches!( + provider, + EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + ) + ); + + let zeta_icon = match (is_sweep, enabled) { + (true, _) => IconName::SweepAi, + (false, true) => IconName::ZedPredict, + (false, false) => IconName::ZedPredictDisabled, }; if zeta::should_show_upsell_modal() { @@ -402,8 +400,10 @@ impl Render for EditPredictionButton { let mut popover_menu = PopoverMenu::new("zeta") .menu(move |window, cx| { - this.update(cx, |this, cx| this.build_zeta_context_menu(window, cx)) - .ok() + this.update(cx, |this, cx| { + this.build_zeta_context_menu(provider, window, cx) + }) + .ok() }) .anchor(Corner::BottomRight) .with_handle(self.popover_menu_handle.clone()); @@ -429,6 +429,10 @@ impl Render for EditPredictionButton { div().child(popover_menu.into_any_element()) } + + EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { + div().hidden() + } } } } @@ -487,6 +491,12 @@ impl EditPredictionButton { providers.push(EditPredictionProvider::Codestral); } + if cx.has_flag::() { + providers.push(EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + )); + } + providers } @@ -498,6 +508,11 @@ impl EditPredictionButton { ) -> ContextMenu { let available_providers = self.get_available_providers(cx); + const ZED_AI_CALLOUT: &str = + "Zed's edit prediction is powered by Zeta, an open-source, dataset mode."; + const USE_SWEEP_API_TOKEN_CALLOUT: &str = + "Set the SWEEP_API_TOKEN environment variable to use Sweep"; + let other_providers: Vec<_> = available_providers .into_iter() .filter(|p| *p != current_provider && *p != EditPredictionProvider::None) @@ -514,11 +529,8 @@ impl EditPredictionButton { ContextMenuEntry::new("Zed AI") .documentation_aside( DocumentationSide::Left, - 
DocumentationEdge::Top, - |_| { - Label::new("Zed's edit prediction is powered by Zeta, an open-source, dataset mode.") - .into_any_element() - }, + DocumentationEdge::Bottom, + |_| Label::new(ZED_AI_CALLOUT).into_any_element(), ) .handler(move |_, cx| { set_completion_provider(fs.clone(), cx, provider); @@ -539,7 +551,29 @@ impl EditPredictionButton { set_completion_provider(fs.clone(), cx, provider); }) } - EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => continue, + EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + ) => { + let has_api_token = zeta2::Zeta::try_global(cx) + .map_or(false, |zeta| zeta.read(cx).has_sweep_api_token()); + + let entry = ContextMenuEntry::new("Sweep") + .when(!has_api_token, |this| { + this.disabled(true).documentation_aside( + DocumentationSide::Left, + DocumentationEdge::Bottom, + |_| Label::new(USE_SWEEP_API_TOKEN_CALLOUT).into_any_element(), + ) + }) + .handler(move |_, cx| { + set_completion_provider(fs.clone(), cx, provider); + }); + + menu.item(entry) + } + EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { + continue; + } }; } } @@ -909,6 +943,7 @@ impl EditPredictionButton { fn build_zeta_context_menu( &self, + provider: EditPredictionProvider, window: &mut Window, cx: &mut Context, ) -> Entity { @@ -996,7 +1031,7 @@ impl EditPredictionButton { } let menu = self.build_language_settings_menu(menu, window, cx); - let menu = self.add_provider_switching_section(menu, EditPredictionProvider::Zed, cx); + let menu = self.add_provider_switching_section(menu, provider, cx); menu }) diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index 250a2b5a0e585d5acad7658a25f89bce12f766d2..577e81c6a9b36bc29a4b1d1f0cda63170c75d5a2 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -204,6 +204,8 @@ fn assign_edit_prediction_provider( editor.set_edit_prediction_provider(Some(provider), window, cx); } value @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { + let zeta2 = zeta2::Zeta::global(client, &user_store, cx); + if let Some(project) = editor.project() { let mut worktree = None; if let Some(buffer) = &singleton_buffer @@ -217,7 +219,6 @@ fn assign_edit_prediction_provider( && name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME && cx.has_flag::() { - let zeta2 = zeta2::Zeta::global(client, &user_store, cx); let provider = cx.new(|cx| { zeta2::ZetaEditPredictionProvider::new( project.clone(), diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 6eacc5190f403594ad20f7365512b011d2226719..0d0f4f3d39e9c997282695828ba16e7eccd7d8e2 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -402,20 +402,21 @@ impl Zeta { #[cfg(feature = "eval-support")] eval_cache: None, edit_prediction_model: ZetaEditPredictionModel::ZedCloud, - sweep_api_token: None, + sweep_api_token: std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN environment variable set") + .log_err(), sweep_ai_debug_info: sweep_ai::debug_info(cx), } } pub fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { - if model == ZetaEditPredictionModel::Sweep { - self.sweep_api_token = std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(); - } self.edit_prediction_model = model; } + pub fn has_sweep_api_token(&self) -> bool { + self.sweep_api_token.is_some() + } + 
#[cfg(feature = "eval-support")] pub fn with_eval_cache(&mut self, cache: Arc) { self.eval_cache = Some(cache); @@ -472,7 +473,11 @@ impl Zeta { } pub fn usage(&self, cx: &App) -> Option { - self.user_store.read(cx).edit_prediction_usage() + if self.edit_prediction_model == ZetaEditPredictionModel::ZedCloud { + self.user_store.read(cx).edit_prediction_usage() + } else { + None + } } pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { @@ -659,6 +664,10 @@ impl Zeta { } fn accept_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + if self.edit_prediction_model != ZetaEditPredictionModel::ZedCloud { + return; + } + let Some(project_state) = self.projects.get_mut(&project.entity_id()) else { return; }; From 2b9eeb9a308a650b99eb92d4ff6980e37f2b5bd2 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Thu, 20 Nov 2025 19:42:49 -0700 Subject: [PATCH 0273/1030] Disable keychain timeout in bundle-mac (#43204) Attempt to reduce the number of times bundle-mac fails to notorize by disabling keychain's auto-lock timeout Release Notes: - N/A --- script/bundle-mac | 2 ++ 1 file changed, 2 insertions(+) diff --git a/script/bundle-mac b/script/bundle-mac index 248cb10203a16299e33b1d997aeee8cfca46250e..5ee6590a0c656cb56bc5ea091ca844d26b13e9e3 100755 --- a/script/bundle-mac +++ b/script/bundle-mac @@ -113,6 +113,8 @@ if [[ -n "${MACOS_CERTIFICATE:-}" && -n "${MACOS_CERTIFICATE_PASSWORD:-}" && -n security create-keychain -p "$MACOS_CERTIFICATE_PASSWORD" zed.keychain || echo "" security default-keychain -s zed.keychain security unlock-keychain -p "$MACOS_CERTIFICATE_PASSWORD" zed.keychain + # Calling set-keychain-settings without `-t` disables the auto-lock timeout + security set-keychain-settings zed.keychain echo "$MACOS_CERTIFICATE" | base64 --decode > /tmp/zed-certificate.p12 security import /tmp/zed-certificate.p12 -k zed.keychain -P "$MACOS_CERTIFICATE_PASSWORD" -T /usr/bin/codesign rm /tmp/zed-certificate.p12 From b3ebcef5c68387176bd7e3d9d9592e4612e5eb9a Mon Sep 17 00:00:00 2001 From: Xipeng Jin <56369076+xipeng-jin@users.noreply.github.com> Date: Thu, 20 Nov 2025 21:42:56 -0500 Subject: [PATCH 0274/1030] gpui: Only time out multi-stroke bindings when current prefix matches (#42659) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Part One for Resolving #10910 ### Summary Typing prefix (partial keybinding) will behave like Vim. No timeout until you either finish the sequence or hit Escape, while ambiguous sequences still auto-resolve after 1s. ### Description This follow-up tweaks the which-key system first part groundwork so our timeout behavior matches Vim’s expectations. Then we can implement the UI part in the next step (reference latest comments in https://github.com/zed-industries/zed/pull/34798) - `DispatchResult` now reports when the current keystrokes are already a complete binding in the active context stack (`pending_has_binding`). We only start the 1s flush timer in that case. Pure prefixes or sequences that only match in other contexts—stay pending indefinitely, so leader-style combos like `space f g` no longer evaporate after a second. - `Window::dispatch_key_event` cancels any prior timer before scheduling a new one and only spawns the background flush task when `pending_has_binding` is true. If there’s no matching binding, we keep the pending keystrokes and rely on an explicit Escape or more typing to resolve them. 
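
In sketch form, the two points above boil down to the following decision (the types and function here are illustrative stand-ins, not the actual gpui API):

```rust
use std::time::Duration;

/// Result of matching the pending keystrokes against the active keymap.
struct MatchState {
    /// More keystrokes could still complete a longer binding.
    is_prefix: bool,
    /// The keystrokes typed so far already form a complete binding in the
    /// active context stack, so the sequence is ambiguous.
    pending_has_binding: bool,
}

/// Decide whether pending input should auto-flush after a delay.
fn flush_timeout(state: &MatchState) -> Option<Duration> {
    if state.is_prefix && state.pending_has_binding {
        // Ambiguous (e.g. `space` is bound on its own *and* starts `space f g`):
        // resolve to the shorter binding after one second of inactivity.
        Some(Duration::from_secs(1))
    } else {
        // Pure prefix: wait indefinitely; only more keystrokes or an explicit
        // Escape resolves it, matching Vim-style leader behavior.
        None
    }
}

fn main() {
    let ambiguous = MatchState { is_prefix: true, pending_has_binding: true };
    let pure_prefix = MatchState { is_prefix: true, pending_has_binding: false };
    assert_eq!(flush_timeout(&ambiguous), Some(Duration::from_secs(1)));
    assert_eq!(flush_timeout(&pure_prefix), None);
}
```

(The real change also keeps the timer when a focused text input could still consume the keystroke; that detail is omitted from the sketch.)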
Release Notes: - Fixed multi-stroke keybindings so only ambiguous prefixes auto-trigger after 1 s; unmatched prefixes now stay pending until canceled, matching Vim-style leader behavior. --- crates/collab/src/tests/integration_tests.rs | 6 +- crates/editor/src/editor.rs | 8 +-- crates/gpui/src/key_dispatch.rs | 49 ++++++++++++++- crates/gpui/src/window.rs | 65 +++++++++++++------- 4 files changed, 95 insertions(+), 33 deletions(-) diff --git a/crates/collab/src/tests/integration_tests.rs b/crates/collab/src/tests/integration_tests.rs index a4c8dc0e5b7e5eb01f099c11f29a5d651da09303..fcda8688d427f3e6b937f00edc7c3586dfdbef36 100644 --- a/crates/collab/src/tests/integration_tests.rs +++ b/crates/collab/src/tests/integration_tests.rs @@ -6551,12 +6551,12 @@ async fn test_pane_split_left(cx: &mut TestAppContext) { assert!(workspace.items(cx).collect::>().len() == 2); }); cx.simulate_keystrokes("cmd-k"); - // sleep for longer than the timeout in keyboard shortcut handling - // to verify that it doesn't fire in this case. + // Sleep past the historical timeout to ensure the multi-stroke binding + // still fires now that unambiguous prefixes no longer auto-expire. cx.executor().advance_clock(Duration::from_secs(2)); cx.simulate_keystrokes("left"); workspace.update(cx, |workspace, cx| { - assert!(workspace.items(cx).collect::>().len() == 2); + assert!(workspace.items(cx).collect::>().len() == 3); }); } diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index e06ba62ce94c69828b3ab08465b4375b4c862343..c3da066cfd73cab5b2de610bbf1bc653f7e6d874 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -22165,13 +22165,7 @@ impl Editor { .pending_input_keystrokes() .into_iter() .flatten() - .filter_map(|keystroke| { - if keystroke.modifiers.is_subset_of(&Modifiers::shift()) { - keystroke.key_char.clone() - } else { - None - } - }) + .filter_map(|keystroke| keystroke.key_char.clone()) .collect(); if !self.input_enabled || self.read_only || !self.focus_handle.is_focused(window) { diff --git a/crates/gpui/src/key_dispatch.rs b/crates/gpui/src/key_dispatch.rs index f0c857abd6f3c353105b4272b51ca519f1906078..ae4553408fa8d0dc7ed640319ae0b0a178465b74 100644 --- a/crates/gpui/src/key_dispatch.rs +++ b/crates/gpui/src/key_dispatch.rs @@ -121,6 +121,7 @@ pub(crate) struct Replay { #[derive(Default, Debug)] pub(crate) struct DispatchResult { pub(crate) pending: SmallVec<[Keystroke; 1]>, + pub(crate) pending_has_binding: bool, pub(crate) bindings: SmallVec<[KeyBinding; 1]>, pub(crate) to_replay: SmallVec<[Replay; 1]>, pub(crate) context_stack: Vec, @@ -480,6 +481,7 @@ impl DispatchTree { if pending { return DispatchResult { pending: input, + pending_has_binding: !bindings.is_empty(), context_stack, ..Default::default() }; @@ -608,9 +610,11 @@ impl DispatchTree { #[cfg(test)] mod tests { use crate::{ - self as gpui, Element, ElementId, GlobalElementId, InspectorElementId, LayoutId, Style, + self as gpui, DispatchResult, Element, ElementId, GlobalElementId, InspectorElementId, + Keystroke, LayoutId, Style, }; use core::panic; + use smallvec::SmallVec; use std::{cell::RefCell, ops::Range, rc::Rc}; use crate::{ @@ -676,6 +680,49 @@ mod tests { assert!(keybinding[0].action.partial_eq(&TestAction)) } + #[test] + fn test_pending_has_binding_state() { + let bindings = vec![ + KeyBinding::new("ctrl-b h", TestAction, None), + KeyBinding::new("space", TestAction, Some("ContextA")), + KeyBinding::new("space f g", TestAction, Some("ContextB")), + ]; + let keymap = 
Rc::new(RefCell::new(Keymap::new(bindings))); + let mut registry = ActionRegistry::default(); + registry.load_action::(); + let mut tree = DispatchTree::new(keymap, Rc::new(registry)); + + type DispatchPath = SmallVec<[super::DispatchNodeId; 32]>; + fn dispatch( + tree: &mut DispatchTree, + pending: SmallVec<[Keystroke; 1]>, + key: &str, + path: &DispatchPath, + ) -> DispatchResult { + tree.dispatch_key(pending, Keystroke::parse(key).unwrap(), path) + } + + let dispatch_path: DispatchPath = SmallVec::new(); + let result = dispatch(&mut tree, SmallVec::new(), "ctrl-b", &dispatch_path); + assert_eq!(result.pending.len(), 1); + assert!(!result.pending_has_binding); + + let result = dispatch(&mut tree, result.pending, "h", &dispatch_path); + assert_eq!(result.pending.len(), 0); + assert_eq!(result.bindings.len(), 1); + assert!(!result.pending_has_binding); + + let node_id = tree.push_node(); + tree.set_key_context(KeyContext::parse("ContextB").unwrap()); + tree.pop_node(); + + let dispatch_path = tree.dispatch_path(node_id); + let result = dispatch(&mut tree, SmallVec::new(), "space", &dispatch_path); + + assert_eq!(result.pending.len(), 1); + assert!(!result.pending_has_binding); + } + #[crate::test] fn test_input_handler_pending(cx: &mut TestAppContext) { #[derive(Clone)] diff --git a/crates/gpui/src/window.rs b/crates/gpui/src/window.rs index 3505da3e7d85ed3dca5e9050787d11902941f364..215a9423482925ee093a93af896e6cd00872aba6 100644 --- a/crates/gpui/src/window.rs +++ b/crates/gpui/src/window.rs @@ -909,6 +909,7 @@ struct PendingInput { keystrokes: SmallVec<[Keystroke; 1]>, focus: Option, timer: Option>, + needs_timeout: bool, } pub(crate) struct ElementStateBox { @@ -3896,32 +3897,52 @@ impl Window { } if !match_result.pending.is_empty() { + currently_pending.timer.take(); currently_pending.keystrokes = match_result.pending; currently_pending.focus = self.focus; - currently_pending.timer = Some(self.spawn(cx, async move |cx| { - cx.background_executor.timer(Duration::from_secs(1)).await; - cx.update(move |window, cx| { - let Some(currently_pending) = window - .pending_input - .take() - .filter(|pending| pending.focus == window.focus) - else { - return; - }; - let node_id = window.focus_node_id_in_rendered_frame(window.focus); - let dispatch_path = window.rendered_frame.dispatch_tree.dispatch_path(node_id); - - let to_replay = window - .rendered_frame - .dispatch_tree - .flush_dispatch(currently_pending.keystrokes, &dispatch_path); + let text_input_requires_timeout = event + .downcast_ref::() + .filter(|key_down| key_down.keystroke.key_char.is_some()) + .and_then(|_| self.platform_window.take_input_handler()) + .map_or(false, |mut input_handler| { + let accepts = input_handler.accepts_text_input(self, cx); + self.platform_window.set_input_handler(input_handler); + accepts + }); - window.pending_input_changed(cx); - window.replay_pending_input(to_replay, cx) - }) - .log_err(); - })); + currently_pending.needs_timeout |= + match_result.pending_has_binding || text_input_requires_timeout; + + if currently_pending.needs_timeout { + currently_pending.timer = Some(self.spawn(cx, async move |cx| { + cx.background_executor.timer(Duration::from_secs(1)).await; + cx.update(move |window, cx| { + let Some(currently_pending) = window + .pending_input + .take() + .filter(|pending| pending.focus == window.focus) + else { + return; + }; + + let node_id = window.focus_node_id_in_rendered_frame(window.focus); + let dispatch_path = + window.rendered_frame.dispatch_tree.dispatch_path(node_id); + + let to_replay = 
window + .rendered_frame + .dispatch_tree + .flush_dispatch(currently_pending.keystrokes, &dispatch_path); + + window.pending_input_changed(cx); + window.replay_pending_input(to_replay, cx) + }) + .log_err(); + })); + } else { + currently_pending.timer = None; + } self.pending_input = Some(currently_pending); self.pending_input_changed(cx); cx.propagate_event = false; From 550442e10000c63de1f083c3f7f1aab0ab41ed8e Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Thu, 20 Nov 2025 22:17:50 -0700 Subject: [PATCH 0275/1030] Disable fsevents tests (#43218) They're flakier than phyllo dough, and not nearly as delicious Release Notes: - N/A --- crates/fsevent/src/fsevent.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/fsevent/src/fsevent.rs b/crates/fsevent/src/fsevent.rs index 0004b7521019f4d563cd568f2875fe1edf1ac207..8af57c19ee242d62e3fe10fa4d4f3ea5cc945ebd 100644 --- a/crates/fsevent/src/fsevent.rs +++ b/crates/fsevent/src/fsevent.rs @@ -372,7 +372,9 @@ unsafe extern "C" { pub fn FSEventsGetCurrentEventId() -> u64; } -#[cfg(test)] +// These tests are disabled by default because they seem to be unresolvably flaky. +// Feel free to bring them back to help test this code +#[cfg(false)] mod tests { use super::*; use std::{fs, sync::mpsc, thread, time::Duration}; From bb514c158e1604916b32fc8c9bbe602a78727742 Mon Sep 17 00:00:00 2001 From: cacaosteve <1307385+cacaosteve@users.noreply.github.com> Date: Thu, 20 Nov 2025 21:25:37 -0800 Subject: [PATCH 0276/1030] macOS: Enumerate GPUs first; prefer low-power non-removable; fall back to system default (#38164) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Problem: Some macOS environments report no devices via MTLCopyAllDevices, causing startup failure with “unable to access a compatible graphics device,” especially on Apple Silicon. Change: Prefer MTLCreateSystemDefaultDevice (metal::Device::system_default()) first. If None, enumerate devices and select a non‑removable, low‑power device by preference. Why this works: On Apple Silicon the system default is the unified GPU; on Intel, the fallback keeps a stable policy and avoids accidentally picking removable/high‑power devices. Impact: Fixes startup on affected ASi systems; improves selection consistency on Intel multi‑GPU. Behavior unchanged where system_default() succeeds. Risk: Low. Aligns with Apple’s recommended selection path. Still fails early with a clearer message if no Metal devices exist. Closes #37689. Release Notes: - Fixed: Startup failure on some Apple Silicon machines when Metal device enumeration returned no devices by falling back to the system default device. --------- Co-authored-by: 张小白 <364772080@qq.com> Co-authored-by: Kate --- .../gpui/src/platform/mac/metal_renderer.rs | 20 ++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/crates/gpui/src/platform/mac/metal_renderer.rs b/crates/gpui/src/platform/mac/metal_renderer.rs index 9e5d6ec5ff02c74b4f0acfada8eee3d002bfd06b..550041a0ccb4cd39bc7a86317d9540e806af2a28 100644 --- a/crates/gpui/src/platform/mac/metal_renderer.rs +++ b/crates/gpui/src/platform/mac/metal_renderer.rs @@ -132,11 +132,21 @@ impl MetalRenderer { // Prefer low‐power integrated GPUs on Intel Mac. On Apple // Silicon, there is only ever one GPU, so this is equivalent to // `metal::Device::system_default()`. 
- let mut devices = metal::Device::all(); - devices.sort_by_key(|device| (device.is_removable(), device.is_low_power())); - let Some(device) = devices.pop() else { - log::error!("unable to access a compatible graphics device"); - std::process::exit(1); + let device = if let Some(d) = metal::Device::all() + .into_iter() + .min_by_key(|d| (d.is_removable(), !d.is_low_power())) + { + d + } else { + // For some reason `all()` can return an empty list, see https://github.com/zed-industries/zed/issues/37689 + // In that case, we fall back to the system default device. + log::error!( + "Unable to enumerate Metal devices; attempting to use system default device" + ); + metal::Device::system_default().unwrap_or_else(|| { + log::error!("unable to access a compatible graphics device"); + std::process::exit(1); + }) }; let layer = metal::MetalLayer::new(); From e2f6422b3e1b4c1901523d9000c136e9d624fa7e Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Fri, 21 Nov 2025 14:19:55 +0530 Subject: [PATCH 0277/1030] language: Move language server update to background when it takes too long (#43164) Closes https://github.com/zed-industries/zed/issues/42360 If updating a language server takes longer than 10 seconds, we now fall back to launching the currently installed version (if exists) and continue downloading the update in the background. Release Notes: - Improved language server updates for slow connection, now Zed launches existing server if the update is taking too long. --------- Co-authored-by: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> --- crates/language/src/language.rs | 151 ++++++++++-------- .../src/extension_lsp_adapter.rs | 79 ++++----- crates/languages/src/css.rs | 17 +- crates/languages/src/json.rs | 14 +- crates/languages/src/tailwind.rs | 17 +- crates/languages/src/yaml.rs | 17 +- crates/project/src/lsp_store.rs | 27 +++- 7 files changed, 163 insertions(+), 159 deletions(-) diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index 7ce3986736cc0a1e8b8d21124ebe8c29ddc9214c..03e563e145c3bd1cde63e62fa8a09a4fb0228f0f 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -28,6 +28,8 @@ use anyhow::{Context as _, Result}; use async_trait::async_trait; use collections::{HashMap, HashSet, IndexSet}; use futures::Future; +use futures::future::LocalBoxFuture; +use futures::lock::OwnedMutexGuard; use gpui::{App, AsyncApp, Entity, SharedString}; pub use highlight_map::HighlightMap; use http_client::HttpClient; @@ -51,7 +53,6 @@ use std::{ mem, ops::{DerefMut, Range}, path::{Path, PathBuf}, - pin::Pin, str, sync::{ Arc, LazyLock, @@ -152,7 +153,14 @@ pub struct Location { } type ServerBinaryCache = futures::lock::Mutex>; - +type DownloadableLanguageServerBinary = LocalBoxFuture<'static, Result>; +pub type LanguageServerBinaryLocations = LocalBoxFuture< + 'static, + ( + Result, + Option, + ), +>; /// Represents a Language Server, with certain cached sync properties. /// Uses [`LspAdapter`] under the hood, but calls all 'static' methods /// once at startup, and caches the results. 
@@ -162,7 +170,7 @@ pub struct CachedLspAdapter { pub disk_based_diagnostics_progress_token: Option, language_ids: HashMap, pub adapter: Arc, - cached_binary: ServerBinaryCache, + cached_binary: Arc, } impl Debug for CachedLspAdapter { @@ -209,18 +217,15 @@ impl CachedLspAdapter { toolchains: Option, binary_options: LanguageServerBinaryOptions, cx: &mut AsyncApp, - ) -> Result { - let mut cached_binary = self.cached_binary.lock().await; - self.adapter - .clone() - .get_language_server_command( - delegate, - toolchains, - binary_options, - &mut cached_binary, - cx, - ) - .await + ) -> LanguageServerBinaryLocations { + let cached_binary = self.cached_binary.clone().lock_owned().await; + self.adapter.clone().get_language_server_command( + delegate, + toolchains, + binary_options, + cached_binary, + cx.clone(), + ) } pub fn code_action_kinds(&self) -> Option> { @@ -513,14 +518,14 @@ pub trait DynLspInstaller { pre_release: bool, cx: &mut AsyncApp, ) -> Result; - fn get_language_server_command<'a>( + fn get_language_server_command( self: Arc, delegate: Arc, toolchains: Option, binary_options: LanguageServerBinaryOptions, - cached_binary: &'a mut Option<(bool, LanguageServerBinary)>, - cx: &'a mut AsyncApp, - ) -> Pin>>>; + cached_binary: OwnedMutexGuard>, + cx: AsyncApp, + ) -> LanguageServerBinaryLocations; } #[async_trait(?Send)] @@ -562,15 +567,16 @@ where binary } } - fn get_language_server_command<'a>( + fn get_language_server_command( self: Arc, delegate: Arc, toolchain: Option, binary_options: LanguageServerBinaryOptions, - cached_binary: &'a mut Option<(bool, LanguageServerBinary)>, - cx: &'a mut AsyncApp, - ) -> Pin>>> { + mut cached_binary: OwnedMutexGuard>, + mut cx: AsyncApp, + ) -> LanguageServerBinaryLocations { async move { + let cached_binary_deref = cached_binary.deref_mut(); // First we check whether the adapter can give us a user-installed binary. // If so, we do *not* want to cache that, because each worktree might give us a different // binary: @@ -584,7 +590,7 @@ where // for each worktree we might have open. 
if binary_options.allow_path_lookup && let Some(binary) = self - .check_if_user_installed(delegate.as_ref(), toolchain, cx) + .check_if_user_installed(delegate.as_ref(), toolchain, &mut cx) .await { log::info!( @@ -593,62 +599,77 @@ where binary.path, binary.arguments ); - return Ok(binary); + return (Ok(binary), None); } - anyhow::ensure!( - binary_options.allow_binary_download, - "downloading language servers disabled" - ); + if !binary_options.allow_binary_download { + return ( + Err(anyhow::anyhow!("downloading language servers disabled")), + None, + ); + } - if let Some((pre_release, cached_binary)) = cached_binary + if let Some((pre_release, cached_binary)) = cached_binary_deref && *pre_release == binary_options.pre_release { - return Ok(cached_binary.clone()); + return (Ok(cached_binary.clone()), None); } let Some(container_dir) = delegate.language_server_download_dir(&self.name()).await else { - anyhow::bail!("no language server download dir defined") + return ( + Err(anyhow::anyhow!("no language server download dir defined")), + None, + ); }; - let mut binary = self - .try_fetch_server_binary( - &delegate, - container_dir.to_path_buf(), - binary_options.pre_release, - cx, - ) - .await; + let last_downloaded_binary = self + .cached_server_binary(container_dir.to_path_buf(), delegate.as_ref()) + .await + .context( + "did not find existing language server binary, falling back to downloading", + ); + let download_binary = async move { + let mut binary = self + .try_fetch_server_binary( + &delegate, + container_dir.to_path_buf(), + binary_options.pre_release, + &mut cx, + ) + .await; + + if let Err(error) = binary.as_ref() { + if let Some(prev_downloaded_binary) = self + .cached_server_binary(container_dir.to_path_buf(), delegate.as_ref()) + .await + { + log::info!( + "failed to fetch newest version of language server {:?}. \ + error: {:?}, falling back to using {:?}", + self.name(), + error, + prev_downloaded_binary.path + ); + binary = Ok(prev_downloaded_binary); + } else { + delegate.update_status( + self.name(), + BinaryStatus::Failed { + error: format!("{error:?}"), + }, + ); + } + } - if let Err(error) = binary.as_ref() { - if let Some(prev_downloaded_binary) = self - .cached_server_binary(container_dir.to_path_buf(), delegate.as_ref()) - .await - { - log::info!( - "failed to fetch newest version of language server {:?}. 
\ - error: {:?}, falling back to using {:?}", - self.name(), - error, - prev_downloaded_binary.path - ); - binary = Ok(prev_downloaded_binary); - } else { - delegate.update_status( - self.name(), - BinaryStatus::Failed { - error: format!("{error:?}"), - }, - ); + if let Ok(binary) = &binary { + *cached_binary = Some((binary_options.pre_release, binary.clone())); } - } - if let Ok(binary) = &binary { - *cached_binary = Some((binary_options.pre_release, binary.clone())); + binary } - - binary + .boxed_local(); + (last_downloaded_binary, Some(download_binary)) } .boxed_local() } diff --git a/crates/language_extension/src/extension_lsp_adapter.rs b/crates/language_extension/src/extension_lsp_adapter.rs index 01b726748649e29b4fe69ce26df5564819894985..566e6f3b3647bd7fbe4317590ddbe2fbe79890c2 100644 --- a/crates/language_extension/src/extension_lsp_adapter.rs +++ b/crates/language_extension/src/extension_lsp_adapter.rs @@ -1,17 +1,16 @@ use std::ops::Range; use std::path::PathBuf; -use std::pin::Pin; use std::sync::Arc; use anyhow::{Context as _, Result}; use async_trait::async_trait; use collections::{HashMap, HashSet}; use extension::{Extension, ExtensionLanguageServerProxy, WorktreeDelegate}; -use futures::{Future, FutureExt, future::join_all}; +use futures::{FutureExt, future::join_all, lock::OwnedMutexGuard}; use gpui::{App, AppContext, AsyncApp, Task}; use language::{ - BinaryStatus, CodeLabel, DynLspInstaller, HighlightId, Language, LanguageName, LspAdapter, - LspAdapterDelegate, Toolchain, + BinaryStatus, CodeLabel, DynLspInstaller, HighlightId, Language, LanguageName, + LanguageServerBinaryLocations, LspAdapter, LspAdapterDelegate, Toolchain, }; use lsp::{ CodeActionKind, LanguageServerBinary, LanguageServerBinaryOptions, LanguageServerName, @@ -155,47 +154,51 @@ impl ExtensionLspAdapter { #[async_trait(?Send)] impl DynLspInstaller for ExtensionLspAdapter { - fn get_language_server_command<'a>( + fn get_language_server_command( self: Arc, delegate: Arc, _: Option, _: LanguageServerBinaryOptions, - _: &'a mut Option<(bool, LanguageServerBinary)>, - _: &'a mut AsyncApp, - ) -> Pin>>> { + _: OwnedMutexGuard>, + _: AsyncApp, + ) -> LanguageServerBinaryLocations { async move { - let delegate = Arc::new(WorktreeDelegateAdapter(delegate.clone())) as _; - let command = self - .extension - .language_server_command( - self.language_server_id.clone(), - self.language_name.clone(), - delegate, - ) - .await?; - - let path = self.extension.path_from_extension(command.command.as_ref()); - - // TODO: This should now be done via the `zed::make_file_executable` function in - // Zed extension API, but we're leaving these existing usages in place temporarily - // to avoid any compatibility issues between Zed and the extension versions. 
- // - // We can remove once the following extension versions no longer see any use: - // - toml@0.0.2 - // - zig@0.0.1 - if ["toml", "zig"].contains(&self.extension.manifest().id.as_ref()) - && path.starts_with(&self.extension.work_dir()) - { - make_file_executable(&path) - .await - .context("failed to set file permissions")?; - } + let ret = maybe!(async move { + let delegate = Arc::new(WorktreeDelegateAdapter(delegate.clone())) as _; + let command = self + .extension + .language_server_command( + self.language_server_id.clone(), + self.language_name.clone(), + delegate, + ) + .await?; + + let path = self.extension.path_from_extension(command.command.as_ref()); + + // TODO: This should now be done via the `zed::make_file_executable` function in + // Zed extension API, but we're leaving these existing usages in place temporarily + // to avoid any compatibility issues between Zed and the extension versions. + // + // We can remove once the following extension versions no longer see any use: + // - toml@0.0.2 + // - zig@0.0.1 + if ["toml", "zig"].contains(&self.extension.manifest().id.as_ref()) + && path.starts_with(&self.extension.work_dir()) + { + make_file_executable(&path) + .await + .context("failed to set file permissions")?; + } - Ok(LanguageServerBinary { - path, - arguments: command.args.into_iter().map(|arg| arg.into()).collect(), - env: Some(command.env.into_iter().collect()), + Ok(LanguageServerBinary { + path, + arguments: command.args.into_iter().map(|arg| arg.into()).collect(), + env: Some(command.env.into_iter().collect()), + }) }) + .await; + (ret, None) } .boxed_local() } diff --git a/crates/languages/src/css.rs b/crates/languages/src/css.rs index 035a2c693dbbdceed38adc8ccc0510274205670f..8531cd447827e77593edb818e24371c031100b64 100644 --- a/crates/languages/src/css.rs +++ b/crates/languages/src/css.rs @@ -1,13 +1,11 @@ -use anyhow::{Context as _, Result}; +use anyhow::Result; use async_trait::async_trait; -use futures::StreamExt; use gpui::AsyncApp; use language::{LspAdapter, LspAdapterDelegate, LspInstaller, Toolchain}; use lsp::{LanguageServerBinary, LanguageServerName}; use node_runtime::{NodeRuntime, VersionStrategy}; use project::lsp_store::language_server_settings; use serde_json::json; -use smol::fs; use std::{ ffi::OsString, path::{Path, PathBuf}, @@ -176,19 +174,10 @@ async fn get_cached_server_binary( node: &NodeRuntime, ) -> Option { maybe!(async { - let mut last_version_dir = None; - let mut entries = fs::read_dir(&container_dir).await?; - while let Some(entry) = entries.next().await { - let entry = entry?; - if entry.file_type().await?.is_dir() { - last_version_dir = Some(entry.path()); - } - } - let last_version_dir = last_version_dir.context("no cached binary")?; - let server_path = last_version_dir.join(SERVER_PATH); + let server_path = container_dir.join(SERVER_PATH); anyhow::ensure!( server_path.exists(), - "missing executable in directory {last_version_dir:?}" + "missing executable in directory {server_path:?}" ); Ok(LanguageServerBinary { path: node.binary_path().await?, diff --git a/crates/languages/src/json.rs b/crates/languages/src/json.rs index 45fa2dd75cce051439980b40996dda9865246a99..d75d994fee76bfa696b9501f06c5cca23eb2484c 100644 --- a/crates/languages/src/json.rs +++ b/crates/languages/src/json.rs @@ -301,20 +301,10 @@ async fn get_cached_server_binary( node: &NodeRuntime, ) -> Option { maybe!(async { - let mut last_version_dir = None; - let mut entries = fs::read_dir(&container_dir).await?; - while let Some(entry) = entries.next().await { - 
let entry = entry?; - if entry.file_type().await?.is_dir() { - last_version_dir = Some(entry.path()); - } - } - - let last_version_dir = last_version_dir.context("no cached binary")?; - let server_path = last_version_dir.join(SERVER_PATH); + let server_path = container_dir.join(SERVER_PATH); anyhow::ensure!( server_path.exists(), - "missing executable in directory {last_version_dir:?}" + "missing executable in directory {server_path:?}" ); Ok(LanguageServerBinary { path: node.binary_path().await?, diff --git a/crates/languages/src/tailwind.rs b/crates/languages/src/tailwind.rs index e1b50a5ccaabb7770d13abc79fbac1da5fa4cbbe..c7baa9734b56bd139ccc69b197a9a772fd7aeec1 100644 --- a/crates/languages/src/tailwind.rs +++ b/crates/languages/src/tailwind.rs @@ -1,14 +1,12 @@ -use anyhow::{Context as _, Result}; +use anyhow::Result; use async_trait::async_trait; use collections::HashMap; -use futures::StreamExt; use gpui::AsyncApp; use language::{LanguageName, LspAdapter, LspAdapterDelegate, LspInstaller, Toolchain}; use lsp::{LanguageServerBinary, LanguageServerName}; use node_runtime::{NodeRuntime, VersionStrategy}; use project::lsp_store::language_server_settings; use serde_json::{Value, json}; -use smol::fs; use std::{ ffi::OsString, path::{Path, PathBuf}, @@ -198,19 +196,10 @@ async fn get_cached_server_binary( node: &NodeRuntime, ) -> Option { maybe!(async { - let mut last_version_dir = None; - let mut entries = fs::read_dir(&container_dir).await?; - while let Some(entry) = entries.next().await { - let entry = entry?; - if entry.file_type().await?.is_dir() { - last_version_dir = Some(entry.path()); - } - } - let last_version_dir = last_version_dir.context("no cached binary")?; - let server_path = last_version_dir.join(SERVER_PATH); + let server_path = container_dir.join(SERVER_PATH); anyhow::ensure!( server_path.exists(), - "missing executable in directory {last_version_dir:?}" + "missing executable in directory {server_path:?}" ); Ok(LanguageServerBinary { path: node.binary_path().await?, diff --git a/crates/languages/src/yaml.rs b/crates/languages/src/yaml.rs index 45faa142369e6c08817deebfbf8774f228bf70d5..c79f029491b09e32ffbbfa5cfaf00c74ed186978 100644 --- a/crates/languages/src/yaml.rs +++ b/crates/languages/src/yaml.rs @@ -1,6 +1,5 @@ -use anyhow::{Context as _, Result}; +use anyhow::Result; use async_trait::async_trait; -use futures::StreamExt; use gpui::AsyncApp; use language::{ LspAdapter, LspAdapterDelegate, LspInstaller, Toolchain, language_settings::AllLanguageSettings, @@ -10,7 +9,6 @@ use node_runtime::{NodeRuntime, VersionStrategy}; use project::lsp_store::language_server_settings; use serde_json::Value; use settings::{Settings, SettingsLocation}; -use smol::fs; use std::{ ffi::OsString, path::{Path, PathBuf}, @@ -171,19 +169,10 @@ async fn get_cached_server_binary( node: &NodeRuntime, ) -> Option { maybe!(async { - let mut last_version_dir = None; - let mut entries = fs::read_dir(&container_dir).await?; - while let Some(entry) = entries.next().await { - let entry = entry?; - if entry.file_type().await?.is_dir() { - last_version_dir = Some(entry.path()); - } - } - let last_version_dir = last_version_dir.context("no cached binary")?; - let server_path = last_version_dir.join(SERVER_PATH); + let server_path = container_dir.join(SERVER_PATH); anyhow::ensure!( server_path.exists(), - "missing executable in directory {last_version_dir:?}" + "missing executable in directory {server_path:?}" ); Ok(LanguageServerBinary { path: node.binary_path().await?, diff --git 
a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 17f558d72d4854bb99676472100c442ad164f0a5..e233ff1c5c121e301a85d829093cbdd37020fe07 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -139,6 +139,7 @@ pub use worktree::{ const SERVER_LAUNCHING_BEFORE_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5); pub const SERVER_PROGRESS_THROTTLE_TIMEOUT: Duration = Duration::from_millis(100); const WORKSPACE_DIAGNOSTICS_TOKEN_START: &str = "id:"; +const SERVER_DOWNLOAD_TIMEOUT: Duration = Duration::from_secs(10); #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] pub enum ProgressToken { @@ -599,14 +600,36 @@ impl LocalLspStore { }; cx.spawn(async move |cx| { - let binary_result = adapter + let (existing_binary, maybe_download_binary) = adapter .clone() .get_language_server_command(delegate.clone(), toolchain, lsp_binary_options, cx) + .await .await; delegate.update_status(adapter.name.clone(), BinaryStatus::None); - let mut binary = binary_result?; + let mut binary = match (existing_binary, maybe_download_binary) { + (binary, None) => binary?, + (Err(_), Some(downloader)) => downloader.await?, + (Ok(existing_binary), Some(downloader)) => { + let mut download_timeout = cx + .background_executor() + .timer(SERVER_DOWNLOAD_TIMEOUT) + .fuse(); + let mut downloader = downloader.fuse(); + futures::select! { + _ = download_timeout => { + // Return existing binary and kick the existing work to the background. + cx.spawn(async move |_| downloader.await).detach(); + Ok(existing_binary) + }, + downloaded_or_existing_binary = downloader => { + // If download fails, this results in the existing binary. + downloaded_or_existing_binary + } + }? + } + }; let mut shell_env = delegate.shell_env().await; shell_env.extend(binary.env.unwrap_or_default()); From 0a6cb6117bfe8ab42f0661652abd43aef7eabaac Mon Sep 17 00:00:00 2001 From: Andrew Farkas <6060305+HactarCE@users.noreply.github.com> Date: Fri, 21 Nov 2025 04:20:15 -0500 Subject: [PATCH 0278/1030] Fix `connect.host` setting being ignored by debugpy (#43190) Closes #42727 Unfortunately we can only support IPv4 addresses right now because `TcpArguments` only supports an IPv4 address. I'm not sure how difficult it would be to lift this limitation. Release Notes: - Fixed `connect.host` setting being ignored by debugpy Co-authored-by: Cole Miller --- crates/dap_adapters/src/python.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/dap_adapters/src/python.rs b/crates/dap_adapters/src/python.rs index 4d81e5ba851305ae3adc2ee0a6ab6a29f43edd62..2f84193ac9343cac9ff1cf52eb648bad1cd77896 100644 --- a/crates/dap_adapters/src/python.rs +++ b/crates/dap_adapters/src/python.rs @@ -368,6 +368,9 @@ impl PythonDebugAdapter { bail!("Cannot have two different ports in debug configuration") } + if let Some(hostname) = config_host { + tcp_connection.host = Some(hostname.parse().context("hostname must be IPv4")?); + } tcp_connection.port = config_port; DebugpyLaunchMode::AttachWithConnect { host: config_host } } else { From 92b6e8eb6e74a126c30a73617563d6a0d73040c9 Mon Sep 17 00:00:00 2001 From: jtaub <65861679+jtaub@users.noreply.github.com> Date: Fri, 21 Nov 2025 05:04:43 -0500 Subject: [PATCH 0279/1030] Jetbrains keymap updates (#42848) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/zed-industries/zed/issues/14639 ## Release Notes: Various improvements to the Jetbrains keymap. 
Added various missing keyboard shortcuts that I use on a daily basis in Jetbrains, and changed a few which were present in the keymap but mapped to the wrong behavior. ### Added: - Added various missing keybindings for Jetbrains keymap - `ctrl-n` → `project_symbols::Toggle` - `ctrl-alt-n` → `file_finder::Toggle` (open project files) - `ctrl-~` → `git::Branch` - `ctrl-\` → `assistant::InlineAssist` - `ctrl-space` → `editor::ShowCompletions` - `ctrl-q` → `editor::Hover` - `ctrl-p` → `editor::ShowSignatureHelp` - `ctrl-f5` → `task::Rerun` - `shift-f9` → `debugger::Start` - `shift-f10` → `task::Spawn` - Added macOS equivalents for all of the above, however I only have a Linux machine so I have not tested the mac bindings. The binding are generally the same except `ctrl → cmd` with few exceptions. - `cmd-j` → `editor::Hover` ### Fixed: - Several incorrectly mapped keybindings for the Jetbrains keymap - `ctrl-alt-s` → `editor::OpenSettings` (was `editor::OpenSettingsFile`) - `ctrl-alt-b` → `editor::GoToImplementation` (was `editor::GoToDefinitionSplit`) - `alt-left` → `pane::ActivatePreviousItem` - `alt-right` → `pane::ActivateNextItem` - `ctrl-k` now opens the Git panel. I believe this was commented out because of a bug where focus is not given to the commit message text box, but imo the current behavior of not doing anything at all feels more confusing/frustrating to a Jetbrains user (projecting a little here, happy to revert). --- assets/keymaps/linux/jetbrains.json | 24 +++++++++++++++++------- assets/keymaps/macos/jetbrains.json | 20 ++++++++++++++++---- 2 files changed, 33 insertions(+), 11 deletions(-) diff --git a/assets/keymaps/linux/jetbrains.json b/assets/keymaps/linux/jetbrains.json index cf28c43dbd7f8335f30ef7702e584bea5c0ba5e0..a0314c5bc1dd59c17f3f132db804891ef1df0d4e 100644 --- a/assets/keymaps/linux/jetbrains.json +++ b/assets/keymaps/linux/jetbrains.json @@ -1,16 +1,18 @@ [ { "bindings": { - "ctrl-alt-s": "zed::OpenSettingsFile", + "ctrl-alt-s": "zed::OpenSettings", "ctrl-{": "pane::ActivatePreviousItem", "ctrl-}": "pane::ActivateNextItem", "shift-escape": null, // Unmap workspace::zoom + "ctrl-~": "git::Branch", "ctrl-f2": "debugger::Stop", "f6": "debugger::Pause", "f7": "debugger::StepInto", "f8": "debugger::StepOver", "shift-f8": "debugger::StepOut", "f9": "debugger::Continue", + "shift-f9": "debugger::Start", "alt-shift-f9": "debugger::Start" } }, @@ -46,7 +48,7 @@ "alt-f7": "editor::FindAllReferences", "ctrl-alt-f7": "editor::FindAllReferences", "ctrl-b": "editor::GoToDefinition", // Conflicts with workspace::ToggleLeftDock - "ctrl-alt-b": "editor::GoToDefinitionSplit", // Conflicts with workspace::ToggleRightDock + "ctrl-alt-b": "editor::GoToImplementation", // Conflicts with workspace::ToggleRightDock "ctrl-shift-b": "editor::GoToTypeDefinition", "ctrl-alt-shift-b": "editor::GoToTypeDefinitionSplit", "f2": "editor::GoToDiagnostic", @@ -70,7 +72,11 @@ "ctrl-r": ["buffer_search::Deploy", { "replace_enabled": true }], "ctrl-shift-n": "file_finder::Toggle", "ctrl-g": "go_to_line::Toggle", - "alt-enter": "editor::ToggleCodeActions" + "alt-enter": "editor::ToggleCodeActions", + "ctrl-space": "editor::ShowCompletions", + "ctrl-q": "editor::Hover", + "ctrl-p": "editor::ShowSignatureHelp", + "ctrl-\\": "assistant::InlineAssist" } }, { @@ -94,9 +100,13 @@ "ctrl-shift-f12": "workspace::ToggleAllDocks", "ctrl-shift-r": ["pane::DeploySearch", { "replace_enabled": true }], "alt-shift-f10": "task::Spawn", + "shift-f10": "task::Spawn", + "ctrl-f5": "task::Rerun", "ctrl-e": 
"file_finder::Toggle", - // "ctrl-k": "git_panel::ToggleFocus", // bug: This should also focus commit editor + "ctrl-k": "git_panel::ToggleFocus", // bug: This should also focus commit editor "ctrl-shift-n": "file_finder::Toggle", + "ctrl-n": "project_symbols::Toggle", + "ctrl-alt-n": "file_finder::Toggle", "ctrl-shift-a": "command_palette::Toggle", "shift shift": "command_palette::Toggle", "ctrl-alt-shift-n": "project_symbols::Toggle", @@ -133,7 +143,9 @@ "context": "Pane", "bindings": { "ctrl-alt-left": "pane::GoBack", - "ctrl-alt-right": "pane::GoForward" + "ctrl-alt-right": "pane::GoForward", + "alt-left": "pane::ActivatePreviousItem", + "alt-right": "pane::ActivateNextItem" } }, { @@ -152,8 +164,6 @@ "bindings": { "ctrl-shift-t": "workspace::NewTerminal", "alt-f12": "workspace::CloseActiveDock", - "alt-left": "pane::ActivatePreviousItem", - "alt-right": "pane::ActivateNextItem", "ctrl-up": "terminal::ScrollLineUp", "ctrl-down": "terminal::ScrollLineDown", "shift-pageup": "terminal::ScrollPageUp", diff --git a/assets/keymaps/macos/jetbrains.json b/assets/keymaps/macos/jetbrains.json index e5e5aeb0b8516285136438d40b57fb17fc9a9777..364f489167f5abb9af21d9f005586bde08439850 100644 --- a/assets/keymaps/macos/jetbrains.json +++ b/assets/keymaps/macos/jetbrains.json @@ -5,12 +5,14 @@ "cmd-}": "pane::ActivateNextItem", "cmd-0": "git_panel::ToggleFocus", // overrides `cmd-0` zoom reset "shift-escape": null, // Unmap workspace::zoom + "cmd-~": "git::Branch", "ctrl-f2": "debugger::Stop", "f6": "debugger::Pause", "f7": "debugger::StepInto", "f8": "debugger::StepOver", "shift-f8": "debugger::StepOut", "f9": "debugger::Continue", + "shift-f9": "debugger::Start", "alt-shift-f9": "debugger::Start" } }, @@ -45,7 +47,7 @@ "alt-f7": "editor::FindAllReferences", "cmd-alt-f7": "editor::FindAllReferences", "cmd-b": "editor::GoToDefinition", // Conflicts with workspace::ToggleLeftDock - "cmd-alt-b": "editor::GoToDefinitionSplit", + "cmd-alt-b": "editor::GoToImplementation", "cmd-shift-b": "editor::GoToTypeDefinition", "cmd-alt-shift-b": "editor::GoToTypeDefinitionSplit", "f2": "editor::GoToDiagnostic", @@ -68,7 +70,11 @@ "cmd-r": ["buffer_search::Deploy", { "replace_enabled": true }], "cmd-shift-o": "file_finder::Toggle", "cmd-l": "go_to_line::Toggle", - "alt-enter": "editor::ToggleCodeActions" + "alt-enter": "editor::ToggleCodeActions", + "ctrl-space": "editor::ShowCompletions", + "cmd-j": "editor::Hover", + "cmd-p": "editor::ShowSignatureHelp", + "cmd-\\": "assistant::InlineAssist" } }, { @@ -96,9 +102,13 @@ "cmd-shift-f12": "workspace::ToggleAllDocks", "cmd-shift-r": ["pane::DeploySearch", { "replace_enabled": true }], "ctrl-alt-r": "task::Spawn", + "shift-f10": "task::Spawn", + "cmd-f5": "task::Rerun", "cmd-e": "file_finder::Toggle", - // "cmd-k": "git_panel::ToggleFocus", // bug: This should also focus commit editor + "cmd-k": "git_panel::ToggleFocus", // bug: This should also focus commit editor "cmd-shift-o": "file_finder::Toggle", + "cmd-shift-n": "file_finder::Toggle", + "cmd-n": "project_symbols::Toggle", "cmd-shift-a": "command_palette::Toggle", "shift shift": "command_palette::Toggle", "cmd-alt-o": "project_symbols::Toggle", // JetBrains: Go to Symbol @@ -135,7 +145,9 @@ "context": "Pane", "bindings": { "cmd-alt-left": "pane::GoBack", - "cmd-alt-right": "pane::GoForward" + "cmd-alt-right": "pane::GoForward", + "alt-left": "pane::ActivatePreviousItem", + "alt-right": "pane::ActivateNextItem" } }, { From a30887f03b89dac712827f9982c399c3f0b8019d Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Fri, 
21 Nov 2025 11:08:21 +0100 Subject: [PATCH 0280/1030] Fix some panics (#43233) Fixes ZED-2NP Fixes ZED-3DP Fixes ZED-3EV Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/editor/src/editor.rs | 4 +--- crates/language_tools/src/lsp_log_view.rs | 15 ++++++++------- crates/util/src/paths.rs | 15 +++++++++++---- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index c3da066cfd73cab5b2de610bbf1bc653f7e6d874..30c03c1a8481003aad991c3acf4c6be38bf4b8d5 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -17112,9 +17112,6 @@ impl Editor { let multi_buffer = editor.read_with(cx, |editor, _| editor.buffer().clone())?; - let multi_buffer_snapshot = - multi_buffer.read_with(cx, |multi_buffer, cx| multi_buffer.snapshot(cx))?; - let (locations, current_location_index) = multi_buffer.update(cx, |multi_buffer, cx| { let mut locations = locations @@ -17134,6 +17131,7 @@ impl Editor { }) .collect::>(); + let multi_buffer_snapshot = multi_buffer.snapshot(cx); // There is an O(n) implementation, but given this list will be // small (usually <100 items), the extra O(log(n)) factor isn't // worth the (surprisingly large amount of) extra complexity. diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index 5f96f8e03048a14f82626ab774a21aab02dc89bf..3f99a3e83413691c3893b184406f6e2569062623 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -5,7 +5,6 @@ use gpui::{ App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, }; -use itertools::Itertools; use language::{LanguageServerId, language_settings::SoftWrap}; use lsp::{ LanguageServer, LanguageServerBinary, LanguageServerName, LanguageServerSelector, MessageType, @@ -241,13 +240,15 @@ impl LspLogView { ], cx, ); - if text.len() > 1024 - && let Some((fold_offset, _)) = - text.char_indices().dropping(1024).next() - && fold_offset < text.len() - { + if text.len() > 1024 { + let b = editor.buffer().read(cx).as_singleton().unwrap().read(cx); + let fold_offset = + b.as_rope().ceil_char_boundary(last_offset.0 + 1024); editor.fold_ranges( - vec![last_offset + fold_offset..last_offset + text.len()], + vec![ + MultiBufferOffset(fold_offset) + ..MultiBufferOffset(b.as_rope().len()), + ], false, window, cx, diff --git a/crates/util/src/paths.rs b/crates/util/src/paths.rs index 5813c444af555dc90c65ce6f1584067b446cc79b..74929c6c831bcdb035756483ddbf9b2bc9ad444c 100644 --- a/crates/util/src/paths.rs +++ b/crates/util/src/paths.rs @@ -625,7 +625,14 @@ impl PathWithPosition { pub fn parse_str(s: &str) -> Self { let trimmed = s.trim(); let path = Path::new(trimmed); - let maybe_file_name_with_row_col = path.file_name().unwrap_or_default().to_string_lossy(); + let Some(maybe_file_name_with_row_col) = path.file_name().unwrap_or_default().to_str() + else { + return Self { + path: Path::new(s).to_path_buf(), + row: None, + column: None, + }; + }; if maybe_file_name_with_row_col.is_empty() { return Self { path: Path::new(s).to_path_buf(), @@ -640,15 +647,15 @@ impl PathWithPosition { static SUFFIX_RE: LazyLock = LazyLock::new(|| Regex::new(ROW_COL_CAPTURE_REGEX).unwrap()); match SUFFIX_RE - .captures(&maybe_file_name_with_row_col) + .captures(maybe_file_name_with_row_col) .map(|caps| caps.extract()) { Some((_, [file_name, maybe_row, maybe_column])) => { let row = 
maybe_row.parse::().ok(); let column = maybe_column.parse::().ok(); - let suffix_length = maybe_file_name_with_row_col.len() - file_name.len(); - let path_without_suffix = &trimmed[..trimmed.len() - suffix_length]; + let (_, suffix) = trimmed.split_once(file_name).unwrap(); + let path_without_suffix = &trimmed[..trimmed.len() - suffix.len()]; Self { path: Path::new(path_without_suffix).to_path_buf(), From 1ce58a88ccdfb7e45106d66fa44c9db35b4f94b9 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Fri, 21 Nov 2025 11:33:38 +0100 Subject: [PATCH 0281/1030] zed: Allocate more rayon threads depending on available parallelism (#43235) While zed itself is not a heavy user of rayon, wasmtime is, especially for compilation. This change is similar to the rayon default but we halve the number of threads still so we don't spawn too many threads overall. Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/remote_server/src/unix.rs | 2 +- crates/zed/src/main.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/remote_server/src/unix.rs b/crates/remote_server/src/unix.rs index 29e5ef735f5a001c23e3215c1a3fc5d291830282..c631d47b8c2cea5d2ed74cd6ce8bd2956c3fbb1a 100644 --- a/crates/remote_server/src/unix.rs +++ b/crates/remote_server/src/unix.rs @@ -373,7 +373,7 @@ pub fn execute_run( let listeners = ServerListeners::new(stdin_socket, stdout_socket, stderr_socket)?; rayon::ThreadPoolBuilder::new() - .num_threads(4) + .num_threads(std::thread::available_parallelism().map_or(1, |n| n.get().div_ceil(2))) .stack_size(10 * 1024 * 1024) .thread_name(|ix| format!("RayonWorker{}", ix)) .build_global() diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 9dba1b427d35db3e236e166b5a90984deca1747b..89944f835b0c7905145b3c6c0df13a20c78f37b8 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -265,7 +265,7 @@ pub fn main() { } rayon::ThreadPoolBuilder::new() - .num_threads(4) + .num_threads(std::thread::available_parallelism().map_or(1, |n| n.get().div_ceil(2))) .stack_size(10 * 1024 * 1024) .thread_name(|ix| format!("RayonWorker{}", ix)) .build_global() From ea85f905f10f9b0efa68ac08d03c46fd2677b13c Mon Sep 17 00:00:00 2001 From: Kunall Banerjee <14703164+yeskunall@users.noreply.github.com> Date: Fri, 21 Nov 2025 06:56:47 -0500 Subject: [PATCH 0282/1030] docs: Fix small typo in docs for Snippets (#43238) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Happened to notice this typo while going through the docs. Release Notes: - N/A --- 💖 --- docs/src/snippets.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/snippets.md b/docs/src/snippets.md index 29ecd9bc850b919dbc63a87e2f1bf9477901a33d..e84210d0fadef1598776b1ec51a3f19cdb2ac0c0 100644 --- a/docs/src/snippets.md +++ b/docs/src/snippets.md @@ -35,7 +35,7 @@ To create JSX snippets you have to use `javascript.json` snippets file, instead ## Known Limitations -- Only the first prefix is used when an list of prefixes is passed in. +- Only the first prefix is used when a list of prefixes is passed in. - Currently only the `json` snippet file format is supported, even though the `simple-completion-language-server` supports both `json` and `toml` file formats. 
## See also From d6a5566619cddd2ca6669481ae617e70db553b74 Mon Sep 17 00:00:00 2001 From: Kunall Banerjee <14703164+yeskunall@users.noreply.github.com> Date: Fri, 21 Nov 2025 07:23:43 -0500 Subject: [PATCH 0283/1030] docs: Point to the right URL for Gemini CLI (#43239) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Point to the right URL for Gemini CLI. Release Notes: - N/A --- 💖 --- docs/src/ai/external-agents.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/ai/external-agents.md b/docs/src/ai/external-agents.md index 696a60709cc3b6120af0c63fc01a79bd58134402..0467913b072ef296a5b187fbeb8dc6a406bf1bed 100644 --- a/docs/src/ai/external-agents.md +++ b/docs/src/ai/external-agents.md @@ -33,7 +33,7 @@ If you'd like to bind this to a keyboard shortcut, you can do so by editing your #### Installation -The first time you create a Gemini CLI thread, Zed will install [@google/gemini-cli](https://github.com/zed-industries/claude-code-acp). This installation is only available to Zed and is kept up to date as you use the agent. +The first time you create a Gemini CLI thread, Zed will install [@google/gemini-cli](https://github.com/google-gemini/gemini-cli). This installation is only available to Zed and is kept up to date as you use the agent. By default, Zed will use this managed version of Gemini CLI even if you have it installed globally. However, you can configure it to use a version in your `PATH` by adding this to your settings: From 0ee7271e48c26a307c1c8e0a79fc48f9f0ca01c3 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Fri, 21 Nov 2025 11:22:51 -0300 Subject: [PATCH 0284/1030] Allow onboarding pages to be zoomed in/out (#43244) We were just missing adding keybindings for these. Release Notes: - onboarding: The onboarding pages can now be zoomed in/out with the same keybindings you'd use to zoom in/out a regular buffer. 
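For anyone wanting these bindings outside the defaults, a hypothetical user-keymap entry along the same lines could look like the snippet below; it reuses the `Onboarding` context, action names, and `persist` argument from the diff that follows, and is illustrative only, not part of the patch:

```json
[
  {
    "context": "Onboarding",
    "bindings": {
      "ctrl-=": ["zed::IncreaseUiFontSize", { "persist": false }],
      "ctrl--": ["zed::DecreaseUiFontSize", { "persist": false }],
      "ctrl-0": ["zed::ResetUiFontSize", { "persist": false }]
    }
  }
]
```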
--- assets/keymaps/default-linux.json | 14 ++++++++++++++ assets/keymaps/default-macos.json | 14 ++++++++++++++ assets/keymaps/default-windows.json | 14 ++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/assets/keymaps/default-linux.json b/assets/keymaps/default-linux.json index c69ce1d40a54fa68ab7a2473827653575e285a4d..4a6421c4e1f335a4d35fe9ac11d157b30a914004 100644 --- a/assets/keymaps/default-linux.json +++ b/assets/keymaps/default-linux.json @@ -1251,11 +1251,25 @@ "context": "Onboarding", "use_key_equivalents": true, "bindings": { + "ctrl-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl--": ["zed::DecreaseUiFontSize", { "persist": false }], + "ctrl-0": ["zed::ResetUiFontSize", { "persist": false }], "ctrl-enter": "onboarding::Finish", "alt-shift-l": "onboarding::SignIn", "alt-shift-a": "onboarding::OpenAccount" } }, + { + "context": "Welcome", + "use_key_equivalents": true, + "bindings": { + "ctrl-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl--": ["zed::DecreaseUiFontSize", { "persist": false }], + "ctrl-0": ["zed::ResetUiFontSize", { "persist": false }] + } + }, { "context": "InvalidBuffer", "use_key_equivalents": true, diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index aa47e2081203e753cf8d14cafa1b01cadaf97327..8790acf906effd3e0cd7026a909759978ae39dd5 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -1356,11 +1356,25 @@ "context": "Onboarding", "use_key_equivalents": true, "bindings": { + "cmd-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "cmd-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "cmd--": ["zed::DecreaseUiFontSize", { "persist": false }], + "cmd-0": ["zed::ResetUiFontSize", { "persist": false }], "cmd-enter": "onboarding::Finish", "alt-tab": "onboarding::SignIn", "alt-shift-a": "onboarding::OpenAccount" } }, + { + "context": "Welcome", + "use_key_equivalents": true, + "bindings": { + "cmd-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "cmd-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "cmd--": ["zed::DecreaseUiFontSize", { "persist": false }], + "cmd-0": ["zed::ResetUiFontSize", { "persist": false }] + } + }, { "context": "InvalidBuffer", "use_key_equivalents": true, diff --git a/assets/keymaps/default-windows.json b/assets/keymaps/default-windows.json index b37d750a4e9c96d73db5963bf9bc4c5338781f5f..1144eac80b2cd881e8d99cc5beb69b42263338c3 100644 --- a/assets/keymaps/default-windows.json +++ b/assets/keymaps/default-windows.json @@ -1285,11 +1285,25 @@ "context": "Onboarding", "use_key_equivalents": true, "bindings": { + "ctrl-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl--": ["zed::DecreaseUiFontSize", { "persist": false }], + "ctrl-0": ["zed::ResetUiFontSize", { "persist": false }], "ctrl-enter": "onboarding::Finish", "alt-shift-l": "onboarding::SignIn", "shift-alt-a": "onboarding::OpenAccount" } }, + { + "context": "Welcome", + "use_key_equivalents": true, + "bindings": { + "ctrl-=": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl-+": ["zed::IncreaseUiFontSize", { "persist": false }], + "ctrl--": ["zed::DecreaseUiFontSize", { "persist": false }], + "ctrl-0": ["zed::ResetUiFontSize", { "persist": false }] + } + }, { "context": "GitWorktreeSelector || (GitWorktreeSelector > Picker > Editor)", 
"use_key_equivalents": true, From 28e1c15e9013ad3b81adc66cd2ad62bb24dfddb6 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Fri, 21 Nov 2025 20:03:11 +0530 Subject: [PATCH 0285/1030] agent_ui: Fix sent agent prompt getting lost after authentication (#43245) Closes #42379 Release Notes: - Fixed issue where a sent agent message is not restored after successful authentication. --- crates/agent_ui/src/acp/thread_view.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index f3565356cf5b0501d529ab89e35b955689ef040a..780cff2e78fd4441fc451ddb3bc93b66a940c6c8 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -297,6 +297,7 @@ pub struct AcpThreadView { _cancel_task: Option>, _subscriptions: [Subscription; 5], show_codex_windows_warning: bool, + in_flight_prompt: Option>, } enum ThreadState { @@ -437,6 +438,7 @@ impl AcpThreadView { new_server_version_available: None, resume_thread_metadata: resume_thread, show_codex_windows_warning, + in_flight_prompt: None, } } @@ -1155,6 +1157,7 @@ impl AcpThreadView { } this.update_in(cx, |this, window, cx| { + this.in_flight_prompt = Some(contents.clone()); this.set_editor_is_expanded(false, cx); this.scroll_to_bottom(cx); this.message_editor.update(cx, |message_editor, cx| { @@ -1182,7 +1185,12 @@ impl AcpThreadView { })?; let res = send.await; let turn_time_ms = turn_start_time.elapsed().as_millis(); - let status = if res.is_ok() { "success" } else { "failure" }; + let status = if res.is_ok() { + this.update(cx, |this, _| this.in_flight_prompt.take()).ok(); + "success" + } else { + "failure" + }; telemetry::event!( "Agent Turn Completed", agent = agent_telemetry_id, @@ -5694,6 +5702,11 @@ impl AcpThreadView { provider_id: None, }; this.clear_thread_error(cx); + if let Some(message) = this.in_flight_prompt.take() { + this.message_editor.update(cx, |editor, cx| { + editor.set_message(message, window, cx); + }); + } let this = cx.weak_entity(); window.defer(cx, |window, cx| { Self::handle_auth_required(this, err, agent, connection, window, cx); From a8d7f06b477656c6bf07270ce196b7cb1cc3f194 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Fri, 21 Nov 2025 15:48:41 +0100 Subject: [PATCH 0286/1030] Revert "util: Check whether discovered powershell is actually executable" (#43247) Reverts zed-industries/zed#43044 Closes https://github.com/zed-industries/zed/issues/43224 This slows down startup on windows significantly Release Notes: - Fixed slow startup on Windows --- crates/askpass/src/askpass.rs | 1 - crates/gpui/src/platform/windows/platform.rs | 10 +-- crates/util/src/shell.rs | 80 ++++++++------------ 3 files changed, 36 insertions(+), 55 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index f7d81641f47f8be62adb9606ffd3e47e1d89ca73..0974409477d452958df13893e316845a919723c5 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -250,7 +250,6 @@ impl PasswordProxy { .await .with_context(|| format!("creating askpass script at {askpass_script_path:?}"))?; make_file_executable(&askpass_script_path).await?; - // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( "powershell.exe -ExecutionPolicy Bypass -File {}", diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 
110bc02633515b417edc6707347fbae77e2888e4..b7f13f1fab495b1040d1be8e7b86376c450b5f7e 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -390,12 +390,10 @@ impl Platform for WindowsPlatform { clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" )] - // todo(shell): There might be no powershell on the system - let restart_process = - util::command::new_std_command(util::shell::get_windows_system_shell()) - .arg("-command") - .arg(script) - .spawn(); + let restart_process = util::command::new_std_command("powershell.exe") + .arg("-command") + .arg(script) + .spawn(); match restart_process { Ok(_) => self.quit(), diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index 1eeb483defbe6f21d3018b3ce0cbdc8e4109a367..ba54f7b7784b45613b28067afe2748339e6b6c64 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -2,8 +2,6 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use std::{borrow::Cow, fmt, path::Path, sync::LazyLock}; -use crate::command::new_std_command; - /// Shell configuration to open the terminal with. #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq, JsonSchema, Hash)] #[serde(rename_all = "snake_case")] @@ -110,12 +108,16 @@ pub fn get_windows_system_shell() -> String { use std::path::PathBuf; fn find_pwsh_in_programfiles(find_alternate: bool, find_preview: bool) -> Option { + #[cfg(target_pointer_width = "64")] let env_var = if find_alternate { - if cfg!(target_pointer_width = "64") { - "ProgramFiles(x86)" - } else { - "ProgramW6432" - } + "ProgramFiles(x86)" + } else { + "ProgramFiles" + }; + + #[cfg(target_pointer_width = "32")] + let env_var = if find_alternate { + "ProgramW6432" } else { "ProgramFiles" }; @@ -163,19 +165,23 @@ pub fn get_windows_system_shell() -> String { } else { "Microsoft.PowerShell_" }; - msix_app_dir.read_dir().ok()?.find_map(|entry| { - let entry = entry.ok()?; - if !matches!(entry.file_type(), Ok(ft) if ft.is_dir()) { - return None; - } + msix_app_dir + .read_dir() + .ok()? 
+ .filter_map(|entry| { + let entry = entry.ok()?; + if !matches!(entry.file_type(), Ok(ft) if ft.is_dir()) { + return None; + } - if !entry.file_name().to_string_lossy().starts_with(prefix) { - return None; - } + if !entry.file_name().to_string_lossy().starts_with(prefix) { + return None; + } - let exe_path = entry.path().join("pwsh.exe"); - exe_path.exists().then_some(exe_path) - }) + let exe_path = entry.path().join("pwsh.exe"); + exe_path.exists().then_some(exe_path) + }) + .next() } fn find_pwsh_in_scoop() -> Option { @@ -184,37 +190,15 @@ pub fn get_windows_system_shell() -> String { pwsh_exe.exists().then_some(pwsh_exe) } - // check whether the found powershell is executable for us static SYSTEM_SHELL: LazyLock = LazyLock::new(|| { - let can_execute_pwsh = |p: &PathBuf| { - #[allow(clippy::disallowed_methods)] - let status = new_std_command(p).arg("-NoProfile").arg("-Help").status(); - let success = status.as_ref().is_ok_and(|status| status.success()); - if !success { - log::warn!( - "Powershell found at `{}` is not executable: {status:?}", - p.display() - ); - } - success - }; - - let locations = [ - || find_pwsh_in_programfiles(false, false), - || find_pwsh_in_programfiles(true, false), - || find_pwsh_in_msix(false), - || find_pwsh_in_programfiles(false, true), - || find_pwsh_in_msix(true), - || find_pwsh_in_programfiles(true, true), - || find_pwsh_in_scoop(), - || which::which_global("pwsh.exe").ok(), - || which::which_global("powershell.exe").ok(), - ]; - locations - .into_iter() - .filter_map(|f| f()) - .find(|p| can_execute_pwsh(&p)) - .map(|p| p.to_string_lossy().trim().to_owned()) + find_pwsh_in_programfiles(false, false) + .or_else(|| find_pwsh_in_programfiles(true, false)) + .or_else(|| find_pwsh_in_msix(false)) + .or_else(|| find_pwsh_in_programfiles(false, true)) + .or_else(|| find_pwsh_in_msix(true)) + .or_else(|| find_pwsh_in_programfiles(true, true)) + .or_else(find_pwsh_in_scoop) + .map(|p| p.to_string_lossy().into_owned()) .inspect(|shell| log::info!("Found powershell in: {}", shell)) .unwrap_or_else(|| { log::warn!("Powershell not found, falling back to `cmd`"); From 2ac13b94892f16b2ba04dcf0f0e4ac232e07aade Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Fri, 21 Nov 2025 08:28:17 -0700 Subject: [PATCH 0287/1030] Fallible Settings (#42938) Also tidies up error notifications so that in the case of syntax errors we don't see noise about the migration failing as well. Release Notes: - Invalid values in settings files will no longer prevent the rest of the file from being parsed. 
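The mechanism is roughly per-field fallback during deserialization: each setting value is parsed on its own, so one bad value degrades to "unset" instead of aborting the whole document. A minimal, self-contained sketch of that idea, assuming `serde` (with derive) and `serde_json`, with made-up names (`FallibleOption`, `EditorSettings`) rather than the actual `fallible_options.rs` implementation added in this patch:

```rust
use serde::{Deserialize, Deserializer};

// Sketch only: a wrapper that swallows per-field parse errors instead of
// failing the whole settings document. Names are assumptions, not the types
// used in crates/settings.
#[derive(Debug, Default)]
struct FallibleOption<T>(Option<T>);

impl<'de, T: Deserialize<'de>> Deserialize<'de> for FallibleOption<T> {
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Buffer the raw JSON value first, then try the real type; an invalid
        // value becomes `None` rather than an error that aborts the parent.
        let raw = serde_json::Value::deserialize(deserializer)?;
        Ok(FallibleOption(T::deserialize(raw).ok()))
    }
}

#[derive(Debug, Default, Deserialize)]
#[serde(default)]
struct EditorSettings {
    tab_size: FallibleOption<u32>,
    hard_tabs: FallibleOption<bool>,
}

fn main() {
    // `tab_size` has the wrong type, but `hard_tabs` still parses.
    let parsed: EditorSettings =
        serde_json::from_str(r#"{ "tab_size": "four", "hard_tabs": true }"#).unwrap();
    println!("{parsed:?}");
}
```

Here an unparseable `tab_size` degrades to `None` while `hard_tabs` still applies, which matches the behavior described in the release note above.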
--- Cargo.lock | 290 ++++++------------ Cargo.toml | 1 - crates/settings/Cargo.toml | 1 - crates/settings/src/fallible_options.rs | 112 +++++++ crates/settings/src/settings.rs | 3 +- crates/settings/src/settings_content.rs | 59 ++-- crates/settings/src/settings_content/agent.rs | 19 +- .../settings/src/settings_content/editor.rs | 23 +- .../src/settings_content/extension.rs | 6 +- .../settings/src/settings_content/language.rs | 38 +-- .../src/settings_content/language_model.rs | 63 ++-- .../settings/src/settings_content/project.rs | 45 ++- .../settings/src/settings_content/terminal.rs | 11 +- crates/settings/src/settings_content/theme.rs | 56 ++-- .../src/settings_content/workspace.rs | 25 +- crates/settings/src/settings_store.rs | 37 +-- crates/settings_macros/src/settings_macros.rs | 52 +++- crates/zed/src/main.rs | 11 +- crates/zed/src/zed.rs | 151 +++++---- 19 files changed, 509 insertions(+), 494 deletions(-) create mode 100644 crates/settings/src/fallible_options.rs diff --git a/Cargo.lock b/Cargo.lock index 9cace971e3d9248361c7e97def9481206fa3cc1b..e2613dc27767875e5d3d96aa058923077f20de0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -183,7 +183,7 @@ dependencies = [ "regex", "reqwest_client", "rust-embed", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -238,7 +238,7 @@ checksum = "ecf16c18fea41282d6bbadd1549a06be6836bddb1893f44a6235f340fa24e2af" dependencies = [ "anyhow", "derive_more 2.0.1", - "schemars 1.0.4", + "schemars", "serde", "serde_json", ] @@ -298,7 +298,7 @@ dependencies = [ "language_model", "paths", "project", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -372,7 +372,7 @@ dependencies = [ "release_channel", "rope", "rules_library", - "schemars 1.0.4", + "schemars", "search", "serde", "serde_json", @@ -627,7 +627,7 @@ dependencies = [ "chrono", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -2033,7 +2033,7 @@ dependencies = [ "aws-sdk-bedrockruntime", "aws-smithy-types", "futures 0.3.31", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "strum 0.27.2", @@ -2578,7 +2578,7 @@ version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9225bdcf4e4a9a4c08bf16607908eb2fbf746828d5e0b5e019726dbf6571f201" dependencies = [ - "darling 0.20.11", + "darling", "proc-macro2", "quote", "syn 2.0.106", @@ -2837,7 +2837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eadd868a2ce9ca38de7eeafdcec9c7065ef89b42b32f0839278d55f35c54d1ff" dependencies = [ "heck 0.4.1", - "indexmap 2.11.4", + "indexmap", "log", "proc-macro2", "quote", @@ -3207,7 +3207,7 @@ dependencies = [ "indoc", "ordered-float 2.10.1", "rustc-hash 2.1.1", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "strum 0.27.2", @@ -3484,7 +3484,7 @@ dependencies = [ name = "collections" version = "0.1.0" dependencies = [ - "indexmap 2.11.4", + "indexmap", "rustc-hash 2.1.1", ] @@ -3714,7 +3714,7 @@ dependencies = [ "net", "parking_lot", "postage", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -4464,7 +4464,7 @@ checksum = "d74b6bcf49ebbd91f1b1875b706ea46545032a14003b5557b7dfa4bbeba6766e" dependencies = [ "cc", "codespan-reporting 0.13.0", - "indexmap 2.11.4", + "indexmap", "proc-macro2", "quote", "scratch", @@ -4479,7 +4479,7 @@ checksum = "94ca2ad69673c4b35585edfa379617ac364bccd0ba0adf319811ba3a74ffa48a" dependencies = [ "clap", "codespan-reporting 0.13.0", - "indexmap 2.11.4", + "indexmap", "proc-macro2", 
"quote", "syn 2.0.106", @@ -4497,7 +4497,7 @@ version = "1.0.187" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a8ebf0b6138325af3ec73324cb3a48b64d57721f17291b151206782e61f66cd" dependencies = [ - "indexmap 2.11.4", + "indexmap", "proc-macro2", "quote", "syn 2.0.106", @@ -4526,7 +4526,7 @@ dependencies = [ "parking_lot", "paths", "proto", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -4545,7 +4545,7 @@ name = "dap-types" version = "0.0.1" source = "git+https://github.com/zed-industries/dap-types?rev=1b461b310481d01e02b2603c16d7144b926339f8#1b461b310481d01e02b2603c16d7144b926339f8" dependencies = [ - "schemars 1.0.4", + "schemars", "serde", "serde_json", ] @@ -4582,18 +4582,8 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core 0.20.11", - "darling_macro 0.20.11", -] - -[[package]] -name = "darling" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" -dependencies = [ - "darling_core 0.21.3", - "darling_macro 0.21.3", + "darling_core", + "darling_macro", ] [[package]] @@ -4610,38 +4600,13 @@ dependencies = [ "syn 2.0.106", ] -[[package]] -name = "darling_core" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim", - "syn 2.0.106", -] - [[package]] name = "darling_macro" version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core 0.20.11", - "quote", - "syn 2.0.106", -] - -[[package]] -name = "darling_macro" -version = "0.21.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" -dependencies = [ - "darling_core 0.21.3", + "darling_core", "quote", "syn 2.0.106", ] @@ -4791,7 +4756,7 @@ dependencies = [ "pretty_assertions", "project", "rpc", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -4830,7 +4795,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", ] @@ -4945,7 +4910,7 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae5c625eda104c228c06ecaf988d1c60e542176bd7a490e60eeda3493244c0c9" dependencies = [ - "darling 0.20.11", + "darling", "proc-macro2", "quote", "syn 2.0.106", @@ -5422,7 +5387,7 @@ dependencies = [ "release_channel", "rope", "rpc", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -6154,7 +6119,7 @@ dependencies = [ "picker", "pretty_assertions", "project", - "schemars 1.0.4", + "schemars", "search", "serde", "serde_json", @@ -7001,7 +6966,7 @@ dependencies = [ "derive_more 2.0.1", "derive_setters", "gh-workflow-macros", - "indexmap 2.11.4", + "indexmap", "merge", "serde", "serde_json", @@ -7036,7 +7001,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" dependencies = [ "fallible-iterator", - "indexmap 2.11.4", + "indexmap", "stable_deref_trait", ] @@ -7066,7 +7031,7 @@ dependencies = [ 
"rand 0.9.2", "regex", "rope", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "smol", @@ -7152,7 +7117,7 @@ dependencies = [ "project", "recent_projects", "remote", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -7256,7 +7221,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -7360,7 +7325,7 @@ dependencies = [ "refineable", "reqwest_client", "resvg", - "schemars 1.0.4", + "schemars", "seahash", "semantic_version", "serde", @@ -7448,7 +7413,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.11.4", + "indexmap", "slab", "tokio", "tokio-util", @@ -7467,7 +7432,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.11.4", + "indexmap", "slab", "tokio", "tokio-util", @@ -8230,17 +8195,6 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c5cedc30da3a610cac6b4ba17597bdf7152cf974e8aab3afb3d54455e371c8" -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", - "serde", -] - [[package]] name = "indexmap" version = "2.11.4" @@ -8629,7 +8583,7 @@ dependencies = [ "language", "paths", "project", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -8830,7 +8784,7 @@ dependencies = [ "rand 0.9.2", "regex", "rpc", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -8903,7 +8857,7 @@ dependencies = [ "open_router", "parking_lot", "proto", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -8952,7 +8906,7 @@ dependencies = [ "partial-json-fixer", "project", "release_channel", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -9469,7 +9423,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", ] @@ -9532,7 +9486,7 @@ dependencies = [ "parking_lot", "postage", "release_channel", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "smol", @@ -10112,7 +10066,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "strum 0.27.2", @@ -10212,7 +10166,7 @@ dependencies = [ "half", "hashbrown 0.15.5", "hexf-parse", - "indexmap 2.11.4", + "indexmap", "log", "num-traits", "once_cell", @@ -10886,7 +10840,7 @@ checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap", "memchr", ] @@ -10941,7 +10895,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -10964,7 +10918,7 @@ dependencies = [ "notifications", "picker", "project", - "schemars 1.0.4", + "schemars", "serde", "settings", "telemetry", @@ -11049,7 +11003,7 @@ dependencies = [ "futures 0.3.31", "http_client", "log", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -11064,7 +11018,7 @@ dependencies = [ "anyhow", "futures 0.3.31", "http_client", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -11945,7 +11899,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 
2.11.4", + "indexmap", ] [[package]] @@ -12061,7 +12015,7 @@ dependencies = [ "env_logger 0.11.8", "gpui", "menu", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "theme", @@ -12178,7 +12132,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "740ebea15c5d1428f910cd1a5f52cebf8d25006245ed8ade92702f4943d91e07" dependencies = [ "base64 0.22.1", - "indexmap 2.11.4", + "indexmap", "quick-xml 0.38.3", "serde", "time", @@ -12341,7 +12295,7 @@ dependencies = [ "comfy-table", "either", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap", "itoa", "num-traits", "polars-arrow", @@ -12518,7 +12472,7 @@ dependencies = [ "either", "hashbrown 0.15.5", "hex", - "indexmap 2.11.4", + "indexmap", "libm", "memchr", "num-traits", @@ -12630,7 +12584,7 @@ version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e6c1ab13e04d5167661a9854ed1ea0482b2ed9b8a0f1118dabed7cd994a85e3" dependencies = [ - "indexmap 2.11.4", + "indexmap", "polars-error", "polars-utils", "serde", @@ -12733,7 +12687,7 @@ dependencies = [ "flate2", "foldhash 0.1.5", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap", "libc", "memmap2", "num-traits", @@ -13059,7 +13013,7 @@ dependencies = [ "gpui", "http_client", "image", - "indexmap 2.11.4", + "indexmap", "itertools 0.14.0", "language", "log", @@ -13076,7 +13030,7 @@ dependencies = [ "release_channel", "remote", "rpc", - "schemars 1.0.4", + "schemars", "semver", "serde", "serde_json", @@ -13141,7 +13095,7 @@ dependencies = [ "pretty_assertions", "project", "rayon", - "schemars 1.0.4", + "schemars", "search", "serde", "serde_json", @@ -14010,7 +13964,7 @@ dependencies = [ "prost 0.9.0", "release_channel", "rpc", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -14900,24 +14854,12 @@ dependencies = [ "anyhow", "clap", "env_logger 0.11.8", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "theme", ] -[[package]] -name = "schemars" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" -dependencies = [ - "dyn-clone", - "ref-cast", - "serde", - "serde_json", -] - [[package]] name = "schemars" version = "1.0.4" @@ -14925,7 +14867,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" dependencies = [ "dyn-clone", - "indexmap 2.11.4", + "indexmap", "ref-cast", "schemars_derive", "serde", @@ -15138,7 +15080,7 @@ dependencies = [ "menu", "pretty_assertions", "project", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -15288,7 +15230,7 @@ version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ - "indexmap 2.11.4", + "indexmap", "itoa", "memchr", "ryu", @@ -15302,7 +15244,7 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e033097bf0d2b59a62b42c18ebbb797503839b26afdda2c4e1415cb6c813540" dependencies = [ - "indexmap 2.11.4", + "indexmap", "itoa", "memchr", "ryu", @@ -15372,44 +15314,13 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_with" -version = "3.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093cd8c01b25262b84927e0f7151692158fab02d961e04c979d3903eba7ecc5" -dependencies = [ - "base64 0.22.1", - "chrono", - "hex", - "indexmap 1.9.3", - "indexmap 
2.11.4", - "schemars 0.9.0", - "schemars 1.0.4", - "serde_core", - "serde_json", - "serde_with_macros", - "time", -] - -[[package]] -name = "serde_with_macros" -version = "3.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7e6c180db0816026a61afa1cff5344fb7ebded7e4d3062772179f2501481c27" -dependencies = [ - "darling 0.21.3", - "proc-macro2", - "quote", - "syn 2.0.106", -] - [[package]] name = "serde_yaml" version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.11.4", + "indexmap", "itoa", "ryu", "serde", @@ -15457,12 +15368,11 @@ dependencies = [ "pretty_assertions", "release_channel", "rust-embed", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", "serde_repr", - "serde_with", "settings_json", "settings_macros", "smallvec", @@ -15541,7 +15451,7 @@ dependencies = [ "pretty_assertions", "project", "release_channel", - "schemars 1.0.4", + "schemars", "search", "serde", "session", @@ -15845,7 +15755,7 @@ dependencies = [ "indoc", "parking_lot", "paths", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -16051,7 +15961,7 @@ dependencies = [ "futures-util", "hashbrown 0.15.5", "hashlink 0.10.0", - "indexmap 2.11.4", + "indexmap", "log", "memchr", "once_cell", @@ -16990,7 +16900,7 @@ dependencies = [ "menu", "picker", "project", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "settings", @@ -17069,7 +16979,7 @@ dependencies = [ "parking_lot", "pretty_assertions", "proto", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -17172,7 +17082,7 @@ dependencies = [ "rand 0.9.2", "regex", "release_channel", - "schemars 1.0.4", + "schemars", "serde", "settings", "smol", @@ -17218,7 +17128,7 @@ dependencies = [ "project", "rand 0.9.2", "regex", - "schemars 1.0.4", + "schemars", "search", "serde", "serde_json", @@ -17270,7 +17180,7 @@ dependencies = [ "palette", "parking_lot", "refineable", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -17300,7 +17210,7 @@ dependencies = [ "clap", "collections", "gpui", - "indexmap 2.11.4", + "indexmap", "log", "palette", "serde", @@ -17554,7 +17464,7 @@ dependencies = [ "project", "remote", "rpc", - "schemars 1.0.4", + "schemars", "serde", "settings", "smallvec", @@ -17743,7 +17653,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" dependencies = [ - "indexmap 2.11.4", + "indexmap", "serde_core", "serde_spanned 1.0.3", "toml_datetime 0.7.3", @@ -17776,7 +17686,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.11.4", + "indexmap", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -17790,7 +17700,7 @@ version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" dependencies = [ - "indexmap 2.11.4", + "indexmap", "toml_datetime 0.7.3", "toml_parser", "winnow", @@ -18450,7 +18360,7 @@ dependencies = [ "icons", "itertools 0.14.0", "menu", - "schemars 1.0.4", + "schemars", "serde", "settings", "smallvec", @@ -18724,7 +18634,7 @@ dependencies = [ "rand 0.9.2", "regex", "rust-embed", - "schemars 
1.0.4", + "schemars", "serde", "serde_json", "serde_json_lenient", @@ -18836,7 +18746,7 @@ name = "vercel" version = "0.1.0" dependencies = [ "anyhow", - "schemars 1.0.4", + "schemars", "serde", "strum 0.27.2", ] @@ -18886,7 +18796,7 @@ dependencies = [ "project_panel", "regex", "release_channel", - "schemars 1.0.4", + "schemars", "search", "serde", "serde_json", @@ -19148,7 +19058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fd83062c17b9f4985d438603cde0a5e8c5c8198201a6937f778b607924c7da2" dependencies = [ "anyhow", - "indexmap 2.11.4", + "indexmap", "serde", "serde_derive", "serde_json", @@ -19166,7 +19076,7 @@ dependencies = [ "anyhow", "auditable-serde", "flate2", - "indexmap 2.11.4", + "indexmap", "serde", "serde_derive", "serde_json", @@ -19196,7 +19106,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84e5df6dba6c0d7fafc63a450f1738451ed7a0b52295d83e868218fa286bf708" dependencies = [ "bitflags 2.9.4", - "indexmap 2.11.4", + "indexmap", "semver", ] @@ -19208,7 +19118,7 @@ checksum = "d06bfa36ab3ac2be0dee563380147a5b81ba10dd8885d7fbbc9eb574be67d185" dependencies = [ "bitflags 2.9.4", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap", "semver", "serde", ] @@ -19221,7 +19131,7 @@ checksum = "0f51cad774fb3c9461ab9bccc9c62dfb7388397b5deda31bf40e8108ccd678b2" dependencies = [ "bitflags 2.9.4", "hashbrown 0.15.5", - "indexmap 2.11.4", + "indexmap", "semver", ] @@ -19250,7 +19160,7 @@ dependencies = [ "cfg-if", "encoding_rs", "hashbrown 0.14.5", - "indexmap 2.11.4", + "indexmap", "libc", "log", "mach2 0.4.3", @@ -19374,7 +19284,7 @@ dependencies = [ "cranelift-bitset", "cranelift-entity", "gimli 0.31.1", - "indexmap 2.11.4", + "indexmap", "log", "object 0.36.7", "postcard", @@ -19499,7 +19409,7 @@ checksum = "8358319c2dd1e4db79e3c1c5d3a5af84956615343f9f89f4e4996a36816e06e6" dependencies = [ "anyhow", "heck 0.5.0", - "indexmap 2.11.4", + "indexmap", "wit-parser 0.221.3", ] @@ -20667,7 +20577,7 @@ checksum = "d8a39a15d1ae2077688213611209849cad40e9e5cccf6e61951a425850677ff3" dependencies = [ "anyhow", "heck 0.4.1", - "indexmap 2.11.4", + "indexmap", "wasm-metadata 0.201.0", "wit-bindgen-core 0.22.0", "wit-component 0.201.0", @@ -20681,7 +20591,7 @@ checksum = "9d0809dc5ba19e2e98661bf32fc0addc5a3ca5bf3a6a7083aa6ba484085ff3ce" dependencies = [ "anyhow", "heck 0.5.0", - "indexmap 2.11.4", + "indexmap", "prettyplease", "syn 2.0.106", "wasm-metadata 0.227.1", @@ -20726,7 +20636,7 @@ checksum = "421c0c848a0660a8c22e2fd217929a0191f14476b68962afd2af89fd22e39825" dependencies = [ "anyhow", "bitflags 2.9.4", - "indexmap 2.11.4", + "indexmap", "log", "serde", "serde_derive", @@ -20745,7 +20655,7 @@ checksum = "635c3adc595422cbf2341a17fb73a319669cc8d33deed3a48368a841df86b676" dependencies = [ "anyhow", "bitflags 2.9.4", - "indexmap 2.11.4", + "indexmap", "log", "serde", "serde_derive", @@ -20764,7 +20674,7 @@ checksum = "196d3ecfc4b759a8573bf86a9b3f8996b304b3732e4c7de81655f875f6efdca6" dependencies = [ "anyhow", "id-arena", - "indexmap 2.11.4", + "indexmap", "log", "semver", "serde", @@ -20782,7 +20692,7 @@ checksum = "896112579ed56b4a538b07a3d16e562d101ff6265c46b515ce0c701eef16b2ac" dependencies = [ "anyhow", "id-arena", - "indexmap 2.11.4", + "indexmap", "log", "semver", "serde", @@ -20800,7 +20710,7 @@ checksum = "ddf445ed5157046e4baf56f9138c124a0824d4d1657e7204d71886ad8ce2fc11" dependencies = [ "anyhow", "id-arena", - "indexmap 2.11.4", + "indexmap", "log", "semver", "serde", @@ -20850,7 +20760,7 @@ dependencies = [ 
"pretty_assertions", "project", "remote", - "schemars 1.0.4", + "schemars", "serde", "serde_json", "session", @@ -20965,7 +20875,7 @@ name = "x_ai" version = "0.1.0" dependencies = [ "anyhow", - "schemars 1.0.4", + "schemars", "serde", "strum 0.27.2", ] @@ -21064,7 +20974,7 @@ dependencies = [ "cargo_toml", "clap", "gh-workflow", - "indexmap 2.11.4", + "indexmap", "indoc", "serde", "toml 0.8.23", @@ -21515,7 +21425,7 @@ name = "zed_actions" version = "0.1.0" dependencies = [ "gpui", - "schemars 1.0.4", + "schemars", "serde", "uuid", ] @@ -21923,7 +21833,7 @@ dependencies = [ "crc32fast", "crossbeam-utils", "displaydoc", - "indexmap 2.11.4", + "indexmap", "num_enum", "thiserror 1.0.69", ] diff --git a/Cargo.toml b/Cargo.toml index fa85879698521a70686e9d96c6a108e8d1cbe28d..63303678ebfbdbf5d403b40ff83c4612ad2ba2c8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -641,7 +641,6 @@ serde_json_lenient = { version = "0.2", features = [ serde_path_to_error = "0.1.17" serde_repr = "0.1" serde_urlencoded = "0.7" -serde_with = "3.4.0" sha2 = "0.10" shellexpand = "2.1.0" shlex = "1.3.0" diff --git a/crates/settings/Cargo.toml b/crates/settings/Cargo.toml index af0d5a55f363fd6d96f281f2db50f803baae7d14..1f1513d6216f6ac13b6dc6c1b1cded5677d32849 100644 --- a/crates/settings/Cargo.toml +++ b/crates/settings/Cargo.toml @@ -34,7 +34,6 @@ serde.workspace = true serde_json.workspace = true serde_json_lenient.workspace = true serde_repr.workspace = true -serde_with.workspace = true settings_json.workspace = true settings_macros.workspace = true smallvec.workspace = true diff --git a/crates/settings/src/fallible_options.rs b/crates/settings/src/fallible_options.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0eea451f1fe863edebc5f9bf438e10821531db6 --- /dev/null +++ b/crates/settings/src/fallible_options.rs @@ -0,0 +1,112 @@ +use std::cell::RefCell; + +use serde::Deserialize; + +use crate::ParseStatus; + +thread_local! 
{
+    static ERRORS: RefCell<Option<Vec<anyhow::Error>>> = const { RefCell::new(None) };
+}
+
+pub(crate) fn parse_json<'de, T>(json: &'de str) -> (Option<T>, ParseStatus)
+where
+    T: Deserialize<'de>,
+{
+    ERRORS.with_borrow_mut(|errors| {
+        errors.replace(Vec::default());
+    });
+
+    let mut deserializer = serde_json_lenient::Deserializer::from_str(json);
+    let value = T::deserialize(&mut deserializer);
+    let value = match value {
+        Ok(value) => value,
+        Err(error) => {
+            return (
+                None,
+                ParseStatus::Failed {
+                    error: error.to_string(),
+                },
+            );
+        }
+    };
+
+    if let Some(errors) = ERRORS.with_borrow_mut(|errors| errors.take().filter(|e| !e.is_empty())) {
+        let error = errors
+            .into_iter()
+            .map(|e| e.to_string())
+            .flat_map(|e| ["\n".to_owned(), e])
+            .skip(1)
+            .collect::<String>();
+        return (Some(value), ParseStatus::Failed { error });
+    }
+
+    (Some(value), ParseStatus::Success)
+}
+
+pub(crate) fn deserialize<'de, D, T>(deserializer: D) -> Result<T, D::Error>
+where
+    D: serde::Deserializer<'de>,
+    T: serde::Deserialize<'de> + FallibleOption,
+{
+    match T::deserialize(deserializer) {
+        Ok(value) => Ok(value),
+        Err(e) => ERRORS.with_borrow_mut(|errors| {
+            if let Some(errors) = errors {
+                errors.push(anyhow::anyhow!("{}", e));
+                Ok(Default::default())
+            } else {
+                Err(e)
+            }
+        }),
+    }
+}
+
+pub trait FallibleOption: Default {}
+impl<T> FallibleOption for Option<T> {}
+
+#[cfg(test)]
+mod tests {
+    use serde::Deserialize;
+    use settings_macros::with_fallible_options;
+
+    use crate::ParseStatus;
+
+    #[with_fallible_options]
+    #[derive(Deserialize, Debug, PartialEq)]
+    struct Foo {
+        foo: Option<String>,
+        bar: Option<usize>,
+        baz: Option<bool>,
+    }
+
+    #[test]
+    fn test_fallible() {
+        let input = r#"
+            {"foo": "bar",
+            "bar": "foo",
+            "baz": 3,
+            }
+            "#;
+
+        let (settings, result) = crate::fallible_options::parse_json::<Foo>(&input);
+        assert_eq!(
+            settings.unwrap(),
+            Foo {
+                foo: Some("bar".into()),
+                bar: None,
+                baz: None,
+            }
+        );
+
+        assert!(crate::parse_json_with_comments::<Foo>(&input).is_err());
+
+        let ParseStatus::Failed { error } = result else {
+            panic!("Expected parse to fail")
+        };
+
+        assert_eq!(
+            error,
+            "invalid type: string \"foo\", expected usize at line 3 column 24\ninvalid type: integer `3`, expected a boolean at line 4 column 20".to_string()
+        )
+    }
+}
diff --git a/crates/settings/src/settings.rs b/crates/settings/src/settings.rs
index fc097d474e92a66ca5b981e2a3235b33f42fb1bf..5f07ebe52f8997a91653063b5c20ca8e7432acc5 100644
--- a/crates/settings/src/settings.rs
+++ b/crates/settings/src/settings.rs
@@ -1,5 +1,6 @@
 mod base_keymap_setting;
 mod editable_setting_control;
+mod fallible_options;
 mod keymap_file;
 pub mod merge_from;
 mod serde_helper;
@@ -33,7 +34,7 @@ pub use settings_file::*;
 pub use settings_json::*;
 pub use settings_store::{
     InvalidSettingsError, LocalSettingsKind, MigrationStatus, ParseStatus, Settings, SettingsFile,
-    SettingsJsonSchemaParams, SettingsKey, SettingsLocation, SettingsStore,
+    SettingsJsonSchemaParams, SettingsKey, SettingsLocation, SettingsParseResult, SettingsStore,
 };
 pub use vscode_import::{VsCodeSettings, VsCodeSettingsSource};
 
diff --git a/crates/settings/src/settings_content.rs b/crates/settings/src/settings_content.rs
index 9cd8ff46e8bea5be69bd5415b5668f21dc71f13a..230e1ffd48b9cc1d58aba59ea0af2c629e36c8e3 100644
--- a/crates/settings/src/settings_content.rs
+++ b/crates/settings/src/settings_content.rs
@@ -23,8 +23,7 @@ use gpui::{App, SharedString};
 use release_channel::ReleaseChannel;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use serde_with::skip_serializing_none;
-use 
settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use std::collections::BTreeSet; use std::env; use std::sync::Arc; @@ -32,7 +31,7 @@ pub use util::serde::default_true; use crate::{ActiveSettingsProfileName, merge_from}; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Default, Clone, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct SettingsContent { #[serde(flatten)] @@ -169,7 +168,7 @@ impl SettingsContent { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct UserSettingsContent { #[serde(flatten)] @@ -260,7 +259,7 @@ impl strum::VariantNames for BaseKeymapContent { ]; } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct TitleBarSettingsContent { /// Whether to show the branch icon beside branch switcher in the title bar. @@ -294,7 +293,7 @@ pub struct TitleBarSettingsContent { } /// Configuration of audio in Zed. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct AudioSettingsContent { /// Opt into the new audio system. @@ -338,7 +337,7 @@ pub struct AudioSettingsContent { } /// Control what info is collected by Zed. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, Debug, MergeFrom)] pub struct TelemetrySettingsContent { /// Send debug info like crash reports. @@ -360,7 +359,7 @@ impl Default for TelemetrySettingsContent { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Debug, PartialEq, Eq, Serialize, Deserialize, JsonSchema, Clone, MergeFrom)] pub struct DebuggerSettingsContent { /// Determines the stepping granularity. @@ -441,7 +440,7 @@ pub enum DockPosition { } /// Settings for slash commands. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Deserialize, Serialize, Debug, Default, Clone, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct SlashCommandSettings { /// Settings for the `/cargo-workspace` slash command. @@ -449,7 +448,7 @@ pub struct SlashCommandSettings { } /// Settings for the `/cargo-workspace` slash command. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Deserialize, Serialize, Debug, Default, Clone, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct CargoWorkspaceCommandSettings { /// Whether `/cargo-workspace` is enabled. @@ -457,7 +456,7 @@ pub struct CargoWorkspaceCommandSettings { } /// Configuration of voice calls in Zed. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct CallSettingsContent { /// Whether the microphone should be muted when joining a channel or a call. @@ -471,7 +470,7 @@ pub struct CallSettingsContent { pub share_on_join: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct GitPanelSettingsContent { /// Whether to show the panel button in the status bar. 
@@ -535,7 +534,7 @@ pub enum StatusStyle { LabelColor, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Copy, Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, )] @@ -543,7 +542,7 @@ pub struct ScrollbarSettings { pub show: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct NotificationPanelSettingsContent { /// Whether to show the panel button in the status bar. @@ -561,7 +560,7 @@ pub struct NotificationPanelSettingsContent { pub default_width: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct PanelSettingsContent { /// Whether to show the panel button in the status bar. @@ -579,7 +578,7 @@ pub struct PanelSettingsContent { pub default_width: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct MessageEditorSettings { /// Whether to automatically replace emoji shortcodes with emoji characters. @@ -589,7 +588,7 @@ pub struct MessageEditorSettings { pub auto_replace_emoji_shortcode: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct FileFinderSettingsContent { /// Whether to show file icons in the file finder. @@ -664,7 +663,7 @@ pub enum FileFinderWidthContent { Full, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, PartialEq, Debug, JsonSchema, MergeFrom)] pub struct VimSettingsContent { pub default_mode: Option, @@ -697,7 +696,7 @@ pub enum UseSystemClipboard { } /// The settings for cursor shape. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, MergeFrom)] pub struct CursorShapeSettings { /// Cursor shape for the normal mode. @@ -719,7 +718,7 @@ pub struct CursorShapeSettings { } /// Settings specific to journaling -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct JournalSettingsContent { /// The path of the directory where journal entries are stored. @@ -740,7 +739,7 @@ pub enum HourFormat { Hour24, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct OutlinePanelSettingsContent { /// Whether to show the outline panel button in the status bar. @@ -835,7 +834,7 @@ pub enum ShowIndentGuides { Never, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, Default, )] @@ -853,7 +852,7 @@ pub enum LineIndicatorFormat { } /// The settings for the image viewer. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, Default, PartialEq)] pub struct ImageViewerSettingsContent { /// The unit to use for displaying image file sizes. 
@@ -862,7 +861,7 @@ pub struct ImageViewerSettingsContent { pub unit: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Copy, @@ -885,7 +884,7 @@ pub enum ImageFileSizeUnit { Decimal, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct RemoteSettingsContent { pub ssh_connections: Option>, @@ -893,7 +892,7 @@ pub struct RemoteSettingsContent { pub read_ssh_config: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct SshConnection { pub host: SharedString, @@ -922,7 +921,7 @@ pub struct WslConnection { pub projects: BTreeSet, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Debug, Default, Serialize, PartialEq, Eq, PartialOrd, Ord, Deserialize, JsonSchema, )] @@ -930,19 +929,17 @@ pub struct SshProject { pub paths: Vec, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema, MergeFrom)] pub struct SshPortForwardOption { - #[serde(skip_serializing_if = "Option::is_none")] pub local_host: Option, pub local_port: u16, - #[serde(skip_serializing_if = "Option::is_none")] pub remote_host: Option, pub remote_port: u16, } /// Settings for configuring REPL display and behavior. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ReplSettingsContent { /// Maximum number of lines to keep in REPL's scrollback buffer. diff --git a/crates/settings/src/settings_content/agent.rs b/crates/settings/src/settings_content/agent.rs index c6ed2fc4a8980ff56153c6da69c03f3c3b7bf9c7..2ea9f0cd5788f3312061ec8ffef2a728403463ac 100644 --- a/crates/settings/src/settings_content/agent.rs +++ b/crates/settings/src/settings_content/agent.rs @@ -2,13 +2,12 @@ use collections::{HashMap, IndexMap}; use gpui::SharedString; use schemars::{JsonSchema, json_schema}; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use std::{borrow::Cow, path::PathBuf, sync::Arc}; use crate::DockPosition; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, Default)] pub struct AgentSettingsContent { /// Whether the Agent is enabled. 
@@ -166,7 +165,7 @@ impl AgentSettingsContent { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct AgentProfileContent { pub name: Arc, @@ -180,7 +179,7 @@ pub struct AgentProfileContent { pub default_model: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ContextServerPresetContent { pub tools: IndexMap, bool>, @@ -215,7 +214,7 @@ pub enum NotifyWhenAgentWaiting { Never, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct LanguageModelSelection { pub provider: LanguageModelProviderSetting, @@ -231,7 +230,7 @@ pub enum CompletionMode { Burn, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct LanguageModelParameters { pub provider: Option, @@ -290,7 +289,7 @@ impl From<&str> for LanguageModelProviderSetting { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, PartialEq, Deserialize, Serialize, Clone, JsonSchema, MergeFrom, Debug)] pub struct AllAgentServersSettings { pub gemini: Option, @@ -302,7 +301,7 @@ pub struct AllAgentServersSettings { pub custom: HashMap, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Deserialize, Serialize, Clone, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct BuiltinAgentServerSettings { /// Absolute path to a binary to be used when launching this agent. @@ -340,7 +339,7 @@ pub struct BuiltinAgentServerSettings { pub default_model: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Deserialize, Serialize, Clone, JsonSchema, MergeFrom, Debug, PartialEq)] #[serde(tag = "type", rename_all = "snake_case")] pub enum CustomAgentServerSettings { diff --git a/crates/settings/src/settings_content/editor.rs b/crates/settings/src/settings_content/editor.rs index 4ef5f3e427b8ca8a2658c7bb35012ecc9618e377..9ec5542e9bc1aaa78c4b26cb08257a2644990a99 100644 --- a/crates/settings/src/settings_content/editor.rs +++ b/crates/settings/src/settings_content/editor.rs @@ -4,14 +4,13 @@ use std::num; use collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use crate::{ DelayMs, DiagnosticSeverityContent, ShowScrollbar, serialize_f32_with_two_decimal_places, }; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct EditorSettingsContent { /// Whether the cursor blinks in the editor. @@ -254,7 +253,7 @@ impl RelativeLineNumbers { } // Toolbar related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct ToolbarContent { /// Whether to display breadcrumbs in the editor toolbar. @@ -281,7 +280,7 @@ pub struct ToolbarContent { } /// Scrollbar related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Default)] pub struct ScrollbarContent { /// When to show the scrollbar in the editor. 
@@ -317,7 +316,7 @@ pub struct ScrollbarContent { } /// Sticky scroll related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct StickyScrollContent { /// Whether sticky scroll is enabled. @@ -327,7 +326,7 @@ pub struct StickyScrollContent { } /// Minimap related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct MinimapContent { /// When to show the minimap in the editor. @@ -362,7 +361,7 @@ pub struct MinimapContent { } /// Forcefully enable or disable the scrollbar for each axis -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Default)] pub struct ScrollbarAxesContent { /// When false, forcefully disables the horizontal scrollbar. Otherwise, obey other settings. @@ -377,7 +376,7 @@ pub struct ScrollbarAxesContent { } /// Gutter related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct GutterContent { /// Whether to show line numbers in the gutter. @@ -754,7 +753,7 @@ pub enum SnippetSortOrder { } /// Default options for buffer and project search items. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct SearchSettingsContent { /// Whether to show the project search button in the status bar. @@ -771,7 +770,7 @@ pub struct SearchSettingsContent { pub center_on_match: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct JupyterContent { @@ -787,7 +786,7 @@ pub struct JupyterContent { } /// Whether to allow drag and drop text selection in buffer. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct DragAndDropSelectionContent { /// When true, enables drag and drop text selection in buffer. diff --git a/crates/settings/src/settings_content/extension.rs b/crates/settings/src/settings_content/extension.rs index f8abb5283ff02365efe5f8cd919757c0e0c4565a..2fefd4ef38aeb9af133ed745d2732a3cb6ec77f7 100644 --- a/crates/settings/src/settings_content/extension.rs +++ b/crates/settings/src/settings_content/extension.rs @@ -3,10 +3,9 @@ use std::sync::Arc; use collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ExtensionSettingsContent { /// The extensions that should be automatically installed by Zed. @@ -20,7 +19,6 @@ pub struct ExtensionSettingsContent { #[serde(default)] pub auto_update_extensions: HashMap, bool>, /// The capabilities granted to extensions. 
- #[serde(default)] pub granted_extension_capabilities: Option>, } diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index 11eb87817d12517ecb2ef333eacc60d1c2f48330..78ecc270166483b13af7e169b2390ad9f76d595d 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -4,20 +4,17 @@ use collections::{HashMap, HashSet}; use gpui::{Modifiers, SharedString}; use schemars::JsonSchema; use serde::{Deserialize, Serialize, de::Error as _}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use std::sync::Arc; use crate::{ExtendingVec, merge_from}; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, JsonSchema)] pub struct AllLanguageSettingsContent { /// The settings for enabling/disabling features. - #[serde(default)] pub features: Option, /// The edit prediction settings. - #[serde(default)] pub edit_predictions: Option, /// The default language settings. #[serde(flatten)] @@ -59,7 +56,7 @@ impl merge_from::MergeFrom for AllLanguageSettingsContent { } /// The settings for enabling/disabling features. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct FeaturesContent { @@ -134,7 +131,7 @@ impl EditPredictionProvider { } /// The contents of the edit prediction settings. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct EditPredictionSettingsContent { /// A list of globs representing files that edit predictions should be disabled for. @@ -153,7 +150,7 @@ pub struct EditPredictionSettingsContent { pub enabled_in_text_threads: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct CopilotSettingsContent { /// HTTP/HTTPS proxy to use for Copilot. @@ -246,7 +243,7 @@ pub enum SoftWrap { } /// The settings for a particular language. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct LanguageSettingsContent { /// How many columns a tab should occupy. @@ -451,7 +448,7 @@ pub enum ShowWhitespaceSetting { Trailing, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] pub struct WhitespaceMapContent { pub space: Option, @@ -483,7 +480,7 @@ pub enum RewrapBehavior { Anywhere, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct JsxTagAutoCloseSettingsContent { /// Enables or disables auto-closing of JSX tags. @@ -491,7 +488,7 @@ pub struct JsxTagAutoCloseSettingsContent { } /// The settings for inlay hints. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct InlayHintSettingsContent { /// Global switch to toggle hints on and off. @@ -573,7 +570,7 @@ impl InlayHintKind { } /// Controls how completions are processed for this language. 
-#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom, Default)] #[serde(rename_all = "snake_case")] pub struct CompletionSettingsContent { @@ -658,7 +655,7 @@ pub enum WordsCompletionMode { /// Allows to enable/disable formatting with Prettier /// and configure default Prettier, used when no project-level Prettier installation is found. /// Prettier formatting is disabled by default. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct PrettierSettingsContent { /// Enables or disables formatting with Prettier for a given language. @@ -812,7 +809,7 @@ struct LanguageServerSpecifierContent { } /// The settings for indent guides. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct IndentGuideSettingsContent { /// Whether to display indent guides in the editor. @@ -838,7 +835,7 @@ pub struct IndentGuideSettingsContent { } /// The task settings for a particular language. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, Deserialize, PartialEq, Serialize, JsonSchema, MergeFrom)] pub struct LanguageTaskSettingsContent { /// Extra task variables to set for a particular language. @@ -855,7 +852,7 @@ pub struct LanguageTaskSettingsContent { } /// Map from language name to settings. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct LanguageToSettingsMap(pub HashMap); @@ -911,6 +908,9 @@ pub enum IndentGuideBackgroundColoring { #[cfg(test)] mod test { + + use crate::{ParseStatus, fallible_options}; + use super::*; #[test] @@ -970,8 +970,8 @@ mod test { #[test] fn test_formatter_deserialization_invalid() { let raw_auto = "{\"formatter\": {}}"; - let result: Result = serde_json::from_str(raw_auto); - assert!(result.is_err()); + let (_, result) = fallible_options::parse_json::(raw_auto); + assert!(matches!(result, ParseStatus::Failed { .. })); } #[test] diff --git a/crates/settings/src/settings_content/language_model.rs b/crates/settings/src/settings_content/language_model.rs index 50ad812142e1544d2fa7947d4c6a845c6d459090..0a746c1284c1d981fdf95745952baacc74548d04 100644 --- a/crates/settings/src/settings_content/language_model.rs +++ b/crates/settings/src/settings_content/language_model.rs @@ -1,12 +1,11 @@ use collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use std::sync::Arc; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct AllLanguageModelSettingsContent { pub anthropic: Option, @@ -25,14 +24,14 @@ pub struct AllLanguageModelSettingsContent { pub zed_dot_dev: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct AnthropicSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct AnthropicAvailableModel { /// The model's name in the Anthropic API. e.g. 
claude-3-5-sonnet-latest, claude-3-opus-20240229, etc @@ -54,7 +53,7 @@ pub struct AnthropicAvailableModel { pub mode: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct AmazonBedrockSettingsContent { pub available_models: Option>, @@ -64,7 +63,7 @@ pub struct AmazonBedrockSettingsContent { pub authentication_method: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct BedrockAvailableModel { pub name: String, @@ -88,14 +87,14 @@ pub enum BedrockAuthMethodContent { Automatic, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct OllamaSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct OllamaAvailableModel { /// The model name in the Ollama API (e.g. "llama3.2:latest") @@ -136,14 +135,14 @@ impl Default for KeepAlive { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct LmStudioSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct LmStudioAvailableModel { pub name: String, @@ -153,14 +152,14 @@ pub struct LmStudioAvailableModel { pub supports_images: bool, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct DeepseekSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct DeepseekAvailableModel { pub name: String, @@ -169,14 +168,14 @@ pub struct DeepseekAvailableModel { pub max_output_tokens: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct MistralSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct MistralAvailableModel { pub name: String, @@ -189,14 +188,14 @@ pub struct MistralAvailableModel { pub supports_thinking: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct OpenAiSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct OpenAiAvailableModel { pub name: String, @@ -216,14 +215,14 @@ pub enum OpenAiReasoningEffort { High, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct OpenAiCompatibleSettingsContent { pub api_url: String, pub available_models: Vec, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, 
Deserialize, JsonSchema, MergeFrom)] pub struct OpenAiCompatibleAvailableModel { pub name: String, @@ -235,7 +234,7 @@ pub struct OpenAiCompatibleAvailableModel { pub capabilities: OpenAiCompatibleModelCapabilities, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct OpenAiCompatibleModelCapabilities { pub tools: bool, @@ -255,14 +254,14 @@ impl Default for OpenAiCompatibleModelCapabilities { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct VercelSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct VercelAvailableModel { pub name: String, @@ -272,14 +271,14 @@ pub struct VercelAvailableModel { pub max_completion_tokens: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct GoogleSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct GoogleAvailableModel { pub name: String, @@ -288,14 +287,14 @@ pub struct GoogleAvailableModel { pub mode: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct XAiSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct XaiAvailableModel { pub name: String, @@ -308,13 +307,13 @@ pub struct XaiAvailableModel { pub parallel_tool_calls: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct ZedDotDevSettingsContent { pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ZedDotDevAvailableModel { /// The provider of the language model. @@ -351,14 +350,14 @@ pub enum ZedDotDevAvailableProvider { Google, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Default, Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] pub struct OpenRouterSettingsContent { pub api_url: Option, pub available_models: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct OpenRouterAvailableModel { pub name: String, @@ -372,7 +371,7 @@ pub struct OpenRouterAvailableModel { pub provider: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct OpenRouterProvider { order: Option>, @@ -401,7 +400,7 @@ fn default_true() -> bool { } /// Configuration for caching language model messages. 
-#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct LanguageModelCacheConfiguration { pub max_cache_anchors: usize, diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index c9021ee22e4c419af544bea8e76387615e2a949d..ccad50ce8827f6d7d59a45b0fb2efd4abb5257b7 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -3,8 +3,7 @@ use std::{path::PathBuf, sync::Arc}; use collections::{BTreeMap, HashMap}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use util::serde::default_true; use crate::{ @@ -12,7 +11,7 @@ use crate::{ SlashCommandSettings, }; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ProjectSettingsContent { #[serde(flatten)] @@ -32,7 +31,6 @@ pub struct ProjectSettingsContent { #[serde(default)] pub lsp: HashMap, LspSettings>, - #[serde(default)] pub terminal: Option, /// Configuration for Debugger-related features @@ -53,15 +51,13 @@ pub struct ProjectSettingsContent { pub git_hosting_providers: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct WorktreeSettingsContent { /// The displayed name of this project. If not set or null, the root directory name /// will be displayed. /// /// Default: null - #[serde(default)] - #[serde(skip_serializing_if = "Option::is_none")] pub project_name: Option, /// Whether to prevent this project from being shared in public channels. @@ -103,7 +99,7 @@ pub struct WorktreeSettingsContent { pub hidden_files: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, JsonSchema, MergeFrom, Hash)] #[serde(rename_all = "snake_case")] pub struct LspSettings { @@ -140,7 +136,7 @@ impl Default for LspSettings { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq, JsonSchema, MergeFrom, Hash, )] @@ -151,7 +147,7 @@ pub struct BinarySettings { pub ignore_system_version: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Debug, Default, Serialize, Deserialize, PartialEq, Eq, JsonSchema, MergeFrom, Hash, )] @@ -161,7 +157,7 @@ pub struct FetchSettings { } /// Common language server settings. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct GlobalLspSettingsContent { /// Whether to show the LSP servers button in the status bar. 
@@ -170,18 +166,16 @@ pub struct GlobalLspSettingsContent { pub button: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct DapSettingsContent { pub binary: Option, - #[serde(default)] pub args: Option>, - #[serde(default)] pub env: Option>, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Default, Copy, Clone, PartialEq, Eq, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, )] @@ -249,7 +243,7 @@ impl ContextServerSettingsContent { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, MergeFrom)] pub struct ContextServerCommand { #[serde(rename = "command")] @@ -285,7 +279,7 @@ impl std::fmt::Debug for ContextServerCommand { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Copy, Clone, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct GitSettings { /// Whether or not to show the git gutter. @@ -339,7 +333,7 @@ pub enum GitGutterSetting { Hide, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct InlineBlameSettings { @@ -368,7 +362,7 @@ pub struct InlineBlameSettings { pub show_commit_summary: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct BlameSettings { @@ -378,7 +372,7 @@ pub struct BlameSettings { pub show_avatar: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Copy, PartialEq, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct BranchPickerSettingsContent { @@ -410,6 +404,7 @@ pub enum GitHunkStyleSetting { UnstagedHollow, } +#[with_fallible_options] #[derive( Copy, Clone, @@ -432,7 +427,7 @@ pub enum GitPathStyle { FilePathFirst, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct DiagnosticsSettingsContent { /// Whether to show the project diagnostics button in the status bar. @@ -448,7 +443,7 @@ pub struct DiagnosticsSettingsContent { pub inline: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Copy, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, )] @@ -464,7 +459,7 @@ pub struct LspPullDiagnosticsSettingsContent { pub debounce_ms: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Copy, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Eq, )] @@ -493,7 +488,7 @@ pub struct InlineDiagnosticsSettingsContent { pub max_severity: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct NodeBinarySettings { /// The path to the Node binary. @@ -541,7 +536,7 @@ pub enum DiagnosticSeverityContent { } /// A custom Git hosting provider. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct GitHostingProviderConfig { /// The type of the provider. 
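Note on the pattern applied throughout these files: swapping `#[skip_serializing_none]` for `#[with_fallible_options]` routes every `Option` field through `crate::fallible_options::deserialize`, so one malformed setting falls back to `None` and its error is recorded in a thread-local sink rather than rejecting the whole file; `parse_json` then surfaces the collected errors as `ParseStatus::Failed` alongside the partially parsed content. The sketch below illustrates the same per-field fallback outside Zed's crates. It is a minimal approximation, assuming only `serde` (with `derive`) and `serde_json` as dependencies; the names `Settings`, `fallible`, `parse`, `tab_size`, and `theme` are illustrative, and it buffers each field through `serde_json::Value` instead of hooking into `serde_json_lenient` as this patch does.

```
use std::cell::RefCell;

use serde::{Deserialize, Deserializer};

thread_local! {
    // Per-thread sink for field-level errors collected during one parse.
    static ERRORS: RefCell<Vec<String>> = const { RefCell::new(Vec::new()) };
}

// Field-level deserializer: on error, record the message and fall back to None.
fn fallible<'de, D, T>(deserializer: D) -> Result<Option<T>, D::Error>
where
    D: Deserializer<'de>,
    T: serde::de::DeserializeOwned,
{
    // Buffer the raw value first so a failed field never leaves the stream half-read.
    let value = serde_json::Value::deserialize(deserializer)?;
    match Option::<T>::deserialize(value) {
        Ok(parsed) => Ok(parsed),
        Err(error) => {
            ERRORS.with_borrow_mut(|errors| errors.push(error.to_string()));
            Ok(None)
        }
    }
}

#[derive(Debug, Deserialize)]
struct Settings {
    #[serde(default, deserialize_with = "fallible")]
    tab_size: Option<u32>,
    #[serde(default, deserialize_with = "fallible")]
    theme: Option<String>,
}

// Returns the partially parsed settings plus whatever field errors were collected.
fn parse(json: &str) -> (Settings, Vec<String>) {
    ERRORS.with_borrow_mut(|errors| errors.clear());
    let settings: Settings = serde_json::from_str(json).expect("top-level JSON is well formed");
    (settings, ERRORS.with_borrow_mut(std::mem::take))
}

fn main() {
    // "tab_size" has the wrong type: that field becomes None, "theme" still loads.
    let (settings, errors) = parse(r#"{ "tab_size": "four", "theme": "One Dark" }"#);
    assert_eq!(settings.theme.as_deref(), Some("One Dark"));
    assert_eq!(settings.tab_size, None);
    assert_eq!(errors.len(), 1);
    println!("{settings:?}\n{errors:?}");
}
```

Buffering through `serde_json::Value` keeps the underlying stream consistent even when a composite value fails part-way, which is why the sketch takes that route rather than catching the error directly on the field's deserializer.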
diff --git a/crates/settings/src/settings_content/terminal.rs b/crates/settings/src/settings_content/terminal.rs index 723156bc3ad2d5d07866f40836f10ec9f3e79087..c75b986bb817752d2f3ce64db52af2ad61a1c58d 100644 --- a/crates/settings/src/settings_content/terminal.rs +++ b/crates/settings/src/settings_content/terminal.rs @@ -4,8 +4,7 @@ use collections::HashMap; use gpui::{AbsoluteLength, FontFeatures, SharedString, px}; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use crate::FontFamilyName; @@ -32,7 +31,7 @@ pub struct ProjectTerminalSettingsContent { pub detect_venv: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct TerminalSettingsContent { #[serde(flatten)] @@ -201,7 +200,7 @@ pub enum WorkingDirectory { Always { directory: String }, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Clone, Copy, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, Default, )] @@ -339,7 +338,7 @@ pub enum AlternateScroll { } // Toolbar related settings -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] pub struct TerminalToolbarContent { /// Whether to display the terminal title in breadcrumbs inside the terminal pane. @@ -386,7 +385,7 @@ pub enum VenvSettings { conda_manager: Option, }, } -#[skip_serializing_none] +#[with_fallible_options] pub struct VenvSettingsContent<'a> { pub activate_script: ActivateScript, pub venv_name: &'a str, diff --git a/crates/settings/src/settings_content/theme.rs b/crates/settings/src/settings_content/theme.rs index 4cd1313633a1c32eaf2c0066b23ac3bd3e5bbe79..f089b076fbdf404589057b1001b232e9fcb2ee79 100644 --- a/crates/settings/src/settings_content/theme.rs +++ b/crates/settings/src/settings_content/theme.rs @@ -4,90 +4,72 @@ use schemars::{JsonSchema, JsonSchema_repr}; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use serde_repr::{Deserialize_repr, Serialize_repr}; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use std::{fmt::Display, sync::Arc}; -use serde_with::skip_serializing_none; - use crate::serialize_f32_with_two_decimal_places; /// Settings for rendering text in UI and text buffers. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ThemeSettingsContent { /// The default font size for text in the UI. - #[serde(default)] #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")] pub ui_font_size: Option, /// The name of a font to use for rendering in the UI. - #[serde(default)] pub ui_font_family: Option, /// The font fallbacks to use for rendering in the UI. - #[serde(default)] #[schemars(default = "default_font_fallbacks")] #[schemars(extend("uniqueItems" = true))] pub ui_font_fallbacks: Option>, /// The OpenType features to enable for text in the UI. - #[serde(default)] #[schemars(default = "default_font_features")] pub ui_font_features: Option, /// The weight of the UI font in CSS units from 100 to 900. - #[serde(default)] #[schemars(default = "default_buffer_font_weight")] pub ui_font_weight: Option, /// The name of a font to use for rendering in text buffers. 
- #[serde(default)] pub buffer_font_family: Option, /// The font fallbacks to use for rendering in text buffers. - #[serde(default)] #[schemars(extend("uniqueItems" = true))] pub buffer_font_fallbacks: Option>, /// The default font size for rendering in text buffers. - #[serde(default)] #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")] pub buffer_font_size: Option, /// The weight of the editor font in CSS units from 100 to 900. - #[serde(default)] #[schemars(default = "default_buffer_font_weight")] pub buffer_font_weight: Option, /// The buffer's line height. - #[serde(default)] pub buffer_line_height: Option, /// The OpenType features to enable for rendering in text buffers. - #[serde(default)] #[schemars(default = "default_font_features")] pub buffer_font_features: Option, /// The font size for agent responses in the agent panel. Falls back to the UI font size if unset. - #[serde(default)] #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")] pub agent_ui_font_size: Option, /// The font size for user messages in the agent panel. - #[serde(default)] #[serde(serialize_with = "crate::serialize_optional_f32_with_two_decimal_places")] pub agent_buffer_font_size: Option, /// The name of the Zed theme to use. - #[serde(default)] pub theme: Option, /// The name of the icon theme to use. - #[serde(default)] pub icon_theme: Option, /// UNSTABLE: Expect many elements to be broken. /// // Controls the density of the UI. - #[serde(rename = "unstable.ui_density", default)] + #[serde(rename = "unstable.ui_density")] pub ui_density: Option, /// How much to fade out unused code. - #[serde(default)] #[schemars(range(min = 0.0, max = 0.9))] pub unnecessary_code_fade: Option, /// EXPERIMENTAL: Overrides for the current theme. /// /// These values will override the ones on the current theme specified in `theme`. - #[serde(rename = "experimental.theme_overrides", default)] + #[serde(rename = "experimental.theme_overrides")] pub experimental_theme_overrides: Option, /// Overrides per theme @@ -270,7 +252,7 @@ impl UiDensity { } /// Font family name. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] #[serde(transparent)] pub struct FontFamilyName(pub Arc); @@ -345,11 +327,11 @@ where } /// The content of a serialized theme. -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] #[serde(default)] pub struct ThemeStyleContent { - #[serde(default, rename = "background.appearance")] + #[serde(rename = "background.appearance")] pub window_background_appearance: Option, #[serde(default)] @@ -380,18 +362,18 @@ pub struct PlayerColorContent { } /// Theme name. 
-#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] #[serde(transparent)] pub struct ThemeName(pub Arc); /// Icon Theme Name -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq)] #[serde(transparent)] pub struct IconThemeName(pub Arc); -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] #[serde(default)] pub struct ThemeColorsContent { @@ -925,19 +907,27 @@ pub struct ThemeColorsContent { pub vim_mode_text: Option, } -#[skip_serializing_none] #[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] #[serde(default)] pub struct HighlightStyleContent { pub color: Option, - #[serde(deserialize_with = "treat_error_as_none")] + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "treat_error_as_none" + )] pub background_color: Option, - #[serde(deserialize_with = "treat_error_as_none")] + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "treat_error_as_none" + )] pub font_style: Option, - #[serde(deserialize_with = "treat_error_as_none")] + #[serde( + skip_serializing_if = "Option::is_none", + deserialize_with = "treat_error_as_none" + )] pub font_weight: Option, } @@ -959,7 +949,7 @@ where Ok(T::deserialize(value).ok()) } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Debug, Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq)] #[serde(default)] pub struct StatusColorsContent { diff --git a/crates/settings/src/settings_content/workspace.rs b/crates/settings/src/settings_content/workspace.rs index fc4c7fdbda553c2a959ba1062ee0f43d675b2f54..f078c873179d2b50893e608bc51e609be9850a12 100644 --- a/crates/settings/src/settings_content/workspace.rs +++ b/crates/settings/src/settings_content/workspace.rs @@ -3,15 +3,14 @@ use std::num::NonZeroUsize; use collections::HashMap; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; -use serde_with::skip_serializing_none; -use settings_macros::MergeFrom; +use settings_macros::{MergeFrom, with_fallible_options}; use crate::{ CenteredPaddingSettings, DelayMs, DockPosition, DockSide, InactiveOpacity, ScrollbarSettingsContent, ShowIndentGuides, serialize_optional_f32_with_two_decimal_places, }; -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct WorkspaceSettingsContent { /// Active pane styling settings. @@ -112,7 +111,7 @@ pub struct WorkspaceSettingsContent { pub zoomed_padding: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Debug, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct ItemSettingsContent { /// Whether to show the Git file status on a tab item. @@ -142,7 +141,7 @@ pub struct ItemSettingsContent { pub show_close_button: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize, JsonSchema, MergeFrom)] pub struct PreviewTabsSettingsContent { /// Whether to show opened editors as preview tabs. 
@@ -244,7 +243,7 @@ pub enum ActivateOnClose { LeftNeighbour, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Copy, Clone, PartialEq, Debug, Default, Serialize, Deserialize, JsonSchema, MergeFrom)] #[serde(rename_all = "snake_case")] pub struct ActivePaneModifiers { @@ -350,7 +349,7 @@ pub enum RestoreOnStartupBehavior { LastSession, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq)] pub struct TabBarSettingsContent { /// Whether or not to show the tab bar in the editor. @@ -367,13 +366,13 @@ pub struct TabBarSettingsContent { pub show_tab_bar_buttons: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug, PartialEq, Eq)] pub struct StatusBarSettingsContent { /// Whether to show the status bar. /// /// Default: true - #[serde(rename = "experimental.show", default)] + #[serde(rename = "experimental.show")] pub show: Option, /// Whether to display the active language button in the status bar. /// @@ -465,7 +464,7 @@ pub enum PaneSplitDirectionVertical { #[derive(Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Default)] #[serde(rename_all = "snake_case")] -#[skip_serializing_none] +#[with_fallible_options] pub struct CenteredLayoutSettings { /// The relative width of the left padding of the central pane from the /// workspace when the centered layout is used. @@ -510,7 +509,7 @@ impl OnLastWindowClosed { } } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct ProjectPanelAutoOpenSettings { /// Whether to automatically open newly created files in the editor. @@ -527,7 +526,7 @@ pub struct ProjectPanelAutoOpenSettings { pub on_drop: Option, } -#[skip_serializing_none] +#[with_fallible_options] #[derive(Clone, PartialEq, Default, Serialize, Deserialize, JsonSchema, MergeFrom, Debug)] pub struct ProjectPanelSettingsContent { /// Whether to show the project panel button in the status bar. 
@@ -663,7 +662,7 @@ pub enum ProjectPanelSortMode { FilesFirst, } -#[skip_serializing_none] +#[with_fallible_options] #[derive( Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, MergeFrom, PartialEq, Eq, Default, )] diff --git a/crates/settings/src/settings_store.rs b/crates/settings/src/settings_store.rs index 5c4a97fa1373232972aac99a2f2abb1ae2810d97..181b8b417879be63fe85dbe6d08adca2d97929bd 100644 --- a/crates/settings/src/settings_store.rs +++ b/crates/settings/src/settings_store.rs @@ -32,7 +32,7 @@ pub type EditorconfigProperties = ec4rs::Properties; use crate::{ ActiveSettingsProfileName, FontFamilyName, IconThemeName, LanguageSettingsContent, - LanguageToSettingsMap, ThemeName, VsCodeSettings, WorktreeId, + LanguageToSettingsMap, ThemeName, VsCodeSettings, WorktreeId, fallible_options, merge_from::MergeFrom, settings_content::{ ExtensionsSettingsContent, ProjectSettingsContent, SettingsContent, UserSettingsContent, @@ -666,44 +666,31 @@ impl SettingsStore { file: SettingsFile, ) -> (Option, SettingsParseResult) { let mut migration_status = MigrationStatus::NotNeeded; - let settings: SettingsContentType = if user_settings_content.is_empty() { - parse_json_with_comments("{}").expect("Empty settings should always be valid") + let (settings, parse_status) = if user_settings_content.is_empty() { + fallible_options::parse_json("{}") } else { let migration_res = migrator::migrate_settings(user_settings_content); - let content = match &migration_res { - Ok(Some(content)) => content, - Ok(None) => user_settings_content, - Err(_) => user_settings_content, - }; - let parse_result = parse_json_with_comments(content); - migration_status = match migration_res { + migration_status = match &migration_res { Ok(Some(_)) => MigrationStatus::Succeeded, Ok(None) => MigrationStatus::NotNeeded, Err(err) => MigrationStatus::Failed { error: err.to_string(), }, }; - match parse_result { - Ok(settings) => settings, - Err(err) => { - let result = SettingsParseResult { - parse_status: ParseStatus::Failed { - error: err.to_string(), - }, - migration_status, - }; - self.file_errors.insert(file, result.clone()); - return (None, result); - } - } + let content = match &migration_res { + Ok(Some(content)) => content, + Ok(None) => user_settings_content, + Err(_) => user_settings_content, + }; + fallible_options::parse_json(content) }; let result = SettingsParseResult { - parse_status: ParseStatus::Success, + parse_status, migration_status, }; self.file_errors.insert(file, result.clone()); - return (Some(settings), result); + return (settings, result); } pub fn error_for_file(&self, file: SettingsFile) -> Option { diff --git a/crates/settings_macros/src/settings_macros.rs b/crates/settings_macros/src/settings_macros.rs index f6da25d7bc0c3f5bd4b3670305086ac28c0f685f..bad786991da9502cb2ff0763f754aae4a54801af 100644 --- a/crates/settings_macros/src/settings_macros.rs +++ b/crates/settings_macros/src/settings_macros.rs @@ -1,6 +1,9 @@ use proc_macro::TokenStream; + use quote::quote; -use syn::{Data, DeriveInput, Fields, parse_macro_input}; +use syn::{ + Data, DeriveInput, Field, Fields, ItemEnum, ItemStruct, Type, parse_macro_input, parse_quote, +}; /// Derives the `MergeFrom` trait for a struct. 
/// @@ -100,3 +103,50 @@ pub fn derive_register_setting(input: TokenStream) -> TokenStream { } .into() } + +// Adds serde attributes to each field with type Option: +// #serde(default, skip_serializing_if = "Option::is_none", deserialize_with = "settings::deserialize_fallible") +#[proc_macro_attribute] +pub fn with_fallible_options(_args: TokenStream, input: TokenStream) -> TokenStream { + fn apply_on_fields(fields: &mut Fields) { + match fields { + Fields::Unit => {} + Fields::Named(fields) => { + for field in &mut fields.named { + add_if_option(field) + } + } + Fields::Unnamed(fields) => { + for field in &mut fields.unnamed { + add_if_option(field) + } + } + } + } + + fn add_if_option(field: &mut Field) { + match &field.ty { + Type::Path(syn::TypePath { qself: None, path }) + if path.leading_colon.is_none() + && path.segments.len() == 1 + && path.segments[0].ident == "Option" => {} + _ => return, + } + let attr = parse_quote!( + #[serde(default, skip_serializing_if = "Option::is_none", deserialize_with="crate::fallible_options::deserialize")] + ); + field.attrs.push(attr); + } + + if let Ok(mut input) = syn::parse::(input.clone()) { + apply_on_fields(&mut input.fields); + quote!(#input).into() + } else if let Ok(mut input) = syn::parse::(input) { + for variant in &mut input.variants { + apply_on_fields(&mut variant.fields); + } + quote!(#input).into() + } else { + panic!("with_fallible_options can only be applied to struct or enum definitions."); + } +} diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 89944f835b0c7905145b3c6c0df13a20c78f37b8..3f1044c309b75bf2400c85f41cc6abe78e8e3e30 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -50,8 +50,8 @@ use workspace::{ use zed::{ OpenListener, OpenRequest, RawOpenRequest, app_menus, build_window_options, derive_paths_with_position, edit_prediction_registry, handle_cli_connection, - handle_keymap_file_changes, handle_settings_changed, handle_settings_file_changes, - initialize_workspace, open_paths_with_positions, + handle_keymap_file_changes, handle_settings_file_changes, initialize_workspace, + open_paths_with_positions, }; use crate::zed::{OpenRequestKind, eager_load_active_theme_and_icon_theme}; @@ -411,12 +411,7 @@ pub fn main() { } settings::init(cx); zlog_settings::init(cx); - handle_settings_file_changes( - user_settings_file_rx, - global_settings_file_rx, - cx, - handle_settings_changed, - ); + handle_settings_file_changes(user_settings_file_rx, global_settings_file_rx, cx); handle_keymap_file_changes(user_keymap_file_rx, cx); let user_agent = format!( diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index e90cf59f38e69be74e67969d230d5532b72dbba7..df46794bb833320f2793fdb798df735fc72c8b3f 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -58,7 +58,7 @@ use rope::Rope; use search::project_search::ProjectSearchBar; use settings::{ BaseKeymap, DEFAULT_KEYMAP_PATH, InvalidSettingsError, KeybindSource, KeymapFile, - KeymapFileLoadResult, Settings, SettingsStore, VIM_KEYMAP_PATH, + KeymapFileLoadResult, MigrationStatus, Settings, SettingsStore, VIM_KEYMAP_PATH, initial_local_debug_tasks_content, initial_project_settings_content, initial_tasks_content, update_settings_file, }; @@ -1363,44 +1363,63 @@ fn open_log_file(workspace: &mut Workspace, window: &mut Window, cx: &mut Contex .detach(); } -pub fn handle_settings_file_changes( - mut user_settings_file_rx: mpsc::UnboundedReceiver, - mut global_settings_file_rx: mpsc::UnboundedReceiver, - cx: &mut App, - settings_changed: impl 
Fn(Option, &mut App) + 'static, -) { - MigrationNotification::set_global(cx.new(|_| MigrationNotification), cx); +fn notify_settings_errors(result: settings::SettingsParseResult, is_user: bool, cx: &mut App) { + if let settings::ParseStatus::Failed { error: err } = &result.parse_status { + let settings_type = if is_user { "user" } else { "global" }; + log::error!("Failed to load {} settings: {err}", settings_type); + } - // Helper function to process settings content - let process_settings = move |content: String, - is_user: bool, - store: &mut SettingsStore, - cx: &mut App| - -> bool { - let result = if is_user { - store.set_user_settings(&content, cx) - } else { - store.set_global_settings(&content, cx) - }; + let error = match result.parse_status { + settings::ParseStatus::Failed { error } => Some(anyhow::format_err!(error)), + settings::ParseStatus::Success => None, + }; + struct SettingsParseErrorNotification; + let id = NotificationId::unique::(); - let id = NotificationId::Named("failed-to-migrate-settings".into()); - // Apply migrations to both user and global settings - let content_migrated = match result.migration_status { - settings::MigrationStatus::Succeeded => { - dismiss_app_notification(&id, cx); - true - } - settings::MigrationStatus::NotNeeded => { - dismiss_app_notification(&id, cx); + let showed_parse_error = match error { + Some(error) => { + if let Some(InvalidSettingsError::LocalSettings { .. }) = + error.downcast_ref::() + { false + // Local settings errors are displayed by the projects + } else { + show_app_notification(id, cx, move |cx| { + cx.new(|cx| { + MessageNotification::new(format!("Invalid user settings file\n{error}"), cx) + .primary_message("Open Settings File") + .primary_icon(IconName::Settings) + .primary_on_click(|window, cx| { + window.dispatch_action( + zed_actions::OpenSettingsFile.boxed_clone(), + cx, + ); + cx.emit(DismissEvent); + }) + }) + }); + true } - settings::MigrationStatus::Failed { error: err } => { + } + None => { + dismiss_app_notification(&id, cx); + false + } + }; + let id = NotificationId::Named("failed-to-migrate-settings".into()); + + match result.migration_status { + settings::MigrationStatus::Succeeded | settings::MigrationStatus::NotNeeded => { + dismiss_app_notification(&id, cx); + } + settings::MigrationStatus::Failed { error: err } => { + if !showed_parse_error { show_app_notification(id, cx, move |cx| { cx.new(|cx| { MessageNotification::new( format!( "Failed to migrate settings\n\ - {err}" + {err}" ), cx, ) @@ -1412,26 +1431,17 @@ pub fn handle_settings_file_changes( }) }) }); - // notify user here - false } - }; - - if let settings::ParseStatus::Failed { error: err } = &result.parse_status { - let settings_type = if is_user { "user" } else { "global" }; - log::error!("Failed to load {} settings: {err}", settings_type); } - - settings_changed( - match result.parse_status { - settings::ParseStatus::Failed { error } => Some(anyhow::format_err!(error)), - settings::ParseStatus::Success => None, - }, - cx, - ); - - content_migrated }; +} + +pub fn handle_settings_file_changes( + mut user_settings_file_rx: mpsc::UnboundedReceiver, + mut global_settings_file_rx: mpsc::UnboundedReceiver, + cx: &mut App, +) { + MigrationNotification::set_global(cx.new(|_| MigrationNotification), cx); // Initial load of both settings files let global_content = cx @@ -1444,8 +1454,8 @@ pub fn handle_settings_file_changes( .unwrap(); SettingsStore::update_global(cx, |store, cx| { - process_settings(global_content, false, store, cx); - 
process_settings(user_content, true, store, cx); + notify_settings_errors(store.set_user_settings(&user_content, cx), true, cx); + notify_settings_errors(store.set_global_settings(&global_content, cx), false, cx); }); // Watch for changes in both files @@ -1462,7 +1472,14 @@ pub fn handle_settings_file_changes( }; let result = cx.update_global(|store: &mut SettingsStore, cx| { - let migrating_in_memory = process_settings(content, is_user, store, cx); + let result = if is_user { + store.set_user_settings(&content, cx) + } else { + store.set_global_settings(&content, cx) + }; + let migrating_in_memory = + matches!(&result.migration_status, MigrationStatus::Succeeded); + notify_settings_errors(result, is_user, cx); if let Some(notifier) = MigrationNotification::try_global(cx) { notifier.update(cx, |_, cx| { cx.emit(MigrationEvent::ContentChanged { @@ -1725,36 +1742,6 @@ pub fn load_default_keymap(cx: &mut App) { } } -pub fn handle_settings_changed(error: Option, cx: &mut App) { - struct SettingsParseErrorNotification; - let id = NotificationId::unique::(); - - match error { - Some(error) => { - if let Some(InvalidSettingsError::LocalSettings { .. }) = - error.downcast_ref::() - { - // Local settings errors are displayed by the projects - return; - } - show_app_notification(id, cx, move |cx| { - cx.new(|cx| { - MessageNotification::new(format!("Invalid user settings file\n{error}"), cx) - .primary_message("Open Settings File") - .primary_icon(IconName::Settings) - .primary_on_click(|window, cx| { - window.dispatch_action(zed_actions::OpenSettingsFile.boxed_clone(), cx); - cx.emit(DismissEvent); - }) - }) - }); - } - None => { - dismiss_app_notification(&id, cx); - } - } -} - pub fn open_new_ssh_project_from_project( workspace: &mut Workspace, paths: Vec, @@ -4497,7 +4484,7 @@ mod tests { app_state.fs.clone(), PathBuf::from("/global_settings.json"), ); - handle_settings_file_changes(settings_rx, global_settings_rx, cx, |_, _| {}); + handle_settings_file_changes(settings_rx, global_settings_rx, cx); handle_keymap_file_changes(keymap_rx, cx); }); workspace @@ -4615,7 +4602,7 @@ mod tests { app_state.fs.clone(), PathBuf::from("/global_settings.json"), ); - handle_settings_file_changes(settings_rx, global_settings_rx, cx, |_, _| {}); + handle_settings_file_changes(settings_rx, global_settings_rx, cx); handle_keymap_file_changes(keymap_rx, cx); }); From 3c69e5c46bfbdfa69400db907a4f6d676c43c1c6 Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Fri, 21 Nov 2025 21:41:19 +0530 Subject: [PATCH 0288/1030] Revert "gpui: Convert macOS clipboard file URLs to paths for paste" (#43254) Reverts zed-industries/zed#36848 Turns out this broke copying a screenshot from apps like CleanShot X and then pasting it over. We should land this again after taking a look at those cases. Pasting screenshots from the native macOS screenshot functionality works though. cc @seantimm Release Notes: - Fixed issue where copying a screenshot from apps like CleanShot X into Agent Panel didn't work as expected. --- crates/gpui/src/platform/mac/platform.rs | 63 +----------------------- 1 file changed, 2 insertions(+), 61 deletions(-) diff --git a/crates/gpui/src/platform/mac/platform.rs b/crates/gpui/src/platform/mac/platform.rs index 46477045722e132a275f926140915dbb9bd6cd5c..7f71d4f164b4974675af5f0d1df5a1f5bb34b7de 100644 --- a/crates/gpui/src/platform/mac/platform.rs +++ b/crates/gpui/src/platform/mac/platform.rs @@ -1135,32 +1135,7 @@ impl Platform for MacPlatform { } } - // Next, check for URL flavors (including file URLs). 
Some tools only provide a URL - // with no plain text entry. - { - // Try the modern UTType identifiers first. - let file_url_type: id = ns_string("public.file-url"); - let url_type: id = ns_string("public.url"); - - let url_data = if msg_send![types, containsObject: file_url_type] { - pasteboard.dataForType(file_url_type) - } else if msg_send![types, containsObject: url_type] { - pasteboard.dataForType(url_type) - } else { - nil - }; - - if url_data != nil && !url_data.bytes().is_null() { - let bytes = slice::from_raw_parts( - url_data.bytes() as *mut u8, - url_data.length() as usize, - ); - - return Some(self.read_string_from_clipboard(&state, bytes)); - } - } - - // If it wasn't a string or URL, try the various supported image types. + // If it wasn't a string, try the various supported image types. for format in ImageFormat::iter() { if let Some(item) = try_clipboard_image(pasteboard, format) { return Some(item); @@ -1168,7 +1143,7 @@ impl Platform for MacPlatform { } } - // If it wasn't a string, URL, or a supported image type, give up. + // If it wasn't a string or a supported image type, give up. None } @@ -1743,40 +1718,6 @@ mod tests { ); } - #[test] - fn test_file_url_reads_as_url_string() { - let platform = build_platform(); - - // Create a file URL for an arbitrary test path and write it to the pasteboard. - // This path does not need to exist; we only validate URL→path conversion. - let mock_path = "/tmp/zed-clipboard-file-url-test"; - unsafe { - // Build an NSURL from the file path - let url: id = msg_send![class!(NSURL), fileURLWithPath: ns_string(mock_path)]; - let abs: id = msg_send![url, absoluteString]; - - // Encode the URL string as UTF-8 bytes - let len: usize = msg_send![abs, lengthOfBytesUsingEncoding: NSUTF8StringEncoding]; - let bytes_ptr = abs.UTF8String() as *const u8; - let data = NSData::dataWithBytes_length_(nil, bytes_ptr as *const c_void, len as u64); - - // Write as public.file-url to the unique pasteboard - let file_url_type: id = ns_string("public.file-url"); - platform - .0 - .lock() - .pasteboard - .setData_forType(data, file_url_type); - } - - // Ensure the clipboard read returns the URL string, not a converted path - let expected_url = format!("file://{}", mock_path); - assert_eq!( - platform.read_from_clipboard(), - Some(ClipboardItem::new_string(expected_url)) - ); - } - fn build_platform() -> MacPlatform { let platform = MacPlatform::new(false); platform.0.lock().pasteboard = unsafe { NSPasteboard::pasteboardWithUniqueName(nil) }; From 9b823616dd7bbb8bb72e1d6baf3879a8dc69bdad Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Fri, 21 Nov 2025 09:12:19 -0700 Subject: [PATCH 0289/1030] Fix install linux (#43205) Closes: #42726 Release Notes: - Fix ./script/install-linux for installing a development version of Zed on Linux --- script/install-linux | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/script/install-linux b/script/install-linux index 72fd8eba2ccfd14d17f23241266a289fbf950fc6..a642ed2e0e42850a83f04e2917cb6cdf1f58cbed 100755 --- a/script/install-linux +++ b/script/install-linux @@ -15,12 +15,8 @@ export ZED_CHANNEL=$( Date: Fri, 21 Nov 2025 17:31:57 +0100 Subject: [PATCH 0290/1030] crashes: Print panic message to logs (#43159) Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/crashes/src/crashes.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/crashes/src/crashes.rs b/crates/crashes/src/crashes.rs index f1d39afdd9a88eea70466594d04df09e034fc928..baf0bcde3b0769c4fc6cf958c86e181cda615683 100644 --- a/crates/crashes/src/crashes.rs +++ b/crates/crashes/src/crashes.rs @@ -51,11 +51,13 @@ pub async fn init(crash_init: InitCrashHandler) { unsafe { env::set_var("RUST_BACKTRACE", "1") }; old_hook(info); // prevent the macOS crash dialog from popping up - std::process::exit(1); + if cfg!(target_os = "macos") { + std::process::exit(1); + } })); return; } - (Some(true), _) | (None, _) => { + _ => { panic::set_hook(Box::new(panic_hook)); } } @@ -300,11 +302,18 @@ pub fn panic_hook(info: &PanicHookInfo) { .map(|loc| format!("{}:{}", loc.file(), loc.line())) .unwrap_or_default(); + let current_thread = std::thread::current(); + let thread_name = current_thread.name().unwrap_or(""); + // wait 500ms for the crash handler process to start up // if it's still not there just write panic info and no minidump let retry_frequency = Duration::from_millis(100); for _ in 0..5 { if let Some(client) = CRASH_HANDLER.get() { + let location = info + .location() + .map_or_else(|| "".to_owned(), |location| location.to_string()); + log::error!("thread '{thread_name}' panicked at {location}:\n{message}..."); client .send_message( 2, From 4fb671f4eb95b5190c4e9540322a61f137c556f5 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Fri, 21 Nov 2025 13:39:08 -0300 Subject: [PATCH 0291/1030] zeta2: Predict at next diagnostic location (#43257) When no predictions are available for the current buffer, we will now attempt to predict at the closest diagnostic from the cursor location that wasn't included in the last prediction request. This enables a commonly desired kind of far-away jump without requiring explicit model support. 
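To make the heuristic concrete, here is a minimal, self-contained sketch of the selection step. The `Diagnostic` type and `next_diagnostic_row` function are illustrative stand-ins, not the actual `BufferSnapshot`/diagnostic-group API this patch operates on:

```rust
// Minimal sketch of the selection heuristic (illustrative types only; this is
// not Zed's real BufferSnapshot/DiagnosticEntry API).

#[derive(Clone, Copy)]
struct Diagnostic {
    row: u32, // starting row of the diagnostic's primary range
}

/// Pick the diagnostic closest to the cursor whose row lies outside the range
/// of rows that the last prediction request already covered.
fn next_diagnostic_row(
    diagnostics: &[Diagnostic],
    cursor_row: u32,
    already_searched: std::ops::Range<u32>,
) -> Option<u32> {
    diagnostics
        .iter()
        .map(|d| d.row)
        .filter(|row| !already_searched.contains(row))
        .min_by_key(|row| row.abs_diff(cursor_row))
}

fn main() {
    let diagnostics = [
        Diagnostic { row: 3 },
        Diagnostic { row: 40 },
        Diagnostic { row: 95 },
    ];
    // Rows 0..20 were already covered by the previous request, so the
    // diagnostic at row 3 is skipped and we jump to row 40 instead.
    assert_eq!(next_diagnostic_row(&diagnostics, 10, 0..20), Some(40));
}
```

In the real change the same idea runs over diagnostic groups and anchors, and when the current buffer has no remaining diagnostics it falls back to the other buffer whose path shares the most parent directories with the active one.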
Release Notes: - N/A --- crates/codestral/src/codestral.rs | 2 +- .../src/copilot_completion_provider.rs | 2 +- crates/edit_prediction/src/edit_prediction.rs | 4 +- crates/editor/src/edit_prediction_tests.rs | 4 +- .../src/supermaven_completion_provider.rs | 2 +- crates/util/src/rel_path.rs | 1 + crates/zeta/src/zeta.rs | 2 +- crates/zeta2/Cargo.toml | 3 +- crates/zeta2/src/provider.rs | 89 +-- crates/zeta2/src/zeta2.rs | 668 ++++++++++++++---- crates/zeta2_tools/src/zeta2_tools.rs | 19 +- 11 files changed, 539 insertions(+), 257 deletions(-) diff --git a/crates/codestral/src/codestral.rs b/crates/codestral/src/codestral.rs index 9fbd207a809fb2cb3ac685ea6629a36c8631d1fe..6a500acbf6ec5eea63c35a8deb83a8545cee497e 100644 --- a/crates/codestral/src/codestral.rs +++ b/crates/codestral/src/codestral.rs @@ -182,7 +182,7 @@ impl EditPredictionProvider for CodestralCompletionProvider { Self::api_key(cx).is_some() } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &App) -> bool { self.pending_request.is_some() } diff --git a/crates/copilot/src/copilot_completion_provider.rs b/crates/copilot/src/copilot_completion_provider.rs index 30ef6de07ec92f66cc888b52a540cf9c7e673bb4..e92f0c7d7dd7e51c4a8fdc19f34bd6eb4189c097 100644 --- a/crates/copilot/src/copilot_completion_provider.rs +++ b/crates/copilot/src/copilot_completion_provider.rs @@ -68,7 +68,7 @@ impl EditPredictionProvider for CopilotCompletionProvider { false } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &App) -> bool { self.pending_refresh.is_some() && self.completions.is_empty() } diff --git a/crates/edit_prediction/src/edit_prediction.rs b/crates/edit_prediction/src/edit_prediction.rs index aebfa5e5229ef1fec50f2d9cf74e354878ddc1c5..1984383a9691ae9373973a3eb9f00db4e7e795f2 100644 --- a/crates/edit_prediction/src/edit_prediction.rs +++ b/crates/edit_prediction/src/edit_prediction.rs @@ -87,7 +87,7 @@ pub trait EditPredictionProvider: 'static + Sized { cursor_position: language::Anchor, cx: &App, ) -> bool; - fn is_refreshing(&self) -> bool; + fn is_refreshing(&self, cx: &App) -> bool; fn refresh( &mut self, buffer: Entity, @@ -200,7 +200,7 @@ where } fn is_refreshing(&self, cx: &App) -> bool { - self.read(cx).is_refreshing() + self.read(cx).is_refreshing(cx) } fn refresh( diff --git a/crates/editor/src/edit_prediction_tests.rs b/crates/editor/src/edit_prediction_tests.rs index 74f13a404c6a52db448d68eba9e5c255e7276923..a1839144a47a81f668ba2743cd5e362f6711d0e9 100644 --- a/crates/editor/src/edit_prediction_tests.rs +++ b/crates/editor/src/edit_prediction_tests.rs @@ -469,7 +469,7 @@ impl EditPredictionProvider for FakeEditPredictionProvider { true } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &gpui::App) -> bool { false } @@ -542,7 +542,7 @@ impl EditPredictionProvider for FakeNonZedEditPredictionProvider { true } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &gpui::App) -> bool { false } diff --git a/crates/supermaven/src/supermaven_completion_provider.rs b/crates/supermaven/src/supermaven_completion_provider.rs index 0c9fe85da6130f5ea2040434a0dcd3727754d3c0..9d5e256aca1b66644145cb688851d0ec5c1b81b9 100644 --- a/crates/supermaven/src/supermaven_completion_provider.rs +++ b/crates/supermaven/src/supermaven_completion_provider.rs @@ -129,7 +129,7 @@ impl EditPredictionProvider for SupermavenCompletionProvider { self.supermaven.read(cx).is_enabled() } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &App) -> bool { self.pending_refresh.is_some() && 
self.completion_id.is_none() } diff --git a/crates/util/src/rel_path.rs b/crates/util/src/rel_path.rs index b360297f209c54c6a33b174a738ed1876fbc16a0..60a0e2ef9ef51ee579a30fac57486b0040c42227 100644 --- a/crates/util/src/rel_path.rs +++ b/crates/util/src/rel_path.rs @@ -374,6 +374,7 @@ impl PartialEq for RelPath { } } +#[derive(Default)] pub struct RelPathComponents<'a>(&'a str); pub struct RelPathAncestors<'a>(Option<&'a str>); diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index c2ef5cb826db0947c18e1e91a6163cccc12deb11..cb31488d17668531ee11a67d1e4be19a1674d3d2 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -1486,7 +1486,7 @@ impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { ) -> bool { true } - fn is_refreshing(&self) -> bool { + fn is_refreshing(&self, _cx: &App) -> bool { !self.pending_completions.is_empty() } diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml index 0f156f68fac881d65d76f178315f40df1dba9d7f..834762447707b88d6b009f0d6700c639306c9bbd 100644 --- a/crates/zeta2/Cargo.toml +++ b/crates/zeta2/Cargo.toml @@ -32,7 +32,9 @@ indoc.workspace = true language.workspace = true language_model.workspace = true log.workspace = true +lsp.workspace = true open_ai.workspace = true +pretty_assertions.workspace = true project.workspace = true release_channel.workspace = true serde.workspace = true @@ -44,7 +46,6 @@ util.workspace = true uuid.workspace = true workspace.workspace = true worktree.workspace = true -pretty_assertions.workspace = true [dev-dependencies] clock = { workspace = true, features = ["test-support"] } diff --git a/crates/zeta2/src/provider.rs b/crates/zeta2/src/provider.rs index 1b82826f663b092b5763935d9a7a2d4bb9607ebf..768af6253fe1a2aa60ef9cb0a10fcee0035dc3e2 100644 --- a/crates/zeta2/src/provider.rs +++ b/crates/zeta2/src/provider.rs @@ -1,24 +1,15 @@ -use std::{ - cmp, - sync::Arc, - time::{Duration, Instant}, -}; +use std::{cmp, sync::Arc, time::Duration}; -use arrayvec::ArrayVec; use client::{Client, UserStore}; use edit_prediction::{DataCollectionState, Direction, EditPredictionProvider}; -use gpui::{App, Entity, Task, prelude::*}; +use gpui::{App, Entity, prelude::*}; use language::ToPoint as _; use project::Project; -use util::ResultExt as _; use crate::{BufferEditPrediction, Zeta, ZetaEditPredictionModel}; pub struct ZetaEditPredictionProvider { zeta: Entity, - next_pending_prediction_id: usize, - pending_predictions: ArrayVec, - last_request_timestamp: Instant, project: Entity, } @@ -29,28 +20,25 @@ impl ZetaEditPredictionProvider { project: Entity, client: &Arc, user_store: &Entity, - cx: &mut App, + cx: &mut Context, ) -> Self { let zeta = Zeta::global(client, user_store, cx); zeta.update(cx, |zeta, cx| { zeta.register_project(&project, cx); }); + cx.observe(&zeta, |_this, _zeta, cx| { + cx.notify(); + }) + .detach(); + Self { - zeta, - next_pending_prediction_id: 0, - pending_predictions: ArrayVec::new(), - last_request_timestamp: Instant::now(), project: project, + zeta, } } } -struct PendingPrediction { - id: usize, - _task: Task<()>, -} - impl EditPredictionProvider for ZetaEditPredictionProvider { fn name() -> &'static str { "zed-predict2" @@ -95,8 +83,8 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { } } - fn is_refreshing(&self) -> bool { - !self.pending_predictions.is_empty() + fn is_refreshing(&self, cx: &App) -> bool { + self.zeta.read(cx).is_refreshing(&self.project) } fn refresh( @@ -123,59 +111,8 @@ impl EditPredictionProvider for 
ZetaEditPredictionProvider { self.zeta.update(cx, |zeta, cx| { zeta.refresh_context_if_needed(&self.project, &buffer, cursor_position, cx); + zeta.refresh_prediction_from_buffer(self.project.clone(), buffer, cursor_position, cx) }); - - let pending_prediction_id = self.next_pending_prediction_id; - self.next_pending_prediction_id += 1; - let last_request_timestamp = self.last_request_timestamp; - - let project = self.project.clone(); - let task = cx.spawn(async move |this, cx| { - if let Some(timeout) = (last_request_timestamp + Self::THROTTLE_TIMEOUT) - .checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } - - let refresh_task = this.update(cx, |this, cx| { - this.last_request_timestamp = Instant::now(); - this.zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction(&project, &buffer, cursor_position, cx) - }) - }); - - if let Some(refresh_task) = refresh_task.ok() { - refresh_task.await.log_err(); - } - - this.update(cx, |this, cx| { - if this.pending_predictions[0].id == pending_prediction_id { - this.pending_predictions.remove(0); - } else { - this.pending_predictions.clear(); - } - - cx.notify(); - }) - .ok(); - }); - - // We always maintain at most two pending predictions. When we already - // have two, we replace the newest one. - if self.pending_predictions.len() <= 1 { - self.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } else if self.pending_predictions.len() == 2 { - self.pending_predictions.pop(); - self.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } - - cx.notify(); } fn cycle( @@ -191,14 +128,12 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { self.zeta.update(cx, |zeta, cx| { zeta.accept_current_prediction(&self.project, cx); }); - self.pending_predictions.clear(); } fn discard(&mut self, cx: &mut Context) { self.zeta.update(cx, |zeta, _cx| { zeta.discard_current_prediction(&self.project); }); - self.pending_predictions.clear(); } fn suggest( diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 0d0f4f3d39e9c997282695828ba16e7eccd7d8e2..a06d7043cf565dccf0d8a4e8830cbb41c2e9981b 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -1,4 +1,5 @@ use anyhow::{Context as _, Result, anyhow, bail}; +use arrayvec::ArrayVec; use chrono::TimeDelta; use client::{Client, EditPredictionUsage, UserStore}; use cloud_llm_client::predict_edits_v3::{self, PromptFormat, Signature}; @@ -19,18 +20,20 @@ use futures::AsyncReadExt as _; use futures::channel::{mpsc, oneshot}; use gpui::http_client::{AsyncBody, Method}; use gpui::{ - App, Entity, EntityId, Global, SemanticVersion, SharedString, Subscription, Task, WeakEntity, - http_client, prelude::*, + App, AsyncApp, Entity, EntityId, Global, SemanticVersion, SharedString, Subscription, Task, + WeakEntity, http_client, prelude::*, }; use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, Point, ToOffset as _, ToPoint}; use language::{BufferSnapshot, OffsetRangeExt}; use language_model::{LlmApiToken, RefreshLlmTokenListener}; +use lsp::DiagnosticSeverity; use open_ai::FunctionDefinition; use project::{Project, ProjectPath}; use release_channel::AppVersion; use serde::de::DeserializeOwned; use std::collections::{VecDeque, hash_map}; +use std::fmt::Write; use std::ops::Range; use std::path::Path; use std::str::FromStr as _; @@ -39,7 +42,7 @@ use std::time::{Duration, Instant}; use std::{env, mem}; use thiserror::Error; use 
util::rel_path::RelPathBuf; -use util::{LogErrorFuture, ResultExt as _, TryFutureExt}; +use util::{LogErrorFuture, RangeExt as _, ResultExt as _, TryFutureExt}; use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; pub mod assemble_excerpts; @@ -239,6 +242,9 @@ struct ZetaProject { recent_paths: VecDeque, registered_buffers: HashMap, current_prediction: Option, + next_pending_prediction_id: usize, + pending_predictions: ArrayVec, + last_prediction_refresh: Option<(EntityId, Instant)>, context: Option, Vec>>>, refresh_context_task: Option>>>, refresh_context_debounce_task: Option>>, @@ -248,7 +254,7 @@ struct ZetaProject { #[derive(Debug, Clone)] struct CurrentEditPrediction { - pub requested_by_buffer_id: EntityId, + pub requested_by: PredictionRequestedBy, pub prediction: EditPrediction, } @@ -272,11 +278,13 @@ impl CurrentEditPrediction { return true; }; + let requested_by_buffer_id = self.requested_by.buffer_id(); + // This reduces the occurrence of UI thrash from replacing edits // // TODO: This is fairly arbitrary - should have a more general heuristic that handles multiple edits. - if self.requested_by_buffer_id == self.prediction.buffer.entity_id() - && self.requested_by_buffer_id == old_prediction.prediction.buffer.entity_id() + if requested_by_buffer_id == Some(self.prediction.buffer.entity_id()) + && requested_by_buffer_id == Some(old_prediction.prediction.buffer.entity_id()) && old_edits.len() == 1 && new_edits.len() == 1 { @@ -289,6 +297,26 @@ impl CurrentEditPrediction { } } +#[derive(Debug, Clone)] +enum PredictionRequestedBy { + DiagnosticsUpdate, + Buffer(EntityId), +} + +impl PredictionRequestedBy { + pub fn buffer_id(&self) -> Option { + match self { + PredictionRequestedBy::DiagnosticsUpdate => None, + PredictionRequestedBy::Buffer(buffer_id) => Some(*buffer_id), + } + } +} + +struct PendingPrediction { + id: usize, + _task: Task<()>, +} + /// A prediction from the perspective of a buffer. 
#[derive(Debug)] enum BufferEditPrediction<'a> { @@ -513,31 +541,48 @@ impl Zeta { recent_paths: VecDeque::new(), registered_buffers: HashMap::default(), current_prediction: None, + pending_predictions: ArrayVec::new(), + next_pending_prediction_id: 0, + last_prediction_refresh: None, context: None, refresh_context_task: None, refresh_context_debounce_task: None, refresh_context_timestamp: None, - _subscription: cx.subscribe(&project, |this, project, event, cx| { - // TODO [zeta2] init with recent paths - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - if let project::Event::ActiveEntryChanged(Some(active_entry_id)) = event { - let path = project.read(cx).path_for_entry(*active_entry_id, cx); - if let Some(path) = path { - if let Some(ix) = zeta_project - .recent_paths - .iter() - .position(|probe| probe == &path) - { - zeta_project.recent_paths.remove(ix); - } - zeta_project.recent_paths.push_front(path); - } - } - } - }), + _subscription: cx.subscribe(&project, Self::handle_project_event), }) } + fn handle_project_event( + &mut self, + project: Entity, + event: &project::Event, + cx: &mut Context, + ) { + // TODO [zeta2] init with recent paths + match event { + project::Event::ActiveEntryChanged(Some(active_entry_id)) => { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + let path = project.read(cx).path_for_entry(*active_entry_id, cx); + if let Some(path) = path { + if let Some(ix) = zeta_project + .recent_paths + .iter() + .position(|probe| probe == &path) + { + zeta_project.recent_paths.remove(ix); + } + zeta_project.recent_paths.push_front(path); + } + } + project::Event::DiagnosticsUpdated { .. } => { + self.refresh_prediction_from_diagnostics(project, cx); + } + _ => (), + } + } + fn register_buffer_impl<'a>( zeta_project: &'a mut ZetaProject, buffer: &Entity, @@ -650,16 +695,25 @@ impl Zeta { let project_state = self.projects.get(&project.entity_id())?; let CurrentEditPrediction { - requested_by_buffer_id, + requested_by, prediction, } = project_state.current_prediction.as_ref()?; if prediction.targets_buffer(buffer.read(cx)) { Some(BufferEditPrediction::Local { prediction }) - } else if *requested_by_buffer_id == buffer.entity_id() { - Some(BufferEditPrediction::Jump { prediction }) } else { - None + let show_jump = match requested_by { + PredictionRequestedBy::Buffer(requested_by_buffer_id) => { + requested_by_buffer_id == &buffer.entity_id() + } + PredictionRequestedBy::DiagnosticsUpdate => true, + }; + + if show_jump { + Some(BufferEditPrediction::Jump { prediction }) + } else { + None + } } } @@ -676,6 +730,7 @@ impl Zeta { return; }; let request_id = prediction.prediction.id.to_string(); + project_state.pending_predictions.clear(); let client = self.client.clone(); let llm_token = self.llm_token.clone(); @@ -715,47 +770,191 @@ impl Zeta { fn discard_current_prediction(&mut self, project: &Entity) { if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { project_state.current_prediction.take(); + project_state.pending_predictions.clear(); }; } - pub fn refresh_prediction( + fn is_refreshing(&self, project: &Entity) -> bool { + self.projects + .get(&project.entity_id()) + .is_some_and(|project_state| !project_state.pending_predictions.is_empty()) + } + + pub fn refresh_prediction_from_buffer( &mut self, - project: &Entity, - buffer: &Entity, + project: Entity, + buffer: Entity, position: language::Anchor, cx: &mut Context, - ) -> Task> { - let request_task = 
self.request_prediction(project, buffer, position, cx); - let buffer = buffer.clone(); - let project = project.clone(); + ) { + self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { + let Some(request_task) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &buffer, position, cx) + }) + .log_err() + else { + return Task::ready(anyhow::Ok(())); + }; - cx.spawn(async move |this, cx| { - if let Some(prediction) = request_task.await? { - this.update(cx, |this, cx| { - let project_state = this - .projects - .get_mut(&project.entity_id()) - .context("Project not found")?; - - let new_prediction = CurrentEditPrediction { - requested_by_buffer_id: buffer.entity_id(), - prediction: prediction, - }; + let project = project.clone(); + cx.spawn(async move |cx| { + if let Some(prediction) = request_task.await? { + this.update(cx, |this, cx| { + let project_state = this + .projects + .get_mut(&project.entity_id()) + .context("Project not found")?; + + let new_prediction = CurrentEditPrediction { + requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), + prediction: prediction, + }; - if project_state - .current_prediction - .as_ref() - .is_none_or(|old_prediction| { - new_prediction.should_replace_prediction(&old_prediction, cx) - }) - { - project_state.current_prediction = Some(new_prediction); + if project_state + .current_prediction + .as_ref() + .is_none_or(|old_prediction| { + new_prediction.should_replace_prediction(&old_prediction, cx) + }) + { + project_state.current_prediction = Some(new_prediction); + cx.notify(); + } + anyhow::Ok(()) + })??; + } + Ok(()) + }) + }) + } + + pub fn refresh_prediction_from_diagnostics( + &mut self, + project: Entity, + cx: &mut Context, + ) { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + + // Prefer predictions from buffer + if zeta_project.current_prediction.is_some() { + return; + }; + + self.queue_prediction_refresh(project.clone(), project.entity_id(), cx, move |this, cx| { + let Some(open_buffer_task) = project + .update(cx, |project, cx| { + project + .active_entry() + .and_then(|entry| project.path_for_entry(entry, cx)) + .map(|path| project.open_buffer(path, cx)) + }) + .log_err() + .flatten() + else { + return Task::ready(anyhow::Ok(())); + }; + + cx.spawn(async move |cx| { + let active_buffer = open_buffer_task.await?; + let snapshot = active_buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; + + let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + active_buffer, + &snapshot, + Default::default(), + Default::default(), + &project, + cx, + ) + .await? + else { + return anyhow::Ok(()); + }; + + let Some(prediction) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &jump_buffer, jump_position, cx) + })? + .await? 
+ else { + return anyhow::Ok(()); + }; + + this.update(cx, |this, cx| { + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + zeta_project.current_prediction.get_or_insert_with(|| { + cx.notify(); + CurrentEditPrediction { + requested_by: PredictionRequestedBy::DiagnosticsUpdate, + prediction, + } + }); } - anyhow::Ok(()) - })??; + })?; + + anyhow::Ok(()) + }) + }); + } + + #[cfg(not(test))] + pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + #[cfg(test)] + pub const THROTTLE_TIMEOUT: Duration = Duration::ZERO; + + fn queue_prediction_refresh( + &mut self, + project: Entity, + throttle_entity: EntityId, + cx: &mut Context, + do_refresh: impl FnOnce(WeakEntity, &mut AsyncApp) -> Task> + 'static, + ) { + let zeta_project = self.get_or_init_zeta_project(&project, cx); + let pending_prediction_id = zeta_project.next_pending_prediction_id; + zeta_project.next_pending_prediction_id += 1; + let last_request = zeta_project.last_prediction_refresh; + + // TODO report cancelled requests like in zeta1 + let task = cx.spawn(async move |this, cx| { + if let Some((last_entity, last_timestamp)) = last_request + && throttle_entity == last_entity + && let Some(timeout) = + (last_timestamp + Self::THROTTLE_TIMEOUT).checked_duration_since(Instant::now()) + { + cx.background_executor().timer(timeout).await; } - Ok(()) - }) + + do_refresh(this.clone(), cx).await.log_err(); + + this.update(cx, |this, cx| { + let zeta_project = this.get_or_init_zeta_project(&project, cx); + + if zeta_project.pending_predictions[0].id == pending_prediction_id { + zeta_project.pending_predictions.remove(0); + } else { + zeta_project.pending_predictions.clear(); + } + + cx.notify(); + }) + .ok(); + }); + + if zeta_project.pending_predictions.len() <= 1 { + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + _task: task, + }); + } else if zeta_project.pending_predictions.len() == 2 { + zeta_project.pending_predictions.pop(); + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + _task: task, + }); + } } pub fn request_prediction( @@ -770,7 +969,7 @@ impl Zeta { self.request_prediction_with_zed_cloud(project, active_buffer, position, cx) } ZetaEditPredictionModel::Sweep => { - self.request_prediction_with_sweep(project, active_buffer, position, cx) + self.request_prediction_with_sweep(project, active_buffer, position, true, cx) } } } @@ -780,6 +979,7 @@ impl Zeta { project: &Entity, active_buffer: &Entity, position: language::Anchor, + allow_jump: bool, cx: &mut Context, ) -> Task>> { let snapshot = active_buffer.read(cx).snapshot(); @@ -802,6 +1002,7 @@ impl Zeta { let project_state = self.get_or_init_zeta_project(project, cx); let events = project_state.events.clone(); + let has_events = !events.is_empty(); let recent_buffers = project_state.recent_paths.iter().cloned(); let http_client = cx.http_client(); @@ -817,114 +1018,188 @@ impl Zeta { .take(3) .collect::>(); - let result = cx.background_spawn(async move { - let text = snapshot.text(); + const DIAGNOSTIC_LINES_RANGE: u32 = 20; - let mut recent_changes = String::new(); - for event in events { - sweep_ai::write_event(event, &mut recent_changes).unwrap(); - } + let cursor_point = position.to_point(&snapshot); + let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); + let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; + let diagnostic_search_range = + Point::new(diagnostic_search_start, 
0)..Point::new(diagnostic_search_end, 0); + + let result = cx.background_spawn({ + let snapshot = snapshot.clone(); + let diagnostic_search_range = diagnostic_search_range.clone(); + async move { + let text = snapshot.text(); + + let mut recent_changes = String::new(); + for event in events { + sweep_ai::write_event(event, &mut recent_changes).unwrap(); + } + + let mut file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = Point::new(30, 0).min(snapshot.max_point()); + sweep_ai::FileChunk { + content: snapshot.text_for_range(Point::zero()..end_point).collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? + .0, + ) + }), + } + }) + .collect::>(); + + let diagnostic_entries = + snapshot.diagnostics_in_range(diagnostic_search_range, false); + let mut diagnostic_content = String::new(); + let mut diagnostic_count = 0; + + for entry in diagnostic_entries { + let start_point: Point = entry.range.start; + + let severity = match entry.diagnostic.severity { + DiagnosticSeverity::ERROR => "error", + DiagnosticSeverity::WARNING => "warning", + DiagnosticSeverity::INFORMATION => "info", + DiagnosticSeverity::HINT => "hint", + _ => continue, + }; + + diagnostic_count += 1; - let file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = language::Point::new(30, 0).min(snapshot.max_point()); - sweep_ai::FileChunk { - content: snapshot - .text_for_range(language::Point::zero()..end_point) - .collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), + writeln!( + &mut diagnostic_content, + "{} at line {}: {}", + severity, + start_point.row + 1, + entry.diagnostic.message + )?; + } + + if !diagnostic_content.is_empty() { + file_chunks.push(sweep_ai::FileChunk { + file_path: format!("Diagnostics for {}", full_path.display()), start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? 
- .0, - ) - }), - } - }) - .collect(); - - let request_body = sweep_ai::AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; + end_line: diagnostic_count, + content: diagnostic_content, + timestamp: None, + }); + } - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); + let request_body = sweep_ai::AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; - const SWEEP_API_URL: &str = - "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; + const SWEEP_API_URL: &str = + "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - let mut response = http_client.send(request).await?; + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; - let mut body: Vec = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; + let mut response = http_client.send(request).await?; - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; - let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = language::text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - anyhow::Ok((response.autocomplete_id, edits, snapshot)) + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + 
.into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok((response.autocomplete_id, edits, snapshot)) + } }); let buffer = active_buffer.clone(); + let project = project.clone(); + let active_buffer = active_buffer.clone(); - cx.spawn(async move |_, cx| { + cx.spawn(async move |this, cx| { let (id, edits, old_snapshot) = result.await?; if edits.is_empty() { + if has_events + && allow_jump + && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + active_buffer, + &snapshot, + diagnostic_search_range, + cursor_point, + &project, + cx, + ) + .await? + { + return this + .update(cx, |this, cx| { + this.request_prediction_with_sweep( + &project, + &jump_buffer, + jump_position, + false, + cx, + ) + })? + .await; + } + return anyhow::Ok(None); } @@ -955,6 +1230,85 @@ impl Zeta { }) } + async fn next_diagnostic_location( + active_buffer: Entity, + active_buffer_snapshot: &BufferSnapshot, + active_buffer_diagnostic_search_range: Range, + active_buffer_cursor_point: Point, + project: &Entity, + cx: &mut AsyncApp, + ) -> Result, language::Anchor)>> { + // find the closest diagnostic to the cursor that wasn't close enough to be included in the last request + let mut jump_location = active_buffer_snapshot + .diagnostic_groups(None) + .into_iter() + .filter_map(|(_, group)| { + let range = &group.entries[group.primary_ix] + .range + .to_point(&active_buffer_snapshot); + if range.overlaps(&active_buffer_diagnostic_search_range) { + None + } else { + Some(range.start) + } + }) + .min_by_key(|probe| probe.row.abs_diff(active_buffer_cursor_point.row)) + .map(|position| { + ( + active_buffer.clone(), + active_buffer_snapshot.anchor_before(position), + ) + }); + + if jump_location.is_none() { + let active_buffer_path = active_buffer.read_with(cx, |buffer, cx| { + let file = buffer.file()?; + + Some(ProjectPath { + worktree_id: file.worktree_id(cx), + path: file.path().clone(), + }) + })?; + + let buffer_task = project.update(cx, |project, cx| { + let (path, _, _) = project + .diagnostic_summaries(false, cx) + .filter(|(path, _, _)| Some(path) != active_buffer_path.as_ref()) + .max_by_key(|(path, _, _)| { + // find the buffer with errors that shares most parent directories + path.path + .components() + .zip( + active_buffer_path + .as_ref() + .map(|p| p.path.components()) + .unwrap_or_default(), + ) + .take_while(|(a, b)| a == b) + .count() + })?; + + Some(project.open_buffer(path, cx)) + })?; + + if let Some(buffer_task) = buffer_task { + let closest_buffer = buffer_task.await?; + + jump_location = closest_buffer + .read_with(cx, |buffer, _cx| { + buffer + .buffer_diagnostics(None) + .into_iter() + .min_by_key(|entry| entry.diagnostic.severity) + .map(|entry| entry.range.start) + })? 
+ .map(|position| (closest_buffer, position)); + } + } + + anyhow::Ok(jump_location) + } + fn request_prediction_with_zed_cloud( &mut self, project: &Entity, @@ -2168,8 +2522,8 @@ mod tests { // Prediction for current file - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction(&project, &buffer1, position, cx) + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); let (_request, respond_tx) = req_rx.next().await.unwrap(); @@ -2184,7 +2538,8 @@ mod tests { Bye "})) .unwrap(); - prediction_task.await.unwrap(); + + cx.run_until_parked(); zeta.read_with(cx, |zeta, cx| { let prediction = zeta @@ -2242,8 +2597,8 @@ mod tests { }); // Prediction for another file - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction(&project, &buffer1, position, cx) + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); let (_request, respond_tx) = req_rx.next().await.unwrap(); respond_tx @@ -2256,7 +2611,8 @@ mod tests { Adios "#})) .unwrap(); - prediction_task.await.unwrap(); + cx.run_until_parked(); + zeta.read_with(cx, |zeta, cx| { let prediction = zeta .current_prediction_for_buffer(&buffer1, &project, cx) diff --git a/crates/zeta2_tools/src/zeta2_tools.rs b/crates/zeta2_tools/src/zeta2_tools.rs index 756fff5d621a85f7936a980d71f68c87098c4539..8758857e7cf50d6a5f2e5a4ea509293b18a8cb2c 100644 --- a/crates/zeta2_tools/src/zeta2_tools.rs +++ b/crates/zeta2_tools/src/zeta2_tools.rs @@ -1,6 +1,6 @@ mod zeta2_context_view; -use std::{cmp::Reverse, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; +use std::{cmp::Reverse, path::PathBuf, str::FromStr, sync::Arc}; use chrono::TimeDelta; use client::{Client, UserStore}; @@ -237,24 +237,13 @@ impl Zeta2Inspector { fn set_zeta_options(&mut self, options: ZetaOptions, cx: &mut Context) { self.zeta.update(cx, |this, _cx| this.set_options(options)); - const DEBOUNCE_TIME: Duration = Duration::from_millis(100); - if let Some(prediction) = self.last_prediction.as_mut() { if let Some(buffer) = prediction.buffer.upgrade() { let position = prediction.position; - let zeta = self.zeta.clone(); let project = self.project.clone(); - prediction._task = Some(cx.spawn(async move |_this, cx| { - cx.background_executor().timer(DEBOUNCE_TIME).await; - if let Some(task) = zeta - .update(cx, |zeta, cx| { - zeta.refresh_prediction(&project, &buffer, position, cx) - }) - .ok() - { - task.await.log_err(); - } - })); + self.zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project, buffer, position, cx) + }); prediction.state = LastPredictionState::Requested; } else { self.last_prediction.take(); From 5a9b810aef5ff5769215d031ad6968795d7084df Mon Sep 17 00:00:00 2001 From: Luke Naylor Date: Fri, 21 Nov 2025 16:57:56 +0000 Subject: [PATCH 0292/1030] markdown: Add LaTeX syntax highlighting injection (#41110) Closes [#30264](https://github.com/zed-industries/zed/issues/30264) Small addition based on [nvim-treesitter](https://github.com/nvim-treesitter/nvim-treesitter/blob/main/runtime/queries/markdown_inline/injections.scm) Screenshot From 2025-10-24 15-47-58 This does require the LaTeX extension to be installed. 
Release Notes: - Added LaTeX highlighting for inline and display equations in Markdown when the LaTeX extension is installed --------- Co-authored-by: Ben Kunkle --- crates/languages/src/markdown-inline/injections.scm | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 crates/languages/src/markdown-inline/injections.scm diff --git a/crates/languages/src/markdown-inline/injections.scm b/crates/languages/src/markdown-inline/injections.scm new file mode 100644 index 0000000000000000000000000000000000000000..074b08fd87432f324ae56e0403fc1a0a77334b05 --- /dev/null +++ b/crates/languages/src/markdown-inline/injections.scm @@ -0,0 +1,2 @@ +((latex_block) @injection.content + (#set! injection.language "latex")) From f0820ae8e4a2c5fbc9b469a834f7b1548366b547 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Fri, 21 Nov 2025 18:10:30 +0100 Subject: [PATCH 0293/1030] agent_ui: Remove context strip from inline assistant (#42633) TODO - [x] Implement PromptEditor::paste - [x] Fix creases on unlink - [x] PromptCompletionProviderDelegate::supports_images - [ ] Fix highlighting in completion menu Release Notes: - N/A --------- Co-authored-by: Danilo Leal --- Cargo.lock | 2 - Cargo.toml | 1 - assets/keymaps/default-linux.json | 16 +- assets/keymaps/default-macos.json | 16 - assets/keymaps/default-windows.json | 17 +- crates/agent_ui/Cargo.toml | 2 - crates/agent_ui/src/acp.rs | 1 - crates/agent_ui/src/acp/message_editor.rs | 1133 +++------------- crates/agent_ui/src/agent_panel.rs | 27 +- crates/agent_ui/src/agent_ui.rs | 20 +- crates/agent_ui/src/buffer_codegen.rs | 90 +- .../src/{acp => }/completion_provider.rs | 871 ++++++++++--- crates/agent_ui/src/context.rs | 1160 +---------------- crates/agent_ui/src/context_picker.rs | 931 ------------- .../context_picker/fetch_context_picker.rs | 252 ---- .../src/context_picker/file_context_picker.rs | 392 ------ .../context_picker/rules_context_picker.rs | 224 ---- .../context_picker/symbol_context_picker.rs | 415 ------ .../context_picker/thread_context_picker.rs | 280 ---- crates/agent_ui/src/context_store.rs | 614 --------- crates/agent_ui/src/context_strip.rs | 619 --------- crates/agent_ui/src/inline_assistant.rs | 75 +- crates/agent_ui/src/inline_prompt_editor.rs | 453 +++---- crates/agent_ui/src/mention_set.rs | 1112 ++++++++++++++++ .../agent_ui/src/terminal_inline_assistant.rs | 55 +- crates/agent_ui/src/ui.rs | 2 - crates/agent_ui/src/ui/context_pill.rs | 858 ------------ 27 files changed, 2224 insertions(+), 7414 deletions(-) rename crates/agent_ui/src/{acp => }/completion_provider.rs (62%) delete mode 100644 crates/agent_ui/src/context_picker.rs delete mode 100644 crates/agent_ui/src/context_picker/fetch_context_picker.rs delete mode 100644 crates/agent_ui/src/context_picker/file_context_picker.rs delete mode 100644 crates/agent_ui/src/context_picker/rules_context_picker.rs delete mode 100644 crates/agent_ui/src/context_picker/symbol_context_picker.rs delete mode 100644 crates/agent_ui/src/context_picker/thread_context_picker.rs delete mode 100644 crates/agent_ui/src/context_store.rs delete mode 100644 crates/agent_ui/src/context_strip.rs create mode 100644 crates/agent_ui/src/mention_set.rs delete mode 100644 crates/agent_ui/src/ui/context_pill.rs diff --git a/Cargo.lock b/Cargo.lock index e2613dc27767875e5d3d96aa058923077f20de0c..9917862e72ba3f63e20b2c7305902a85dc0f3191 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -368,7 +368,6 @@ dependencies = [ "prompt_store", "proto", "rand 0.9.2", - "ref-cast", "release_channel", "rope", "rules_library", 
@@ -394,7 +393,6 @@ dependencies = [ "ui_input", "unindent", "url", - "urlencoding", "util", "watch", "workspace", diff --git a/Cargo.toml b/Cargo.toml index 63303678ebfbdbf5d403b40ff83c4612ad2ba2c8..abf74a8108fa06cf3c154438c13ce015719b7481 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -607,7 +607,6 @@ pulldown-cmark = { version = "0.12.0", default-features = false } quote = "1.0.9" rand = "0.9" rayon = "1.8" -ref-cast = "1.0.24" regex = "1.5" # WARNING: If you change this, you must also publish a new version of zed-reqwest to crates.io reqwest = { git = "https://github.com/zed-industries/reqwest.git", rev = "c15662463bda39148ba154100dd44d3fba5873a4", default-features = false, features = [ diff --git a/assets/keymaps/default-linux.json b/assets/keymaps/default-linux.json index 4a6421c4e1f335a4d35fe9ac11d157b30a914004..7ddabfc9f55577c8ced3fbad0cc881cd4bb183d0 100644 --- a/assets/keymaps/default-linux.json +++ b/assets/keymaps/default-linux.json @@ -239,13 +239,11 @@ "ctrl-alt-l": "agent::OpenRulesLibrary", "ctrl-i": "agent::ToggleProfileSelector", "ctrl-alt-/": "agent::ToggleModelSelector", - "ctrl-shift-a": "agent::ToggleContextPicker", "ctrl-shift-j": "agent::ToggleNavigationMenu", "ctrl-alt-i": "agent::ToggleOptionsMenu", "ctrl-alt-shift-n": "agent::ToggleNewThreadMenu", "shift-alt-escape": "agent::ExpandMessageEditor", "ctrl->": "agent::AddSelectionToThread", - "ctrl-alt-e": "agent::RemoveAllContext", "ctrl-shift-e": "project_panel::ToggleFocus", "ctrl-shift-enter": "agent::ContinueThread", "super-ctrl-b": "agent::ToggleBurnMode", @@ -322,17 +320,6 @@ "alt-enter": "editor::Newline" } }, - { - "context": "ContextStrip", - "bindings": { - "up": "agent::FocusUp", - "right": "agent::FocusRight", - "left": "agent::FocusLeft", - "down": "agent::FocusDown", - "backspace": "agent::RemoveFocusedContext", - "enter": "agent::AcceptSuggestedContext" - } - }, { "context": "AcpThread > ModeSelector", "bindings": { @@ -824,8 +811,7 @@ "context": "PromptEditor", "bindings": { "ctrl-[": "agent::CyclePreviousInlineAssist", - "ctrl-]": "agent::CycleNextInlineAssist", - "ctrl-alt-e": "agent::RemoveAllContext" + "ctrl-]": "agent::CycleNextInlineAssist" } }, { diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index 8790acf906effd3e0cd7026a909759978ae39dd5..2f7c25a3560e09bccb9f45c64df38048eefdddd6 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -278,13 +278,11 @@ "cmd-alt-p": "agent::ManageProfiles", "cmd-i": "agent::ToggleProfileSelector", "cmd-alt-/": "agent::ToggleModelSelector", - "cmd-shift-a": "agent::ToggleContextPicker", "cmd-shift-j": "agent::ToggleNavigationMenu", "cmd-alt-m": "agent::ToggleOptionsMenu", "cmd-alt-shift-n": "agent::ToggleNewThreadMenu", "shift-alt-escape": "agent::ExpandMessageEditor", "cmd->": "agent::AddSelectionToThread", - "cmd-alt-e": "agent::RemoveAllContext", "cmd-shift-e": "project_panel::ToggleFocus", "cmd-ctrl-b": "agent::ToggleBurnMode", "cmd-shift-enter": "agent::ContinueThread", @@ -365,18 +363,6 @@ "alt-enter": "editor::Newline" } }, - { - "context": "ContextStrip", - "use_key_equivalents": true, - "bindings": { - "up": "agent::FocusUp", - "right": "agent::FocusRight", - "left": "agent::FocusLeft", - "down": "agent::FocusDown", - "backspace": "agent::RemoveFocusedContext", - "enter": "agent::AcceptSuggestedContext" - } - }, { "context": "AgentConfiguration", "bindings": { @@ -889,9 +875,7 @@ "context": "PromptEditor", "use_key_equivalents": true, "bindings": { - "cmd-shift-a": 
"agent::ToggleContextPicker", "cmd-alt-/": "agent::ToggleModelSelector", - "cmd-alt-e": "agent::RemoveAllContext", "ctrl-[": "agent::CyclePreviousInlineAssist", "ctrl-]": "agent::CycleNextInlineAssist" } diff --git a/assets/keymaps/default-windows.json b/assets/keymaps/default-windows.json index 1144eac80b2cd881e8d99cc5beb69b42263338c3..40fdc4609967bfbabf5f575a9bae6cc02abab9cd 100644 --- a/assets/keymaps/default-windows.json +++ b/assets/keymaps/default-windows.json @@ -240,13 +240,11 @@ "shift-alt-p": "agent::ManageProfiles", "ctrl-i": "agent::ToggleProfileSelector", "shift-alt-/": "agent::ToggleModelSelector", - "ctrl-shift-a": "agent::ToggleContextPicker", "ctrl-shift-j": "agent::ToggleNavigationMenu", "ctrl-alt-i": "agent::ToggleOptionsMenu", // "ctrl-shift-alt-n": "agent::ToggleNewThreadMenu", "shift-alt-escape": "agent::ExpandMessageEditor", "ctrl-shift-.": "agent::AddSelectionToThread", - "shift-alt-e": "agent::RemoveAllContext", "ctrl-shift-e": "project_panel::ToggleFocus", "ctrl-shift-enter": "agent::ContinueThread", "super-ctrl-b": "agent::ToggleBurnMode", @@ -328,18 +326,6 @@ "alt-enter": "editor::Newline" } }, - { - "context": "ContextStrip", - "use_key_equivalents": true, - "bindings": { - "up": "agent::FocusUp", - "right": "agent::FocusRight", - "left": "agent::FocusLeft", - "down": "agent::FocusDown", - "backspace": "agent::RemoveFocusedContext", - "enter": "agent::AcceptSuggestedContext" - } - }, { "context": "AcpThread > ModeSelector", "bindings": { @@ -837,8 +823,7 @@ "use_key_equivalents": true, "bindings": { "ctrl-[": "agent::CyclePreviousInlineAssist", - "ctrl-]": "agent::CycleNextInlineAssist", - "shift-alt-e": "agent::RemoveAllContext" + "ctrl-]": "agent::CycleNextInlineAssist" } }, { diff --git a/crates/agent_ui/Cargo.toml b/crates/agent_ui/Cargo.toml index 6447b41335ece093718036091baf30c505ad76fd..17e90775b2fd386524b01ea3ab056a00273aa82f 100644 --- a/crates/agent_ui/Cargo.toml +++ b/crates/agent_ui/Cargo.toml @@ -69,7 +69,6 @@ postage.workspace = true project.workspace = true prompt_store.workspace = true proto.workspace = true -ref-cast.workspace = true release_channel.workspace = true rope.workspace = true rules_library.workspace = true @@ -93,7 +92,6 @@ time_format.workspace = true ui.workspace = true ui_input.workspace = true url.workspace = true -urlencoding.workspace = true util.workspace = true watch.workspace = true workspace.workspace = true diff --git a/crates/agent_ui/src/acp.rs b/crates/agent_ui/src/acp.rs index 2e15cd424d6313d981ff8c000f5eeb958aec9370..7a740c2dc4b9fbc769aa847347a0aa56d5f51934 100644 --- a/crates/agent_ui/src/acp.rs +++ b/crates/agent_ui/src/acp.rs @@ -1,4 +1,3 @@ -mod completion_provider; mod entry_view_state; mod message_editor; mod mode_selector; diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 883a7424e47eaf412278995cb9f3d497fd4f5c67..9fbdbb04986294aa319c04cb5d76de63f4a758ab 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -1,68 +1,45 @@ use crate::{ ChatWithFollow, - acp::completion_provider::{ContextPickerCompletionProvider, SlashCommandCompletion}, - context_picker::{ContextPickerAction, fetch_context_picker::fetch_url_content}, + completion_provider::{ + PromptCompletionProvider, PromptCompletionProviderDelegate, PromptContextAction, + PromptContextType, SlashCommandCompletion, + }, + mention_set::{ + Mention, MentionImage, MentionSet, insert_crease_for_mention, paste_images_as_context, + }, }; -use 
acp_thread::{MentionUri, selection_name}; -use agent::{HistoryStore, outline}; +use acp_thread::MentionUri; +use agent::HistoryStore; use agent_client_protocol as acp; -use agent_servers::{AgentServer, AgentServerDelegate}; use anyhow::{Result, anyhow}; -use assistant_slash_commands::codeblock_fence_for_path; -use collections::{HashMap, HashSet}; +use collections::HashSet; use editor::{ - Addon, Anchor, AnchorRangeExt, ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, - EditorEvent, EditorMode, EditorSnapshot, EditorStyle, ExcerptId, FoldPlaceholder, Inlay, - MultiBuffer, MultiBufferOffset, ToOffset, - actions::Paste, - code_context_menus::CodeContextMenu, - display_map::{Crease, CreaseId, FoldId}, - scroll::Autoscroll, -}; -use futures::{ - FutureExt as _, - future::{Shared, join_all}, + Addon, AnchorRangeExt, ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, + EditorEvent, EditorMode, EditorStyle, Inlay, MultiBuffer, MultiBufferOffset, ToOffset, + actions::Paste, code_context_menus::CodeContextMenu, scroll::Autoscroll, }; +use futures::{FutureExt as _, future::join_all}; use gpui::{ - Animation, AnimationExt as _, AppContext, ClipboardEntry, Context, Entity, EntityId, - EventEmitter, FocusHandle, Focusable, Image, ImageFormat, Img, KeyContext, SharedString, - Subscription, Task, TextStyle, WeakEntity, pulsating_between, + AppContext, Context, Entity, EventEmitter, FocusHandle, Focusable, ImageFormat, KeyContext, + SharedString, Subscription, Task, TextStyle, WeakEntity, }; -use itertools::Either; use language::{Buffer, Language, language_settings::InlayHintKind}; -use language_model::LanguageModelImage; -use postage::stream::Stream as _; -use project::{ - CompletionIntent, InlayHint, InlayHintLabel, InlayId, Project, ProjectItem, ProjectPath, - Worktree, -}; -use prompt_store::{PromptId, PromptStore}; +use project::{CompletionIntent, InlayHint, InlayHintLabel, InlayId, Project, Worktree}; +use prompt_store::PromptStore; use rope::Point; use settings::Settings; -use std::{ - cell::RefCell, - ffi::OsStr, - fmt::Write, - ops::{Range, RangeInclusive}, - path::{Path, PathBuf}, - rc::Rc, - sync::Arc, - time::Duration, -}; -use text::OffsetRangeExt; +use std::{cell::RefCell, fmt::Write, rc::Rc, sync::Arc}; use theme::ThemeSettings; -use ui::{ButtonLike, TintColor, Toggleable, prelude::*}; -use util::{ResultExt, debug_panic, rel_path::RelPath}; -use workspace::{CollaboratorId, Workspace, notifications::NotifyResultExt as _}; +use ui::prelude::*; +use util::{ResultExt, debug_panic}; +use workspace::{CollaboratorId, Workspace}; use zed_actions::agent::Chat; pub struct MessageEditor { - mention_set: MentionSet, + mention_set: Entity, editor: Entity, project: Entity, workspace: WeakEntity, - history_store: Entity, - prompt_store: Option>, prompt_capabilities: Rc>, available_commands: Rc>>, agent_name: SharedString, @@ -82,6 +59,41 @@ impl EventEmitter for MessageEditor {} const COMMAND_HINT_INLAY_ID: InlayId = InlayId::Hint(0); +impl PromptCompletionProviderDelegate for Entity { + fn supports_images(&self, cx: &App) -> bool { + self.read(cx).prompt_capabilities.borrow().image + } + + fn supported_modes(&self, cx: &App) -> Vec { + let mut supported = vec![PromptContextType::File, PromptContextType::Symbol]; + if self.read(cx).prompt_capabilities.borrow().embedded_context { + supported.extend(&[ + PromptContextType::Thread, + PromptContextType::Fetch, + PromptContextType::Rules, + ]); + } + supported + } + + fn available_commands(&self, cx: &App) -> Vec { + self.read(cx) 
+ .available_commands + .borrow() + .iter() + .map(|cmd| crate::completion_provider::AvailableCommand { + name: cmd.name.clone().into(), + description: cmd.description.clone().into(), + requires_argument: cmd.input.is_some(), + }) + .collect() + } + + fn confirm_command(&self, cx: &mut App) { + self.update(cx, |this, cx| this.send(cx)); + } +} + impl MessageEditor { pub fn new( workspace: WeakEntity, @@ -103,15 +115,7 @@ impl MessageEditor { }, None, ); - let completion_provider = Rc::new(ContextPickerCompletionProvider::new( - cx.weak_entity(), - workspace.clone(), - history_store.clone(), - prompt_store.clone(), - prompt_capabilities.clone(), - available_commands.clone(), - )); - let mention_set = MentionSet::default(); + let editor = cx.new(|cx| { let buffer = cx.new(|cx| Buffer::local("", cx).with_language(Arc::new(language), cx)); let buffer = cx.new(|cx| MultiBuffer::singleton(buffer, cx)); @@ -121,7 +125,6 @@ impl MessageEditor { editor.set_show_indent_guides(false, cx); editor.set_soft_wrap(); editor.set_use_modal_editing(true); - editor.set_completion_provider(Some(completion_provider.clone())); editor.set_context_menu_options(ContextMenuOptions { min_entries_visible: 12, max_entries_visible: 12, @@ -130,6 +133,26 @@ impl MessageEditor { editor.register_addon(MessageEditorAddon::new()); editor }); + let mention_set = cx.new(|cx| { + MentionSet::new( + editor.clone(), + project.downgrade(), + history_store.clone(), + prompt_store.clone(), + window, + cx, + ) + }); + let completion_provider = Rc::new(PromptCompletionProvider::new( + cx.entity(), + mention_set.clone(), + history_store.clone(), + prompt_store.clone(), + workspace.clone(), + )); + editor.update(cx, |editor, _cx| { + editor.set_completion_provider(Some(completion_provider.clone())) + }); cx.on_focus_in(&editor.focus_handle(cx), window, |_, _, cx| { cx.emit(MessageEditorEvent::Focus) @@ -143,12 +166,12 @@ impl MessageEditor { let mut has_hint = false; let mut subscriptions = Vec::new(); - subscriptions.push(cx.subscribe_in(&editor, window, { - move |this, editor, event, window, cx| { + subscriptions.push(cx.subscribe(&editor, { + move |this, editor, event, cx| { if let EditorEvent::Edited { .. 
} = event && !editor.read(cx).read_only(cx) { - let snapshot = editor.update(cx, |editor, cx| { + editor.update(cx, |editor, cx| { let new_hints = this .command_hint(editor.buffer(), cx) .into_iter() @@ -164,11 +187,7 @@ impl MessageEditor { cx, ); has_hint = has_new_hint; - - editor.snapshot(window, cx) }); - this.mention_set.remove_invalid(snapshot); - cx.notify(); } } @@ -179,8 +198,6 @@ impl MessageEditor { project, mention_set, workspace, - history_store, - prompt_store, prompt_capabilities, available_commands, agent_name, @@ -238,6 +255,9 @@ impl MessageEditor { window: &mut Window, cx: &mut Context, ) { + let Some(workspace) = self.workspace.upgrade() else { + return; + }; let uri = MentionUri::Thread { id: thread.id.clone(), name: thread.title.to_string(), @@ -256,7 +276,21 @@ impl MessageEditor { .text_anchor }); - self.confirm_mention_completion(thread.title, start, content_len, uri, window, cx) + let supports_images = self.prompt_capabilities.borrow().image; + + self.mention_set + .update(cx, |mention_set, cx| { + mention_set.confirm_mention_completion( + thread.title, + start, + content_len, + uri, + supports_images, + &workspace, + window, + cx, + ) + }) .detach(); } @@ -265,11 +299,6 @@ impl MessageEditor { &self.editor } - #[cfg(test)] - pub(crate) fn mention_set(&mut self) -> &mut MentionSet { - &mut self.mention_set - } - pub fn is_empty(&self, cx: &App) -> bool { self.editor.read(cx).is_empty(cx) } @@ -283,388 +312,9 @@ impl MessageEditor { .is_some_and(|menu| matches!(menu, CodeContextMenu::Completions(_)) && menu.visible()) } - pub fn mentions(&self) -> HashSet { - self.mention_set - .mentions - .values() - .map(|(uri, _)| uri.clone()) - .collect() - } - - pub fn confirm_mention_completion( - &mut self, - crease_text: SharedString, - start: text::Anchor, - content_len: usize, - mention_uri: MentionUri, - window: &mut Window, - cx: &mut Context, - ) -> Task<()> { - let snapshot = self - .editor - .update(cx, |editor, cx| editor.snapshot(window, cx)); - let Some(start_anchor) = snapshot.buffer_snapshot().as_singleton_anchor(start) else { - return Task::ready(()); - }; - let excerpt_id = start_anchor.excerpt_id; - let end_anchor = snapshot.buffer_snapshot().anchor_before( - start_anchor.to_offset(&snapshot.buffer_snapshot()) + content_len + 1usize, - ); - - let crease = if let MentionUri::File { abs_path } = &mention_uri - && let Some(extension) = abs_path.extension() - && let Some(extension) = extension.to_str() - && Img::extensions().contains(&extension) - && !extension.contains("svg") - { - let Some(project_path) = self - .project - .read(cx) - .project_path_for_absolute_path(&abs_path, cx) - else { - log::error!("project path not found"); - return Task::ready(()); - }; - let image = self - .project - .update(cx, |project, cx| project.open_image(project_path, cx)); - let image = cx - .spawn(async move |_, cx| { - let image = image.await.map_err(|e| e.to_string())?; - let image = image - .update(cx, |image, _| image.image.clone()) - .map_err(|e| e.to_string())?; - Ok(image) - }) - .shared(); - insert_crease_for_mention( - excerpt_id, - start, - content_len, - mention_uri.name().into(), - IconName::Image.path().into(), - Some(image), - self.editor.clone(), - window, - cx, - ) - } else { - insert_crease_for_mention( - excerpt_id, - start, - content_len, - crease_text, - mention_uri.icon_path(cx), - None, - self.editor.clone(), - window, - cx, - ) - }; - let Some((crease_id, tx)) = crease else { - return Task::ready(()); - }; - - let task = match mention_uri.clone() { - 
MentionUri::Fetch { url } => self.confirm_mention_for_fetch(url, cx), - MentionUri::Directory { .. } => Task::ready(Ok(Mention::Link)), - MentionUri::Thread { id, .. } => self.confirm_mention_for_thread(id, cx), - MentionUri::TextThread { path, .. } => self.confirm_mention_for_text_thread(path, cx), - MentionUri::File { abs_path } => self.confirm_mention_for_file(abs_path, cx), - MentionUri::Symbol { - abs_path, - line_range, - .. - } => self.confirm_mention_for_symbol(abs_path, line_range, cx), - MentionUri::Rule { id, .. } => self.confirm_mention_for_rule(id, cx), - MentionUri::PastedImage => { - debug_panic!("pasted image URI should not be included in completions"); - Task::ready(Err(anyhow!( - "pasted imaged URI should not be included in completions" - ))) - } - MentionUri::Selection { .. } => { - debug_panic!("unexpected selection URI"); - Task::ready(Err(anyhow!("unexpected selection URI"))) - } - }; - let task = cx - .spawn(async move |_, _| task.await.map_err(|e| e.to_string())) - .shared(); - self.mention_set - .mentions - .insert(crease_id, (mention_uri, task.clone())); - - // Notify the user if we failed to load the mentioned context - cx.spawn_in(window, async move |this, cx| { - let result = task.await.notify_async_err(cx); - drop(tx); - if result.is_none() { - this.update(cx, |this, cx| { - this.editor.update(cx, |editor, cx| { - // Remove mention - editor.edit([(start_anchor..end_anchor, "")], cx); - }); - this.mention_set.mentions.remove(&crease_id); - }) - .ok(); - } - }) - } - - fn confirm_mention_for_file( - &mut self, - abs_path: PathBuf, - cx: &mut Context, - ) -> Task> { - let Some(project_path) = self - .project - .read(cx) - .project_path_for_absolute_path(&abs_path, cx) - else { - return Task::ready(Err(anyhow!("project path not found"))); - }; - let extension = abs_path - .extension() - .and_then(OsStr::to_str) - .unwrap_or_default(); - - if Img::extensions().contains(&extension) && !extension.contains("svg") { - if !self.prompt_capabilities.borrow().image { - return Task::ready(Err(anyhow!("This model does not support images yet"))); - } - let task = self - .project - .update(cx, |project, cx| project.open_image(project_path, cx)); - return cx.spawn(async move |_, cx| { - let image = task.await?; - let image = image.update(cx, |image, _| image.image.clone())?; - let format = image.format; - let image = cx - .update(|cx| LanguageModelImage::from_image(image, cx))? 
- .await; - if let Some(image) = image { - Ok(Mention::Image(MentionImage { - data: image.source, - format, - })) - } else { - Err(anyhow!("Failed to convert image")) - } - }); - } - - let buffer = self - .project - .update(cx, |project, cx| project.open_buffer(project_path, cx)); - cx.spawn(async move |_, cx| { - let buffer = buffer.await?; - let buffer_content = outline::get_buffer_content_or_outline( - buffer.clone(), - Some(&abs_path.to_string_lossy()), - &cx, - ) - .await?; - - Ok(Mention::Text { - content: buffer_content.text, - tracked_buffers: vec![buffer], - }) - }) - } - - fn confirm_mention_for_fetch( - &mut self, - url: url::Url, - cx: &mut Context, - ) -> Task> { - let http_client = match self - .workspace - .update(cx, |workspace, _| workspace.client().http_client()) - { - Ok(http_client) => http_client, - Err(e) => return Task::ready(Err(e)), - }; - cx.background_executor().spawn(async move { - let content = fetch_url_content(http_client, url.to_string()).await?; - Ok(Mention::Text { - content, - tracked_buffers: Vec::new(), - }) - }) - } - - fn confirm_mention_for_symbol( - &mut self, - abs_path: PathBuf, - line_range: RangeInclusive, - cx: &mut Context, - ) -> Task> { - let Some(project_path) = self - .project - .read(cx) - .project_path_for_absolute_path(&abs_path, cx) - else { - return Task::ready(Err(anyhow!("project path not found"))); - }; - let buffer = self - .project - .update(cx, |project, cx| project.open_buffer(project_path, cx)); - cx.spawn(async move |_, cx| { - let buffer = buffer.await?; - let mention = buffer.update(cx, |buffer, cx| { - let start = Point::new(*line_range.start(), 0).min(buffer.max_point()); - let end = Point::new(*line_range.end() + 1, 0).min(buffer.max_point()); - let content = buffer.text_for_range(start..end).collect(); - Mention::Text { - content, - tracked_buffers: vec![cx.entity()], - } - })?; - anyhow::Ok(mention) - }) - } - - fn confirm_mention_for_rule( - &mut self, - id: PromptId, - cx: &mut Context, - ) -> Task> { - let Some(prompt_store) = self.prompt_store.clone() else { - return Task::ready(Err(anyhow!("missing prompt store"))); - }; - let prompt = prompt_store.read(cx).load(id, cx); - cx.spawn(async move |_, _| { - let prompt = prompt.await?; - Ok(Mention::Text { - content: prompt, - tracked_buffers: Vec::new(), - }) - }) - } - - pub fn confirm_mention_for_selection( - &mut self, - source_range: Range, - selections: Vec<(Entity, Range, Range)>, - window: &mut Window, - cx: &mut Context, - ) { - let snapshot = self.editor.read(cx).buffer().read(cx).snapshot(cx); - let Some(start) = snapshot.as_singleton_anchor(source_range.start) else { - return; - }; - - let offset = start.to_offset(&snapshot); - - for (buffer, selection_range, range_to_fold) in selections { - let range = snapshot.anchor_after(offset + range_to_fold.start) - ..snapshot.anchor_after(offset + range_to_fold.end); - - let abs_path = buffer - .read(cx) - .project_path(cx) - .and_then(|project_path| self.project.read(cx).absolute_path(&project_path, cx)); - let snapshot = buffer.read(cx).snapshot(); - - let text = snapshot - .text_for_range(selection_range.clone()) - .collect::(); - let point_range = selection_range.to_point(&snapshot); - let line_range = point_range.start.row..=point_range.end.row; - - let uri = MentionUri::Selection { - abs_path: abs_path.clone(), - line_range: line_range.clone(), - }; - let crease = crate::context_picker::crease_for_mention( - selection_name(abs_path.as_deref(), &line_range).into(), - uri.icon_path(cx), - range, - 
self.editor.downgrade(), - ); - - let crease_id = self.editor.update(cx, |editor, cx| { - let crease_ids = editor.insert_creases(vec![crease.clone()], cx); - editor.fold_creases(vec![crease], false, window, cx); - crease_ids.first().copied().unwrap() - }); - - self.mention_set.mentions.insert( - crease_id, - ( - uri, - Task::ready(Ok(Mention::Text { - content: text, - tracked_buffers: vec![buffer], - })) - .shared(), - ), - ); - } - - // Take this explanation with a grain of salt but, with creases being - // inserted, GPUI's recomputes the editor layout in the next frames, so - // directly calling `editor.request_autoscroll` wouldn't work as - // expected. We're leveraging `cx.on_next_frame` to wait 2 frames and - // ensure that the layout has been recalculated so that the autoscroll - // request actually shows the cursor's new position. - let editor = self.editor.clone(); - cx.on_next_frame(window, move |_, window, cx| { - cx.on_next_frame(window, move |_, _, cx| { - editor.update(cx, |editor, cx| { - editor.request_autoscroll(Autoscroll::fit(), cx) - }); - }); - }); - } - - fn confirm_mention_for_thread( - &mut self, - id: acp::SessionId, - cx: &mut Context, - ) -> Task> { - let server = Rc::new(agent::NativeAgentServer::new( - self.project.read(cx).fs().clone(), - self.history_store.clone(), - )); - let delegate = AgentServerDelegate::new( - self.project.read(cx).agent_server_store().clone(), - self.project.clone(), - None, - None, - ); - let connection = server.connect(None, delegate, cx); - cx.spawn(async move |_, cx| { - let (agent, _) = connection.await?; - let agent = agent.downcast::().unwrap(); - let summary = agent - .0 - .update(cx, |agent, cx| agent.thread_summary(id, cx))? - .await?; - anyhow::Ok(Mention::Text { - content: summary.to_string(), - tracked_buffers: Vec::new(), - }) - }) - } - - fn confirm_mention_for_text_thread( - &mut self, - path: PathBuf, - cx: &mut Context, - ) -> Task> { - let text_thread_task = self.history_store.update(cx, |store, cx| { - store.load_text_thread(path.as_path().into(), cx) - }); - cx.spawn(async move |_, cx| { - let text_thread = text_thread_task.await?; - let xml = text_thread.update(cx, |text_thread, cx| text_thread.to_xml(cx))?; - Ok(Mention::Text { - content: xml, - tracked_buffers: Vec::new(), - }) - }) + #[cfg(test)] + pub fn mention_set(&self) -> &Entity { + &self.mention_set } fn validate_slash_commands( @@ -716,7 +366,7 @@ impl MessageEditor { let contents = self .mention_set - .contents(full_mention_content, self.project.clone(), cx); + .update(cx, |store, cx| store.contents(full_mention_content, cx)); let editor = self.editor.clone(); let supports_embedded_context = self.prompt_capabilities.borrow().embedded_context; @@ -828,10 +478,12 @@ impl MessageEditor { self.editor.update(cx, |editor, cx| { editor.clear(window, cx); editor.remove_creases( - self.mention_set - .mentions - .drain() - .map(|(crease_id, _)| crease_id), + self.mention_set.update(cx, |mention_set, _cx| { + mention_set + .remove_all() + .map(|(crease_id, _)| crease_id) + .collect::>() + }), cx, ) }); @@ -910,153 +562,12 @@ impl MessageEditor { } fn paste(&mut self, _: &Paste, window: &mut Window, cx: &mut Context) { - if !self.prompt_capabilities.borrow().image { - return; + if self.prompt_capabilities.borrow().image + && let Some(task) = + paste_images_as_context(self.editor.clone(), self.mention_set.clone(), window, cx) + { + task.detach(); } - let Some(clipboard) = cx.read_from_clipboard() else { - return; - }; - cx.spawn_in(window, async move |this, cx| { - 
use itertools::Itertools; - let (mut images, paths) = clipboard - .into_entries() - .filter_map(|entry| match entry { - ClipboardEntry::Image(image) => Some(Either::Left(image)), - ClipboardEntry::ExternalPaths(paths) => Some(Either::Right(paths)), - _ => None, - }) - .partition_map::, Vec<_>, _, _, _>(std::convert::identity); - - if !paths.is_empty() { - images.extend( - cx.background_spawn(async move { - let mut images = vec![]; - for path in paths.into_iter().flat_map(|paths| paths.paths().to_owned()) { - let Ok(content) = async_fs::read(path).await else { - continue; - }; - let Ok(format) = image::guess_format(&content) else { - continue; - }; - images.push(gpui::Image::from_bytes( - match format { - image::ImageFormat::Png => gpui::ImageFormat::Png, - image::ImageFormat::Jpeg => gpui::ImageFormat::Jpeg, - image::ImageFormat::WebP => gpui::ImageFormat::Webp, - image::ImageFormat::Gif => gpui::ImageFormat::Gif, - image::ImageFormat::Bmp => gpui::ImageFormat::Bmp, - image::ImageFormat::Tiff => gpui::ImageFormat::Tiff, - image::ImageFormat::Ico => gpui::ImageFormat::Ico, - _ => continue, - }, - content, - )); - } - images - }) - .await, - ); - } - - if images.is_empty() { - return; - } - - let replacement_text = MentionUri::PastedImage.as_link().to_string(); - let Ok(editor) = this.update(cx, |this, cx| { - cx.stop_propagation(); - this.editor.clone() - }) else { - return; - }; - for image in images { - let Ok((excerpt_id, text_anchor, multibuffer_anchor)) = - editor.update_in(cx, |message_editor, window, cx| { - let snapshot = message_editor.snapshot(window, cx); - let (excerpt_id, _, buffer_snapshot) = - snapshot.buffer_snapshot().as_singleton().unwrap(); - - let text_anchor = buffer_snapshot.anchor_before(buffer_snapshot.len()); - let multibuffer_anchor = snapshot - .buffer_snapshot() - .anchor_in_excerpt(*excerpt_id, text_anchor); - message_editor.edit( - [( - multi_buffer::Anchor::max()..multi_buffer::Anchor::max(), - format!("{replacement_text} "), - )], - cx, - ); - (*excerpt_id, text_anchor, multibuffer_anchor) - }) - else { - break; - }; - - let content_len = replacement_text.len(); - let Some(start_anchor) = multibuffer_anchor else { - continue; - }; - let Ok(end_anchor) = editor.update(cx, |editor, cx| { - let snapshot = editor.buffer().read(cx).snapshot(cx); - snapshot.anchor_before(start_anchor.to_offset(&snapshot) + content_len) - }) else { - continue; - }; - let image = Arc::new(image); - let Ok(Some((crease_id, tx))) = cx.update(|window, cx| { - insert_crease_for_mention( - excerpt_id, - text_anchor, - content_len, - MentionUri::PastedImage.name().into(), - IconName::Image.path().into(), - Some(Task::ready(Ok(image.clone())).shared()), - editor.clone(), - window, - cx, - ) - }) else { - continue; - }; - let task = cx - .spawn(async move |cx| { - let format = image.format; - let image = cx - .update(|_, cx| LanguageModelImage::from_image(image, cx)) - .map_err(|e| e.to_string())? 
- .await; - drop(tx); - if let Some(image) = image { - Ok(Mention::Image(MentionImage { - data: image.source, - format, - })) - } else { - Err("Failed to convert image".into()) - } - }) - .shared(); - - this.update(cx, |this, _| { - this.mention_set - .mentions - .insert(crease_id, (MentionUri::PastedImage, task.clone())) - }) - .ok(); - - if task.await.notify_async_err(cx).is_none() { - this.update(cx, |this, cx| { - this.editor.update(cx, |editor, cx| { - editor.edit([(start_anchor..end_anchor, "")], cx); - }); - this.mention_set.mentions.remove(&crease_id); - }) - .ok(); - } - } - }) - .detach(); } pub fn insert_dragged_files( @@ -1066,6 +577,9 @@ impl MessageEditor { window: &mut Window, cx: &mut Context, ) { + let Some(workspace) = self.workspace.upgrade() else { + return; + }; let path_style = self.project.read(cx).path_style(cx); let buffer = self.editor.read(cx).buffer().clone(); let Some(buffer) = buffer.read(cx).as_singleton() else { @@ -1080,12 +594,11 @@ impl MessageEditor { continue; }; let abs_path = worktree.read(cx).absolutize(&path.path); - let (file_name, _) = - crate::context_picker::file_context_picker::extract_file_name_and_directory( - &path.path, - worktree.read(cx).root_name(), - path_style, - ); + let (file_name, _) = crate::completion_provider::extract_file_name_and_directory( + &path.path, + worktree.read(cx).root_name(), + path_style, + ); let uri = if entry.is_dir() { MentionUri::Directory { abs_path } @@ -1107,14 +620,19 @@ impl MessageEditor { cx, ); }); - tasks.push(self.confirm_mention_completion( - file_name, - anchor, - content_len, - uri, - window, - cx, - )); + let supports_images = self.prompt_capabilities.borrow().image; + tasks.push(self.mention_set.update(cx, |mention_set, cx| { + mention_set.confirm_mention_completion( + file_name, + anchor, + content_len, + uri, + supports_images, + &workspace, + window, + cx, + ) + })); } cx.spawn(async move |_, _| { join_all(tasks).await; @@ -1137,13 +655,15 @@ impl MessageEditor { let Some(workspace) = self.workspace.upgrade() else { return; }; - let Some(completion) = ContextPickerCompletionProvider::completion_for_action( - ContextPickerAction::AddSelections, - anchor..anchor, - cx.weak_entity(), - &workspace, - cx, - ) else { + let Some(completion) = + PromptCompletionProvider::>::completion_for_action( + PromptContextAction::AddSelections, + anchor..anchor, + self.mention_set.downgrade(), + &workspace, + cx, + ) + else { return; }; @@ -1274,10 +794,13 @@ impl MessageEditor { }; drop(tx); - self.mention_set.mentions.insert( - crease_id, - (mention_uri.clone(), Task::ready(Ok(mention)).shared()), - ); + self.mention_set.update(cx, |mention_set, _cx| { + mention_set.insert_mention( + crease_id, + mention_uri.clone(), + Task::ready(Ok(mention)).shared(), + ) + }); } cx.notify(); } @@ -1305,111 +828,6 @@ impl MessageEditor { } } -fn full_mention_for_directory( - project: &Entity, - abs_path: &Path, - cx: &mut App, -) -> Task> { - fn collect_files_in_path(worktree: &Worktree, path: &RelPath) -> Vec<(Arc, String)> { - let mut files = Vec::new(); - - for entry in worktree.child_entries(path) { - if entry.is_dir() { - files.extend(collect_files_in_path(worktree, &entry.path)); - } else if entry.is_file() { - files.push(( - entry.path.clone(), - worktree - .full_path(&entry.path) - .to_string_lossy() - .to_string(), - )); - } - } - - files - } - - let Some(project_path) = project - .read(cx) - .project_path_for_absolute_path(&abs_path, cx) - else { - return Task::ready(Err(anyhow!("project path not found"))); - }; - 
let Some(entry) = project.read(cx).entry_for_path(&project_path, cx) else { - return Task::ready(Err(anyhow!("project entry not found"))); - }; - let directory_path = entry.path.clone(); - let worktree_id = project_path.worktree_id; - let Some(worktree) = project.read(cx).worktree_for_id(worktree_id, cx) else { - return Task::ready(Err(anyhow!("worktree not found"))); - }; - let project = project.clone(); - cx.spawn(async move |cx| { - let file_paths = worktree.read_with(cx, |worktree, _cx| { - collect_files_in_path(worktree, &directory_path) - })?; - let descendants_future = cx.update(|cx| { - join_all(file_paths.into_iter().map(|(worktree_path, full_path)| { - let rel_path = worktree_path - .strip_prefix(&directory_path) - .log_err() - .map_or_else(|| worktree_path.clone(), |rel_path| rel_path.into()); - - let open_task = project.update(cx, |project, cx| { - project.buffer_store().update(cx, |buffer_store, cx| { - let project_path = ProjectPath { - worktree_id, - path: worktree_path, - }; - buffer_store.open_buffer(project_path, cx) - }) - }); - - cx.spawn(async move |cx| { - let buffer = open_task.await.log_err()?; - let buffer_content = outline::get_buffer_content_or_outline( - buffer.clone(), - Some(&full_path), - &cx, - ) - .await - .ok()?; - - Some((rel_path, full_path, buffer_content.text, buffer)) - }) - })) - })?; - - let contents = cx - .background_spawn(async move { - let (contents, tracked_buffers) = descendants_future - .await - .into_iter() - .flatten() - .map(|(rel_path, full_path, rope, buffer)| { - ((rel_path, full_path, rope), buffer) - }) - .unzip(); - Mention::Text { - content: render_directory_contents(contents), - tracked_buffers, - } - }) - .await; - anyhow::Ok(contents) - }) -} - -fn render_directory_contents(entries: Vec<(Arc, String, String)>) -> String { - let mut output = String::new(); - for (_relative_path, full_path, content) in entries { - let fence = codeblock_fence_for_path(Some(&full_path), None); - write!(output, "\n{fence}\n{content}\n```").unwrap(); - } - output -} - impl Focusable for MessageEditor { fn focus_handle(&self, cx: &App) -> FocusHandle { self.editor.focus_handle(cx) @@ -1453,233 +871,6 @@ impl Render for MessageEditor { } } -pub(crate) fn insert_crease_for_mention( - excerpt_id: ExcerptId, - anchor: text::Anchor, - content_len: usize, - crease_label: SharedString, - crease_icon: SharedString, - // abs_path: Option>, - image: Option, String>>>>, - editor: Entity, - window: &mut Window, - cx: &mut App, -) -> Option<(CreaseId, postage::barrier::Sender)> { - let (tx, rx) = postage::barrier::channel(); - - let crease_id = editor.update(cx, |editor, cx| { - let snapshot = editor.buffer().read(cx).snapshot(cx); - - let start = snapshot.anchor_in_excerpt(excerpt_id, anchor)?; - - let start = start.bias_right(&snapshot); - let end = snapshot.anchor_before(start.to_offset(&snapshot) + content_len); - - let placeholder = FoldPlaceholder { - render: render_mention_fold_button( - crease_label, - crease_icon, - start..end, - rx, - image, - cx.weak_entity(), - cx, - ), - merge_adjacent: false, - ..Default::default() - }; - - let crease = Crease::Inline { - range: start..end, - placeholder, - render_toggle: None, - render_trailer: None, - metadata: None, - }; - - let ids = editor.insert_creases(vec![crease.clone()], cx); - editor.fold_creases(vec![crease], false, window, cx); - - Some(ids[0]) - })?; - - Some((crease_id, tx)) -} - -fn render_mention_fold_button( - label: SharedString, - icon: SharedString, - range: Range, - mut loading_finished: 
postage::barrier::Receiver, - image_task: Option, String>>>>, - editor: WeakEntity, - cx: &mut App, -) -> Arc, &mut App) -> AnyElement> { - let loading = cx.new(|cx| { - let loading = cx.spawn(async move |this, cx| { - loading_finished.recv().await; - this.update(cx, |this: &mut LoadingContext, cx| { - this.loading = None; - cx.notify(); - }) - .ok(); - }); - LoadingContext { - id: cx.entity_id(), - label, - icon, - range, - editor, - loading: Some(loading), - image: image_task.clone(), - } - }); - Arc::new(move |_fold_id, _fold_range, _cx| loading.clone().into_any_element()) -} - -struct LoadingContext { - id: EntityId, - label: SharedString, - icon: SharedString, - range: Range, - editor: WeakEntity, - loading: Option>, - image: Option, String>>>>, -} - -impl Render for LoadingContext { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - let is_in_text_selection = self - .editor - .update(cx, |editor, cx| editor.is_range_selected(&self.range, cx)) - .unwrap_or_default(); - ButtonLike::new(("loading-context", self.id)) - .style(ButtonStyle::Filled) - .selected_style(ButtonStyle::Tinted(TintColor::Accent)) - .toggle_state(is_in_text_selection) - .when_some(self.image.clone(), |el, image_task| { - el.hoverable_tooltip(move |_, cx| { - let image = image_task.peek().cloned().transpose().ok().flatten(); - let image_task = image_task.clone(); - cx.new::(|cx| ImageHover { - image, - _task: cx.spawn(async move |this, cx| { - if let Ok(image) = image_task.clone().await { - this.update(cx, |this, cx| { - if this.image.replace(image).is_none() { - cx.notify(); - } - }) - .ok(); - } - }), - }) - .into() - }) - }) - .child( - h_flex() - .gap_1() - .child( - Icon::from_path(self.icon.clone()) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child( - Label::new(self.label.clone()) - .size(LabelSize::Small) - .buffer_font(cx) - .single_line(), - ) - .map(|el| { - if self.loading.is_some() { - el.with_animation( - "loading-context-crease", - Animation::new(Duration::from_secs(2)) - .repeat() - .with_easing(pulsating_between(0.4, 0.8)), - |label, delta| label.opacity(delta), - ) - .into_any() - } else { - el.into_any() - } - }), - ) - } -} - -struct ImageHover { - image: Option>, - _task: Task<()>, -} - -impl Render for ImageHover { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - if let Some(image) = self.image.clone() { - gpui::img(image).max_w_96().max_h_96().into_any_element() - } else { - gpui::Empty.into_any_element() - } - } -} - -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum Mention { - Text { - content: String, - tracked_buffers: Vec>, - }, - Image(MentionImage), - Link, -} - -#[derive(Clone, Debug, Eq, PartialEq)] -pub struct MentionImage { - pub data: SharedString, - pub format: ImageFormat, -} - -#[derive(Default)] -pub struct MentionSet { - mentions: HashMap>>)>, -} - -impl MentionSet { - fn contents( - &self, - full_mention_content: bool, - project: Entity, - cx: &mut App, - ) -> Task>> { - let mentions = self.mentions.clone(); - cx.spawn(async move |cx| { - let mut contents = HashMap::default(); - for (crease_id, (mention_uri, task)) in mentions { - let content = if full_mention_content - && let MentionUri::Directory { abs_path } = &mention_uri - { - cx.update(|cx| full_mention_for_directory(&project, abs_path, cx))? - .await? - } else { - task.await.map_err(|e| anyhow!("{e}"))? 
- }; - - contents.insert(crease_id, (mention_uri, content)); - } - Ok(contents) - }) - } - - fn remove_invalid(&mut self, snapshot: EditorSnapshot) { - for (crease_id, crease) in snapshot.crease_snapshot.creases() { - if !crease.range().start.is_valid(&snapshot.buffer_snapshot()) { - self.mentions.remove(&crease_id); - } - } - } -} - pub struct MessageEditorAddon {} impl MessageEditorAddon { @@ -2391,7 +1582,7 @@ mod tests { .update(&mut cx, |message_editor, cx| { message_editor .mention_set() - .contents(false, project.clone(), cx) + .update(cx, |mention_set, cx| mention_set.contents(false, cx)) }) .await .unwrap() @@ -2446,7 +1637,7 @@ mod tests { .update(&mut cx, |message_editor, cx| { message_editor .mention_set() - .contents(false, project.clone(), cx) + .update(cx, |mention_set, cx| mention_set.contents(false, cx)) }) .await .unwrap() @@ -2569,7 +1760,7 @@ mod tests { .update(&mut cx, |message_editor, cx| { message_editor .mention_set() - .contents(false, project.clone(), cx) + .update(cx, |mention_set, cx| mention_set.contents(false, cx)) }) .await .unwrap() @@ -2617,7 +1808,7 @@ mod tests { .update(&mut cx, |message_editor, cx| { message_editor .mention_set() - .contents(false, project.clone(), cx) + .update(cx, |mention_set, cx| mention_set.contents(false, cx)) }) .await .expect_err("Should fail to load x.png"); @@ -2670,7 +1861,7 @@ mod tests { .update(&mut cx, |message_editor, cx| { message_editor .mention_set() - .contents(false, project.clone(), cx) + .update(cx, |mention_set, cx| mention_set.contents(false, cx)) }) .await .unwrap(); @@ -2781,7 +1972,9 @@ mod tests { worktree_root.join("large_file.txt") }); let large_file_task = message_editor.update(cx, |editor, cx| { - editor.confirm_mention_for_file(large_file_abs_path, cx) + editor.mention_set().update(cx, |set, cx| { + set.confirm_mention_for_file(large_file_abs_path, true, cx) + }) }); let large_file_mention = large_file_task.await.unwrap(); @@ -2813,7 +2006,9 @@ mod tests { worktree_root.join("small_file.txt") }); let small_file_task = message_editor.update(cx, |editor, cx| { - editor.confirm_mention_for_file(small_file_abs_path, cx) + editor.mention_set().update(cx, |set, cx| { + set.confirm_mention_for_file(small_file_abs_path, true, cx) + }) }); let small_file_mention = small_file_task.await.unwrap(); @@ -2888,7 +2083,7 @@ mod tests { text ); - let mentions = editor.mentions(); + let mentions = editor.mention_set().read(cx).mentions(); assert_eq!( mentions.len(), 1, diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 3cbedfbe198cf826a2e82e1f42f1a0d794da49e6..6ff909389986ec27b998c4554fe2d86115ef1785 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -17,6 +17,7 @@ use settings::{ use zed_actions::agent::{OpenClaudeCodeOnboardingModal, ReauthenticateAgent}; +use crate::ManageProfiles; use crate::ui::{AcpOnboardingModal, ClaudeCodeOnboardingModal}; use crate::{ AddContextServer, AgentDiffPane, DeleteRecentlyOpenThread, Follow, InlineAssistant, @@ -34,7 +35,6 @@ use crate::{ acp::{AcpThreadHistory, ThreadHistoryEvent}, }; use crate::{ExternalAgent, NewExternalAgentThread, NewNativeAgentThreadFromSummary}; -use crate::{ManageProfiles, context_store::ContextStore}; use agent_settings::AgentSettings; use ai_onboarding::AgentPanelOnboarding; use anyhow::{Result, anyhow}; @@ -431,7 +431,6 @@ pub struct AgentPanel { text_thread_store: Entity, prompt_store: Option>, context_server_registry: Entity, - inline_assist_context_store: Entity, 
configuration: Option>, configuration_subscription: Option, active_view: ActiveView, @@ -543,7 +542,6 @@ impl AgentPanel { let client = workspace.client().clone(); let workspace = workspace.weak_handle(); - let inline_assist_context_store = cx.new(|_cx| ContextStore::new(project.downgrade())); let context_server_registry = cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx)); @@ -680,7 +678,6 @@ impl AgentPanel { configuration: None, configuration_subscription: None, context_server_registry, - inline_assist_context_store, previous_view: None, new_thread_menu_handle: PopoverMenuHandle::default(), agent_panel_menu_handle: PopoverMenuHandle::default(), @@ -721,10 +718,6 @@ impl AgentPanel { &self.prompt_store } - pub(crate) fn inline_assist_context_store(&self) -> &Entity { - &self.inline_assist_context_store - } - pub(crate) fn thread_store(&self) -> &Entity { &self.history_store } @@ -2664,23 +2657,19 @@ impl rules_library::InlineAssistDelegate for PromptLibraryInlineAssist { cx: &mut Context, ) { InlineAssistant::update_global(cx, |assistant, cx| { - let Some(project) = self - .workspace - .upgrade() - .map(|workspace| workspace.read(cx).project().downgrade()) - else { + let Some(workspace) = self.workspace.upgrade() else { + return; + }; + let Some(panel) = workspace.read(cx).panel::(cx) else { return; }; - let prompt_store = None; - let thread_store = None; - let context_store = cx.new(|_| ContextStore::new(project.clone())); + let project = workspace.read(cx).project().downgrade(); assistant.assist( prompt_editor, self.workspace.clone(), - context_store, project, - prompt_store, - thread_store, + panel.read(cx).thread_store().clone(), + None, initial_prompt, window, cx, diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index e06364988c1b49ab8877e40571393e02c252b47b..ae4cb70d4af419184519afb53ab62849b8a0eab8 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -4,14 +4,13 @@ mod agent_diff; mod agent_model_selector; mod agent_panel; mod buffer_codegen; +mod completion_provider; mod context; -mod context_picker; mod context_server_configuration; -mod context_store; -mod context_strip; mod inline_assistant; mod inline_prompt_editor; mod language_model_selector; +mod mention_set; mod profile_selector; mod slash_command; mod slash_command_picker; @@ -35,7 +34,7 @@ use language::{ language_settings::{AllLanguageSettings, EditPredictionProvider}, }; use language_model::{ - ConfiguredModel, LanguageModel, LanguageModelId, LanguageModelProviderId, LanguageModelRegistry, + ConfiguredModel, LanguageModelId, LanguageModelProviderId, LanguageModelRegistry, }; use project::DisableAiSettings; use prompt_store::PromptBuilder; @@ -56,8 +55,6 @@ actions!( [ /// Creates a new text-based conversation thread. NewTextThread, - /// Toggles the context picker interface for adding files, symbols, or other context. - ToggleContextPicker, /// Toggles the menu to create new agent threads. ToggleNewThreadMenu, /// Toggles the navigation menu for switching between threads and views. @@ -70,8 +67,6 @@ actions!( ToggleProfileSelector, /// Cycles through available session modes. CycleModeSelector, - /// Removes all added context from the current conversation. - RemoveAllContext, /// Expands the message editor to full size. ExpandMessageEditor, /// Opens the conversation history view. @@ -94,10 +89,6 @@ actions!( FocusLeft, /// Moves focus right in the interface. FocusRight, - /// Removes the currently focused context item. 
- RemoveFocusedContext, - /// Accepts the suggested context item. - AcceptSuggestedContext, /// Opens the active thread as a markdown file. OpenActiveThreadAsMarkdown, /// Opens the agent diff view to review changes. @@ -220,11 +211,6 @@ impl ModelUsageContext { } } } - - pub fn language_model(&self, cx: &App) -> Option> { - self.configured_model(cx) - .map(|configured_model| configured_model.model) - } } /// Initializes the `agent` crate. diff --git a/crates/agent_ui/src/buffer_codegen.rs b/crates/agent_ui/src/buffer_codegen.rs index 3877bede3370589d1b6f74529cfa3c6ca1f34f0a..ba52b0298d37211626b6baf6aae1fb3da0be6372 100644 --- a/crates/agent_ui/src/buffer_codegen.rs +++ b/crates/agent_ui/src/buffer_codegen.rs @@ -1,6 +1,4 @@ -use crate::{ - context::load_context, context_store::ContextStore, inline_prompt_editor::CodegenStatus, -}; +use crate::{context::LoadedContext, inline_prompt_editor::CodegenStatus}; use agent_settings::AgentSettings; use anyhow::{Context as _, Result}; use client::telemetry::Telemetry; @@ -8,9 +6,12 @@ use cloud_llm_client::CompletionIntent; use collections::HashSet; use editor::{Anchor, AnchorRangeExt, MultiBuffer, MultiBufferSnapshot, ToOffset as _, ToPoint}; use futures::{ - SinkExt, Stream, StreamExt, TryStreamExt as _, channel::mpsc, future::LocalBoxFuture, join, + SinkExt, Stream, StreamExt, TryStreamExt as _, + channel::mpsc, + future::{LocalBoxFuture, Shared}, + join, }; -use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Subscription, Task, WeakEntity}; +use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Subscription, Task}; use language::{Buffer, IndentKind, Point, TransactionId, line_diff}; use language_model::{ LanguageModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage, @@ -18,8 +19,7 @@ use language_model::{ }; use multi_buffer::MultiBufferRow; use parking_lot::Mutex; -use project::Project; -use prompt_store::{PromptBuilder, PromptStore}; +use prompt_store::PromptBuilder; use rope::Rope; use smol::future::FutureExt; use std::{ @@ -43,9 +43,6 @@ pub struct BufferCodegen { buffer: Entity, range: Range, initial_transaction_id: Option, - context_store: Entity, - project: WeakEntity, - prompt_store: Option>, telemetry: Arc, builder: Arc, pub is_insertion: bool, @@ -56,9 +53,6 @@ impl BufferCodegen { buffer: Entity, range: Range, initial_transaction_id: Option, - context_store: Entity, - project: WeakEntity, - prompt_store: Option>, telemetry: Arc, builder: Arc, cx: &mut Context, @@ -68,9 +62,6 @@ impl BufferCodegen { buffer.clone(), range.clone(), false, - Some(context_store.clone()), - project.clone(), - prompt_store.clone(), Some(telemetry.clone()), builder.clone(), cx, @@ -85,9 +76,6 @@ impl BufferCodegen { buffer, range, initial_transaction_id, - context_store, - project, - prompt_store, telemetry, builder, }; @@ -148,6 +136,7 @@ impl BufferCodegen { &mut self, primary_model: Arc, user_prompt: String, + context_task: Shared>>, cx: &mut Context, ) -> Result<()> { let alternative_models = LanguageModelRegistry::read_global(cx) @@ -165,9 +154,6 @@ impl BufferCodegen { self.buffer.clone(), self.range.clone(), false, - Some(self.context_store.clone()), - self.project.clone(), - self.prompt_store.clone(), Some(self.telemetry.clone()), self.builder.clone(), cx, @@ -180,7 +166,7 @@ impl BufferCodegen { .zip(&self.alternatives) { alternative.update(cx, |alternative, cx| { - alternative.start(user_prompt.clone(), model.clone(), cx) + alternative.start(user_prompt.clone(), context_task.clone(), model.clone(), 
cx) })?; } @@ -243,9 +229,6 @@ pub struct CodegenAlternative { status: CodegenStatus, generation: Task<()>, diff: Diff, - context_store: Option>, - project: WeakEntity, - prompt_store: Option>, telemetry: Option>, _subscription: gpui::Subscription, builder: Arc, @@ -264,9 +247,6 @@ impl CodegenAlternative { buffer: Entity, range: Range, active: bool, - context_store: Option>, - project: WeakEntity, - prompt_store: Option>, telemetry: Option>, builder: Arc, cx: &mut Context, @@ -307,9 +287,6 @@ impl CodegenAlternative { status: CodegenStatus::Idle, generation: Task::ready(()), diff: Diff::default(), - context_store, - project, - prompt_store, telemetry, _subscription: cx.subscribe(&buffer, Self::handle_buffer_event), builder, @@ -366,6 +343,7 @@ impl CodegenAlternative { pub fn start( &mut self, user_prompt: String, + context_task: Shared>>, model: Arc, cx: &mut Context, ) -> Result<()> { @@ -384,7 +362,7 @@ impl CodegenAlternative { if user_prompt.trim().to_lowercase() == "delete" { async { Ok(LanguageModelTextStream::default()) }.boxed_local() } else { - let request = self.build_request(&model, user_prompt, cx)?; + let request = self.build_request(&model, user_prompt, context_task, cx)?; cx.spawn(async move |_, cx| { Ok(model.stream_completion_text(request.await, cx).await?) }) @@ -398,6 +376,7 @@ impl CodegenAlternative { &self, model: &Arc, user_prompt: String, + context_task: Shared>>, cx: &mut App, ) -> Result> { let buffer = self.buffer.read(cx).snapshot(cx); @@ -437,19 +416,6 @@ impl CodegenAlternative { ) .context("generating content prompt")?; - let context_task = self.context_store.as_ref().and_then(|context_store| { - if let Some(project) = self.project.upgrade() { - let context = context_store - .read(cx) - .context() - .cloned() - .collect::>(); - Some(load_context(context, &project, &self.prompt_store, cx)) - } else { - None - } - }); - let temperature = AgentSettings::temperature_for_model(model, cx); Ok(cx.spawn(async move |_cx| { @@ -459,10 +425,8 @@ impl CodegenAlternative { cache: false, }; - if let Some(context_task) = context_task { - context_task - .await - .add_to_request_message(&mut request_message); + if let Some(context) = context_task.await { + context.add_to_request_message(&mut request_message); } request_message.content.push(prompt.into()); @@ -1088,7 +1052,6 @@ impl Diff { #[cfg(test)] mod tests { use super::*; - use fs::FakeFs; use futures::{ Stream, stream::{self}, @@ -1120,17 +1083,12 @@ mod tests { snapshot.anchor_before(Point::new(1, 0))..snapshot.anchor_after(Point::new(4, 5)) }); let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap()); - let fs = FakeFs::new(cx.executor()); - let project = Project::test(fs, vec![], cx).await; let codegen = cx.new(|cx| { CodegenAlternative::new( buffer.clone(), range.clone(), true, None, - project.downgrade(), - None, - None, prompt_builder, cx, ) @@ -1187,17 +1145,12 @@ mod tests { snapshot.anchor_before(Point::new(1, 6))..snapshot.anchor_after(Point::new(1, 6)) }); let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap()); - let fs = FakeFs::new(cx.executor()); - let project = Project::test(fs, vec![], cx).await; let codegen = cx.new(|cx| { CodegenAlternative::new( buffer.clone(), range.clone(), true, None, - project.downgrade(), - None, - None, prompt_builder, cx, ) @@ -1256,17 +1209,12 @@ mod tests { snapshot.anchor_before(Point::new(1, 2))..snapshot.anchor_after(Point::new(1, 2)) }); let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap()); - let fs = FakeFs::new(cx.executor()); - let 
project = Project::test(fs, vec![], cx).await; let codegen = cx.new(|cx| { CodegenAlternative::new( buffer.clone(), range.clone(), true, None, - project.downgrade(), - None, - None, prompt_builder, cx, ) @@ -1325,17 +1273,12 @@ mod tests { snapshot.anchor_before(Point::new(0, 0))..snapshot.anchor_after(Point::new(4, 2)) }); let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap()); - let fs = FakeFs::new(cx.executor()); - let project = Project::test(fs, vec![], cx).await; let codegen = cx.new(|cx| { CodegenAlternative::new( buffer.clone(), range.clone(), true, None, - project.downgrade(), - None, - None, prompt_builder, cx, ) @@ -1382,17 +1325,12 @@ mod tests { snapshot.anchor_before(Point::new(1, 0))..snapshot.anchor_after(Point::new(1, 14)) }); let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap()); - let fs = FakeFs::new(cx.executor()); - let project = Project::test(fs, vec![], cx).await; let codegen = cx.new(|cx| { CodegenAlternative::new( buffer.clone(), range.clone(), false, None, - project.downgrade(), - None, - None, prompt_builder, cx, ) diff --git a/crates/agent_ui/src/acp/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs similarity index 62% rename from crates/agent_ui/src/acp/completion_provider.rs rename to crates/agent_ui/src/completion_provider.rs index e87526957ce844a10c7c4f07f7ec6790927b142e..4e813570a42b9d7fee3f4ea5ef9ad6dafe1cc80e 100644 --- a/crates/agent_ui/src/acp/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -1,41 +1,130 @@ -use std::cell::RefCell; +use std::cmp::Reverse; use std::ops::Range; use std::path::PathBuf; -use std::rc::Rc; use std::sync::Arc; use std::sync::atomic::AtomicBool; use acp_thread::MentionUri; use agent::{HistoryEntry, HistoryStore}; -use agent_client_protocol as acp; use anyhow::Result; use editor::{CompletionProvider, Editor, ExcerptId}; -use fuzzy::{StringMatch, StringMatchCandidate}; +use fuzzy::{PathMatch, StringMatch, StringMatchCandidate}; use gpui::{App, Entity, Task, WeakEntity}; use language::{Buffer, CodeLabel, CodeLabelBuilder, HighlightId}; use lsp::CompletionContext; +use ordered_float::OrderedFloat; use project::lsp_store::{CompletionDocumentation, SymbolLocation}; use project::{ - Completion, CompletionDisplayOptions, CompletionIntent, CompletionResponse, Project, - ProjectPath, Symbol, WorktreeId, + Completion, CompletionDisplayOptions, CompletionIntent, CompletionResponse, + PathMatchCandidateSet, Project, ProjectPath, Symbol, WorktreeId, }; -use prompt_store::PromptStore; +use prompt_store::{PromptId, PromptStore, UserPromptId}; use rope::Point; use text::{Anchor, ToPoint as _}; use ui::prelude::*; +use util::ResultExt as _; +use util::paths::PathStyle; use util::rel_path::RelPath; use workspace::Workspace; use crate::AgentPanel; -use crate::acp::message_editor::MessageEditor; -use crate::context_picker::file_context_picker::{FileMatch, search_files}; -use crate::context_picker::rules_context_picker::{RulesContextEntry, search_rules}; -use crate::context_picker::symbol_context_picker::SymbolMatch; -use crate::context_picker::symbol_context_picker::search_symbols; -use crate::context_picker::thread_context_picker::search_threads; -use crate::context_picker::{ - ContextPickerAction, ContextPickerEntry, ContextPickerMode, selection_ranges, -}; +use crate::mention_set::MentionSet; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum PromptContextEntry { + Mode(PromptContextType), + Action(PromptContextAction), +} + +impl PromptContextEntry { + pub fn 
keyword(&self) -> &'static str { + match self { + Self::Mode(mode) => mode.keyword(), + Self::Action(action) => action.keyword(), + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum PromptContextType { + File, + Symbol, + Fetch, + Thread, + Rules, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum PromptContextAction { + AddSelections, +} + +impl PromptContextAction { + pub fn keyword(&self) -> &'static str { + match self { + Self::AddSelections => "selection", + } + } + + pub fn label(&self) -> &'static str { + match self { + Self::AddSelections => "Selection", + } + } + + pub fn icon(&self) -> IconName { + match self { + Self::AddSelections => IconName::Reader, + } + } +} + +impl TryFrom<&str> for PromptContextType { + type Error = String; + + fn try_from(value: &str) -> Result { + match value { + "file" => Ok(Self::File), + "symbol" => Ok(Self::Symbol), + "fetch" => Ok(Self::Fetch), + "thread" => Ok(Self::Thread), + "rule" => Ok(Self::Rules), + _ => Err(format!("Invalid context picker mode: {}", value)), + } + } +} + +impl PromptContextType { + pub fn keyword(&self) -> &'static str { + match self { + Self::File => "file", + Self::Symbol => "symbol", + Self::Fetch => "fetch", + Self::Thread => "thread", + Self::Rules => "rule", + } + } + + pub fn label(&self) -> &'static str { + match self { + Self::File => "Files & Directories", + Self::Symbol => "Symbols", + Self::Fetch => "Fetch", + Self::Thread => "Threads", + Self::Rules => "Rules", + } + } + + pub fn icon(&self) -> IconName { + match self { + Self::File => IconName::File, + Self::Symbol => IconName::Code, + Self::Fetch => IconName::ToolWeb, + Self::Thread => IconName::Thread, + Self::Rules => IconName::Reader, + } + } +} pub(crate) enum Match { File(FileMatch), @@ -47,11 +136,6 @@ pub(crate) enum Match { Entry(EntryMatch), } -pub struct EntryMatch { - mat: Option, - entry: ContextPickerEntry, -} - impl Match { pub fn score(&self) -> f64 { match self { @@ -66,43 +150,69 @@ impl Match { } } -pub struct ContextPickerCompletionProvider { - message_editor: WeakEntity, - workspace: WeakEntity, +pub struct EntryMatch { + mat: Option, + entry: PromptContextEntry, +} + +#[derive(Debug, Clone)] +pub struct RulesContextEntry { + pub prompt_id: UserPromptId, + pub title: SharedString, +} + +#[derive(Debug, Clone)] +pub struct AvailableCommand { + pub name: Arc, + pub description: Arc, + pub requires_argument: bool, +} + +pub trait PromptCompletionProviderDelegate: Send + Sync + 'static { + fn supports_context(&self, mode: PromptContextType, cx: &App) -> bool { + self.supported_modes(cx).contains(&mode) + } + fn supported_modes(&self, cx: &App) -> Vec; + fn supports_images(&self, cx: &App) -> bool; + + fn available_commands(&self, cx: &App) -> Vec; + fn confirm_command(&self, cx: &mut App); +} + +pub struct PromptCompletionProvider { + source: Arc, + mention_set: Entity, history_store: Entity, prompt_store: Option>, - prompt_capabilities: Rc>, - available_commands: Rc>>, + workspace: WeakEntity, } -impl ContextPickerCompletionProvider { +impl PromptCompletionProvider { pub fn new( - message_editor: WeakEntity, - workspace: WeakEntity, + source: T, + mention_set: Entity, history_store: Entity, prompt_store: Option>, - prompt_capabilities: Rc>, - available_commands: Rc>>, + workspace: WeakEntity, ) -> Self { Self { - message_editor, + source: Arc::new(source), + mention_set, workspace, history_store, prompt_store, - prompt_capabilities, - available_commands, } } fn completion_for_entry( - entry: 
ContextPickerEntry, + entry: PromptContextEntry, source_range: Range, - message_editor: WeakEntity, + mention_set: WeakEntity, workspace: &Entity, cx: &mut App, ) -> Option { match entry { - ContextPickerEntry::Mode(mode) => Some(Completion { + PromptContextEntry::Mode(mode) => Some(Completion { replace_range: source_range, new_text: format!("@{} ", mode.keyword()), label: CodeLabel::plain(mode.label().to_string(), None), @@ -117,8 +227,8 @@ impl ContextPickerCompletionProvider { // inserted confirm: Some(Arc::new(|_, _, _| true)), }), - ContextPickerEntry::Action(action) => { - Self::completion_for_action(action, source_range, message_editor, workspace, cx) + PromptContextEntry::Action(action) => { + Self::completion_for_action(action, source_range, mention_set, workspace, cx) } } } @@ -127,7 +237,9 @@ impl ContextPickerCompletionProvider { thread_entry: HistoryEntry, source_range: Range, recent: bool, - editor: WeakEntity, + source: Arc, + mention_set: WeakEntity, + workspace: Entity, cx: &mut App, ) -> Completion { let uri = thread_entry.mention_uri(); @@ -155,8 +267,10 @@ impl ContextPickerCompletionProvider { thread_entry.title().clone(), source_range.start, new_text_len - 1, - editor, uri, + source, + mention_set, + workspace, )), } } @@ -164,7 +278,9 @@ impl ContextPickerCompletionProvider { fn completion_for_rules( rule: RulesContextEntry, source_range: Range, - editor: WeakEntity, + source: Arc, + mention_set: WeakEntity, + workspace: Entity, cx: &mut App, ) -> Completion { let uri = MentionUri::Rule { @@ -188,8 +304,10 @@ impl ContextPickerCompletionProvider { rule.title, source_range.start, new_text_len - 1, - editor, uri, + source, + mention_set, + workspace, )), } } @@ -200,20 +318,18 @@ impl ContextPickerCompletionProvider { is_recent: bool, is_directory: bool, source_range: Range, - message_editor: WeakEntity, + source: Arc, + mention_set: WeakEntity, + workspace: Entity, project: Entity, cx: &mut App, ) -> Option { let path_style = project.read(cx).path_style(cx); let (file_name, directory) = - crate::context_picker::file_context_picker::extract_file_name_and_directory( - &project_path.path, - path_prefix, - path_style, - ); + extract_file_name_and_directory(&project_path.path, path_prefix, path_style); let label = - build_code_label_for_full_path(&file_name, directory.as_ref().map(|s| s.as_ref()), cx); + build_code_label_for_path(&file_name, directory.as_ref().map(|s| s.as_ref()), None, cx); let abs_path = project.read(cx).absolute_path(&project_path, cx)?; @@ -246,8 +362,10 @@ impl ContextPickerCompletionProvider { file_name, source_range.start, new_text_len - 1, - message_editor, uri, + source, + mention_set, + workspace, )), }) } @@ -255,7 +373,8 @@ impl ContextPickerCompletionProvider { fn completion_for_symbol( symbol: Symbol, source_range: Range, - message_editor: WeakEntity, + source: Arc, + mention_set: WeakEntity, workspace: Entity, cx: &mut App, ) -> Option { @@ -275,7 +394,12 @@ impl ContextPickerCompletionProvider { ), }; - let label = build_symbol_label(&symbol.name, &file_name, symbol.range.start.0.row + 1, cx); + let label = build_code_label_for_path( + &symbol.name, + Some(&file_name), + Some(symbol.range.start.0.row + 1), + cx, + ); let uri = MentionUri::Symbol { abs_path, @@ -299,8 +423,10 @@ impl ContextPickerCompletionProvider { symbol.name.into(), source_range.start, new_text_len - 1, - message_editor, uri, + source, + mention_set, + workspace, )), }) } @@ -308,7 +434,9 @@ impl ContextPickerCompletionProvider { fn completion_for_fetch( source_range: 
Range, url_to_fetch: SharedString, - message_editor: WeakEntity, + source: Arc, + mention_set: WeakEntity, + workspace: Entity, cx: &mut App, ) -> Option { let new_text = format!("@fetch {} ", url_to_fetch); @@ -333,21 +461,23 @@ impl ContextPickerCompletionProvider { url_to_fetch.to_string().into(), source_range.start, new_text.len() - 1, - message_editor, mention_uri, + source, + mention_set, + workspace, )), }) } pub(crate) fn completion_for_action( - action: ContextPickerAction, + action: PromptContextAction, source_range: Range, - message_editor: WeakEntity, + mention_set: WeakEntity, workspace: &Entity, cx: &mut App, ) -> Option { let (new_text, on_action) = match action { - ContextPickerAction::AddSelections => { + PromptContextAction::AddSelections => { const PLACEHOLDER: &str = "selection "; let selections = selection_ranges(workspace, cx) .into_iter() @@ -367,12 +497,12 @@ impl ContextPickerCompletionProvider { let source_range = source_range.clone(); move |_, window: &mut Window, cx: &mut App| { let selections = selections.clone(); - let message_editor = message_editor.clone(); + let mention_set = mention_set.clone(); let source_range = source_range.clone(); window.defer(cx, move |window, cx| { - message_editor - .update(cx, |message_editor, cx| { - message_editor.confirm_mention_for_selection( + mention_set + .update(cx, |store, cx| { + store.confirm_mention_for_selection( source_range, selections, window, @@ -406,12 +536,8 @@ impl ContextPickerCompletionProvider { }) } - fn search_slash_commands( - &self, - query: String, - cx: &mut App, - ) -> Task> { - let commands = self.available_commands.borrow().clone(); + fn search_slash_commands(&self, query: String, cx: &mut App) -> Task> { + let commands = self.source.available_commands(cx); if commands.is_empty() { return Task::ready(Vec::new()); } @@ -443,7 +569,7 @@ impl ContextPickerCompletionProvider { fn search_mentions( &self, - mode: Option, + mode: Option, query: String, cancellation_flag: Arc, cx: &mut App, @@ -452,7 +578,7 @@ impl ContextPickerCompletionProvider { return Task::ready(Vec::default()); }; match mode { - Some(ContextPickerMode::File) => { + Some(PromptContextType::File) => { let search_files_task = search_files(query, cancellation_flag, &workspace, cx); cx.background_spawn(async move { search_files_task @@ -463,7 +589,7 @@ impl ContextPickerCompletionProvider { }) } - Some(ContextPickerMode::Symbol) => { + Some(PromptContextType::Symbol) => { let search_symbols_task = search_symbols(query, cancellation_flag, &workspace, cx); cx.background_spawn(async move { search_symbols_task @@ -474,7 +600,7 @@ impl ContextPickerCompletionProvider { }) } - Some(ContextPickerMode::Thread) => { + Some(PromptContextType::Thread) => { let search_threads_task = search_threads(query, cancellation_flag, &self.history_store, cx); cx.background_spawn(async move { @@ -486,7 +612,7 @@ impl ContextPickerCompletionProvider { }) } - Some(ContextPickerMode::Fetch) => { + Some(PromptContextType::Fetch) => { if !query.is_empty() { Task::ready(vec![Match::Fetch(query.into())]) } else { @@ -494,7 +620,7 @@ impl ContextPickerCompletionProvider { } } - Some(ContextPickerMode::Rules) => { + Some(PromptContextType::Rules) => { if let Some(prompt_store) = self.prompt_store.as_ref() { let search_rules_task = search_rules(query, cancellation_flag, prompt_store, cx); @@ -584,9 +710,8 @@ impl ContextPickerCompletionProvider { let mut recent = Vec::with_capacity(6); let mut mentions = self - .message_editor - .read_with(cx, |message_editor, _cx| 
message_editor.mentions()) - .unwrap_or_default(); + .mention_set + .read_with(cx, |store, _cx| store.mentions()); let workspace = workspace.read(cx); let project = workspace.project().read(cx); let include_root_name = workspace.visible_worktrees(cx).count() > 1; @@ -637,7 +762,7 @@ impl ContextPickerCompletionProvider { }), ); - if self.prompt_capabilities.borrow().embedded_context { + if self.source.supports_context(PromptContextType::Thread, cx) { const RECENT_COUNT: usize = 2; let threads = self .history_store @@ -658,15 +783,14 @@ impl ContextPickerCompletionProvider { &self, workspace: &Entity, cx: &mut App, - ) -> Vec { - let embedded_context = self.prompt_capabilities.borrow().embedded_context; + ) -> Vec { let mut entries = vec![ - ContextPickerEntry::Mode(ContextPickerMode::File), - ContextPickerEntry::Mode(ContextPickerMode::Symbol), + PromptContextEntry::Mode(PromptContextType::File), + PromptContextEntry::Mode(PromptContextType::Symbol), ]; - if embedded_context { - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Thread)); + if self.source.supports_context(PromptContextType::Thread, cx) { + entries.push(PromptContextEntry::Mode(PromptContextType::Thread)); } let has_selection = workspace @@ -679,53 +803,25 @@ impl ContextPickerCompletionProvider { }) }); if has_selection { - entries.push(ContextPickerEntry::Action( - ContextPickerAction::AddSelections, + entries.push(PromptContextEntry::Action( + PromptContextAction::AddSelections, )); } - if embedded_context { - if self.prompt_store.is_some() { - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Rules)); - } + if self.prompt_store.is_some() && self.source.supports_context(PromptContextType::Rules, cx) + { + entries.push(PromptContextEntry::Mode(PromptContextType::Rules)); + } - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Fetch)); + if self.source.supports_context(PromptContextType::Fetch, cx) { + entries.push(PromptContextEntry::Mode(PromptContextType::Fetch)); } entries } } -fn build_symbol_label(symbol_name: &str, file_name: &str, line: u32, cx: &App) -> CodeLabel { - let comment_id = cx.theme().syntax().highlight_id("comment").map(HighlightId); - let mut label = CodeLabelBuilder::default(); - - label.push_str(symbol_name, None); - label.push_str(" ", None); - label.push_str(&format!("{} L{}", file_name, line), comment_id); - - label.build() -} - -fn build_code_label_for_full_path(file_name: &str, directory: Option<&str>, cx: &App) -> CodeLabel { - let path = cx - .theme() - .syntax() - .highlight_id("variable") - .map(HighlightId); - let mut label = CodeLabelBuilder::default(); - - label.push_str(file_name, None); - label.push_str(" ", None); - - if let Some(directory) = directory { - label.push_str(directory, path); - } - - label.build() -} - -impl CompletionProvider for ContextPickerCompletionProvider { +impl CompletionProvider for PromptCompletionProvider { fn completions( &self, _excerpt_id: ExcerptId, @@ -735,17 +831,13 @@ impl CompletionProvider for ContextPickerCompletionProvider { _window: &mut Window, cx: &mut Context, ) -> Task>> { - let state = buffer.update(cx, |buffer, _cx| { + let state = buffer.update(cx, |buffer, cx| { let position = buffer_position.to_point(buffer); let line_start = Point::new(position.row, 0); let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); let line = lines.next()?; - ContextCompletion::try_parse( - line, - offset_to_line, - self.prompt_capabilities.borrow().embedded_context, - ) + 
ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) }); let Some(state) = state else { return Task::ready(Ok(Vec::new())); @@ -760,8 +852,8 @@ impl CompletionProvider for ContextPickerCompletionProvider { let source_range = snapshot.anchor_before(state.source_range().start) ..snapshot.anchor_after(state.source_range().end); - let editor = self.message_editor.clone(); - + let source = self.source.clone(); + let mention_set = self.mention_set.downgrade(); match state { ContextCompletion::SlashCommand(SlashCommandCompletion { command, argument, .. @@ -778,7 +870,8 @@ impl CompletionProvider for ContextPickerCompletionProvider { format!("/{} ", command.name) }; - let is_missing_argument = argument.is_none() && command.input.is_some(); + let is_missing_argument = + command.requires_argument && argument.is_none(); Completion { replace_range: source_range.clone(), new_text, @@ -792,26 +885,18 @@ impl CompletionProvider for ContextPickerCompletionProvider { snippet_deduplication_key: None, insert_text_mode: None, confirm: Some(Arc::new({ - let editor = editor.clone(); + let source = source.clone(); move |intent, _window, cx| { if !is_missing_argument { cx.defer({ - let editor = editor.clone(); - move |cx| { - editor - .update(cx, |editor, cx| { - match intent { - CompletionIntent::Complete - | CompletionIntent::CompleteWithInsert - | CompletionIntent::CompleteWithReplace => { - if !is_missing_argument { - editor.send(cx); - } - } - CompletionIntent::Compose => {} - } - }) - .ok(); + let source = source.clone(); + move |cx| match intent { + CompletionIntent::Complete + | CompletionIntent::CompleteWithInsert + | CompletionIntent::CompleteWithReplace => { + source.confirm_command(cx); + } + CompletionIntent::Compose => {} } }); } @@ -869,7 +954,9 @@ impl CompletionProvider for ContextPickerCompletionProvider { is_recent, mat.is_dir, source_range.clone(), - editor.clone(), + source.clone(), + mention_set.clone(), + workspace.clone(), project.clone(), cx, ) @@ -879,7 +966,8 @@ impl CompletionProvider for ContextPickerCompletionProvider { Self::completion_for_symbol( symbol, source_range.clone(), - editor.clone(), + source.clone(), + mention_set.clone(), workspace.clone(), cx, ) @@ -889,7 +977,9 @@ impl CompletionProvider for ContextPickerCompletionProvider { thread, source_range.clone(), false, - editor.clone(), + source.clone(), + mention_set.clone(), + workspace.clone(), cx, )), @@ -897,21 +987,27 @@ impl CompletionProvider for ContextPickerCompletionProvider { thread, source_range.clone(), true, - editor.clone(), + source.clone(), + mention_set.clone(), + workspace.clone(), cx, )), Match::Rules(user_rules) => Some(Self::completion_for_rules( user_rules, source_range.clone(), - editor.clone(), + source.clone(), + mention_set.clone(), + workspace.clone(), cx, )), Match::Fetch(url) => Self::completion_for_fetch( source_range.clone(), url, - editor.clone(), + source.clone(), + mention_set.clone(), + workspace.clone(), cx, ), @@ -919,7 +1015,7 @@ impl CompletionProvider for ContextPickerCompletionProvider { Self::completion_for_entry( entry, source_range.clone(), - editor.clone(), + mention_set.clone(), &workspace, cx, ) @@ -957,27 +1053,24 @@ impl CompletionProvider for ContextPickerCompletionProvider { let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); if let Some(line) = lines.next() { - ContextCompletion::try_parse( - line, - offset_to_line, - self.prompt_capabilities.borrow().embedded_context, 
- ) - .filter(|completion| { - // Right now we don't support completing arguments of slash commands - let is_slash_command_with_argument = matches!( - completion, - ContextCompletion::SlashCommand(SlashCommandCompletion { - argument: Some(_), - .. - }) - ); - !is_slash_command_with_argument - }) - .map(|completion| { - completion.source_range().start <= offset_to_line + position.column as usize - && completion.source_range().end >= offset_to_line + position.column as usize - }) - .unwrap_or(false) + ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) + .filter(|completion| { + // Right now we don't support completing arguments of slash commands + let is_slash_command_with_argument = matches!( + completion, + ContextCompletion::SlashCommand(SlashCommandCompletion { + argument: Some(_), + .. + }) + ); + !is_slash_command_with_argument + }) + .map(|completion| { + completion.source_range().start <= offset_to_line + position.column as usize + && completion.source_range().end + >= offset_to_line + position.column as usize + }) + .unwrap_or(false) } else { false } @@ -992,27 +1085,33 @@ impl CompletionProvider for ContextPickerCompletionProvider { } } -fn confirm_completion_callback( +fn confirm_completion_callback( crease_text: SharedString, start: Anchor, content_len: usize, - message_editor: WeakEntity, mention_uri: MentionUri, + source: Arc, + mention_set: WeakEntity, + workspace: Entity, ) -> Arc bool + Send + Sync> { Arc::new(move |_, window, cx| { - let message_editor = message_editor.clone(); + let source = source.clone(); + let mention_set = mention_set.clone(); let crease_text = crease_text.clone(); let mention_uri = mention_uri.clone(); + let workspace = workspace.clone(); window.defer(cx, move |window, cx| { - message_editor + mention_set .clone() - .update(cx, |message_editor, cx| { - message_editor + .update(cx, |mention_set, cx| { + mention_set .confirm_mention_completion( crease_text, start, content_len, mention_uri, + source.supports_images(cx), + &workspace, window, cx, ) @@ -1037,11 +1136,15 @@ impl ContextCompletion { } } - fn try_parse(line: &str, offset_to_line: usize, allow_non_file_mentions: bool) -> Option { + fn try_parse( + line: &str, + offset_to_line: usize, + supported_modes: &[PromptContextType], + ) -> Option { if let Some(command) = SlashCommandCompletion::try_parse(line, offset_to_line) { Some(Self::SlashCommand(command)) } else if let Some(mention) = - MentionCompletion::try_parse(allow_non_file_mentions, line, offset_to_line) + MentionCompletion::try_parse(line, offset_to_line, supported_modes) { Some(Self::Mention(mention)) } else { @@ -1098,12 +1201,16 @@ impl SlashCommandCompletion { #[derive(Debug, Default, PartialEq)] struct MentionCompletion { source_range: Range, - mode: Option, + mode: Option, argument: Option, } impl MentionCompletion { - fn try_parse(allow_non_file_mentions: bool, line: &str, offset_to_line: usize) -> Option { + fn try_parse( + line: &str, + offset_to_line: usize, + supported_modes: &[PromptContextType], + ) -> Option { let last_mention_start = line.rfind('@')?; // No whitespace immediately after '@' @@ -1137,8 +1244,8 @@ impl MentionCompletion { // Safe since we check no leading whitespace above end += mode_text.len(); - if let Some(parsed_mode) = ContextPickerMode::try_from(mode_text).ok() - && (allow_non_file_mentions || matches!(parsed_mode, ContextPickerMode::File)) + if let Some(parsed_mode) = PromptContextType::try_from(mode_text).ok() + && supported_modes.contains(&parsed_mode) { mode = 
Some(parsed_mode); } else { @@ -1172,6 +1279,339 @@ impl MentionCompletion { } } +pub(crate) fn search_files( + query: String, + cancellation_flag: Arc, + workspace: &Entity, + cx: &App, +) -> Task> { + if query.is_empty() { + let workspace = workspace.read(cx); + let project = workspace.project().read(cx); + let visible_worktrees = workspace.visible_worktrees(cx).collect::>(); + let include_root_name = visible_worktrees.len() > 1; + + let recent_matches = workspace + .recent_navigation_history(Some(10), cx) + .into_iter() + .map(|(project_path, _)| { + let path_prefix = if include_root_name { + project + .worktree_for_id(project_path.worktree_id, cx) + .map(|wt| wt.read(cx).root_name().into()) + .unwrap_or_else(|| RelPath::empty().into()) + } else { + RelPath::empty().into() + }; + + FileMatch { + mat: PathMatch { + score: 0., + positions: Vec::new(), + worktree_id: project_path.worktree_id.to_usize(), + path: project_path.path, + path_prefix, + distance_to_relative_ancestor: 0, + is_dir: false, + }, + is_recent: true, + } + }); + + let file_matches = visible_worktrees.into_iter().flat_map(|worktree| { + let worktree = worktree.read(cx); + let path_prefix: Arc = if include_root_name { + worktree.root_name().into() + } else { + RelPath::empty().into() + }; + worktree.entries(false, 0).map(move |entry| FileMatch { + mat: PathMatch { + score: 0., + positions: Vec::new(), + worktree_id: worktree.id().to_usize(), + path: entry.path.clone(), + path_prefix: path_prefix.clone(), + distance_to_relative_ancestor: 0, + is_dir: entry.is_dir(), + }, + is_recent: false, + }) + }); + + Task::ready(recent_matches.chain(file_matches).collect()) + } else { + let worktrees = workspace.read(cx).visible_worktrees(cx).collect::>(); + let include_root_name = worktrees.len() > 1; + let candidate_sets = worktrees + .into_iter() + .map(|worktree| { + let worktree = worktree.read(cx); + + PathMatchCandidateSet { + snapshot: worktree.snapshot(), + include_ignored: worktree.root_entry().is_some_and(|entry| entry.is_ignored), + include_root_name, + candidates: project::Candidates::Entries, + } + }) + .collect::>(); + + let executor = cx.background_executor().clone(); + cx.foreground_executor().spawn(async move { + fuzzy::match_path_sets( + candidate_sets.as_slice(), + query.as_str(), + &None, + false, + 100, + &cancellation_flag, + executor, + ) + .await + .into_iter() + .map(|mat| FileMatch { + mat, + is_recent: false, + }) + .collect::>() + }) + } +} + +pub(crate) fn search_symbols( + query: String, + cancellation_flag: Arc, + workspace: &Entity, + cx: &mut App, +) -> Task> { + let symbols_task = workspace.update(cx, |workspace, cx| { + workspace + .project() + .update(cx, |project, cx| project.symbols(&query, cx)) + }); + let project = workspace.read(cx).project().clone(); + cx.spawn(async move |cx| { + let Some(symbols) = symbols_task.await.log_err() else { + return Vec::new(); + }; + let Some((visible_match_candidates, external_match_candidates)): Option<(Vec<_>, Vec<_>)> = + project + .update(cx, |project, cx| { + symbols + .iter() + .enumerate() + .map(|(id, symbol)| { + StringMatchCandidate::new(id, symbol.label.filter_text()) + }) + .partition(|candidate| match &symbols[candidate.id].path { + SymbolLocation::InProject(project_path) => project + .entry_for_path(project_path, cx) + .is_some_and(|e| !e.is_ignored), + SymbolLocation::OutsideProject { .. 
} => false, + }) + }) + .log_err() + else { + return Vec::new(); + }; + + const MAX_MATCHES: usize = 100; + let mut visible_matches = cx.background_executor().block(fuzzy::match_strings( + &visible_match_candidates, + &query, + false, + true, + MAX_MATCHES, + &cancellation_flag, + cx.background_executor().clone(), + )); + let mut external_matches = cx.background_executor().block(fuzzy::match_strings( + &external_match_candidates, + &query, + false, + true, + MAX_MATCHES - visible_matches.len().min(MAX_MATCHES), + &cancellation_flag, + cx.background_executor().clone(), + )); + let sort_key_for_match = |mat: &StringMatch| { + let symbol = &symbols[mat.candidate_id]; + (Reverse(OrderedFloat(mat.score)), symbol.label.filter_text()) + }; + + visible_matches.sort_unstable_by_key(sort_key_for_match); + external_matches.sort_unstable_by_key(sort_key_for_match); + let mut matches = visible_matches; + matches.append(&mut external_matches); + + matches + .into_iter() + .map(|mut mat| { + let symbol = symbols[mat.candidate_id].clone(); + let filter_start = symbol.label.filter_range.start; + for position in &mut mat.positions { + *position += filter_start; + } + SymbolMatch { symbol } + }) + .collect() + }) +} + +pub(crate) fn search_threads( + query: String, + cancellation_flag: Arc, + thread_store: &Entity, + cx: &mut App, +) -> Task> { + let threads = thread_store.read(cx).entries().collect(); + if query.is_empty() { + return Task::ready(threads); + } + + let executor = cx.background_executor().clone(); + cx.background_spawn(async move { + let candidates = threads + .iter() + .enumerate() + .map(|(id, thread)| StringMatchCandidate::new(id, thread.title())) + .collect::>(); + let matches = fuzzy::match_strings( + &candidates, + &query, + false, + true, + 100, + &cancellation_flag, + executor, + ) + .await; + + matches + .into_iter() + .map(|mat| threads[mat.candidate_id].clone()) + .collect() + }) +} + +pub(crate) fn search_rules( + query: String, + cancellation_flag: Arc, + prompt_store: &Entity, + cx: &mut App, +) -> Task> { + let search_task = prompt_store.read(cx).search(query, cancellation_flag, cx); + cx.background_spawn(async move { + search_task + .await + .into_iter() + .flat_map(|metadata| { + // Default prompts are filtered out as they are automatically included. 
+ if metadata.default { + None + } else { + match metadata.id { + PromptId::EditWorkflow => None, + PromptId::User { uuid } => Some(RulesContextEntry { + prompt_id: uuid, + title: metadata.title?, + }), + } + } + }) + .collect::>() + }) +} + +pub struct SymbolMatch { + pub symbol: Symbol, +} + +pub struct FileMatch { + pub mat: PathMatch, + pub is_recent: bool, +} + +pub fn extract_file_name_and_directory( + path: &RelPath, + path_prefix: &RelPath, + path_style: PathStyle, +) -> (SharedString, Option) { + // If path is empty, this means we're matching with the root directory itself + // so we use the path_prefix as the name + if path.is_empty() && !path_prefix.is_empty() { + return (path_prefix.display(path_style).to_string().into(), None); + } + + let full_path = path_prefix.join(path); + let file_name = full_path.file_name().unwrap_or_default(); + let display_path = full_path.display(path_style); + let (directory, file_name) = display_path.split_at(display_path.len() - file_name.len()); + ( + file_name.to_string().into(), + Some(SharedString::new(directory)).filter(|dir| !dir.is_empty()), + ) +} + +fn build_code_label_for_path( + file: &str, + directory: Option<&str>, + line_number: Option, + cx: &App, +) -> CodeLabel { + let variable_highlight_id = cx + .theme() + .syntax() + .highlight_id("variable") + .map(HighlightId); + let mut label = CodeLabelBuilder::default(); + + label.push_str(file, None); + label.push_str(" ", None); + + if let Some(directory) = directory { + label.push_str(directory, variable_highlight_id); + } + if let Some(line_number) = line_number { + label.push_str(&format!(" L{}", line_number), variable_highlight_id); + } + label.build() +} + +fn selection_ranges( + workspace: &Entity, + cx: &mut App, +) -> Vec<(Entity, Range)> { + let Some(editor) = workspace + .read(cx) + .active_item(cx) + .and_then(|item| item.act_as::(cx)) + else { + return Vec::new(); + }; + + editor.update(cx, |editor, cx| { + let selections = editor.selections.all_adjusted(&editor.display_snapshot(cx)); + + let buffer = editor.buffer().clone().read(cx); + let snapshot = buffer.snapshot(cx); + + selections + .into_iter() + .map(|s| snapshot.anchor_after(s.start)..snapshot.anchor_before(s.end)) + .flat_map(|range| { + let (start_buffer, start) = buffer.text_anchor_for_position(range.start, cx)?; + let (end_buffer, end) = buffer.text_anchor_for_position(range.end, cx)?; + if start_buffer != end_buffer { + return None; + } + Some((start_buffer, start..end)) + }) + .collect::>() + }) +} + #[cfg(test)] mod tests { use super::*; @@ -1245,10 +1685,15 @@ mod tests { #[test] fn test_mention_completion_parse() { - assert_eq!(MentionCompletion::try_parse(true, "Lorem Ipsum", 0), None); + let supported_modes = vec![PromptContextType::File, PromptContextType::Symbol]; + + assert_eq!( + MentionCompletion::try_parse("Lorem Ipsum", 0, &supported_modes), + None + ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @", 0), + MentionCompletion::try_parse("Lorem @", 0, &supported_modes), Some(MentionCompletion { source_range: 6..7, mode: None, @@ -1257,52 +1702,52 @@ mod tests { ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @file", 0), + MentionCompletion::try_parse("Lorem @file", 0, &supported_modes), Some(MentionCompletion { source_range: 6..11, - mode: Some(ContextPickerMode::File), + mode: Some(PromptContextType::File), argument: None, }) ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @file ", 0), + MentionCompletion::try_parse("Lorem @file ", 0, &supported_modes), 
Some(MentionCompletion { source_range: 6..12, - mode: Some(ContextPickerMode::File), + mode: Some(PromptContextType::File), argument: None, }) ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @file main.rs", 0), + MentionCompletion::try_parse("Lorem @file main.rs", 0, &supported_modes), Some(MentionCompletion { source_range: 6..19, - mode: Some(ContextPickerMode::File), + mode: Some(PromptContextType::File), argument: Some("main.rs".to_string()), }) ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @file main.rs ", 0), + MentionCompletion::try_parse("Lorem @file main.rs ", 0, &supported_modes), Some(MentionCompletion { source_range: 6..19, - mode: Some(ContextPickerMode::File), + mode: Some(PromptContextType::File), argument: Some("main.rs".to_string()), }) ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @file main.rs Ipsum", 0), + MentionCompletion::try_parse("Lorem @file main.rs Ipsum", 0, &supported_modes), Some(MentionCompletion { source_range: 6..19, - mode: Some(ContextPickerMode::File), + mode: Some(PromptContextType::File), argument: Some("main.rs".to_string()), }) ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @main", 0), + MentionCompletion::try_parse("Lorem @main", 0, &supported_modes), Some(MentionCompletion { source_range: 6..11, mode: None, @@ -1311,7 +1756,7 @@ mod tests { ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @main ", 0), + MentionCompletion::try_parse("Lorem @main ", 0, &supported_modes), Some(MentionCompletion { source_range: 6..12, mode: None, @@ -1319,41 +1764,47 @@ mod tests { }) ); - assert_eq!(MentionCompletion::try_parse(true, "Lorem @main m", 0), None); + assert_eq!( + MentionCompletion::try_parse("Lorem @main m", 0, &supported_modes), + None + ); - assert_eq!(MentionCompletion::try_parse(true, "test@", 0), None); + assert_eq!( + MentionCompletion::try_parse("test@", 0, &supported_modes), + None + ); // Allowed non-file mentions assert_eq!( - MentionCompletion::try_parse(true, "Lorem @symbol main", 0), + MentionCompletion::try_parse("Lorem @symbol main", 0, &supported_modes), Some(MentionCompletion { source_range: 6..18, - mode: Some(ContextPickerMode::Symbol), + mode: Some(PromptContextType::Symbol), argument: Some("main".to_string()), }) ); // Disallowed non-file mentions assert_eq!( - MentionCompletion::try_parse(false, "Lorem @symbol main", 0), + MentionCompletion::try_parse("Lorem @symbol main", 0, &[PromptContextType::File]), None ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem@symbol", 0), + MentionCompletion::try_parse("Lorem@symbol", 0, &supported_modes), None, "Should not parse mention inside word" ); assert_eq!( - MentionCompletion::try_parse(true, "Lorem @ file", 0), + MentionCompletion::try_parse("Lorem @ file", 0, &supported_modes), None, "Should not parse with a space after @" ); assert_eq!( - MentionCompletion::try_parse(true, "@ file", 0), + MentionCompletion::try_parse("@ file", 0, &supported_modes), None, "Should not parse with a space after @ at the start of the line" ); diff --git a/crates/agent_ui/src/context.rs b/crates/agent_ui/src/context.rs index 7f497f9cab9eae7ca9fa2a573100ab2993546228..ad8c95ba3e61f8f51d2b70ce59d0c8a9451e6571 100644 --- a/crates/agent_ui/src/context.rs +++ b/crates/agent_ui/src/context.rs @@ -1,764 +1,10 @@ -use agent::outline; -use assistant_text_thread::TextThread; -use futures::future; -use futures::{FutureExt, future::Shared}; -use gpui::{App, AppContext as _, ElementId, Entity, SharedString, Task}; -use language::Buffer; +use 
crate::mention_set::Mention; +use gpui::{AppContext as _, Entity, Task}; use language_model::{LanguageModelImage, LanguageModelRequestMessage, MessageContent}; -use project::{Project, ProjectEntryId, ProjectPath, Worktree}; -use prompt_store::{PromptStore, UserPromptId}; -use ref_cast::RefCast; -use rope::Point; -use std::fmt::{self, Display, Formatter, Write as _}; -use std::hash::{Hash, Hasher}; -use std::path::PathBuf; -use std::{ops::Range, path::Path, sync::Arc}; -use text::{Anchor, OffsetRangeExt as _}; -use ui::IconName; -use util::markdown::MarkdownCodeBlock; -use util::rel_path::RelPath; -use util::{ResultExt as _, post_inc}; +use ui::App; +use util::ResultExt as _; -pub const RULES_ICON: IconName = IconName::Reader; - -pub enum ContextKind { - File, - Directory, - Symbol, - Selection, - FetchedUrl, - Thread, - TextThread, - Rules, - Image, -} - -impl ContextKind { - pub fn icon(&self) -> IconName { - match self { - ContextKind::File => IconName::File, - ContextKind::Directory => IconName::Folder, - ContextKind::Symbol => IconName::Code, - ContextKind::Selection => IconName::Reader, - ContextKind::FetchedUrl => IconName::ToolWeb, - ContextKind::Thread => IconName::Thread, - ContextKind::TextThread => IconName::TextThread, - ContextKind::Rules => RULES_ICON, - ContextKind::Image => IconName::Image, - } - } -} - -/// Handle for context that can be attached to a user message. -/// -/// This uses IDs that are stable enough for tracking renames and identifying when context has -/// already been added to the thread. To use this in a set, wrap it in `AgentContextKey` to opt in -/// to `PartialEq` and `Hash` impls that use the subset of the fields used for this stable identity. -#[derive(Debug, Clone)] -pub enum AgentContextHandle { - File(FileContextHandle), - Directory(DirectoryContextHandle), - Symbol(SymbolContextHandle), - Selection(SelectionContextHandle), - FetchedUrl(FetchedUrlContext), - Thread(ThreadContextHandle), - TextThread(TextThreadContextHandle), - Rules(RulesContextHandle), - Image(ImageContext), -} - -impl AgentContextHandle { - pub fn id(&self) -> ContextId { - match self { - Self::File(context) => context.context_id, - Self::Directory(context) => context.context_id, - Self::Symbol(context) => context.context_id, - Self::Selection(context) => context.context_id, - Self::FetchedUrl(context) => context.context_id, - Self::Thread(context) => context.context_id, - Self::TextThread(context) => context.context_id, - Self::Rules(context) => context.context_id, - Self::Image(context) => context.context_id, - } - } - - pub fn element_id(&self, name: SharedString) -> ElementId { - ElementId::NamedInteger(name, self.id().0) - } -} - -/// Loaded context that can be attached to a user message. This can be thought of as a -/// snapshot of the context along with an `AgentContextHandle`. 
-#[derive(Debug, Clone)] -pub enum AgentContext { - File(FileContext), - Directory(DirectoryContext), - Symbol(SymbolContext), - Selection(SelectionContext), - FetchedUrl(FetchedUrlContext), - Thread(ThreadContext), - TextThread(TextThreadContext), - Rules(RulesContext), - Image(ImageContext), -} - -impl AgentContext { - pub fn handle(&self) -> AgentContextHandle { - match self { - AgentContext::File(context) => AgentContextHandle::File(context.handle.clone()), - AgentContext::Directory(context) => { - AgentContextHandle::Directory(context.handle.clone()) - } - AgentContext::Symbol(context) => AgentContextHandle::Symbol(context.handle.clone()), - AgentContext::Selection(context) => { - AgentContextHandle::Selection(context.handle.clone()) - } - AgentContext::FetchedUrl(context) => AgentContextHandle::FetchedUrl(context.clone()), - AgentContext::Thread(context) => AgentContextHandle::Thread(context.handle.clone()), - AgentContext::TextThread(context) => { - AgentContextHandle::TextThread(context.handle.clone()) - } - AgentContext::Rules(context) => AgentContextHandle::Rules(context.handle.clone()), - AgentContext::Image(context) => AgentContextHandle::Image(context.clone()), - } - } -} - -/// ID created at time of context add, for use in ElementId. This is not the stable identity of a -/// context, instead that's handled by the `PartialEq` and `Hash` impls of `AgentContextKey`. -#[derive(Debug, Copy, Clone)] -pub struct ContextId(u64); - -impl ContextId { - pub fn zero() -> Self { - ContextId(0) - } - - fn for_lookup() -> Self { - ContextId(u64::MAX) - } - - pub fn post_inc(&mut self) -> Self { - Self(post_inc(&mut self.0)) - } -} - -/// File context provides the entire contents of a file. -/// -/// This holds an `Entity` so that file path renames affect its display and so that it can -/// be opened even if the file has been deleted. An alternative might be to use `ProjectEntryId`, -/// but then when deleted there is no path info or ability to open. 
-#[derive(Debug, Clone)] -pub struct FileContextHandle { - pub buffer: Entity, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct FileContext { - pub handle: FileContextHandle, - pub full_path: String, - pub text: SharedString, - pub is_outline: bool, -} - -impl FileContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.buffer == other.buffer - } - - pub fn hash_for_key(&self, state: &mut H) { - self.buffer.hash(state) - } - - pub fn project_path(&self, cx: &App) -> Option { - let file = self.buffer.read(cx).file()?; - Some(ProjectPath { - worktree_id: file.worktree_id(cx), - path: file.path().clone(), - }) - } - - fn load(self, cx: &App) -> Task> { - let buffer_ref = self.buffer.read(cx); - let Some(file) = buffer_ref.file() else { - log::error!("file context missing path"); - return Task::ready(None); - }; - let full_path = file.full_path(cx).to_string_lossy().into_owned(); - let rope = buffer_ref.as_rope().clone(); - let buffer = self.buffer.clone(); - - cx.spawn(async move |cx| { - let buffer_content = - outline::get_buffer_content_or_outline(buffer.clone(), Some(&full_path), &cx) - .await - .unwrap_or_else(|_| outline::BufferContent { - text: rope.to_string(), - is_outline: false, - }); - - let context = AgentContext::File(FileContext { - handle: self, - full_path, - text: buffer_content.text.into(), - is_outline: buffer_content.is_outline, - }); - Some(context) - }) - } -} - -impl Display for FileContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}", - MarkdownCodeBlock { - tag: &codeblock_tag(&self.full_path, None), - text: &self.text, - } - ) - } -} - -/// Directory contents provides the entire contents of text files in a directory. -/// -/// This has a `ProjectEntryId` so that it follows renames. -#[derive(Debug, Clone)] -pub struct DirectoryContextHandle { - pub entry_id: ProjectEntryId, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct DirectoryContext { - pub handle: DirectoryContextHandle, - pub full_path: String, - pub descendants: Vec, -} - -#[derive(Debug, Clone)] -pub struct DirectoryContextDescendant { - /// Path within the directory. 
- pub rel_path: Arc, - pub fenced_codeblock: SharedString, -} - -impl DirectoryContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.entry_id == other.entry_id - } - - pub fn hash_for_key(&self, state: &mut H) { - self.entry_id.hash(state) - } - - fn load(self, project: Entity, cx: &mut App) -> Task> { - let Some(worktree) = project.read(cx).worktree_for_entry(self.entry_id, cx) else { - return Task::ready(None); - }; - let worktree_ref = worktree.read(cx); - let Some(entry) = worktree_ref.entry_for_id(self.entry_id) else { - return Task::ready(None); - }; - if entry.is_file() { - log::error!("DirectoryContext unexpectedly refers to a file."); - return Task::ready(None); - } - - let directory_path = entry.path.clone(); - let directory_full_path = worktree_ref - .full_path(&directory_path) - .to_string_lossy() - .to_string(); - - let file_paths = collect_files_in_path(worktree_ref, &directory_path); - let descendants_future = future::join_all(file_paths.into_iter().map(|path| { - let worktree_ref = worktree.read(cx); - let worktree_id = worktree_ref.id(); - let full_path = worktree_ref.full_path(&path).to_string_lossy().into_owned(); - - let rel_path = path - .strip_prefix(&directory_path) - .log_err() - .map_or_else(|| path.clone(), |rel_path| rel_path.into()); - - let open_task = project.update(cx, |project, cx| { - project.buffer_store().update(cx, |buffer_store, cx| { - let project_path = ProjectPath { worktree_id, path }; - buffer_store.open_buffer(project_path, cx) - }) - }); - - // TODO: report load errors instead of just logging - let rope_task = cx.spawn(async move |cx| { - let buffer = open_task.await.log_err()?; - let rope = buffer - .read_with(cx, |buffer, _cx| buffer.as_rope().clone()) - .log_err()?; - Some((rope, buffer)) - }); - - cx.background_spawn(async move { - let (rope, _buffer) = rope_task.await?; - let fenced_codeblock = MarkdownCodeBlock { - tag: &codeblock_tag(&full_path, None), - text: &rope.to_string(), - } - .to_string() - .into(); - let descendant = DirectoryContextDescendant { - rel_path, - fenced_codeblock, - }; - Some(descendant) - }) - })); - - cx.background_spawn(async move { - let descendants = descendants_future - .await - .into_iter() - .flatten() - .collect::>(); - let context = AgentContext::Directory(DirectoryContext { - handle: self, - full_path: directory_full_path, - descendants, - }); - Some(context) - }) - } -} - -impl Display for DirectoryContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut is_first = true; - for descendant in &self.descendants { - if !is_first { - writeln!(f)?; - } else { - is_first = false; - } - write!(f, "{}", descendant.fenced_codeblock)?; - } - Ok(()) - } -} - -#[derive(Debug, Clone)] -pub struct SymbolContextHandle { - pub buffer: Entity, - pub symbol: SharedString, - pub range: Range, - /// The range that fully contains the symbol. e.g. for function symbol, this will include not - /// only the signature, but also the body. Not used by `PartialEq` or `Hash` for - /// `AgentContextKey`. 
- pub enclosing_range: Range, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct SymbolContext { - pub handle: SymbolContextHandle, - pub full_path: String, - pub line_range: Range, - pub text: SharedString, -} - -impl SymbolContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.buffer == other.buffer && self.symbol == other.symbol && self.range == other.range - } - - pub fn hash_for_key(&self, state: &mut H) { - self.buffer.hash(state); - self.symbol.hash(state); - self.range.hash(state); - } - - pub fn full_path(&self, cx: &App) -> Option { - Some(self.buffer.read(cx).file()?.full_path(cx)) - } - - pub fn enclosing_line_range(&self, cx: &App) -> Range { - self.enclosing_range - .to_point(&self.buffer.read(cx).snapshot()) - } - - pub fn text(&self, cx: &App) -> SharedString { - self.buffer - .read(cx) - .text_for_range(self.enclosing_range.clone()) - .collect::() - .into() - } - - fn load(self, cx: &App) -> Task> { - let buffer_ref = self.buffer.read(cx); - let Some(file) = buffer_ref.file() else { - log::error!("symbol context's file has no path"); - return Task::ready(None); - }; - let full_path = file.full_path(cx).to_string_lossy().into_owned(); - let line_range = self.enclosing_range.to_point(&buffer_ref.snapshot()); - let text = self.text(cx); - let context = AgentContext::Symbol(SymbolContext { - handle: self, - full_path, - line_range, - text, - }); - Task::ready(Some(context)) - } -} - -impl Display for SymbolContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let code_block = MarkdownCodeBlock { - tag: &codeblock_tag(&self.full_path, Some(self.line_range.clone())), - text: &self.text, - }; - write!(f, "{code_block}",) - } -} - -#[derive(Debug, Clone)] -pub struct SelectionContextHandle { - pub buffer: Entity, - pub range: Range, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct SelectionContext { - pub handle: SelectionContextHandle, - pub full_path: String, - pub line_range: Range, - pub text: SharedString, -} - -impl SelectionContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.buffer == other.buffer && self.range == other.range - } - - pub fn hash_for_key(&self, state: &mut H) { - self.buffer.hash(state); - self.range.hash(state); - } - - pub fn full_path(&self, cx: &App) -> Option { - Some(self.buffer.read(cx).file()?.full_path(cx)) - } - - pub fn line_range(&self, cx: &App) -> Range { - self.range.to_point(&self.buffer.read(cx).snapshot()) - } - - pub fn text(&self, cx: &App) -> SharedString { - self.buffer - .read(cx) - .text_for_range(self.range.clone()) - .collect::() - .into() - } - - fn load(self, cx: &App) -> Task> { - let Some(full_path) = self.full_path(cx) else { - log::error!("selection context's file has no path"); - return Task::ready(None); - }; - let text = self.text(cx); - let context = AgentContext::Selection(SelectionContext { - full_path: full_path.to_string_lossy().into_owned(), - line_range: self.line_range(cx), - text, - handle: self, - }); - - Task::ready(Some(context)) - } -} - -impl Display for SelectionContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let code_block = MarkdownCodeBlock { - tag: &codeblock_tag(&self.full_path, Some(self.line_range.clone())), - text: &self.text, - }; - write!(f, "{code_block}",) - } -} - -#[derive(Debug, Clone)] -pub struct FetchedUrlContext { - pub url: SharedString, - /// Text contents of the fetched url. 
Unlike other context types, the contents of this gets - /// populated when added rather than when sending the message. Not used by `PartialEq` or `Hash` - /// for `AgentContextKey`. - pub text: SharedString, - pub context_id: ContextId, -} - -impl FetchedUrlContext { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.url == other.url - } - - pub fn hash_for_key(&self, state: &mut H) { - self.url.hash(state); - } - - pub fn lookup_key(url: SharedString) -> AgentContextKey { - AgentContextKey(AgentContextHandle::FetchedUrl(FetchedUrlContext { - url, - text: "".into(), - context_id: ContextId::for_lookup(), - })) - } - - pub fn load(self) -> Task> { - Task::ready(Some(AgentContext::FetchedUrl(self))) - } -} - -impl Display for FetchedUrlContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - // TODO: Better format - url and contents are not delimited. - write!(f, "{}\n{}\n", self.url, self.text) - } -} - -#[derive(Debug, Clone)] -pub struct ThreadContextHandle { - pub thread: Entity, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct ThreadContext { - pub handle: ThreadContextHandle, - pub title: SharedString, - pub text: SharedString, -} - -impl ThreadContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.thread == other.thread - } - - pub fn hash_for_key(&self, state: &mut H) { - self.thread.hash(state) - } - - pub fn title(&self, cx: &App) -> SharedString { - self.thread.read(cx).title() - } - - fn load(self, cx: &mut App) -> Task> { - let task = self.thread.update(cx, |thread, cx| thread.summary(cx)); - let title = self.title(cx); - cx.background_spawn(async move { - let text = task.await?; - let context = AgentContext::Thread(ThreadContext { - title, - text, - handle: self, - }); - Some(context) - }) - } -} - -impl Display for ThreadContext { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - // TODO: Better format for this - doesn't distinguish title and contents. 
- write!(f, "{}\n{}\n", &self.title, &self.text.trim()) - } -} - -#[derive(Debug, Clone)] -pub struct TextThreadContextHandle { - pub text_thread: Entity, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct TextThreadContext { - pub handle: TextThreadContextHandle, - pub title: SharedString, - pub text: SharedString, -} - -impl TextThreadContextHandle { - // pub fn lookup_key() -> - pub fn eq_for_key(&self, other: &Self) -> bool { - self.text_thread == other.text_thread - } - - pub fn hash_for_key(&self, state: &mut H) { - self.text_thread.hash(state) - } - - pub fn title(&self, cx: &App) -> SharedString { - self.text_thread.read(cx).summary().or_default() - } - - fn load(self, cx: &App) -> Task> { - let title = self.title(cx); - let text = self.text_thread.read(cx).to_xml(cx); - let context = AgentContext::TextThread(TextThreadContext { - title, - text: text.into(), - handle: self, - }); - Task::ready(Some(context)) - } -} - -impl Display for TextThreadContext { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, " write!(f, "&")?, - '<' => write!(f, "<")?, - '>' => write!(f, ">")?, - '"' => write!(f, """)?, - '\'' => write!(f, "'")?, - _ => write!(f, "{}", c)?, - } - } - writeln!(f, "\">")?; - write!(f, "{}", self.text.trim())?; - write!(f, "\n") - } -} - -#[derive(Debug, Clone)] -pub struct RulesContextHandle { - pub prompt_id: UserPromptId, - pub context_id: ContextId, -} - -#[derive(Debug, Clone)] -pub struct RulesContext { - pub handle: RulesContextHandle, - pub title: Option, - pub text: SharedString, -} - -impl RulesContextHandle { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.prompt_id == other.prompt_id - } - - pub fn hash_for_key(&self, state: &mut H) { - self.prompt_id.hash(state) - } - - pub fn lookup_key(prompt_id: UserPromptId) -> AgentContextKey { - AgentContextKey(AgentContextHandle::Rules(RulesContextHandle { - prompt_id, - context_id: ContextId::for_lookup(), - })) - } - - pub fn load( - self, - prompt_store: &Option>, - cx: &App, - ) -> Task> { - let Some(prompt_store) = prompt_store.as_ref() else { - return Task::ready(None); - }; - let prompt_store = prompt_store.read(cx); - let prompt_id = self.prompt_id.into(); - let Some(metadata) = prompt_store.metadata(prompt_id) else { - return Task::ready(None); - }; - let title = metadata.title; - let text_task = prompt_store.load(prompt_id, cx); - cx.background_spawn(async move { - // TODO: report load errors instead of just logging - let text = text_task.await.log_err()?.into(); - let context = AgentContext::Rules(RulesContext { - handle: self, - title, - text, - }); - Some(context) - }) - } -} - -impl Display for RulesContext { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if let Some(title) = &self.title { - writeln!(f, "Rules title: {}", title)?; - } - let code_block = MarkdownCodeBlock { - tag: "", - text: self.text.trim(), - }; - write!(f, "{code_block}") - } -} - -#[derive(Debug, Clone)] -pub struct ImageContext { - pub project_path: Option, - pub full_path: Option, - pub original_image: Arc, - // TODO: handle this elsewhere and remove `ignore-interior-mutability` opt-out in clippy.toml - // needed due to a false positive of `clippy::mutable_key_type`. 
- pub image_task: Shared>>, - pub context_id: ContextId, -} - -pub enum ImageStatus { - Loading, - Error, - Warning, - Ready, -} - -impl ImageContext { - pub fn eq_for_key(&self, other: &Self) -> bool { - self.original_image.id() == other.original_image.id() - } - - pub fn hash_for_key(&self, state: &mut H) { - self.original_image.id().hash(state); - } - - pub fn image(&self) -> Option { - self.image_task.clone().now_or_never().flatten() - } - - pub fn status(&self, model: Option<&Arc>) -> ImageStatus { - match self.image_task.clone().now_or_never() { - None => ImageStatus::Loading, - Some(None) => ImageStatus::Error, - Some(Some(_)) => { - if model.is_some_and(|model| !model.supports_images()) { - ImageStatus::Warning - } else { - ImageStatus::Ready - } - } - } - } - - pub fn load(self, cx: &App) -> Task> { - cx.background_spawn(async move { - self.image_task.clone().await; - Some(AgentContext::Image(self)) - }) - } -} +use crate::mention_set::MentionSet; #[derive(Debug, Clone, Default)] pub struct LoadedContext { @@ -792,382 +38,26 @@ impl LoadedContext { } /// Loads and formats a collection of contexts. -pub fn load_context( - contexts: Vec, - project: &Entity, - prompt_store: &Option>, - cx: &mut App, -) -> Task { - let load_tasks: Vec<_> = contexts - .into_iter() - .map(|context| match context { - AgentContextHandle::File(context) => context.load(cx), - AgentContextHandle::Directory(context) => context.load(project.clone(), cx), - AgentContextHandle::Symbol(context) => context.load(cx), - AgentContextHandle::Selection(context) => context.load(cx), - AgentContextHandle::FetchedUrl(context) => context.load(), - AgentContextHandle::Thread(context) => context.load(cx), - AgentContextHandle::TextThread(context) => context.load(cx), - AgentContextHandle::Rules(context) => context.load(prompt_store, cx), - AgentContextHandle::Image(context) => context.load(cx), - }) - .collect(); - +pub fn load_context(mention_set: &Entity, cx: &mut App) -> Task> { + let task = mention_set.update(cx, |mention_set, cx| mention_set.contents(true, cx)); cx.background_spawn(async move { - let load_results = future::join_all(load_tasks).await; - - let mut text = String::new(); - - let mut file_context = Vec::new(); - let mut directory_context = Vec::new(); - let mut symbol_context = Vec::new(); - let mut selection_context = Vec::new(); - let mut fetched_url_context = Vec::new(); - let mut thread_context = Vec::new(); - let mut text_thread_context = Vec::new(); - let mut rules_context = Vec::new(); - let mut images = Vec::new(); - for context in load_results.into_iter().flatten() { - match context { - AgentContext::File(context) => file_context.push(context), - AgentContext::Directory(context) => directory_context.push(context), - AgentContext::Symbol(context) => symbol_context.push(context), - AgentContext::Selection(context) => selection_context.push(context), - AgentContext::FetchedUrl(context) => fetched_url_context.push(context), - AgentContext::Thread(context) => thread_context.push(context), - AgentContext::TextThread(context) => text_thread_context.push(context), - AgentContext::Rules(context) => rules_context.push(context), - AgentContext::Image(context) => images.extend(context.image()), - } - } - - // Use empty text if there are no contexts that contribute to text (everything but image - // context). 
- if file_context.is_empty() - && directory_context.is_empty() - && symbol_context.is_empty() - && selection_context.is_empty() - && fetched_url_context.is_empty() - && thread_context.is_empty() - && text_thread_context.is_empty() - && rules_context.is_empty() - { - return LoadedContext { text, images }; - } - - text.push_str( - "\n\n\ - The following items were attached by the user. \ - They are up-to-date and don't need to be re-read.\n\n", - ); - - if !file_context.is_empty() { - text.push_str(""); - for context in file_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !directory_context.is_empty() { - text.push_str(""); - for context in directory_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !symbol_context.is_empty() { - text.push_str(""); - for context in symbol_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !selection_context.is_empty() { - text.push_str(""); - for context in selection_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !fetched_url_context.is_empty() { - text.push_str(""); - for context in fetched_url_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !thread_context.is_empty() { - text.push_str(""); - for context in thread_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - if !text_thread_context.is_empty() { - text.push_str(""); - for context in text_thread_context { - text.push('\n'); - let _ = writeln!(text, "{context}"); - } - text.push_str(""); - } - - if !rules_context.is_empty() { - text.push_str( - "\n\ - The user has specified the following rules that should be applied:\n", - ); - for context in rules_context { - text.push('\n'); - let _ = write!(text, "{context}"); - } - text.push_str("\n"); - } - - text.push_str("\n"); - - LoadedContext { text, images } + let mentions = task.await.log_err()?; + let mut loaded_context = LoadedContext::default(); + loaded_context + .text + .push_str("The following items were attached by the user.\n"); + for (_, (_, mention)) in mentions { + match mention { + Mention::Text { content, .. 
} => { + loaded_context.text.push_str(&content); + } + Mention::Image(mention_image) => loaded_context.images.push(LanguageModelImage { + source: mention_image.data, + ..LanguageModelImage::empty() + }), + Mention::Link => {} + } + } + Some(loaded_context) }) } - -fn collect_files_in_path(worktree: &Worktree, path: &RelPath) -> Vec> { - let mut files = Vec::new(); - - for entry in worktree.child_entries(path) { - if entry.is_dir() { - files.extend(collect_files_in_path(worktree, &entry.path)); - } else if entry.is_file() { - files.push(entry.path.clone()); - } - } - - files -} - -fn codeblock_tag(full_path: &str, line_range: Option>) -> String { - let mut result = String::new(); - - if let Some(extension) = Path::new(full_path) - .extension() - .and_then(|ext| ext.to_str()) - { - let _ = write!(result, "{} ", extension); - } - - let _ = write!(result, "{}", full_path); - - if let Some(range) = line_range { - if range.start.row == range.end.row { - let _ = write!(result, ":{}", range.start.row + 1); - } else { - let _ = write!(result, ":{}-{}", range.start.row + 1, range.end.row + 1); - } - } - - result -} - -/// Wraps `AgentContext` to opt-in to `PartialEq` and `Hash` impls which use a subset of fields -/// needed for stable context identity. -#[derive(Debug, Clone, RefCast)] -#[repr(transparent)] -pub struct AgentContextKey(pub AgentContextHandle); - -impl AsRef for AgentContextKey { - fn as_ref(&self) -> &AgentContextHandle { - &self.0 - } -} - -impl Eq for AgentContextKey {} - -impl PartialEq for AgentContextKey { - fn eq(&self, other: &Self) -> bool { - match &self.0 { - AgentContextHandle::File(context) => { - if let AgentContextHandle::File(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Directory(context) => { - if let AgentContextHandle::Directory(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Symbol(context) => { - if let AgentContextHandle::Symbol(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Selection(context) => { - if let AgentContextHandle::Selection(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::FetchedUrl(context) => { - if let AgentContextHandle::FetchedUrl(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Thread(context) => { - if let AgentContextHandle::Thread(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Rules(context) => { - if let AgentContextHandle::Rules(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::Image(context) => { - if let AgentContextHandle::Image(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - AgentContextHandle::TextThread(context) => { - if let AgentContextHandle::TextThread(other_context) = &other.0 { - return context.eq_for_key(other_context); - } - } - } - false - } -} - -impl Hash for AgentContextKey { - fn hash(&self, state: &mut H) { - match &self.0 { - AgentContextHandle::File(context) => context.hash_for_key(state), - AgentContextHandle::Directory(context) => context.hash_for_key(state), - AgentContextHandle::Symbol(context) => context.hash_for_key(state), - AgentContextHandle::Selection(context) => context.hash_for_key(state), - AgentContextHandle::FetchedUrl(context) => context.hash_for_key(state), - AgentContextHandle::Thread(context) 
=> context.hash_for_key(state), - AgentContextHandle::TextThread(context) => context.hash_for_key(state), - AgentContextHandle::Rules(context) => context.hash_for_key(state), - AgentContextHandle::Image(context) => context.hash_for_key(state), - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use gpui::TestAppContext; - use project::{FakeFs, Project}; - use serde_json::json; - use settings::SettingsStore; - use util::path; - - fn init_test_settings(cx: &mut TestAppContext) { - cx.update(|cx| { - let settings_store = SettingsStore::test(cx); - cx.set_global(settings_store); - }); - } - - // Helper to create a test project with test files - async fn create_test_project( - cx: &mut TestAppContext, - files: serde_json::Value, - ) -> Entity { - let fs = FakeFs::new(cx.background_executor.clone()); - fs.insert_tree(path!("/test"), files).await; - Project::test(fs, [path!("/test").as_ref()], cx).await - } - - #[gpui::test] - async fn test_large_file_uses_fallback(cx: &mut TestAppContext) { - init_test_settings(cx); - - // Create a large file that exceeds AUTO_OUTLINE_SIZE - const LINE: &str = "Line with some text\n"; - let large_content = LINE.repeat(2 * (outline::AUTO_OUTLINE_SIZE / LINE.len())); - let content_len = large_content.len(); - - assert!(content_len > outline::AUTO_OUTLINE_SIZE); - - let file_context = load_context_for("file.txt", large_content, cx).await; - - // Should contain some of the actual file content - assert!( - file_context.text.contains(LINE), - "Should contain some of the file content" - ); - - // Should be much smaller than original - assert!( - file_context.text.len() < content_len / 10, - "Should be significantly smaller than original content" - ); - } - - #[gpui::test] - async fn test_small_file_uses_full_content(cx: &mut TestAppContext) { - init_test_settings(cx); - - let small_content = "This is a small file.\n"; - let content_len = small_content.len(); - - assert!(content_len < outline::AUTO_OUTLINE_SIZE); - - let file_context = load_context_for("file.txt", small_content.to_string(), cx).await; - - assert!( - !file_context - .text - .contains(&format!("# File outline for {}", path!("test/file.txt"))), - "Small files should not get an outline" - ); - - assert!( - file_context.text.contains(small_content), - "Small files should use full content" - ); - } - - async fn load_context_for( - filename: &str, - content: String, - cx: &mut TestAppContext, - ) -> LoadedContext { - // Create a test project with the file - let project = create_test_project( - cx, - json!({ - filename: content, - }), - ) - .await; - - // Open the buffer - let buffer_path = project - .read_with(cx, |project, cx| project.find_project_path(filename, cx)) - .unwrap(); - - let buffer = project - .update(cx, |project, cx| project.open_buffer(buffer_path, cx)) - .await - .unwrap(); - - let context_handle = AgentContextHandle::File(FileContextHandle { - buffer: buffer.clone(), - context_id: ContextId::zero(), - }); - - cx.update(|cx| load_context(vec![context_handle], &project, &None, cx)) - .await - } -} diff --git a/crates/agent_ui/src/context_picker.rs b/crates/agent_ui/src/context_picker.rs deleted file mode 100644 index 0a6e811673aa47339087e538003e87b1940d0039..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker.rs +++ /dev/null @@ -1,931 +0,0 @@ -mod completion_provider; -pub(crate) mod fetch_context_picker; -pub(crate) mod file_context_picker; -pub(crate) mod rules_context_picker; -pub(crate) mod symbol_context_picker; -pub(crate) mod thread_context_picker; - 
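The replacement `load_context` above now resolves every attachment through `MentionSet::contents` and hands back a `Task` of an optional `LoadedContext`. A minimal caller sketch, assuming the signature is `load_context(mention_set: &Entity<MentionSet>, cx: &mut App) -> Task<Option<LoadedContext>>` and that the surrounding editor owns a `mention_set` field plus a `pending_message` with `text`/`images` fields — these names are illustrative assumptions, not code from this patch:

```rust
// Hypothetical call site for the rewritten load_context (sketch only).
let task = load_context(&self.mention_set, cx);
cx.spawn(async move |this, cx| {
    // `None` means the mention contents could not be loaded; the underlying
    // error is already logged inside MentionSet::contents via log_err.
    let Some(loaded) = task.await else { return };
    this.update(cx, |this, cx| {
        // Fold the resolved context into the outgoing message.
        this.pending_message.text.push_str(&loaded.text);
        this.pending_message.images.extend(loaded.images);
        cx.notify();
    })
    .ok();
})
.detach();
```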
-use std::ops::Range; -use std::path::PathBuf; -use std::sync::Arc; - -use agent::{HistoryEntry, HistoryEntryId, HistoryStore}; -use agent_client_protocol as acp; -use anyhow::{Result, anyhow}; -use collections::HashSet; -pub use completion_provider::ContextPickerCompletionProvider; -use editor::display_map::{Crease, CreaseId, CreaseMetadata, FoldId}; -use editor::{Anchor, Editor, ExcerptId, FoldPlaceholder, ToOffset}; -use fetch_context_picker::FetchContextPicker; -use file_context_picker::FileContextPicker; -use file_context_picker::render_file_context_entry; -use gpui::{ - App, DismissEvent, Empty, Entity, EventEmitter, FocusHandle, Focusable, Subscription, Task, - WeakEntity, -}; -use language::Buffer; -use multi_buffer::MultiBufferRow; -use project::ProjectPath; -use prompt_store::PromptStore; -use rules_context_picker::{RulesContextEntry, RulesContextPicker}; -use symbol_context_picker::SymbolContextPicker; -use thread_context_picker::render_thread_context_entry; -use ui::{ - ButtonLike, ContextMenu, ContextMenuEntry, ContextMenuItem, Disclosure, TintColor, prelude::*, -}; -use util::paths::PathStyle; -use util::rel_path::RelPath; -use workspace::{Workspace, notifications::NotifyResultExt}; - -use crate::context_picker::thread_context_picker::ThreadContextPicker; -use crate::{context::RULES_ICON, context_store::ContextStore}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum ContextPickerEntry { - Mode(ContextPickerMode), - Action(ContextPickerAction), -} - -impl ContextPickerEntry { - pub fn keyword(&self) -> &'static str { - match self { - Self::Mode(mode) => mode.keyword(), - Self::Action(action) => action.keyword(), - } - } - - pub fn label(&self) -> &'static str { - match self { - Self::Mode(mode) => mode.label(), - Self::Action(action) => action.label(), - } - } - - pub fn icon(&self) -> IconName { - match self { - Self::Mode(mode) => mode.icon(), - Self::Action(action) => action.icon(), - } - } -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum ContextPickerMode { - File, - Symbol, - Fetch, - Thread, - Rules, -} - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub(crate) enum ContextPickerAction { - AddSelections, -} - -impl ContextPickerAction { - pub fn keyword(&self) -> &'static str { - match self { - Self::AddSelections => "selection", - } - } - - pub fn label(&self) -> &'static str { - match self { - Self::AddSelections => "Selection", - } - } - - pub fn icon(&self) -> IconName { - match self { - Self::AddSelections => IconName::Reader, - } - } -} - -impl TryFrom<&str> for ContextPickerMode { - type Error = String; - - fn try_from(value: &str) -> Result { - match value { - "file" => Ok(Self::File), - "symbol" => Ok(Self::Symbol), - "fetch" => Ok(Self::Fetch), - "thread" => Ok(Self::Thread), - "rule" => Ok(Self::Rules), - _ => Err(format!("Invalid context picker mode: {}", value)), - } - } -} - -impl ContextPickerMode { - pub fn keyword(&self) -> &'static str { - match self { - Self::File => "file", - Self::Symbol => "symbol", - Self::Fetch => "fetch", - Self::Thread => "thread", - Self::Rules => "rule", - } - } - - pub fn label(&self) -> &'static str { - match self { - Self::File => "Files & Directories", - Self::Symbol => "Symbols", - Self::Fetch => "Fetch", - Self::Thread => "Threads", - Self::Rules => "Rules", - } - } - - pub fn icon(&self) -> IconName { - match self { - Self::File => IconName::File, - Self::Symbol => IconName::Code, - Self::Fetch => IconName::ToolWeb, - Self::Thread => IconName::Thread, - Self::Rules => RULES_ICON, - } 
- } -} - -#[derive(Debug, Clone)] -enum ContextPickerState { - Default(Entity), - File(Entity), - Symbol(Entity), - Fetch(Entity), - Thread(Entity), - Rules(Entity), -} - -pub(super) struct ContextPicker { - mode: ContextPickerState, - workspace: WeakEntity, - context_store: WeakEntity, - thread_store: Option>, - prompt_store: Option>, - _subscriptions: Vec, -} - -impl ContextPicker { - pub fn new( - workspace: WeakEntity, - thread_store: Option>, - prompt_store: Option>, - context_store: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let subscriptions = context_store - .upgrade() - .map(|context_store| { - cx.observe(&context_store, |this, _, cx| this.notify_current_picker(cx)) - }) - .into_iter() - .chain( - thread_store - .as_ref() - .and_then(|thread_store| thread_store.upgrade()) - .map(|thread_store| { - cx.observe(&thread_store, |this, _, cx| this.notify_current_picker(cx)) - }), - ) - .collect::>(); - - ContextPicker { - mode: ContextPickerState::Default(ContextMenu::build( - window, - cx, - |menu, _window, _cx| menu, - )), - workspace, - context_store, - thread_store, - prompt_store, - _subscriptions: subscriptions, - } - } - - pub fn init(&mut self, window: &mut Window, cx: &mut Context) { - self.mode = ContextPickerState::Default(self.build_menu(window, cx)); - cx.notify(); - } - - fn build_menu(&mut self, window: &mut Window, cx: &mut Context) -> Entity { - let context_picker = cx.entity(); - - let menu = ContextMenu::build(window, cx, move |menu, _window, cx| { - let Some(workspace) = self.workspace.upgrade() else { - return menu; - }; - let path_style = workspace.read(cx).path_style(cx); - let recent = self.recent_entries(cx); - let has_recent = !recent.is_empty(); - let recent_entries = recent - .into_iter() - .enumerate() - .map(|(ix, entry)| { - self.recent_menu_item(context_picker.clone(), ix, entry, path_style) - }) - .collect::>(); - - let entries = self - .workspace - .upgrade() - .map(|workspace| { - available_context_picker_entries( - &self.prompt_store, - &self.thread_store, - &workspace, - cx, - ) - }) - .unwrap_or_default(); - - menu.when(has_recent, |menu| { - menu.custom_row(|_, _| { - div() - .mb_1() - .child( - Label::new("Recent") - .color(Color::Muted) - .size(LabelSize::Small), - ) - .into_any_element() - }) - }) - .extend(recent_entries) - .when(has_recent, |menu| menu.separator()) - .extend(entries.into_iter().map(|entry| { - let context_picker = context_picker.clone(); - - ContextMenuEntry::new(entry.label()) - .icon(entry.icon()) - .icon_size(IconSize::XSmall) - .icon_color(Color::Muted) - .handler(move |window, cx| { - context_picker.update(cx, |this, cx| this.select_entry(entry, window, cx)) - }) - })) - .keep_open_on_confirm(true) - }); - - cx.subscribe(&menu, move |_, _, _: &DismissEvent, cx| { - cx.emit(DismissEvent); - }) - .detach(); - - menu - } - - /// Whether threads are allowed as context. 
- pub fn allow_threads(&self) -> bool { - self.thread_store.is_some() - } - - fn select_entry( - &mut self, - entry: ContextPickerEntry, - window: &mut Window, - cx: &mut Context, - ) { - let context_picker = cx.entity().downgrade(); - - match entry { - ContextPickerEntry::Mode(mode) => match mode { - ContextPickerMode::File => { - self.mode = ContextPickerState::File(cx.new(|cx| { - FileContextPicker::new( - context_picker.clone(), - self.workspace.clone(), - self.context_store.clone(), - window, - cx, - ) - })); - } - ContextPickerMode::Symbol => { - self.mode = ContextPickerState::Symbol(cx.new(|cx| { - SymbolContextPicker::new( - context_picker.clone(), - self.workspace.clone(), - self.context_store.clone(), - window, - cx, - ) - })); - } - ContextPickerMode::Rules => { - if let Some(prompt_store) = self.prompt_store.as_ref() { - self.mode = ContextPickerState::Rules(cx.new(|cx| { - RulesContextPicker::new( - prompt_store.clone(), - context_picker.clone(), - self.context_store.clone(), - window, - cx, - ) - })); - } - } - ContextPickerMode::Fetch => { - self.mode = ContextPickerState::Fetch(cx.new(|cx| { - FetchContextPicker::new( - context_picker.clone(), - self.workspace.clone(), - self.context_store.clone(), - window, - cx, - ) - })); - } - ContextPickerMode::Thread => { - if let Some(thread_store) = self.thread_store.clone() { - self.mode = ContextPickerState::Thread(cx.new(|cx| { - ThreadContextPicker::new( - thread_store, - context_picker.clone(), - self.context_store.clone(), - self.workspace.clone(), - window, - cx, - ) - })); - } - } - }, - ContextPickerEntry::Action(action) => match action { - ContextPickerAction::AddSelections => { - if let Some((context_store, workspace)) = - self.context_store.upgrade().zip(self.workspace.upgrade()) - { - add_selections_as_context(&context_store, &workspace, cx); - } - - cx.emit(DismissEvent); - } - }, - } - - cx.notify(); - cx.focus_self(window); - } - - pub fn select_first(&mut self, window: &mut Window, cx: &mut Context) { - // Other variants already select their first entry on open automatically - if let ContextPickerState::Default(entity) = &self.mode { - entity.update(cx, |entity, cx| { - entity.select_first(&Default::default(), window, cx) - }) - } - } - - fn recent_menu_item( - &self, - context_picker: Entity, - ix: usize, - entry: RecentEntry, - path_style: PathStyle, - ) -> ContextMenuItem { - match entry { - RecentEntry::File { - project_path, - path_prefix, - } => { - let context_store = self.context_store.clone(); - let worktree_id = project_path.worktree_id; - let path = project_path.path.clone(); - - ContextMenuItem::custom_entry( - move |_window, cx| { - render_file_context_entry( - ElementId::named_usize("ctx-recent", ix), - worktree_id, - &path, - &path_prefix, - false, - path_style, - context_store.clone(), - cx, - ) - .into_any() - }, - move |window, cx| { - context_picker.update(cx, |this, cx| { - this.add_recent_file(project_path.clone(), window, cx); - }) - }, - None, - ) - } - RecentEntry::Thread(thread) => { - let context_store = self.context_store.clone(); - let view_thread = thread.clone(); - - ContextMenuItem::custom_entry( - move |_window, cx| { - render_thread_context_entry(&view_thread, context_store.clone(), cx) - .into_any() - }, - move |window, cx| { - context_picker.update(cx, |this, cx| { - this.add_recent_thread(thread.clone(), window, cx) - .detach_and_log_err(cx); - }) - }, - None, - ) - } - } - } - - fn add_recent_file( - &self, - project_path: ProjectPath, - window: &mut Window, - cx: &mut Context, 
- ) { - let Some(context_store) = self.context_store.upgrade() else { - return; - }; - - let task = context_store.update(cx, |context_store, cx| { - context_store.add_file_from_path(project_path.clone(), true, cx) - }); - - cx.spawn_in(window, async move |_, cx| task.await.notify_async_err(cx)) - .detach(); - - cx.notify(); - } - - fn add_recent_thread( - &self, - entry: HistoryEntry, - _window: &mut Window, - cx: &mut Context, - ) -> Task> { - let Some(context_store) = self.context_store.upgrade() else { - return Task::ready(Err(anyhow!("context store not available"))); - }; - let Some(project) = self - .workspace - .upgrade() - .map(|workspace| workspace.read(cx).project().clone()) - else { - return Task::ready(Err(anyhow!("project not available"))); - }; - - match entry { - HistoryEntry::AcpThread(thread) => { - let Some(thread_store) = self - .thread_store - .as_ref() - .and_then(|thread_store| thread_store.upgrade()) - else { - return Task::ready(Err(anyhow!("thread store not available"))); - }; - let load_thread_task = - agent::load_agent_thread(thread.id, thread_store, project, cx); - cx.spawn(async move |this, cx| { - let thread = load_thread_task.await?; - context_store.update(cx, |context_store, cx| { - context_store.add_thread(thread, true, cx); - })?; - this.update(cx, |_this, cx| cx.notify()) - }) - } - HistoryEntry::TextThread(thread) => { - let Some(thread_store) = self - .thread_store - .as_ref() - .and_then(|thread_store| thread_store.upgrade()) - else { - return Task::ready(Err(anyhow!("text thread store not available"))); - }; - - let task = thread_store.update(cx, |this, cx| { - this.load_text_thread(thread.path.clone(), cx) - }); - cx.spawn(async move |this, cx| { - let thread = task.await?; - context_store.update(cx, |context_store, cx| { - context_store.add_text_thread(thread, true, cx); - })?; - this.update(cx, |_this, cx| cx.notify()) - }) - } - } - } - - fn recent_entries(&self, cx: &mut App) -> Vec { - let Some(workspace) = self.workspace.upgrade() else { - return vec![]; - }; - - let Some(context_store) = self.context_store.upgrade() else { - return vec![]; - }; - - recent_context_picker_entries_with_store( - context_store, - self.thread_store.clone(), - workspace, - None, - cx, - ) - } - - fn notify_current_picker(&mut self, cx: &mut Context) { - match &self.mode { - ContextPickerState::Default(entity) => entity.update(cx, |_, cx| cx.notify()), - ContextPickerState::File(entity) => entity.update(cx, |_, cx| cx.notify()), - ContextPickerState::Symbol(entity) => entity.update(cx, |_, cx| cx.notify()), - ContextPickerState::Fetch(entity) => entity.update(cx, |_, cx| cx.notify()), - ContextPickerState::Thread(entity) => entity.update(cx, |_, cx| cx.notify()), - ContextPickerState::Rules(entity) => entity.update(cx, |_, cx| cx.notify()), - } - } -} - -impl EventEmitter for ContextPicker {} - -impl Focusable for ContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - match &self.mode { - ContextPickerState::Default(menu) => menu.focus_handle(cx), - ContextPickerState::File(file_picker) => file_picker.focus_handle(cx), - ContextPickerState::Symbol(symbol_picker) => symbol_picker.focus_handle(cx), - ContextPickerState::Fetch(fetch_picker) => fetch_picker.focus_handle(cx), - ContextPickerState::Thread(thread_picker) => thread_picker.focus_handle(cx), - ContextPickerState::Rules(user_rules_picker) => user_rules_picker.focus_handle(cx), - } - } -} - -impl Render for ContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl 
IntoElement { - v_flex() - .w(px(400.)) - .min_w(px(400.)) - .map(|parent| match &self.mode { - ContextPickerState::Default(menu) => parent.child(menu.clone()), - ContextPickerState::File(file_picker) => parent.child(file_picker.clone()), - ContextPickerState::Symbol(symbol_picker) => parent.child(symbol_picker.clone()), - ContextPickerState::Fetch(fetch_picker) => parent.child(fetch_picker.clone()), - ContextPickerState::Thread(thread_picker) => parent.child(thread_picker.clone()), - ContextPickerState::Rules(user_rules_picker) => { - parent.child(user_rules_picker.clone()) - } - }) - } -} - -pub(crate) enum RecentEntry { - File { - project_path: ProjectPath, - path_prefix: Arc, - }, - Thread(HistoryEntry), -} - -pub(crate) fn available_context_picker_entries( - prompt_store: &Option>, - thread_store: &Option>, - workspace: &Entity, - cx: &mut App, -) -> Vec { - let mut entries = vec![ - ContextPickerEntry::Mode(ContextPickerMode::File), - ContextPickerEntry::Mode(ContextPickerMode::Symbol), - ]; - - let has_selection = workspace - .read(cx) - .active_item(cx) - .and_then(|item| item.downcast::()) - .is_some_and(|editor| { - editor.update(cx, |editor, cx| { - editor.has_non_empty_selection(&editor.display_snapshot(cx)) - }) - }); - if has_selection { - entries.push(ContextPickerEntry::Action( - ContextPickerAction::AddSelections, - )); - } - - if thread_store.is_some() { - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Thread)); - } - - if prompt_store.is_some() { - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Rules)); - } - - entries.push(ContextPickerEntry::Mode(ContextPickerMode::Fetch)); - - entries -} - -fn recent_context_picker_entries_with_store( - context_store: Entity, - thread_store: Option>, - workspace: Entity, - exclude_path: Option, - cx: &App, -) -> Vec { - let project = workspace.read(cx).project(); - - let mut exclude_paths = context_store.read(cx).file_paths(cx); - exclude_paths.extend(exclude_path); - - let exclude_paths = exclude_paths - .into_iter() - .filter_map(|project_path| project.read(cx).absolute_path(&project_path, cx)) - .collect(); - - let exclude_threads = context_store.read(cx).thread_ids(); - - recent_context_picker_entries(thread_store, workspace, &exclude_paths, exclude_threads, cx) -} - -pub(crate) fn recent_context_picker_entries( - thread_store: Option>, - workspace: Entity, - exclude_paths: &HashSet, - exclude_threads: &HashSet, - cx: &App, -) -> Vec { - let mut recent = Vec::with_capacity(6); - let workspace = workspace.read(cx); - let project = workspace.project().read(cx); - let include_root_name = workspace.visible_worktrees(cx).count() > 1; - - recent.extend( - workspace - .recent_navigation_history_iter(cx) - .filter(|(_, abs_path)| { - abs_path - .as_ref() - .is_none_or(|path| !exclude_paths.contains(path.as_path())) - }) - .take(4) - .filter_map(|(project_path, _)| { - project - .worktree_for_id(project_path.worktree_id, cx) - .map(|worktree| { - let path_prefix = if include_root_name { - worktree.read(cx).root_name().into() - } else { - RelPath::empty().into() - }; - RecentEntry::File { - project_path, - path_prefix, - } - }) - }), - ); - - if let Some(thread_store) = thread_store.and_then(|store| store.upgrade()) { - const RECENT_THREADS_COUNT: usize = 2; - recent.extend( - thread_store - .read(cx) - .recently_opened_entries(cx) - .iter() - .filter(|e| match e.id() { - HistoryEntryId::AcpThread(session_id) => !exclude_threads.contains(&session_id), - HistoryEntryId::TextThread(path) => { - 
!exclude_paths.contains(&path.to_path_buf()) - } - }) - .take(RECENT_THREADS_COUNT) - .map(|thread| RecentEntry::Thread(thread.clone())), - ); - } - - recent -} - -fn add_selections_as_context( - context_store: &Entity, - workspace: &Entity, - cx: &mut App, -) { - let selection_ranges = selection_ranges(workspace, cx); - context_store.update(cx, |context_store, cx| { - for (buffer, range) in selection_ranges { - context_store.add_selection(buffer, range, cx); - } - }) -} - -pub(crate) fn selection_ranges( - workspace: &Entity, - cx: &mut App, -) -> Vec<(Entity, Range)> { - let Some(editor) = workspace - .read(cx) - .active_item(cx) - .and_then(|item| item.act_as::(cx)) - else { - return Vec::new(); - }; - - editor.update(cx, |editor, cx| { - let selections = editor.selections.all_adjusted(&editor.display_snapshot(cx)); - - let buffer = editor.buffer().clone().read(cx); - let snapshot = buffer.snapshot(cx); - - selections - .into_iter() - .map(|s| snapshot.anchor_after(s.start)..snapshot.anchor_before(s.end)) - .flat_map(|range| { - let (start_buffer, start) = buffer.text_anchor_for_position(range.start, cx)?; - let (end_buffer, end) = buffer.text_anchor_for_position(range.end, cx)?; - if start_buffer != end_buffer { - return None; - } - Some((start_buffer, start..end)) - }) - .collect::>() - }) -} - -pub(crate) fn insert_crease_for_mention( - excerpt_id: ExcerptId, - crease_start: text::Anchor, - content_len: usize, - crease_label: SharedString, - crease_icon_path: SharedString, - editor_entity: Entity, - window: &mut Window, - cx: &mut App, -) -> Option { - editor_entity.update(cx, |editor, cx| { - let snapshot = editor.buffer().read(cx).snapshot(cx); - - let start = snapshot.anchor_in_excerpt(excerpt_id, crease_start)?; - - let start = start.bias_right(&snapshot); - let end = snapshot.anchor_before(start.to_offset(&snapshot) + content_len); - - let crease = crease_for_mention( - crease_label, - crease_icon_path, - start..end, - editor_entity.downgrade(), - ); - - let ids = editor.insert_creases(vec![crease.clone()], cx); - editor.fold_creases(vec![crease], false, window, cx); - - Some(ids[0]) - }) -} - -pub fn crease_for_mention( - label: SharedString, - icon_path: SharedString, - range: Range, - editor_entity: WeakEntity, -) -> Crease { - let placeholder = FoldPlaceholder { - render: render_fold_icon_button(icon_path.clone(), label.clone(), editor_entity), - merge_adjacent: false, - ..Default::default() - }; - - let render_trailer = move |_row, _unfold, _window: &mut Window, _cx: &mut App| Empty.into_any(); - - Crease::inline(range, placeholder, fold_toggle("mention"), render_trailer) - .with_metadata(CreaseMetadata { icon_path, label }) -} - -fn render_fold_icon_button( - icon_path: SharedString, - label: SharedString, - editor: WeakEntity, -) -> Arc, &mut App) -> AnyElement> { - Arc::new({ - move |fold_id, fold_range, cx| { - let is_in_text_selection = editor - .update(cx, |editor, cx| editor.is_range_selected(&fold_range, cx)) - .unwrap_or_default(); - - ButtonLike::new(fold_id) - .style(ButtonStyle::Filled) - .selected_style(ButtonStyle::Tinted(TintColor::Accent)) - .toggle_state(is_in_text_selection) - .child( - h_flex() - .gap_1() - .child( - Icon::from_path(icon_path.clone()) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child( - Label::new(label.clone()) - .size(LabelSize::Small) - .buffer_font(cx) - .single_line(), - ), - ) - .into_any_element() - } - }) -} - -fn fold_toggle( - name: &'static str, -) -> impl Fn( - MultiBufferRow, - bool, - Arc, - &mut Window, - &mut 
App, -) -> AnyElement { - move |row, is_folded, fold, _window, _cx| { - Disclosure::new((name, row.0 as u64), !is_folded) - .toggle_state(is_folded) - .on_click(move |_e, window, cx| fold(!is_folded, window, cx)) - .into_any_element() - } -} - -pub struct MentionLink; - -impl MentionLink { - const FILE: &str = "@file"; - const SYMBOL: &str = "@symbol"; - const SELECTION: &str = "@selection"; - const THREAD: &str = "@thread"; - const FETCH: &str = "@fetch"; - const RULE: &str = "@rule"; - - const TEXT_THREAD_URL_PREFIX: &str = "text-thread://"; - - pub fn for_file(file_name: &str, full_path: &str) -> String { - format!("[@{}]({}:{})", file_name, Self::FILE, full_path) - } - - pub fn for_symbol(symbol_name: &str, full_path: &str) -> String { - format!( - "[@{}]({}:{}:{})", - symbol_name, - Self::SYMBOL, - full_path, - symbol_name - ) - } - - pub fn for_selection(file_name: &str, full_path: &str, line_range: Range) -> String { - format!( - "[@{} ({}-{})]({}:{}:{}-{})", - file_name, - line_range.start + 1, - line_range.end + 1, - Self::SELECTION, - full_path, - line_range.start, - line_range.end - ) - } - - pub fn for_thread(thread: &HistoryEntry) -> String { - match thread { - HistoryEntry::AcpThread(thread) => { - format!("[@{}]({}:{})", thread.title, Self::THREAD, thread.id) - } - HistoryEntry::TextThread(thread) => { - let filename = thread - .path - .file_name() - .unwrap_or_default() - .to_string_lossy(); - let escaped_filename = urlencoding::encode(&filename); - format!( - "[@{}]({}:{}{})", - thread.title, - Self::THREAD, - Self::TEXT_THREAD_URL_PREFIX, - escaped_filename - ) - } - } - } - - pub fn for_fetch(url: &str) -> String { - format!("[@{}]({}:{})", url, Self::FETCH, url) - } - - pub fn for_rule(rule: &RulesContextEntry) -> String { - format!("[@{}]({}:{})", rule.title, Self::RULE, rule.prompt_id.0) - } -} diff --git a/crates/agent_ui/src/context_picker/fetch_context_picker.rs b/crates/agent_ui/src/context_picker/fetch_context_picker.rs deleted file mode 100644 index 31fc45aca3ccbf561793769939169d214aaa2d99..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/fetch_context_picker.rs +++ /dev/null @@ -1,252 +0,0 @@ -use std::cell::RefCell; -use std::rc::Rc; -use std::sync::Arc; - -use anyhow::{Context as _, Result, bail}; -use futures::AsyncReadExt as _; -use gpui::{App, DismissEvent, Entity, FocusHandle, Focusable, Task, WeakEntity}; -use html_to_markdown::{TagHandler, convert_html_to_markdown, markdown}; -use http_client::{AsyncBody, HttpClientWithUrl}; -use picker::{Picker, PickerDelegate}; -use ui::{Context, ListItem, Window, prelude::*}; -use workspace::Workspace; - -use crate::{context_picker::ContextPicker, context_store::ContextStore}; - -pub struct FetchContextPicker { - picker: Entity>, -} - -impl FetchContextPicker { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let delegate = FetchContextPickerDelegate::new(context_picker, workspace, context_store); - let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx)); - - Self { picker } - } -} - -impl Focusable for FetchContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - self.picker.focus_handle(cx) - } -} - -impl Render for FetchContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.picker.clone() - } -} - -#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)] -enum ContentType { - Html, - 
Plaintext, - Json, -} - -pub struct FetchContextPickerDelegate { - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - url: String, -} - -impl FetchContextPickerDelegate { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - ) -> Self { - FetchContextPickerDelegate { - context_picker, - workspace, - context_store, - url: String::new(), - } - } -} - -pub(crate) async fn fetch_url_content( - http_client: Arc, - url: String, -) -> Result { - let url = if !url.starts_with("https://") && !url.starts_with("http://") { - format!("https://{url}") - } else { - url - }; - - let mut response = http_client.get(&url, AsyncBody::default(), true).await?; - - let mut body = Vec::new(); - response - .body_mut() - .read_to_end(&mut body) - .await - .context("error reading response body")?; - - if response.status().is_client_error() { - let text = String::from_utf8_lossy(body.as_slice()); - bail!( - "status error {}, response: {text:?}", - response.status().as_u16() - ); - } - - let Some(content_type) = response.headers().get("content-type") else { - bail!("missing Content-Type header"); - }; - let content_type = content_type - .to_str() - .context("invalid Content-Type header")?; - let content_type = match content_type { - "text/html" => ContentType::Html, - "text/plain" => ContentType::Plaintext, - "application/json" => ContentType::Json, - _ => ContentType::Html, - }; - - match content_type { - ContentType::Html => { - let mut handlers: Vec = vec![ - Rc::new(RefCell::new(markdown::WebpageChromeRemover)), - Rc::new(RefCell::new(markdown::ParagraphHandler)), - Rc::new(RefCell::new(markdown::HeadingHandler)), - Rc::new(RefCell::new(markdown::ListHandler)), - Rc::new(RefCell::new(markdown::TableHandler::new())), - Rc::new(RefCell::new(markdown::StyledTextHandler)), - ]; - if url.contains("wikipedia.org") { - use html_to_markdown::structure::wikipedia; - - handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaChromeRemover))); - handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaInfoboxHandler))); - handlers.push(Rc::new( - RefCell::new(wikipedia::WikipediaCodeHandler::new()), - )); - } else { - handlers.push(Rc::new(RefCell::new(markdown::CodeHandler))); - } - - convert_html_to_markdown(&body[..], &mut handlers) - } - ContentType::Plaintext => Ok(std::str::from_utf8(&body)?.to_owned()), - ContentType::Json => { - let json: serde_json::Value = serde_json::from_slice(&body)?; - - Ok(format!( - "```json\n{}\n```", - serde_json::to_string_pretty(&json)? 
- )) - } - } -} - -impl PickerDelegate for FetchContextPickerDelegate { - type ListItem = ListItem; - - fn match_count(&self) -> usize { - if self.url.is_empty() { 0 } else { 1 } - } - - fn no_matches_text(&self, _window: &mut Window, _cx: &mut App) -> Option { - Some("Enter the URL that you would like to fetch".into()) - } - - fn selected_index(&self) -> usize { - 0 - } - - fn set_selected_index( - &mut self, - _ix: usize, - _window: &mut Window, - _cx: &mut Context>, - ) { - } - - fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - "Enter a URL…".into() - } - - fn update_matches( - &mut self, - query: String, - _window: &mut Window, - _cx: &mut Context>, - ) -> Task<()> { - self.url = query; - - Task::ready(()) - } - - fn confirm(&mut self, _secondary: bool, window: &mut Window, cx: &mut Context>) { - let Some(workspace) = self.workspace.upgrade() else { - return; - }; - - let http_client = workspace.read(cx).client().http_client(); - let url = self.url.clone(); - cx.spawn_in(window, async move |this, cx| { - let text = cx - .background_spawn(fetch_url_content(http_client, url.clone())) - .await?; - - this.update(cx, |this, cx| { - this.delegate.context_store.update(cx, |context_store, cx| { - context_store.add_fetched_url(url, text, cx) - }) - })??; - - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - } - - fn dismissed(&mut self, _window: &mut Window, cx: &mut Context>) { - self.context_picker - .update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - } - - fn render_match( - &self, - ix: usize, - selected: bool, - _window: &mut Window, - cx: &mut Context>, - ) -> Option { - let added = self - .context_store - .upgrade() - .is_some_and(|context_store| context_store.read(cx).includes_url(&self.url)); - - Some( - ListItem::new(ix) - .inset(true) - .toggle_state(selected) - .child(Label::new(self.url.clone())) - .when(added, |child| { - child.disabled(true).end_slot( - h_flex() - .gap_1() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Added").size(LabelSize::Small)), - ) - }), - ) - } -} diff --git a/crates/agent_ui/src/context_picker/file_context_picker.rs b/crates/agent_ui/src/context_picker/file_context_picker.rs deleted file mode 100644 index ded24caa922d27d8821e46e5c58b5ed22ab754ff..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/file_context_picker.rs +++ /dev/null @@ -1,392 +0,0 @@ -use std::sync::Arc; -use std::sync::atomic::AtomicBool; - -use file_icons::FileIcons; -use fuzzy::PathMatch; -use gpui::{ - App, AppContext, DismissEvent, Entity, FocusHandle, Focusable, Stateful, Task, WeakEntity, -}; -use picker::{Picker, PickerDelegate}; -use project::{PathMatchCandidateSet, ProjectPath, WorktreeId}; -use ui::{ListItem, Tooltip, prelude::*}; -use util::{ResultExt as _, paths::PathStyle, rel_path::RelPath}; -use workspace::Workspace; - -use crate::{ - context_picker::ContextPicker, - context_store::{ContextStore, FileInclusion}, -}; - -pub struct FileContextPicker { - picker: Entity>, -} - -impl FileContextPicker { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let delegate = FileContextPickerDelegate::new(context_picker, workspace, context_store); - let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx)); - - Self { picker } - } -} - -impl Focusable for FileContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - 
self.picker.focus_handle(cx) - } -} - -impl Render for FileContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.picker.clone() - } -} - -pub struct FileContextPickerDelegate { - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - matches: Vec, - selected_index: usize, -} - -impl FileContextPickerDelegate { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - ) -> Self { - Self { - context_picker, - workspace, - context_store, - matches: Vec::new(), - selected_index: 0, - } - } -} - -impl PickerDelegate for FileContextPickerDelegate { - type ListItem = ListItem; - - fn match_count(&self) -> usize { - self.matches.len() - } - - fn selected_index(&self) -> usize { - self.selected_index - } - - fn set_selected_index( - &mut self, - ix: usize, - _window: &mut Window, - _cx: &mut Context>, - ) { - self.selected_index = ix; - } - - fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - "Search files & directories…".into() - } - - fn update_matches( - &mut self, - query: String, - window: &mut Window, - cx: &mut Context>, - ) -> Task<()> { - let Some(workspace) = self.workspace.upgrade() else { - return Task::ready(()); - }; - - let search_task = search_files(query, Arc::::default(), &workspace, cx); - - cx.spawn_in(window, async move |this, cx| { - // TODO: This should be probably be run in the background. - let paths = search_task.await; - - this.update(cx, |this, _cx| { - this.delegate.matches = paths; - }) - .log_err(); - }) - } - - fn confirm(&mut self, _secondary: bool, _window: &mut Window, cx: &mut Context>) { - let Some(FileMatch { mat, .. }) = self.matches.get(self.selected_index) else { - return; - }; - - let project_path = ProjectPath { - worktree_id: WorktreeId::from_usize(mat.worktree_id), - path: mat.path.clone(), - }; - - let is_directory = mat.is_dir; - - self.context_store - .update(cx, |context_store, cx| { - if is_directory { - context_store - .add_directory(&project_path, true, cx) - .log_err(); - } else { - context_store - .add_file_from_path(project_path.clone(), true, cx) - .detach_and_log_err(cx); - } - }) - .ok(); - } - - fn dismissed(&mut self, _: &mut Window, cx: &mut Context>) { - self.context_picker - .update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - } - - fn render_match( - &self, - ix: usize, - selected: bool, - _window: &mut Window, - cx: &mut Context>, - ) -> Option { - let FileMatch { mat, .. 
} = &self.matches.get(ix)?; - let workspace = self.workspace.upgrade()?; - let path_style = workspace.read(cx).path_style(cx); - - Some( - ListItem::new(ix) - .inset(true) - .toggle_state(selected) - .child(render_file_context_entry( - ElementId::named_usize("file-ctx-picker", ix), - WorktreeId::from_usize(mat.worktree_id), - &mat.path, - &mat.path_prefix, - mat.is_dir, - path_style, - self.context_store.clone(), - cx, - )), - ) - } -} - -pub struct FileMatch { - pub mat: PathMatch, - pub is_recent: bool, -} - -pub(crate) fn search_files( - query: String, - cancellation_flag: Arc, - workspace: &Entity, - cx: &App, -) -> Task> { - if query.is_empty() { - let workspace = workspace.read(cx); - let project = workspace.project().read(cx); - let visible_worktrees = workspace.visible_worktrees(cx).collect::>(); - let include_root_name = visible_worktrees.len() > 1; - - let recent_matches = workspace - .recent_navigation_history(Some(10), cx) - .into_iter() - .map(|(project_path, _)| { - let path_prefix = if include_root_name { - project - .worktree_for_id(project_path.worktree_id, cx) - .map(|wt| wt.read(cx).root_name().into()) - .unwrap_or_else(|| RelPath::empty().into()) - } else { - RelPath::empty().into() - }; - - FileMatch { - mat: PathMatch { - score: 0., - positions: Vec::new(), - worktree_id: project_path.worktree_id.to_usize(), - path: project_path.path, - path_prefix, - distance_to_relative_ancestor: 0, - is_dir: false, - }, - is_recent: true, - } - }); - - let file_matches = visible_worktrees.into_iter().flat_map(|worktree| { - let worktree = worktree.read(cx); - let path_prefix: Arc = if include_root_name { - worktree.root_name().into() - } else { - RelPath::empty().into() - }; - worktree.entries(false, 0).map(move |entry| FileMatch { - mat: PathMatch { - score: 0., - positions: Vec::new(), - worktree_id: worktree.id().to_usize(), - path: entry.path.clone(), - path_prefix: path_prefix.clone(), - distance_to_relative_ancestor: 0, - is_dir: entry.is_dir(), - }, - is_recent: false, - }) - }); - - Task::ready(recent_matches.chain(file_matches).collect()) - } else { - let worktrees = workspace.read(cx).visible_worktrees(cx).collect::>(); - let include_root_name = worktrees.len() > 1; - let candidate_sets = worktrees - .into_iter() - .map(|worktree| { - let worktree = worktree.read(cx); - - PathMatchCandidateSet { - snapshot: worktree.snapshot(), - include_ignored: worktree.root_entry().is_some_and(|entry| entry.is_ignored), - include_root_name, - candidates: project::Candidates::Entries, - } - }) - .collect::>(); - - let executor = cx.background_executor().clone(); - cx.foreground_executor().spawn(async move { - fuzzy::match_path_sets( - candidate_sets.as_slice(), - query.as_str(), - &None, - false, - 100, - &cancellation_flag, - executor, - ) - .await - .into_iter() - .map(|mat| FileMatch { - mat, - is_recent: false, - }) - .collect::>() - }) - } -} - -pub fn extract_file_name_and_directory( - path: &RelPath, - path_prefix: &RelPath, - path_style: PathStyle, -) -> (SharedString, Option) { - // If path is empty, this means we're matching with the root directory itself - // so we use the path_prefix as the name - if path.is_empty() && !path_prefix.is_empty() { - return (path_prefix.display(path_style).to_string().into(), None); - } - - let full_path = path_prefix.join(path); - let file_name = full_path.file_name().unwrap_or_default(); - let display_path = full_path.display(path_style); - let (directory, file_name) = display_path.split_at(display_path.len() - file_name.len()); - ( - 
file_name.to_string().into(),
-        Some(SharedString::new(directory)).filter(|dir| !dir.is_empty()),
-    )
-}
-
-pub fn render_file_context_entry(
-    id: ElementId,
-    worktree_id: WorktreeId,
-    path: &Arc<RelPath>,
-    path_prefix: &Arc<RelPath>,
-    is_directory: bool,
-    path_style: PathStyle,
-    context_store: WeakEntity<ContextStore>,
-    cx: &App,
-) -> Stateful<Div>
{ - let (file_name, directory) = extract_file_name_and_directory(path, path_prefix, path_style); - - let added = context_store.upgrade().and_then(|context_store| { - let project_path = ProjectPath { - worktree_id, - path: path.clone(), - }; - if is_directory { - context_store - .read(cx) - .path_included_in_directory(&project_path, cx) - } else { - context_store.read(cx).file_path_included(&project_path, cx) - } - }); - - let file_icon = if is_directory { - FileIcons::get_folder_icon(false, path.as_std_path(), cx) - } else { - FileIcons::get_icon(path.as_std_path(), cx) - } - .map(Icon::from_path) - .unwrap_or_else(|| Icon::new(IconName::File)); - - h_flex() - .id(id) - .gap_1p5() - .w_full() - .child(file_icon.size(IconSize::Small).color(Color::Muted)) - .child( - h_flex() - .gap_1() - .child(Label::new(file_name)) - .children(directory.map(|directory| { - Label::new(directory) - .size(LabelSize::Small) - .color(Color::Muted) - })), - ) - .when_some(added, |el, added| match added { - FileInclusion::Direct => el.child( - h_flex() - .w_full() - .justify_end() - .gap_0p5() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Added").size(LabelSize::Small)), - ), - FileInclusion::InDirectory { full_path } => { - let directory_full_path = full_path.to_string_lossy().into_owned(); - - el.child( - h_flex() - .w_full() - .justify_end() - .gap_0p5() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Included").size(LabelSize::Small)), - ) - .tooltip(Tooltip::text(format!("in {directory_full_path}"))) - } - }) -} diff --git a/crates/agent_ui/src/context_picker/rules_context_picker.rs b/crates/agent_ui/src/context_picker/rules_context_picker.rs deleted file mode 100644 index 68f4917a4fd5689aab1a418dd78d2c8a322cd717..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/rules_context_picker.rs +++ /dev/null @@ -1,224 +0,0 @@ -use std::sync::Arc; -use std::sync::atomic::AtomicBool; - -use gpui::{App, DismissEvent, Entity, FocusHandle, Focusable, Task, WeakEntity}; -use picker::{Picker, PickerDelegate}; -use prompt_store::{PromptId, PromptStore, UserPromptId}; -use ui::{ListItem, prelude::*}; -use util::ResultExt as _; - -use crate::{ - context::RULES_ICON, - context_picker::ContextPicker, - context_store::{self, ContextStore}, -}; - -pub struct RulesContextPicker { - picker: Entity>, -} - -impl RulesContextPicker { - pub fn new( - prompt_store: WeakEntity, - context_picker: WeakEntity, - context_store: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let delegate = RulesContextPickerDelegate::new(prompt_store, context_picker, context_store); - let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx)); - - RulesContextPicker { picker } - } -} - -impl Focusable for RulesContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - self.picker.focus_handle(cx) - } -} - -impl Render for RulesContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.picker.clone() - } -} - -#[derive(Debug, Clone)] -pub struct RulesContextEntry { - pub prompt_id: UserPromptId, - pub title: SharedString, -} - -pub struct RulesContextPickerDelegate { - prompt_store: WeakEntity, - context_picker: WeakEntity, - context_store: WeakEntity, - matches: Vec, - selected_index: usize, -} - -impl RulesContextPickerDelegate { - pub fn new( - prompt_store: WeakEntity, - context_picker: 
WeakEntity, - context_store: WeakEntity, - ) -> Self { - RulesContextPickerDelegate { - prompt_store, - context_picker, - context_store, - matches: Vec::new(), - selected_index: 0, - } - } -} - -impl PickerDelegate for RulesContextPickerDelegate { - type ListItem = ListItem; - - fn match_count(&self) -> usize { - self.matches.len() - } - - fn selected_index(&self) -> usize { - self.selected_index - } - - fn set_selected_index( - &mut self, - ix: usize, - _window: &mut Window, - _cx: &mut Context>, - ) { - self.selected_index = ix; - } - - fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - "Search available rules…".into() - } - - fn update_matches( - &mut self, - query: String, - window: &mut Window, - cx: &mut Context>, - ) -> Task<()> { - let Some(prompt_store) = self.prompt_store.upgrade() else { - return Task::ready(()); - }; - let search_task = search_rules(query, Arc::new(AtomicBool::default()), &prompt_store, cx); - cx.spawn_in(window, async move |this, cx| { - let matches = search_task.await; - this.update(cx, |this, cx| { - this.delegate.matches = matches; - this.delegate.selected_index = 0; - cx.notify(); - }) - .ok(); - }) - } - - fn confirm(&mut self, _secondary: bool, _window: &mut Window, cx: &mut Context>) { - let Some(entry) = self.matches.get(self.selected_index) else { - return; - }; - - self.context_store - .update(cx, |context_store, cx| { - context_store.add_rules(entry.prompt_id, true, cx) - }) - .log_err(); - } - - fn dismissed(&mut self, _window: &mut Window, cx: &mut Context>) { - self.context_picker - .update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - } - - fn render_match( - &self, - ix: usize, - selected: bool, - _window: &mut Window, - cx: &mut Context>, - ) -> Option { - let thread = &self.matches.get(ix)?; - - Some(ListItem::new(ix).inset(true).toggle_state(selected).child( - render_thread_context_entry(thread, self.context_store.clone(), cx), - )) - } -} - -pub fn render_thread_context_entry( - user_rules: &RulesContextEntry, - context_store: WeakEntity, - cx: &mut App, -) -> Div { - let added = context_store.upgrade().is_some_and(|context_store| { - context_store - .read(cx) - .includes_user_rules(user_rules.prompt_id) - }); - - h_flex() - .gap_1p5() - .w_full() - .justify_between() - .child( - h_flex() - .gap_1p5() - .max_w_72() - .child( - Icon::new(RULES_ICON) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child(Label::new(user_rules.title.clone()).truncate()), - ) - .when(added, |el| { - el.child( - h_flex() - .gap_1() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Added").size(LabelSize::Small)), - ) - }) -} - -pub(crate) fn search_rules( - query: String, - cancellation_flag: Arc, - prompt_store: &Entity, - cx: &mut App, -) -> Task> { - let search_task = prompt_store.read(cx).search(query, cancellation_flag, cx); - cx.background_spawn(async move { - search_task - .await - .into_iter() - .flat_map(|metadata| { - // Default prompts are filtered out as they are automatically included. 
- if metadata.default { - None - } else { - match metadata.id { - PromptId::EditWorkflow => None, - PromptId::User { uuid } => Some(RulesContextEntry { - prompt_id: uuid, - title: metadata.title?, - }), - } - } - }) - .collect::>() - }) -} diff --git a/crates/agent_ui/src/context_picker/symbol_context_picker.rs b/crates/agent_ui/src/context_picker/symbol_context_picker.rs deleted file mode 100644 index fbce71d94efd84b1acc6e0b5d4ea11cb2b9243d5..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/symbol_context_picker.rs +++ /dev/null @@ -1,415 +0,0 @@ -use std::cmp::Reverse; -use std::sync::Arc; -use std::sync::atomic::AtomicBool; - -use anyhow::{Result, anyhow}; -use fuzzy::{StringMatch, StringMatchCandidate}; -use gpui::{ - App, AppContext, DismissEvent, Entity, FocusHandle, Focusable, Stateful, Task, WeakEntity, -}; -use ordered_float::OrderedFloat; -use picker::{Picker, PickerDelegate}; -use project::lsp_store::SymbolLocation; -use project::{DocumentSymbol, Symbol}; -use ui::{ListItem, prelude::*}; -use util::ResultExt as _; -use workspace::Workspace; - -use crate::{ - context::AgentContextHandle, context_picker::ContextPicker, context_store::ContextStore, -}; - -pub struct SymbolContextPicker { - picker: Entity>, -} - -impl SymbolContextPicker { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let delegate = SymbolContextPickerDelegate::new(context_picker, workspace, context_store); - let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx)); - - Self { picker } - } -} - -impl Focusable for SymbolContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - self.picker.focus_handle(cx) - } -} - -impl Render for SymbolContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.picker.clone() - } -} - -pub struct SymbolContextPickerDelegate { - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - matches: Vec, - selected_index: usize, -} - -impl SymbolContextPickerDelegate { - pub fn new( - context_picker: WeakEntity, - workspace: WeakEntity, - context_store: WeakEntity, - ) -> Self { - Self { - context_picker, - workspace, - context_store, - matches: Vec::new(), - selected_index: 0, - } - } -} - -impl PickerDelegate for SymbolContextPickerDelegate { - type ListItem = ListItem; - - fn match_count(&self) -> usize { - self.matches.len() - } - - fn selected_index(&self) -> usize { - self.selected_index - } - - fn set_selected_index( - &mut self, - ix: usize, - _window: &mut Window, - _cx: &mut Context>, - ) { - self.selected_index = ix; - } - - fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - "Search symbols…".into() - } - - fn update_matches( - &mut self, - query: String, - window: &mut Window, - cx: &mut Context>, - ) -> Task<()> { - let Some(workspace) = self.workspace.upgrade() else { - return Task::ready(()); - }; - - let search_task = search_symbols(query, Arc::::default(), &workspace, cx); - let context_store = self.context_store.clone(); - cx.spawn_in(window, async move |this, cx| { - let symbols = search_task.await; - - let symbol_entries = context_store - .read_with(cx, |context_store, cx| { - compute_symbol_entries(symbols, context_store, cx) - }) - .log_err() - .unwrap_or_default(); - - this.update(cx, |this, _cx| { - this.delegate.matches = symbol_entries; - }) - .log_err(); - }) - } - - fn confirm(&mut self, 
_secondary: bool, _window: &mut Window, cx: &mut Context>) { - let Some(mat) = self.matches.get(self.selected_index) else { - return; - }; - let Some(workspace) = self.workspace.upgrade() else { - return; - }; - - let add_symbol_task = add_symbol( - mat.symbol.clone(), - true, - workspace, - self.context_store.clone(), - cx, - ); - - let selected_index = self.selected_index; - cx.spawn(async move |this, cx| { - let (_, included) = add_symbol_task.await?; - this.update(cx, |this, _| { - if let Some(mat) = this.delegate.matches.get_mut(selected_index) { - mat.is_included = included; - } - }) - }) - .detach_and_log_err(cx); - } - - fn dismissed(&mut self, _: &mut Window, cx: &mut Context>) { - self.context_picker - .update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - } - - fn render_match( - &self, - ix: usize, - selected: bool, - _window: &mut Window, - _: &mut Context>, - ) -> Option { - let mat = &self.matches.get(ix)?; - - Some(ListItem::new(ix).inset(true).toggle_state(selected).child( - render_symbol_context_entry(ElementId::named_usize("symbol-ctx-picker", ix), mat), - )) - } -} - -pub(crate) struct SymbolEntry { - pub symbol: Symbol, - pub is_included: bool, -} - -pub(crate) fn add_symbol( - symbol: Symbol, - remove_if_exists: bool, - workspace: Entity, - context_store: WeakEntity, - cx: &mut App, -) -> Task, bool)>> { - let project = workspace.read(cx).project().clone(); - let open_buffer_task = project.update(cx, |project, cx| { - let SymbolLocation::InProject(symbol_path) = &symbol.path else { - return Task::ready(Err(anyhow!("can't add symbol from outside of project"))); - }; - project.open_buffer(symbol_path.clone(), cx) - }); - cx.spawn(async move |cx| { - let buffer = open_buffer_task.await?; - let document_symbols = project - .update(cx, |project, cx| project.document_symbols(&buffer, cx))? - .await?; - - // Try to find a matching document symbol. Document symbols include - // not only the symbol itself (e.g. function name), but they also - // include the context that they contain (e.g. function body). - let (name, range, enclosing_range) = if let Some(DocumentSymbol { - name, - range, - selection_range, - .. 
- }) = - find_matching_symbol(&symbol, document_symbols.as_slice()) - { - (name, selection_range, range) - } else { - // If we do not find a matching document symbol, fall back to - // just the symbol itself - (symbol.name, symbol.range.clone(), symbol.range) - }; - - let (range, enclosing_range) = buffer.read_with(cx, |buffer, _| { - ( - buffer.anchor_after(range.start)..buffer.anchor_before(range.end), - buffer.anchor_after(enclosing_range.start) - ..buffer.anchor_before(enclosing_range.end), - ) - })?; - - context_store.update(cx, move |context_store, cx| { - context_store.add_symbol( - buffer, - name.into(), - range, - enclosing_range, - remove_if_exists, - cx, - ) - }) - }) -} - -fn find_matching_symbol(symbol: &Symbol, candidates: &[DocumentSymbol]) -> Option { - let mut candidates = candidates.iter(); - let mut candidate = candidates.next()?; - - loop { - if candidate.range.start > symbol.range.end { - return None; - } - if candidate.range.end < symbol.range.start { - candidate = candidates.next()?; - continue; - } - if candidate.selection_range == symbol.range { - return Some(candidate.clone()); - } - if candidate.range.start <= symbol.range.start && symbol.range.end <= candidate.range.end { - candidates = candidate.children.iter(); - candidate = candidates.next()?; - continue; - } - return None; - } -} - -pub struct SymbolMatch { - pub symbol: Symbol, -} - -pub(crate) fn search_symbols( - query: String, - cancellation_flag: Arc, - workspace: &Entity, - cx: &mut App, -) -> Task> { - let symbols_task = workspace.update(cx, |workspace, cx| { - workspace - .project() - .update(cx, |project, cx| project.symbols(&query, cx)) - }); - let project = workspace.read(cx).project().clone(); - cx.spawn(async move |cx| { - let Some(symbols) = symbols_task.await.log_err() else { - return Vec::new(); - }; - let Some((visible_match_candidates, external_match_candidates)): Option<(Vec<_>, Vec<_>)> = - project - .update(cx, |project, cx| { - symbols - .iter() - .enumerate() - .map(|(id, symbol)| { - StringMatchCandidate::new(id, symbol.label.filter_text()) - }) - .partition(|candidate| match &symbols[candidate.id].path { - SymbolLocation::InProject(project_path) => project - .entry_for_path(project_path, cx) - .is_some_and(|e| !e.is_ignored), - SymbolLocation::OutsideProject { .. 
} => false, - }) - }) - .log_err() - else { - return Vec::new(); - }; - - const MAX_MATCHES: usize = 100; - let mut visible_matches = cx.background_executor().block(fuzzy::match_strings( - &visible_match_candidates, - &query, - false, - true, - MAX_MATCHES, - &cancellation_flag, - cx.background_executor().clone(), - )); - let mut external_matches = cx.background_executor().block(fuzzy::match_strings( - &external_match_candidates, - &query, - false, - true, - MAX_MATCHES - visible_matches.len().min(MAX_MATCHES), - &cancellation_flag, - cx.background_executor().clone(), - )); - let sort_key_for_match = |mat: &StringMatch| { - let symbol = &symbols[mat.candidate_id]; - (Reverse(OrderedFloat(mat.score)), symbol.label.filter_text()) - }; - - visible_matches.sort_unstable_by_key(sort_key_for_match); - external_matches.sort_unstable_by_key(sort_key_for_match); - let mut matches = visible_matches; - matches.append(&mut external_matches); - - matches - .into_iter() - .map(|mut mat| { - let symbol = symbols[mat.candidate_id].clone(); - let filter_start = symbol.label.filter_range.start; - for position in &mut mat.positions { - *position += filter_start; - } - SymbolMatch { symbol } - }) - .collect() - }) -} - -fn compute_symbol_entries( - symbols: Vec, - context_store: &ContextStore, - cx: &App, -) -> Vec { - symbols - .into_iter() - .map(|SymbolMatch { symbol, .. }| SymbolEntry { - is_included: context_store.includes_symbol(&symbol, cx), - symbol, - }) - .collect::>() -} - -pub fn render_symbol_context_entry(id: ElementId, entry: &SymbolEntry) -> Stateful
{ - let path = match &entry.symbol.path { - SymbolLocation::InProject(project_path) => { - project_path.path.file_name().unwrap_or_default().into() - } - SymbolLocation::OutsideProject { - abs_path, - signature: _, - } => abs_path - .file_name() - .map(|f| f.to_string_lossy()) - .unwrap_or_default(), - }; - let symbol_location = format!("{} L{}", path, entry.symbol.range.start.0.row + 1); - - h_flex() - .id(id) - .gap_1p5() - .w_full() - .child( - Icon::new(IconName::Code) - .size(IconSize::Small) - .color(Color::Muted), - ) - .child( - h_flex() - .gap_1() - .child(Label::new(&entry.symbol.name)) - .child( - Label::new(symbol_location) - .size(LabelSize::Small) - .color(Color::Muted), - ), - ) - .when(entry.is_included, |el| { - el.child( - h_flex() - .w_full() - .justify_end() - .gap_0p5() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Added").size(LabelSize::Small)), - ) - }) -} diff --git a/crates/agent_ui/src/context_picker/thread_context_picker.rs b/crates/agent_ui/src/context_picker/thread_context_picker.rs deleted file mode 100644 index d6a3a270742fe28c483d2d7d39894eb9e3c021ea..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/thread_context_picker.rs +++ /dev/null @@ -1,280 +0,0 @@ -use std::sync::Arc; -use std::sync::atomic::AtomicBool; - -use crate::{ - context_picker::ContextPicker, - context_store::{self, ContextStore}, -}; -use agent::{HistoryEntry, HistoryStore}; -use fuzzy::StringMatchCandidate; -use gpui::{App, DismissEvent, Entity, FocusHandle, Focusable, Task, WeakEntity}; -use picker::{Picker, PickerDelegate}; -use ui::{ListItem, prelude::*}; -use workspace::Workspace; - -pub struct ThreadContextPicker { - picker: Entity>, -} - -impl ThreadContextPicker { - pub fn new( - thread_store: WeakEntity, - context_picker: WeakEntity, - context_store: WeakEntity, - workspace: WeakEntity, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let delegate = ThreadContextPickerDelegate::new( - thread_store, - context_picker, - context_store, - workspace, - ); - let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx)); - - ThreadContextPicker { picker } - } -} - -impl Focusable for ThreadContextPicker { - fn focus_handle(&self, cx: &App) -> FocusHandle { - self.picker.focus_handle(cx) - } -} - -impl Render for ThreadContextPicker { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.picker.clone() - } -} - -pub struct ThreadContextPickerDelegate { - thread_store: WeakEntity, - context_picker: WeakEntity, - context_store: WeakEntity, - workspace: WeakEntity, - matches: Vec, - selected_index: usize, -} - -impl ThreadContextPickerDelegate { - pub fn new( - thread_store: WeakEntity, - context_picker: WeakEntity, - context_store: WeakEntity, - workspace: WeakEntity, - ) -> Self { - ThreadContextPickerDelegate { - thread_store, - context_picker, - context_store, - workspace, - matches: Vec::new(), - selected_index: 0, - } - } -} - -impl PickerDelegate for ThreadContextPickerDelegate { - type ListItem = ListItem; - - fn match_count(&self) -> usize { - self.matches.len() - } - - fn selected_index(&self) -> usize { - self.selected_index - } - - fn set_selected_index( - &mut self, - ix: usize, - _window: &mut Window, - _cx: &mut Context>, - ) { - self.selected_index = ix; - } - - fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - "Search threads…".into() - } - - fn update_matches( - &mut self, - query: String, - 
window: &mut Window, - cx: &mut Context>, - ) -> Task<()> { - let Some(thread_store) = self.thread_store.upgrade() else { - return Task::ready(()); - }; - - let search_task = search_threads(query, Arc::new(AtomicBool::default()), &thread_store, cx); - cx.spawn_in(window, async move |this, cx| { - let matches = search_task.await; - this.update(cx, |this, cx| { - this.delegate.matches = matches; - this.delegate.selected_index = 0; - cx.notify(); - }) - .ok(); - }) - } - - fn confirm(&mut self, _secondary: bool, _window: &mut Window, cx: &mut Context>) { - let Some(project) = self - .workspace - .upgrade() - .map(|w| w.read(cx).project().clone()) - else { - return; - }; - let Some((entry, thread_store)) = self - .matches - .get(self.selected_index) - .zip(self.thread_store.upgrade()) - else { - return; - }; - - match entry { - HistoryEntry::AcpThread(thread) => { - let load_thread_task = - agent::load_agent_thread(thread.id.clone(), thread_store, project, cx); - - cx.spawn(async move |this, cx| { - let thread = load_thread_task.await?; - this.update(cx, |this, cx| { - this.delegate - .context_store - .update(cx, |context_store, cx| { - context_store.add_thread(thread, true, cx) - }) - .ok(); - }) - }) - .detach_and_log_err(cx); - } - HistoryEntry::TextThread(thread) => { - let task = thread_store.update(cx, |this, cx| { - this.load_text_thread(thread.path.clone(), cx) - }); - - cx.spawn(async move |this, cx| { - let thread = task.await?; - this.update(cx, |this, cx| { - this.delegate - .context_store - .update(cx, |context_store, cx| { - context_store.add_text_thread(thread, true, cx) - }) - .ok(); - }) - }) - .detach_and_log_err(cx); - } - } - } - - fn dismissed(&mut self, _window: &mut Window, cx: &mut Context>) { - self.context_picker - .update(cx, |_, cx| { - cx.emit(DismissEvent); - }) - .ok(); - } - - fn render_match( - &self, - ix: usize, - selected: bool, - _window: &mut Window, - cx: &mut Context>, - ) -> Option { - let thread = &self.matches.get(ix)?; - - Some(ListItem::new(ix).inset(true).toggle_state(selected).child( - render_thread_context_entry(thread, self.context_store.clone(), cx), - )) - } -} - -pub fn render_thread_context_entry( - entry: &HistoryEntry, - context_store: WeakEntity, - cx: &mut App, -) -> Div { - let is_added = match entry { - HistoryEntry::AcpThread(thread) => context_store - .upgrade() - .is_some_and(|ctx_store| ctx_store.read(cx).includes_thread(&thread.id)), - HistoryEntry::TextThread(thread) => context_store - .upgrade() - .is_some_and(|ctx_store| ctx_store.read(cx).includes_text_thread(&thread.path)), - }; - - h_flex() - .gap_1p5() - .w_full() - .justify_between() - .child( - h_flex() - .gap_1p5() - .max_w_72() - .child( - Icon::new(IconName::Thread) - .size(IconSize::XSmall) - .color(Color::Muted), - ) - .child(Label::new(entry.title().clone()).truncate()), - ) - .when(is_added, |el| { - el.child( - h_flex() - .gap_1() - .child( - Icon::new(IconName::Check) - .size(IconSize::Small) - .color(Color::Success), - ) - .child(Label::new("Added").size(LabelSize::Small)), - ) - }) -} - -pub(crate) fn search_threads( - query: String, - cancellation_flag: Arc, - thread_store: &Entity, - cx: &mut App, -) -> Task> { - let threads = thread_store.read(cx).entries().collect(); - if query.is_empty() { - return Task::ready(threads); - } - - let executor = cx.background_executor().clone(); - cx.background_spawn(async move { - let candidates = threads - .iter() - .enumerate() - .map(|(id, thread)| StringMatchCandidate::new(id, thread.title())) - .collect::>(); - let 
matches = fuzzy::match_strings( - &candidates, - &query, - false, - true, - 100, - &cancellation_flag, - executor, - ) - .await; - - matches - .into_iter() - .map(|mat| threads[mat.candidate_id].clone()) - .collect() - }) -} diff --git a/crates/agent_ui/src/context_store.rs b/crates/agent_ui/src/context_store.rs deleted file mode 100644 index 18aa59c8f716d59e4a0d717904b09472494c4dbc..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_store.rs +++ /dev/null @@ -1,614 +0,0 @@ -use crate::context::{ - AgentContextHandle, AgentContextKey, ContextId, ContextKind, DirectoryContextHandle, - FetchedUrlContext, FileContextHandle, ImageContext, RulesContextHandle, SelectionContextHandle, - SymbolContextHandle, TextThreadContextHandle, ThreadContextHandle, -}; -use agent_client_protocol as acp; -use anyhow::{Context as _, Result, anyhow}; -use assistant_text_thread::TextThread; -use collections::{HashSet, IndexSet}; -use futures::{self, FutureExt}; -use gpui::{App, Context, Entity, EventEmitter, Image, SharedString, Task, WeakEntity}; -use language::{Buffer, File as _}; -use language_model::LanguageModelImage; -use project::{ - Project, ProjectItem, ProjectPath, Symbol, image_store::is_image_file, - lsp_store::SymbolLocation, -}; -use prompt_store::UserPromptId; -use ref_cast::RefCast as _; -use std::{ - ops::Range, - path::{Path, PathBuf}, - sync::Arc, -}; -use text::{Anchor, OffsetRangeExt}; - -pub struct ContextStore { - project: WeakEntity, - next_context_id: ContextId, - context_set: IndexSet, - context_thread_ids: HashSet, - context_text_thread_paths: HashSet>, -} - -pub enum ContextStoreEvent { - ContextRemoved(AgentContextKey), -} - -impl EventEmitter for ContextStore {} - -impl ContextStore { - pub fn new(project: WeakEntity) -> Self { - Self { - project, - next_context_id: ContextId::zero(), - context_set: IndexSet::default(), - context_thread_ids: HashSet::default(), - context_text_thread_paths: HashSet::default(), - } - } - - pub fn context(&self) -> impl Iterator { - self.context_set.iter().map(|entry| entry.as_ref()) - } - - pub fn clear(&mut self, cx: &mut Context) { - self.context_set.clear(); - self.context_thread_ids.clear(); - cx.notify(); - } - - pub fn add_file_from_path( - &mut self, - project_path: ProjectPath, - remove_if_exists: bool, - cx: &mut Context, - ) -> Task>> { - let Some(project) = self.project.upgrade() else { - return Task::ready(Err(anyhow!("failed to read project"))); - }; - - if is_image_file(&project, &project_path, cx) { - self.add_image_from_path(project_path, remove_if_exists, cx) - } else { - cx.spawn(async move |this, cx| { - let open_buffer_task = project.update(cx, |project, cx| { - project.open_buffer(project_path.clone(), cx) - })?; - let buffer = open_buffer_task.await?; - this.update(cx, |this, cx| { - this.add_file_from_buffer(&project_path, buffer, remove_if_exists, cx) - }) - }) - } - } - - pub fn add_file_from_buffer( - &mut self, - project_path: &ProjectPath, - buffer: Entity, - remove_if_exists: bool, - cx: &mut Context, - ) -> Option { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::File(FileContextHandle { buffer, context_id }); - - if let Some(key) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(key.as_ref().clone()) - } - } else if self.path_included_in_directory(project_path, cx).is_some() { - None - } else { - self.insert_context(context.clone(), cx); - Some(context) - } - } - 
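The `add_*` methods of the removed `ContextStore` above share a toggle pattern: adding a context that is already tracked either hands back the existing handle or, when `remove_if_exists` is set, removes it instead. A minimal standalone sketch of that behavior, using hypothetical `ContextKey` and `Store` names rather than Zed's actual types:

```
// Sketch only: `ContextKey`/`Store` are illustrative stand-ins, not Zed's API.
use std::collections::HashSet;

#[derive(Clone, Hash, PartialEq, Eq)]
struct ContextKey(String);

struct Store {
    entries: HashSet<ContextKey>,
}

impl Store {
    // Mirrors the toggle semantics: insert if absent, otherwise either
    // return the existing entry or remove it when `remove_if_exists` is set.
    fn add(&mut self, key: ContextKey, remove_if_exists: bool) -> Option<ContextKey> {
        if self.entries.contains(&key) {
            if remove_if_exists {
                self.entries.remove(&key);
                None // toggled off
            } else {
                Some(key) // already present; reuse the existing entry
            }
        } else {
            self.entries.insert(key.clone());
            Some(key) // newly inserted
        }
    }
}

fn main() {
    let mut store = Store { entries: HashSet::new() };
    let k = ContextKey("src/main.rs".into());
    assert!(store.add(k.clone(), true).is_some()); // first add inserts
    assert!(store.add(k.clone(), true).is_none()); // second add toggles it off
}
```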
- pub fn add_directory( - &mut self, - project_path: &ProjectPath, - remove_if_exists: bool, - cx: &mut Context, - ) -> Result> { - let project = self.project.upgrade().context("failed to read project")?; - let entry_id = project - .read(cx) - .entry_for_path(project_path, cx) - .map(|entry| entry.id) - .context("no entry found for directory context")?; - - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::Directory(DirectoryContextHandle { - entry_id, - context_id, - }); - - let context = - if let Some(existing) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(existing.as_ref().clone()) - } - } else { - self.insert_context(context.clone(), cx); - Some(context) - }; - - anyhow::Ok(context) - } - - pub fn add_symbol( - &mut self, - buffer: Entity, - symbol: SharedString, - range: Range, - enclosing_range: Range, - remove_if_exists: bool, - cx: &mut Context, - ) -> (Option, bool) { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::Symbol(SymbolContextHandle { - buffer, - symbol, - range, - enclosing_range, - context_id, - }); - - if let Some(key) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - let handle = if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(key.as_ref().clone()) - }; - return (handle, false); - } - - let included = self.insert_context(context.clone(), cx); - (Some(context), included) - } - - pub fn add_thread( - &mut self, - thread: Entity, - remove_if_exists: bool, - cx: &mut Context, - ) -> Option { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::Thread(ThreadContextHandle { thread, context_id }); - - if let Some(existing) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(existing.as_ref().clone()) - } - } else { - self.insert_context(context.clone(), cx); - Some(context) - } - } - - pub fn add_text_thread( - &mut self, - text_thread: Entity, - remove_if_exists: bool, - cx: &mut Context, - ) -> Option { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::TextThread(TextThreadContextHandle { - text_thread, - context_id, - }); - - if let Some(existing) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(existing.as_ref().clone()) - } - } else { - self.insert_context(context.clone(), cx); - Some(context) - } - } - - pub fn add_rules( - &mut self, - prompt_id: UserPromptId, - remove_if_exists: bool, - cx: &mut Context, - ) -> Option { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::Rules(RulesContextHandle { - prompt_id, - context_id, - }); - - if let Some(existing) = self.context_set.get(AgentContextKey::ref_cast(&context)) { - if remove_if_exists { - self.remove_context(&context, cx); - None - } else { - Some(existing.as_ref().clone()) - } - } else { - self.insert_context(context.clone(), cx); - Some(context) - } - } - - pub fn add_fetched_url( - &mut self, - url: String, - text: impl Into, - cx: &mut Context, - ) -> AgentContextHandle { - let context = AgentContextHandle::FetchedUrl(FetchedUrlContext { - url: url.into(), - text: text.into(), - context_id: self.next_context_id.post_inc(), - }); - - self.insert_context(context.clone(), cx); - context - } 
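Each of these constructors allocates its id with `self.next_context_id.post_inc()`, so every context handle gets a fresh, monotonically increasing id. A simplified sketch of that counter, assuming a plain `u64`-backed `ContextId` rather than Zed's actual type:

```
// Sketch only: a `u64` counter standing in for the real `ContextId`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct ContextId(u64);

impl ContextId {
    fn zero() -> Self {
        ContextId(0)
    }

    /// Return the current id and advance the counter, mirroring `post_inc`.
    fn post_inc(&mut self) -> Self {
        let id = *self;
        self.0 += 1;
        id
    }
}

fn main() {
    let mut next = ContextId::zero();
    assert_eq!(next.post_inc(), ContextId(0)); // first handle gets id 0
    assert_eq!(next.post_inc(), ContextId(1)); // counter advanced
    assert_eq!(next, ContextId(2));
}
```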
- - pub fn add_image_from_path( - &mut self, - project_path: ProjectPath, - remove_if_exists: bool, - cx: &mut Context, - ) -> Task>> { - let project = self.project.clone(); - cx.spawn(async move |this, cx| { - let open_image_task = project.update(cx, |project, cx| { - project.open_image(project_path.clone(), cx) - })?; - let image_item = open_image_task.await?; - - this.update(cx, |this, cx| { - let item = image_item.read(cx); - this.insert_image( - Some(item.project_path(cx)), - Some(item.file.full_path(cx).to_string_lossy().into_owned()), - item.image.clone(), - remove_if_exists, - cx, - ) - }) - }) - } - - pub fn add_image_instance(&mut self, image: Arc, cx: &mut Context) { - self.insert_image(None, None, image, false, cx); - } - - fn insert_image( - &mut self, - project_path: Option, - full_path: Option, - image: Arc, - remove_if_exists: bool, - cx: &mut Context, - ) -> Option { - let image_task = LanguageModelImage::from_image(image.clone(), cx).shared(); - let context = AgentContextHandle::Image(ImageContext { - project_path, - full_path, - original_image: image, - image_task, - context_id: self.next_context_id.post_inc(), - }); - if self.has_context(&context) && remove_if_exists { - self.remove_context(&context, cx); - return None; - } - - self.insert_context(context.clone(), cx); - Some(context) - } - - pub fn add_selection( - &mut self, - buffer: Entity, - range: Range, - cx: &mut Context, - ) { - let context_id = self.next_context_id.post_inc(); - let context = AgentContextHandle::Selection(SelectionContextHandle { - buffer, - range, - context_id, - }); - self.insert_context(context, cx); - } - - pub fn add_suggested_context( - &mut self, - suggested: &SuggestedContext, - cx: &mut Context, - ) { - match suggested { - SuggestedContext::File { - buffer, - icon_path: _, - name: _, - } => { - if let Some(buffer) = buffer.upgrade() { - let context_id = self.next_context_id.post_inc(); - self.insert_context( - AgentContextHandle::File(FileContextHandle { buffer, context_id }), - cx, - ); - }; - } - SuggestedContext::TextThread { - text_thread, - name: _, - } => { - if let Some(text_thread) = text_thread.upgrade() { - let context_id = self.next_context_id.post_inc(); - self.insert_context( - AgentContextHandle::TextThread(TextThreadContextHandle { - text_thread, - context_id, - }), - cx, - ); - } - } - } - } - - fn insert_context(&mut self, context: AgentContextHandle, cx: &mut Context) -> bool { - match &context { - // AgentContextHandle::Thread(thread_context) => { - // if let Some(thread_store) = self.thread_store.clone() { - // thread_context.thread.update(cx, |thread, cx| { - // thread.start_generating_detailed_summary_if_needed(thread_store, cx); - // }); - // self.context_thread_ids - // .insert(thread_context.thread.read(cx).id().clone()); - // } else { - // return false; - // } - // } - AgentContextHandle::TextThread(text_thread_context) => { - self.context_text_thread_paths - .extend(text_thread_context.text_thread.read(cx).path().cloned()); - } - _ => {} - } - let inserted = self.context_set.insert(AgentContextKey(context)); - if inserted { - cx.notify(); - } - inserted - } - - pub fn remove_context(&mut self, context: &AgentContextHandle, cx: &mut Context) { - if let Some((_, key)) = self - .context_set - .shift_remove_full(AgentContextKey::ref_cast(context)) - { - match context { - AgentContextHandle::Thread(thread_context) => { - self.context_thread_ids - .remove(thread_context.thread.read(cx).id()); - } - AgentContextHandle::TextThread(text_thread_context) => { - if let 
Some(path) = text_thread_context.text_thread.read(cx).path() { - self.context_text_thread_paths.remove(path); - } - } - _ => {} - } - cx.emit(ContextStoreEvent::ContextRemoved(key)); - cx.notify(); - } - } - - pub fn has_context(&mut self, context: &AgentContextHandle) -> bool { - self.context_set - .contains(AgentContextKey::ref_cast(context)) - } - - /// Returns whether this file path is already included directly in the context, or if it will be - /// included in the context via a directory. - pub fn file_path_included(&self, path: &ProjectPath, cx: &App) -> Option { - let project = self.project.upgrade()?.read(cx); - self.context().find_map(|context| match context { - AgentContextHandle::File(file_context) => { - FileInclusion::check_file(file_context, path, cx) - } - AgentContextHandle::Image(image_context) => { - FileInclusion::check_image(image_context, path) - } - AgentContextHandle::Directory(directory_context) => { - FileInclusion::check_directory(directory_context, path, project, cx) - } - _ => None, - }) - } - - pub fn path_included_in_directory( - &self, - path: &ProjectPath, - cx: &App, - ) -> Option { - let project = self.project.upgrade()?.read(cx); - self.context().find_map(|context| match context { - AgentContextHandle::Directory(directory_context) => { - FileInclusion::check_directory(directory_context, path, project, cx) - } - _ => None, - }) - } - - pub fn includes_symbol(&self, symbol: &Symbol, cx: &App) -> bool { - self.context().any(|context| match context { - AgentContextHandle::Symbol(context) => { - if context.symbol != symbol.name { - return false; - } - let buffer = context.buffer.read(cx); - let Some(context_path) = buffer.project_path(cx) else { - return false; - }; - if symbol.path != SymbolLocation::InProject(context_path) { - return false; - } - let context_range = context.range.to_point_utf16(&buffer.snapshot()); - context_range.start == symbol.range.start.0 - && context_range.end == symbol.range.end.0 - } - _ => false, - }) - } - - pub fn includes_thread(&self, thread_id: &acp::SessionId) -> bool { - self.context_thread_ids.contains(thread_id) - } - - pub fn includes_text_thread(&self, path: &Arc) -> bool { - self.context_text_thread_paths.contains(path) - } - - pub fn includes_user_rules(&self, prompt_id: UserPromptId) -> bool { - self.context_set - .contains(&RulesContextHandle::lookup_key(prompt_id)) - } - - pub fn includes_url(&self, url: impl Into) -> bool { - self.context_set - .contains(&FetchedUrlContext::lookup_key(url.into())) - } - - pub fn get_url_context(&self, url: SharedString) -> Option { - self.context_set - .get(&FetchedUrlContext::lookup_key(url)) - .map(|key| key.as_ref().clone()) - } - - pub fn file_paths(&self, cx: &App) -> HashSet { - self.context() - .filter_map(|context| match context { - AgentContextHandle::File(file) => { - let buffer = file.buffer.read(cx); - buffer.project_path(cx) - } - AgentContextHandle::Directory(_) - | AgentContextHandle::Symbol(_) - | AgentContextHandle::Thread(_) - | AgentContextHandle::Selection(_) - | AgentContextHandle::FetchedUrl(_) - | AgentContextHandle::TextThread(_) - | AgentContextHandle::Rules(_) - | AgentContextHandle::Image(_) => None, - }) - .collect() - } - - pub fn thread_ids(&self) -> &HashSet { - &self.context_thread_ids - } -} - -#[derive(Clone)] -pub enum SuggestedContext { - File { - name: SharedString, - icon_path: Option, - buffer: WeakEntity, - }, - TextThread { - name: SharedString, - text_thread: WeakEntity, - }, -} - -impl SuggestedContext { - pub fn name(&self) -> 
&SharedString { - match self { - Self::File { name, .. } => name, - Self::TextThread { name, .. } => name, - } - } - - pub fn icon_path(&self) -> Option { - match self { - Self::File { icon_path, .. } => icon_path.clone(), - Self::TextThread { .. } => None, - } - } - - pub fn kind(&self) -> ContextKind { - match self { - Self::File { .. } => ContextKind::File, - Self::TextThread { .. } => ContextKind::TextThread, - } - } -} - -pub enum FileInclusion { - Direct, - InDirectory { full_path: PathBuf }, -} - -impl FileInclusion { - fn check_file(file_context: &FileContextHandle, path: &ProjectPath, cx: &App) -> Option { - let file_path = file_context.buffer.read(cx).project_path(cx)?; - if path == &file_path { - Some(FileInclusion::Direct) - } else { - None - } - } - - fn check_image(image_context: &ImageContext, path: &ProjectPath) -> Option { - let image_path = image_context.project_path.as_ref()?; - if path == image_path { - Some(FileInclusion::Direct) - } else { - None - } - } - - fn check_directory( - directory_context: &DirectoryContextHandle, - path: &ProjectPath, - project: &Project, - cx: &App, - ) -> Option { - let worktree = project - .worktree_for_entry(directory_context.entry_id, cx)? - .read(cx); - let entry = worktree.entry_for_id(directory_context.entry_id)?; - let directory_path = ProjectPath { - worktree_id: worktree.id(), - path: entry.path.clone(), - }; - if path.starts_with(&directory_path) { - if path == &directory_path { - Some(FileInclusion::Direct) - } else { - Some(FileInclusion::InDirectory { - full_path: worktree.full_path(&entry.path), - }) - } - } else { - None - } - } -} diff --git a/crates/agent_ui/src/context_strip.rs b/crates/agent_ui/src/context_strip.rs deleted file mode 100644 index d2393ac4f612cebc6cf97d10a38894e7022e53b9..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_strip.rs +++ /dev/null @@ -1,619 +0,0 @@ -use crate::{ - AcceptSuggestedContext, AgentPanel, FocusDown, FocusLeft, FocusRight, FocusUp, - ModelUsageContext, RemoveAllContext, RemoveFocusedContext, ToggleContextPicker, - context_picker::ContextPicker, - ui::{AddedContext, ContextPill}, -}; -use crate::{ - context::AgentContextHandle, - context_store::{ContextStore, SuggestedContext}, -}; -use agent::HistoryStore; -use collections::HashSet; -use editor::Editor; -use gpui::{ - App, Bounds, ClickEvent, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, - Subscription, Task, WeakEntity, -}; -use itertools::Itertools; -use project::ProjectItem; -use prompt_store::PromptStore; -use rope::Point; -use std::rc::Rc; -use text::ToPoint as _; -use ui::{PopoverMenu, PopoverMenuHandle, Tooltip, prelude::*}; -use util::ResultExt as _; -use workspace::Workspace; -use zed_actions::assistant::OpenRulesLibrary; - -pub struct ContextStrip { - context_store: Entity, - context_picker: Entity, - context_picker_menu_handle: PopoverMenuHandle, - focus_handle: FocusHandle, - suggest_context_kind: SuggestContextKind, - workspace: WeakEntity, - prompt_store: Option>, - _subscriptions: Vec, - focused_index: Option, - children_bounds: Option>>, - model_usage_context: ModelUsageContext, -} - -impl ContextStrip { - pub fn new( - context_store: Entity, - workspace: WeakEntity, - thread_store: Option>, - prompt_store: Option>, - context_picker_menu_handle: PopoverMenuHandle, - suggest_context_kind: SuggestContextKind, - model_usage_context: ModelUsageContext, - window: &mut Window, - cx: &mut Context, - ) -> Self { - let context_picker = cx.new(|cx| { - ContextPicker::new( - 
workspace.clone(), - thread_store.clone(), - prompt_store.clone(), - context_store.downgrade(), - window, - cx, - ) - }); - - let focus_handle = cx.focus_handle(); - - let subscriptions = vec![ - cx.observe(&context_store, |_, _, cx| cx.notify()), - cx.subscribe_in(&context_picker, window, Self::handle_context_picker_event), - cx.on_focus(&focus_handle, window, Self::handle_focus), - cx.on_blur(&focus_handle, window, Self::handle_blur), - ]; - - Self { - context_store: context_store.clone(), - context_picker, - context_picker_menu_handle, - focus_handle, - suggest_context_kind, - workspace, - prompt_store, - _subscriptions: subscriptions, - focused_index: None, - children_bounds: None, - model_usage_context, - } - } - - /// Whether or not the context strip has items to display - pub fn has_context_items(&self, cx: &App) -> bool { - self.context_store.read(cx).context().next().is_some() - || self.suggested_context(cx).is_some() - } - - fn added_contexts(&self, cx: &App) -> Vec { - if let Some(workspace) = self.workspace.upgrade() { - let project = workspace.read(cx).project().read(cx); - let prompt_store = self.prompt_store.as_ref().and_then(|p| p.upgrade()); - - let current_model = self.model_usage_context.language_model(cx); - - self.context_store - .read(cx) - .context() - .flat_map(|context| { - AddedContext::new_pending( - context.clone(), - prompt_store.as_ref(), - project, - current_model.as_ref(), - cx, - ) - }) - .collect::>() - } else { - Vec::new() - } - } - - fn suggested_context(&self, cx: &App) -> Option { - match self.suggest_context_kind { - SuggestContextKind::Thread => self.suggested_thread(cx), - } - } - - fn suggested_thread(&self, cx: &App) -> Option { - if !self.context_picker.read(cx).allow_threads() { - return None; - } - - let workspace = self.workspace.upgrade()?; - let panel = workspace.read(cx).panel::(cx)?.read(cx); - - if let Some(active_text_thread_editor) = panel.active_text_thread_editor() { - let text_thread = active_text_thread_editor.read(cx).text_thread(); - let weak_text_thread = text_thread.downgrade(); - let text_thread = text_thread.read(cx); - let path = text_thread.path()?; - - if self.context_store.read(cx).includes_text_thread(path) { - return None; - } - - Some(SuggestedContext::TextThread { - name: text_thread.summary().or_default(), - text_thread: weak_text_thread, - }) - } else { - None - } - } - - fn handle_context_picker_event( - &mut self, - _picker: &Entity, - _event: &DismissEvent, - _window: &mut Window, - cx: &mut Context, - ) { - cx.emit(ContextStripEvent::PickerDismissed); - } - - fn handle_focus(&mut self, _window: &mut Window, cx: &mut Context) { - self.focused_index = self.last_pill_index(); - cx.notify(); - } - - fn handle_blur(&mut self, _window: &mut Window, cx: &mut Context) { - self.focused_index = None; - cx.notify(); - } - - fn focus_left(&mut self, _: &FocusLeft, _window: &mut Window, cx: &mut Context) { - self.focused_index = match self.focused_index { - Some(index) if index > 0 => Some(index - 1), - _ => self.last_pill_index(), - }; - - cx.notify(); - } - - fn focus_right(&mut self, _: &FocusRight, _window: &mut Window, cx: &mut Context) { - let Some(last_index) = self.last_pill_index() else { - return; - }; - - self.focused_index = match self.focused_index { - Some(index) if index < last_index => Some(index + 1), - _ => Some(0), - }; - - cx.notify(); - } - - fn focus_up(&mut self, _: &FocusUp, _window: &mut Window, cx: &mut Context) { - let Some(focused_index) = self.focused_index else { - return; - }; - - if 
focused_index == 0 { - return cx.emit(ContextStripEvent::BlurredUp); - } - - let Some((focused, pills)) = self.focused_bounds(focused_index) else { - return; - }; - - let iter = pills[..focused_index].iter().enumerate().rev(); - self.focused_index = Self::find_best_horizontal_match(focused, iter).or(Some(0)); - cx.notify(); - } - - fn focus_down(&mut self, _: &FocusDown, _window: &mut Window, cx: &mut Context) { - let Some(focused_index) = self.focused_index else { - return; - }; - - let last_index = self.last_pill_index(); - - if self.focused_index == last_index { - return cx.emit(ContextStripEvent::BlurredDown); - } - - let Some((focused, pills)) = self.focused_bounds(focused_index) else { - return; - }; - - let iter = pills.iter().enumerate().skip(focused_index + 1); - self.focused_index = Self::find_best_horizontal_match(focused, iter).or(last_index); - cx.notify(); - } - - fn focused_bounds(&self, focused: usize) -> Option<(&Bounds, &[Bounds])> { - let pill_bounds = self.pill_bounds()?; - let focused = pill_bounds.get(focused)?; - - Some((focused, pill_bounds)) - } - - fn pill_bounds(&self) -> Option<&[Bounds]> { - let bounds = self.children_bounds.as_ref()?; - let eraser = if bounds.len() < 3 { 0 } else { 1 }; - let pills = &bounds[1..bounds.len() - eraser]; - - if pills.is_empty() { None } else { Some(pills) } - } - - fn last_pill_index(&self) -> Option { - Some(self.pill_bounds()?.len() - 1) - } - - fn find_best_horizontal_match<'a>( - focused: &'a Bounds, - iter: impl Iterator)>, - ) -> Option { - let mut best = None; - - let focused_left = focused.left(); - let focused_right = focused.right(); - - for (index, probe) in iter { - if probe.origin.y == focused.origin.y { - continue; - } - - let overlap = probe.right().min(focused_right) - probe.left().max(focused_left); - - best = match best { - Some((_, prev_overlap, y)) if probe.origin.y != y || prev_overlap > overlap => { - break; - } - Some(_) | None => Some((index, overlap, probe.origin.y)), - }; - } - - best.map(|(index, _, _)| index) - } - - fn open_context(&mut self, context: &AgentContextHandle, window: &mut Window, cx: &mut App) { - let Some(workspace) = self.workspace.upgrade() else { - return; - }; - - match context { - AgentContextHandle::File(file_context) => { - if let Some(project_path) = file_context.project_path(cx) { - workspace.update(cx, |workspace, cx| { - workspace - .open_path(project_path, None, true, window, cx) - .detach_and_log_err(cx); - }); - } - } - - AgentContextHandle::Directory(directory_context) => { - let entry_id = directory_context.entry_id; - workspace.update(cx, |workspace, cx| { - workspace.project().update(cx, |_project, cx| { - cx.emit(project::Event::RevealInProjectPanel(entry_id)); - }) - }) - } - - AgentContextHandle::Symbol(symbol_context) => { - let buffer = symbol_context.buffer.read(cx); - if let Some(project_path) = buffer.project_path(cx) { - let snapshot = buffer.snapshot(); - let target_position = symbol_context.range.start.to_point(&snapshot); - open_editor_at_position(project_path, target_position, &workspace, window, cx) - .detach(); - } - } - - AgentContextHandle::Selection(selection_context) => { - let buffer = selection_context.buffer.read(cx); - if let Some(project_path) = buffer.project_path(cx) { - let snapshot = buffer.snapshot(); - let target_position = selection_context.range.start.to_point(&snapshot); - - open_editor_at_position(project_path, target_position, &workspace, window, cx) - .detach(); - } - } - - AgentContextHandle::FetchedUrl(fetched_url_context) => { - 
cx.open_url(&fetched_url_context.url); - } - - AgentContextHandle::Thread(_thread_context) => {} - - AgentContextHandle::TextThread(text_thread_context) => { - workspace.update(cx, |workspace, cx| { - if let Some(panel) = workspace.panel::(cx) { - let context = text_thread_context.text_thread.clone(); - window.defer(cx, move |window, cx| { - panel.update(cx, |panel, cx| { - panel.open_text_thread(context, window, cx) - }); - }); - } - }) - } - - AgentContextHandle::Rules(rules_context) => window.dispatch_action( - Box::new(OpenRulesLibrary { - prompt_to_select: Some(rules_context.prompt_id.0), - }), - cx, - ), - - AgentContextHandle::Image(_) => {} - } - } - - fn remove_focused_context( - &mut self, - _: &RemoveFocusedContext, - _window: &mut Window, - cx: &mut Context, - ) { - if let Some(index) = self.focused_index { - let added_contexts = self.added_contexts(cx); - let Some(context) = added_contexts.get(index) else { - return; - }; - - self.context_store.update(cx, |this, cx| { - this.remove_context(&context.handle, cx); - }); - - let is_now_empty = added_contexts.len() == 1; - if is_now_empty { - cx.emit(ContextStripEvent::BlurredEmpty); - } else { - self.focused_index = Some(index.saturating_sub(1)); - cx.notify(); - } - } - } - - fn is_suggested_focused(&self, added_contexts: &Vec) -> bool { - // We only suggest one item after the actual context - self.focused_index == Some(added_contexts.len()) - } - - fn accept_suggested_context( - &mut self, - _: &AcceptSuggestedContext, - _window: &mut Window, - cx: &mut Context, - ) { - if let Some(suggested) = self.suggested_context(cx) - && self.is_suggested_focused(&self.added_contexts(cx)) - { - self.add_suggested_context(&suggested, cx); - } - } - - fn add_suggested_context(&mut self, suggested: &SuggestedContext, cx: &mut Context) { - self.context_store.update(cx, |context_store, cx| { - context_store.add_suggested_context(suggested, cx) - }); - cx.notify(); - } -} - -impl Focusable for ContextStrip { - fn focus_handle(&self, _cx: &App) -> FocusHandle { - self.focus_handle.clone() - } -} - -impl Render for ContextStrip { - fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { - let context_picker = self.context_picker.clone(); - let focus_handle = self.focus_handle.clone(); - - let added_contexts = self.added_contexts(cx); - let dupe_names = added_contexts - .iter() - .map(|c| c.name.clone()) - .sorted() - .tuple_windows() - .filter(|(a, b)| a == b) - .map(|(a, _)| a) - .collect::>(); - let no_added_context = added_contexts.is_empty(); - - let suggested_context = self.suggested_context(cx).map(|suggested_context| { - ( - suggested_context, - self.is_suggested_focused(&added_contexts), - ) - }); - - h_flex() - .flex_wrap() - .gap_1() - .track_focus(&focus_handle) - .key_context("ContextStrip") - .on_action(cx.listener(Self::focus_up)) - .on_action(cx.listener(Self::focus_right)) - .on_action(cx.listener(Self::focus_down)) - .on_action(cx.listener(Self::focus_left)) - .on_action(cx.listener(Self::remove_focused_context)) - .on_action(cx.listener(Self::accept_suggested_context)) - .on_children_prepainted({ - let entity = cx.entity().downgrade(); - move |children_bounds, _window, cx| { - entity - .update(cx, |this, _| { - this.children_bounds = Some(children_bounds); - }) - .ok(); - } - }) - .child( - PopoverMenu::new("context-picker") - .menu({ - let context_picker = context_picker.clone(); - move |window, cx| { - context_picker.update(cx, |this, cx| { - this.init(window, cx); - }); - - 
Some(context_picker.clone()) - } - }) - .on_open({ - let context_picker = context_picker.downgrade(); - Rc::new(move |window, cx| { - context_picker - .update(cx, |context_picker, cx| { - context_picker.select_first(window, cx); - }) - .ok(); - }) - }) - .trigger_with_tooltip( - IconButton::new("add-context", IconName::Plus) - .icon_size(IconSize::Small) - .style(ui::ButtonStyle::Filled), - { - let focus_handle = focus_handle.clone(); - move |_window, cx| { - Tooltip::for_action_in( - "Add Context", - &ToggleContextPicker, - &focus_handle, - cx, - ) - } - }, - ) - .attach(gpui::Corner::TopLeft) - .anchor(gpui::Corner::BottomLeft) - .offset(gpui::Point { - x: px(0.0), - y: px(-2.0), - }) - .with_handle(self.context_picker_menu_handle.clone()), - ) - .children( - added_contexts - .into_iter() - .enumerate() - .map(|(i, added_context)| { - let name = added_context.name.clone(); - let context = added_context.handle.clone(); - ContextPill::added( - added_context, - dupe_names.contains(&name), - self.focused_index == Some(i), - Some({ - let context = context.clone(); - let context_store = self.context_store.clone(); - Rc::new(cx.listener(move |_this, _event, _window, cx| { - context_store.update(cx, |this, cx| { - this.remove_context(&context, cx); - }); - cx.notify(); - })) - }), - ) - .on_click({ - Rc::new(cx.listener(move |this, event: &ClickEvent, window, cx| { - if event.click_count() > 1 { - this.open_context(&context, window, cx); - } else { - this.focused_index = Some(i); - } - cx.notify(); - })) - }) - }), - ) - .when_some(suggested_context, |el, (suggested, focused)| { - el.child( - ContextPill::suggested( - suggested.name().clone(), - suggested.icon_path(), - suggested.kind(), - focused, - ) - .on_click(Rc::new(cx.listener( - move |this, _event, _window, cx| { - this.add_suggested_context(&suggested, cx); - }, - ))), - ) - }) - .when(!no_added_context, { - move |parent| { - parent.child( - IconButton::new("remove-all-context", IconName::Eraser) - .icon_size(IconSize::Small) - .tooltip({ - let focus_handle = focus_handle.clone(); - move |_window, cx| { - Tooltip::for_action_in( - "Remove All Context", - &RemoveAllContext, - &focus_handle, - cx, - ) - } - }) - .on_click(cx.listener({ - let focus_handle = focus_handle.clone(); - move |_this, _event, window, cx| { - focus_handle.dispatch_action(&RemoveAllContext, window, cx); - } - })), - ) - } - }) - .into_any() - } -} - -pub enum ContextStripEvent { - PickerDismissed, - BlurredEmpty, - BlurredDown, - BlurredUp, -} - -impl EventEmitter for ContextStrip {} - -pub enum SuggestContextKind { - Thread, -} - -fn open_editor_at_position( - project_path: project::ProjectPath, - target_position: Point, - workspace: &Entity, - window: &mut Window, - cx: &mut App, -) -> Task<()> { - let open_task = workspace.update(cx, |workspace, cx| { - workspace.open_path(project_path, None, true, window, cx) - }); - window.spawn(cx, async move |cx| { - if let Some(active_editor) = open_task - .await - .log_err() - .and_then(|item| item.downcast::()) - { - active_editor - .downgrade() - .update_in(cx, |editor, window, cx| { - editor.go_to_singleton_buffer_point(target_position, window, cx); - }) - .log_err(); - } - }) -} diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index 17a6a8e022f322575cabab728bd512d68754f4df..f822c79f2589c757173bcd2699ef6abf2ac51027 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -4,10 +4,11 @@ use std::ops::Range; use std::rc::Rc; use 
std::sync::Arc; +use crate::context::load_context; +use crate::mention_set::MentionSet; use crate::{ AgentPanel, buffer_codegen::{BufferCodegen, CodegenAlternative, CodegenEvent}, - context_store::ContextStore, inline_prompt_editor::{CodegenStatus, InlineAssistId, PromptEditor, PromptEditorEvent}, terminal_inline_assistant::TerminalInlineAssistant, }; @@ -31,6 +32,7 @@ use editor::{ }, }; use fs::Fs; +use futures::FutureExt; use gpui::{ App, Context, Entity, Focusable, Global, HighlightStyle, Subscription, Task, UpdateGlobal, WeakEntity, Window, point, @@ -214,16 +216,10 @@ impl InlineAssistant { if let Some(editor) = item.act_as::(cx) { editor.update(cx, |editor, cx| { if is_ai_enabled { - let panel = workspace.read(cx).panel::(cx); - let thread_store = panel - .as_ref() - .map(|agent_panel| agent_panel.read(cx).thread_store().downgrade()); - editor.add_code_action_provider( Rc::new(AssistantCodeActionProvider { editor: cx.entity().downgrade(), workspace: workspace.downgrade(), - thread_store, }), window, cx, @@ -235,9 +231,6 @@ impl InlineAssistant { editor.cancel(&Default::default(), window, cx); } } - - // Remove the Assistant1 code action provider, as it still might be registered. - editor.remove_code_action_provider("assistant".into(), window, cx); } else { editor.remove_code_action_provider( ASSISTANT_CODE_ACTION_PROVIDER_ID.into(), @@ -279,8 +272,7 @@ impl InlineAssistant { let agent_panel = agent_panel.read(cx); let prompt_store = agent_panel.prompt_store().as_ref().cloned(); - let thread_store = Some(agent_panel.thread_store().downgrade()); - let context_store = agent_panel.inline_assist_context_store().clone(); + let thread_store = agent_panel.thread_store().clone(); let handle_assist = |window: &mut Window, cx: &mut Context| match inline_assist_target { @@ -289,10 +281,9 @@ impl InlineAssistant { assistant.assist( &active_editor, cx.entity().downgrade(), - context_store, workspace.project().downgrade(), - prompt_store, thread_store, + prompt_store, action.prompt.clone(), window, cx, @@ -305,8 +296,8 @@ impl InlineAssistant { &active_terminal, cx.entity().downgrade(), workspace.project().downgrade(), - prompt_store, thread_store, + prompt_store, action.prompt.clone(), window, cx, @@ -477,10 +468,9 @@ impl InlineAssistant { &mut self, editor: &Entity, workspace: WeakEntity, - context_store: Entity, project: WeakEntity, + thread_store: Entity, prompt_store: Option>, - thread_store: Option>, initial_prompt: Option, window: &mut Window, codegen_ranges: &[Range], @@ -508,9 +498,6 @@ impl InlineAssistant { editor.read(cx).buffer().clone(), range.clone(), initial_transaction_id, - context_store.clone(), - project.clone(), - prompt_store.clone(), self.telemetry.clone(), self.prompt_builder.clone(), cx, @@ -526,10 +513,10 @@ impl InlineAssistant { prompt_buffer.clone(), codegen.clone(), self.fs.clone(), - context_store.clone(), - workspace.clone(), thread_store.clone(), - prompt_store.as_ref().map(|s| s.downgrade()), + prompt_store.clone(), + project.clone(), + workspace.clone(), window, cx, ) @@ -606,10 +593,9 @@ impl InlineAssistant { &mut self, editor: &Entity, workspace: WeakEntity, - context_store: Entity, project: WeakEntity, + thread_store: Entity, prompt_store: Option>, - thread_store: Option>, initial_prompt: Option, window: &mut Window, cx: &mut App, @@ -625,10 +611,9 @@ impl InlineAssistant { let assist_to_focus = self.batch_assist( editor, workspace, - context_store, project, - prompt_store, thread_store, + prompt_store, initial_prompt, window, &codegen_ranges, @@ -650,8 
+635,8 @@ impl InlineAssistant { initial_transaction_id: Option, focus: bool, workspace: Entity, + thread_store: Entity, prompt_store: Option>, - thread_store: Option>, window: &mut Window, cx: &mut App, ) -> InlineAssistId { @@ -663,16 +648,14 @@ impl InlineAssistant { } let project = workspace.read(cx).project().downgrade(); - let context_store = cx.new(|_cx| ContextStore::new(project.clone())); let assist_id = self .batch_assist( editor, workspace.downgrade(), - context_store, project, - prompt_store, thread_store, + prompt_store, Some(initial_prompt), window, &[range], @@ -1294,7 +1277,8 @@ impl InlineAssistant { return; } - let Some(user_prompt) = assist.user_prompt(cx) else { + let Some((user_prompt, mention_set)) = assist.user_prompt(cx).zip(assist.mention_set(cx)) + else { return; }; @@ -1310,9 +1294,12 @@ impl InlineAssistant { return; }; + let context_task = load_context(&mention_set, cx).shared(); assist .codegen - .update(cx, |codegen, cx| codegen.start(model, user_prompt, cx)) + .update(cx, |codegen, cx| { + codegen.start(model, user_prompt, context_task, cx) + }) .log_err(); } @@ -1778,6 +1765,11 @@ impl InlineAssist { let decorations = self.decorations.as_ref()?; Some(decorations.prompt_editor.read(cx).prompt(cx)) } + + fn mention_set(&self, cx: &App) -> Option> { + let decorations = self.decorations.as_ref()?; + Some(decorations.prompt_editor.read(cx).mention_set().clone()) + } } struct InlineAssistDecorations { @@ -1790,10 +1782,9 @@ struct InlineAssistDecorations { struct AssistantCodeActionProvider { editor: WeakEntity, workspace: WeakEntity, - thread_store: Option>, } -const ASSISTANT_CODE_ACTION_PROVIDER_ID: &str = "assistant2"; +const ASSISTANT_CODE_ACTION_PROVIDER_ID: &str = "assistant"; impl CodeActionProvider for AssistantCodeActionProvider { fn id(&self) -> Arc { @@ -1861,10 +1852,20 @@ impl CodeActionProvider for AssistantCodeActionProvider { ) -> Task> { let editor = self.editor.clone(); let workspace = self.workspace.clone(); - let thread_store = self.thread_store.clone(); let prompt_store = PromptStore::global(cx); window.spawn(cx, async move |cx| { let workspace = workspace.upgrade().context("workspace was released")?; + let thread_store = cx.update(|_window, cx| { + anyhow::Ok( + workspace + .read(cx) + .panel::(cx) + .context("missing agent panel")? 
+ .read(cx) + .thread_store() + .clone(), + ) + })??; let editor = editor.upgrade().context("editor was released")?; let range = editor .update(cx, |editor, cx| { @@ -1907,8 +1908,8 @@ impl CodeActionProvider for AssistantCodeActionProvider { None, true, workspace, - prompt_store, thread_store, + prompt_store, window, cx, ); diff --git a/crates/agent_ui/src/inline_prompt_editor.rs b/crates/agent_ui/src/inline_prompt_editor.rs index 3a0866f47063a6dc5f68df1c36c3fdb0e07d2b74..7cd7a9d58a71effa18612234f9f718f794c99c06 100644 --- a/crates/agent_ui/src/inline_prompt_editor.rs +++ b/crates/agent_ui/src/inline_prompt_editor.rs @@ -1,19 +1,21 @@ use agent::HistoryStore; -use collections::{HashMap, VecDeque}; +use collections::VecDeque; use editor::actions::Paste; -use editor::display_map::{CreaseId, EditorMargins}; -use editor::{Addon, AnchorRangeExt as _, MultiBufferOffset}; +use editor::code_context_menus::CodeContextMenu; +use editor::display_map::EditorMargins; +use editor::{AnchorRangeExt as _, MultiBufferOffset, ToOffset as _}; use editor::{ ContextMenuOptions, Editor, EditorElement, EditorEvent, EditorMode, EditorStyle, MultiBuffer, actions::{MoveDown, MoveUp}, }; use fs::Fs; use gpui::{ - AnyElement, App, ClipboardEntry, Context, CursorStyle, Entity, EventEmitter, FocusHandle, - Focusable, Subscription, TextStyle, WeakEntity, Window, + AnyElement, App, Context, CursorStyle, Entity, EventEmitter, FocusHandle, Focusable, + Subscription, TextStyle, WeakEntity, Window, }; use language_model::{LanguageModel, LanguageModelRegistry}; use parking_lot::Mutex; +use project::Project; use prompt_store::PromptStore; use settings::Settings; use std::cmp; @@ -28,22 +30,21 @@ use zed_actions::agent::ToggleModelSelector; use crate::agent_model_selector::AgentModelSelector; use crate::buffer_codegen::BufferCodegen; -use crate::context::{AgentContextHandle, AgentContextKey}; -use crate::context_picker::{ContextPicker, ContextPickerCompletionProvider, crease_for_mention}; -use crate::context_store::{ContextStore, ContextStoreEvent}; -use crate::context_strip::{ContextStrip, ContextStripEvent, SuggestContextKind}; -use crate::terminal_codegen::TerminalCodegen; -use crate::{ - CycleNextInlineAssist, CyclePreviousInlineAssist, ModelUsageContext, RemoveAllContext, - ToggleContextPicker, +use crate::completion_provider::{ + PromptCompletionProvider, PromptCompletionProviderDelegate, PromptContextType, }; +use crate::mention_set::paste_images_as_context; +use crate::mention_set::{MentionSet, crease_for_mention}; +use crate::terminal_codegen::TerminalCodegen; +use crate::{CycleNextInlineAssist, CyclePreviousInlineAssist, ModelUsageContext}; pub struct PromptEditor { pub editor: Entity, mode: PromptEditorMode, - context_store: Entity, - context_strip: Entity, - context_picker_menu_handle: PopoverMenuHandle, + mention_set: Entity, + history_store: Entity, + prompt_store: Option>, + workspace: WeakEntity, model_selector: Entity, edited_since_done: bool, prompt_history: VecDeque, @@ -51,7 +52,6 @@ pub struct PromptEditor { pending_prompt: String, _codegen_subscription: Subscription, editor_subscriptions: Vec, - _context_strip_subscription: Subscription, show_rate_limit_notice: bool, _phantom: std::marker::PhantomData, } @@ -98,6 +98,19 @@ impl Render for PromptEditor { buttons.extend(self.render_buttons(window, cx)); + let menu_visible = self.is_completions_menu_visible(cx); + let add_context_button = IconButton::new("add-context", IconName::AtSign) + .icon_size(IconSize::Small) + .icon_color(Color::Muted) + 
.when(!menu_visible, |this| { + this.tooltip(move |_window, cx| { + Tooltip::with_meta("Add Context", None, "Or type @ to include context", cx) + }) + }) + .on_click(cx.listener(move |this, _, window, cx| { + this.trigger_completion_menu(window, cx); + })); + v_flex() .key_context("PromptEditor") .capture_action(cx.listener(Self::paste)) @@ -114,7 +127,6 @@ impl Render for PromptEditor { h_flex() .items_start() .cursor(CursorStyle::Arrow) - .on_action(cx.listener(Self::toggle_context_picker)) .on_action(cx.listener(|this, _: &ToggleModelSelector, window, cx| { this.model_selector .update(cx, |model_selector, cx| model_selector.toggle(window, cx)); @@ -123,7 +135,6 @@ impl Render for PromptEditor { .on_action(cx.listener(Self::cancel)) .on_action(cx.listener(Self::move_up)) .on_action(cx.listener(Self::move_down)) - .on_action(cx.listener(Self::remove_all_context)) .capture_action(cx.listener(Self::cycle_prev)) .capture_action(cx.listener(Self::cycle_next)) .child( @@ -182,7 +193,7 @@ impl Render for PromptEditor { .pl_1() .items_start() .justify_between() - .child(self.context_strip.clone()) + .child(add_context_button) .child(self.model_selector.clone()), ), ) @@ -214,6 +225,18 @@ impl PromptEditor { )); } + fn assign_completion_provider(&mut self, cx: &mut Context) { + self.editor.update(cx, |editor, _cx| { + editor.set_completion_provider(Some(Rc::new(PromptCompletionProvider::new( + PromptEditorCompletionProviderDelegate, + self.mention_set.clone(), + self.history_store.clone(), + self.prompt_store.clone(), + self.workspace.clone(), + )))); + }); + } + pub fn set_show_cursor_when_unfocused( &mut self, show_cursor_when_unfocused: bool, @@ -226,27 +249,23 @@ impl PromptEditor { pub fn unlink(&mut self, window: &mut Window, cx: &mut Context) { let prompt = self.prompt(cx); - let existing_creases = self.editor.update(cx, extract_message_creases); - + let existing_creases = self.editor.update(cx, |editor, cx| { + extract_message_creases(editor, &self.mention_set, window, cx) + }); let focus = self.editor.focus_handle(cx).contains_focused(window, cx); self.editor = cx.new(|cx| { let mut editor = Editor::auto_height(1, Self::MAX_LINES as usize, window, cx); editor.set_soft_wrap_mode(language::language_settings::SoftWrap::EditorWidth, cx); editor.set_placeholder_text("Add a prompt…", window, cx); editor.set_text(prompt, window, cx); - insert_message_creases( - &mut editor, - &existing_creases, - &self.context_store, - window, - cx, - ); + insert_message_creases(&mut editor, &existing_creases, window, cx); if focus { window.focus(&editor.focus_handle(cx)); } editor }); + self.assign_completion_provider(cx); self.subscribe_to_editor(window, cx); } @@ -274,32 +293,13 @@ impl PromptEditor { self.editor.read(cx).text(cx) } - fn paste(&mut self, _: &Paste, _window: &mut Window, cx: &mut Context) { - let images = cx - .read_from_clipboard() - .map(|item| { - item.into_entries() - .filter_map(|entry| { - if let ClipboardEntry::Image(image) = entry { - Some(image) - } else { - None - } - }) - .collect::>() - }) - .unwrap_or_default(); - - if images.is_empty() { - return; + fn paste(&mut self, _: &Paste, window: &mut Window, cx: &mut Context) { + if inline_assistant_model_supports_images(cx) + && let Some(task) = + paste_images_as_context(self.editor.clone(), self.mention_set.clone(), window, cx) + { + task.detach(); } - cx.stop_propagation(); - - self.context_store.update(cx, |store, cx| { - for image in images { - store.add_image_instance(Arc::new(image), cx); - } - }); } fn 
handle_prompt_editor_events( @@ -343,23 +343,44 @@ impl PromptEditor { } } - fn toggle_context_picker( - &mut self, - _: &ToggleContextPicker, - window: &mut Window, - cx: &mut Context, - ) { - self.context_picker_menu_handle.toggle(window, cx); + pub fn is_completions_menu_visible(&self, cx: &App) -> bool { + self.editor + .read(cx) + .context_menu() + .borrow() + .as_ref() + .is_some_and(|menu| matches!(menu, CodeContextMenu::Completions(_)) && menu.visible()) } - pub fn remove_all_context( - &mut self, - _: &RemoveAllContext, - _window: &mut Window, - cx: &mut Context, - ) { - self.context_store.update(cx, |store, cx| store.clear(cx)); - cx.notify(); + pub fn trigger_completion_menu(&mut self, window: &mut Window, cx: &mut Context) { + self.editor.update(cx, |editor, cx| { + let menu_is_open = editor.context_menu().borrow().as_ref().is_some_and(|menu| { + matches!(menu, CodeContextMenu::Completions(_)) && menu.visible() + }); + + let has_at_sign = { + let snapshot = editor.display_snapshot(cx); + let cursor = editor.selections.newest::(&snapshot).head(); + let offset = cursor.to_offset(&snapshot); + if offset.0 > 0 { + snapshot + .buffer_snapshot() + .reversed_chars_at(offset) + .next() + .map(|sign| sign == '@') + .unwrap_or(false) + } else { + false + } + }; + + if menu_is_open && has_at_sign { + return; + } + + editor.insert("@", window, cx); + editor.show_completions(&editor::actions::ShowCompletions, window, cx); + }); } fn cancel( @@ -434,8 +455,6 @@ impl PromptEditor { editor.move_to_end(&Default::default(), window, cx) }); } - } else if self.context_strip.read(cx).has_context_items(cx) { - self.context_strip.focus_handle(cx).focus(window); } } @@ -709,6 +728,7 @@ impl PromptEditor { EditorStyle { background: colors.editor_background, local_player: cx.theme().players().local(), + syntax: cx.theme().syntax().clone(), text: text_style, ..Default::default() }, @@ -716,21 +736,6 @@ impl PromptEditor { }) .into_any_element() } - - fn handle_context_strip_event( - &mut self, - _context_strip: &Entity, - event: &ContextStripEvent, - window: &mut Window, - cx: &mut Context, - ) { - match event { - ContextStripEvent::PickerDismissed - | ContextStripEvent::BlurredEmpty - | ContextStripEvent::BlurredUp => self.editor.focus_handle(cx).focus(window), - ContextStripEvent::BlurredDown => {} - } - } } pub enum PromptEditorMode { @@ -765,6 +770,36 @@ impl InlineAssistId { } } +struct PromptEditorCompletionProviderDelegate; + +fn inline_assistant_model_supports_images(cx: &App) -> bool { + LanguageModelRegistry::read_global(cx) + .inline_assistant_model() + .map_or(false, |m| m.model.supports_images()) +} + +impl PromptCompletionProviderDelegate for PromptEditorCompletionProviderDelegate { + fn supported_modes(&self, _cx: &App) -> Vec { + vec![ + PromptContextType::File, + PromptContextType::Symbol, + PromptContextType::Thread, + PromptContextType::Fetch, + PromptContextType::Rules, + ] + } + + fn supports_images(&self, cx: &App) -> bool { + inline_assistant_model_supports_images(cx) + } + + fn available_commands(&self, _cx: &App) -> Vec { + Vec::new() + } + + fn confirm_command(&self, _cx: &mut App) {} +} + impl PromptEditor { pub fn new_buffer( id: InlineAssistId, @@ -773,15 +808,14 @@ impl PromptEditor { prompt_buffer: Entity, codegen: Entity, fs: Arc, - context_store: Entity, + history_store: Entity, + prompt_store: Option>, + project: WeakEntity, workspace: WeakEntity, - thread_store: Option>, - prompt_store: Option>, window: &mut Window, cx: &mut Context>, ) -> PromptEditor { let 
codegen_subscription = cx.observe(&codegen, Self::handle_codegen_changed); - let codegen_buffer = codegen.read(cx).buffer(cx).read(cx).as_singleton(); let mode = PromptEditorMode::Buffer { id, codegen, @@ -805,7 +839,6 @@ impl PromptEditor { // typing in one will make what you typed appear in all of them. editor.set_show_cursor_when_unfocused(true, cx); editor.set_placeholder_text(&Self::placeholder_text(&mode, window, cx), window, cx); - editor.register_addon(ContextCreasesAddon::new()); editor.set_context_menu_options(ContextMenuOptions { min_entries_visible: 12, max_entries_visible: 12, @@ -815,43 +848,25 @@ impl PromptEditor { editor }); - let prompt_editor_entity = prompt_editor.downgrade(); - prompt_editor.update(cx, |editor, _| { - editor.set_completion_provider(Some(Rc::new(ContextPickerCompletionProvider::new( - workspace.clone(), - context_store.downgrade(), - thread_store.clone(), + let mention_set = cx.new(|cx| { + MentionSet::new( + prompt_editor.clone(), + project, + history_store.clone(), prompt_store.clone(), - prompt_editor_entity, - codegen_buffer.as_ref().map(Entity::downgrade), - )))); - }); - - let context_picker_menu_handle = PopoverMenuHandle::default(); - let model_selector_menu_handle = PopoverMenuHandle::default(); - - let context_strip = cx.new(|cx| { - ContextStrip::new( - context_store.clone(), - workspace.clone(), - thread_store.clone(), - prompt_store, - context_picker_menu_handle.clone(), - SuggestContextKind::Thread, - ModelUsageContext::InlineAssistant, window, cx, ) }); - let context_strip_subscription = - cx.subscribe_in(&context_strip, window, Self::handle_context_strip_event); + let model_selector_menu_handle = PopoverMenuHandle::default(); let mut this: PromptEditor = PromptEditor { editor: prompt_editor.clone(), - context_store, - context_strip, - context_picker_menu_handle, + mention_set, + history_store, + prompt_store, + workspace, model_selector: cx.new(|cx| { AgentModelSelector::new( fs, @@ -868,12 +883,12 @@ impl PromptEditor { pending_prompt: String::new(), _codegen_subscription: codegen_subscription, editor_subscriptions: Vec::new(), - _context_strip_subscription: context_strip_subscription, show_rate_limit_notice: false, mode, _phantom: Default::default(), }; + this.assign_completion_provider(cx); this.subscribe_to_editor(window, cx); this } @@ -919,6 +934,10 @@ impl PromptEditor { } } + pub fn mention_set(&self) -> &Entity { + &self.mention_set + } + pub fn editor_margins(&self) -> &Arc> { match &self.mode { PromptEditorMode::Buffer { editor_margins, .. 
} => editor_margins, @@ -945,10 +964,10 @@ impl PromptEditor { prompt_buffer: Entity, codegen: Entity, fs: Arc, - context_store: Entity, + history_store: Entity, + prompt_store: Option>, + project: WeakEntity, workspace: WeakEntity, - thread_store: Option>, - prompt_store: Option>, window: &mut Window, cx: &mut Context, ) -> Self { @@ -980,43 +999,25 @@ impl PromptEditor { editor }); - let prompt_editor_entity = prompt_editor.downgrade(); - prompt_editor.update(cx, |editor, _| { - editor.set_completion_provider(Some(Rc::new(ContextPickerCompletionProvider::new( - workspace.clone(), - context_store.downgrade(), - thread_store.clone(), + let mention_set = cx.new(|cx| { + MentionSet::new( + prompt_editor.clone(), + project, + history_store.clone(), prompt_store.clone(), - prompt_editor_entity, - None, - )))); - }); - - let context_picker_menu_handle = PopoverMenuHandle::default(); - let model_selector_menu_handle = PopoverMenuHandle::default(); - - let context_strip = cx.new(|cx| { - ContextStrip::new( - context_store.clone(), - workspace.clone(), - thread_store.clone(), - prompt_store.clone(), - context_picker_menu_handle.clone(), - SuggestContextKind::Thread, - ModelUsageContext::InlineAssistant, window, cx, ) }); - let context_strip_subscription = - cx.subscribe_in(&context_strip, window, Self::handle_context_strip_event); + let model_selector_menu_handle = PopoverMenuHandle::default(); let mut this = Self { editor: prompt_editor.clone(), - context_store, - context_strip, - context_picker_menu_handle, + mention_set, + history_store, + prompt_store, + workspace, model_selector: cx.new(|cx| { AgentModelSelector::new( fs, @@ -1033,12 +1034,12 @@ impl PromptEditor { pending_prompt: String::new(), _codegen_subscription: codegen_subscription, editor_subscriptions: Vec::new(), - _context_strip_subscription: context_strip_subscription, mode, show_rate_limit_notice: false, _phantom: Default::default(), }; this.count_lines(cx); + this.assign_completion_provider(cx); this.subscribe_to_editor(window, cx); this } @@ -1085,6 +1086,10 @@ impl PromptEditor { } } + pub fn mention_set(&self) -> &Entity { + &self.mention_set + } + pub fn codegen(&self) -> &Entity { match &self.mode { PromptEditorMode::Buffer { .. } => unreachable!(), @@ -1164,128 +1169,38 @@ impl GenerationMode { /// Stored information that can be used to resurrect a context crease when creating an editor for a past message. #[derive(Clone, Debug)] -pub struct MessageCrease { - pub range: Range, - pub icon_path: SharedString, - pub label: SharedString, - /// None for a deserialized message, Some otherwise. 
- pub context: Option, -} - -#[derive(Default)] -pub struct ContextCreasesAddon { - creases: HashMap>, - _subscription: Option, +struct MessageCrease { + range: Range, + icon_path: SharedString, + label: SharedString, } -impl Addon for ContextCreasesAddon { - fn to_any(&self) -> &dyn std::any::Any { - self - } - - fn to_any_mut(&mut self) -> Option<&mut dyn std::any::Any> { - Some(self) - } -} - -impl ContextCreasesAddon { - pub fn new() -> Self { - Self { - creases: HashMap::default(), - _subscription: None, - } - } - - pub fn add_creases( - &mut self, - context_store: &Entity, - key: AgentContextKey, - creases: impl IntoIterator, - cx: &mut Context, - ) { - self.creases.entry(key).or_default().extend(creases); - self._subscription = Some( - cx.subscribe(context_store, |editor, _, event, cx| match event { - ContextStoreEvent::ContextRemoved(key) => { - let Some(this) = editor.addon_mut::() else { - return; - }; - let (crease_ids, replacement_texts): (Vec<_>, Vec<_>) = this - .creases - .remove(key) - .unwrap_or_default() - .into_iter() - .unzip(); - let ranges = editor - .remove_creases(crease_ids, cx) - .into_iter() - .map(|(_, range)| range) - .collect::>(); - editor.unfold_ranges(&ranges, false, false, cx); - editor.edit(ranges.into_iter().zip(replacement_texts), cx); - cx.notify(); - } - }), - ) - } - - pub fn into_inner(self) -> HashMap> { - self.creases - } -} - -pub fn extract_message_creases( +fn extract_message_creases( editor: &mut Editor, + mention_set: &Entity, + window: &mut Window, cx: &mut Context<'_, Editor>, ) -> Vec { - let buffer_snapshot = editor.buffer().read(cx).snapshot(cx); - let mut contexts_by_crease_id = editor - .addon_mut::() - .map(std::mem::take) - .unwrap_or_default() - .into_inner() - .into_iter() - .flat_map(|(key, creases)| { - let context = key.0; - creases - .into_iter() - .map(move |(id, _)| (id, context.clone())) - }) - .collect::>(); - // Filter the addon's list of creases based on what the editor reports, - // since the addon might have removed creases in it. 
- - editor.display_map.update(cx, |display_map, cx| { - display_map - .snapshot(cx) - .crease_snapshot - .creases() - .filter_map(|(id, crease)| { - Some(( - id, - ( - crease.range().to_offset(&buffer_snapshot), - crease.metadata()?.clone(), - ), - )) + let creases = mention_set.read(cx).creases(); + let snapshot = editor.snapshot(window, cx); + snapshot + .crease_snapshot + .creases() + .filter(|(id, _)| creases.contains(id)) + .filter_map(|(_, crease)| { + let metadata = crease.metadata()?.clone(); + Some(MessageCrease { + range: crease.range().to_offset(snapshot.buffer()), + label: metadata.label, + icon_path: metadata.icon_path, }) - .map(|(id, (range, metadata))| { - let context = contexts_by_crease_id.remove(&id); - MessageCrease { - range, - context, - label: metadata.label, - icon_path: metadata.icon_path, - } - }) - .collect() - }) + }) + .collect() } -pub fn insert_message_creases( +fn insert_message_creases( editor: &mut Editor, message_creases: &[MessageCrease], - context_store: &Entity, window: &mut Window, cx: &mut Context<'_, Editor>, ) { @@ -1303,14 +1218,6 @@ pub fn insert_message_creases( ) }) .collect::>(); - let ids = editor.insert_creases(creases.clone(), cx); + editor.insert_creases(creases.clone(), cx); editor.fold_creases(creases, false, window, cx); - if let Some(addon) = editor.addon_mut::() { - for (crease, id) in message_creases.iter().zip(ids) { - if let Some(context) = crease.context.as_ref() { - let key = AgentContextKey(context.clone()); - addon.add_creases(context_store, key, vec![(id, crease.label.clone())], cx); - } - } - } } diff --git a/crates/agent_ui/src/mention_set.rs b/crates/agent_ui/src/mention_set.rs new file mode 100644 index 0000000000000000000000000000000000000000..156e62949ae425532dcb897754928011ed2bd8a6 --- /dev/null +++ b/crates/agent_ui/src/mention_set.rs @@ -0,0 +1,1112 @@ +use acp_thread::{MentionUri, selection_name}; +use agent::{HistoryStore, outline}; +use agent_client_protocol as acp; +use agent_servers::{AgentServer, AgentServerDelegate}; +use anyhow::{Context as _, Result, anyhow}; +use assistant_slash_commands::codeblock_fence_for_path; +use collections::{HashMap, HashSet}; +use editor::{ + Anchor, Editor, EditorEvent, EditorSnapshot, ExcerptId, FoldPlaceholder, ToOffset, + display_map::{Crease, CreaseId, CreaseMetadata, FoldId}, + scroll::Autoscroll, +}; +use futures::{AsyncReadExt as _, FutureExt as _, future::Shared}; +use gpui::{ + Animation, AnimationExt as _, AppContext, ClipboardEntry, Context, Empty, Entity, EntityId, + Image, ImageFormat, Img, SharedString, Subscription, Task, WeakEntity, pulsating_between, +}; +use http_client::{AsyncBody, HttpClientWithUrl}; +use itertools::Either; +use language::Buffer; +use language_model::LanguageModelImage; +use multi_buffer::MultiBufferRow; +use postage::stream::Stream as _; +use project::{Project, ProjectItem, ProjectPath, Worktree}; +use prompt_store::{PromptId, PromptStore}; +use rope::Point; +use std::{ + cell::RefCell, + ffi::OsStr, + fmt::Write, + ops::{Range, RangeInclusive}, + path::{Path, PathBuf}, + rc::Rc, + sync::Arc, + time::Duration, +}; +use text::OffsetRangeExt; +use ui::{ButtonLike, Disclosure, TintColor, Toggleable, prelude::*}; +use util::{ResultExt, debug_panic, rel_path::RelPath}; +use workspace::{Workspace, notifications::NotifyResultExt as _}; + +pub type MentionTask = Shared>>; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum Mention { + Text { + content: String, + tracked_buffers: Vec>, + }, + Image(MentionImage), + Link, +} + +#[derive(Clone, Debug, Eq, 
PartialEq)] +pub struct MentionImage { + pub data: SharedString, + pub format: ImageFormat, +} + +pub struct MentionSet { + editor: Entity, + project: WeakEntity, + history_store: Entity, + prompt_store: Option>, + mentions: HashMap, + _editor_subscription: Subscription, +} + +impl MentionSet { + pub fn new( + editor: Entity, + project: WeakEntity, + history_store: Entity, + prompt_store: Option>, + window: &mut Window, + cx: &mut Context, + ) -> Self { + let editor_subscription = + cx.subscribe_in(&editor, window, move |this, editor, event, window, cx| { + if let EditorEvent::Edited { .. } = event + && !editor.read(cx).read_only(cx) + { + let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); + this.remove_invalid(snapshot); + } + }); + + Self { + editor, + project, + history_store, + prompt_store, + mentions: HashMap::default(), + _editor_subscription: editor_subscription, + } + } + + pub fn contents( + &self, + full_mention_content: bool, + cx: &mut App, + ) -> Task>> { + let Some(project) = self.project.upgrade() else { + return Task::ready(Err(anyhow!("Project not found"))); + }; + let mentions = self.mentions.clone(); + cx.spawn(async move |cx| { + let mut contents = HashMap::default(); + for (crease_id, (mention_uri, task)) in mentions { + let content = if full_mention_content + && let MentionUri::Directory { abs_path } = &mention_uri + { + cx.update(|cx| full_mention_for_directory(&project, abs_path, cx))? + .await? + } else { + task.await.map_err(|e| anyhow!("{e}"))? + }; + + contents.insert(crease_id, (mention_uri, content)); + } + Ok(contents) + }) + } + + fn remove_invalid(&mut self, snapshot: EditorSnapshot) { + for (crease_id, crease) in snapshot.crease_snapshot.creases() { + if !crease.range().start.is_valid(&snapshot.buffer_snapshot()) { + self.mentions.remove(&crease_id); + } + } + } + + pub fn insert_mention(&mut self, crease_id: CreaseId, uri: MentionUri, task: MentionTask) { + self.mentions.insert(crease_id, (uri, task)); + } + + pub fn remove_mention(&mut self, crease_id: &CreaseId) { + self.mentions.remove(crease_id); + } + + pub fn creases(&self) -> HashSet { + self.mentions.keys().cloned().collect() + } + + pub fn mentions(&self) -> HashSet { + self.mentions.values().map(|(uri, _)| uri.clone()).collect() + } + + pub fn remove_all(&mut self) -> impl Iterator { + self.mentions.drain() + } + + pub fn confirm_mention_completion( + &mut self, + crease_text: SharedString, + start: text::Anchor, + content_len: usize, + mention_uri: MentionUri, + supports_images: bool, + workspace: &Entity, + window: &mut Window, + cx: &mut Context, + ) -> Task<()> { + let Some(project) = self.project.upgrade() else { + return Task::ready(()); + }; + + let snapshot = self + .editor + .update(cx, |editor, cx| editor.snapshot(window, cx)); + let Some(start_anchor) = snapshot.buffer_snapshot().as_singleton_anchor(start) else { + return Task::ready(()); + }; + let excerpt_id = start_anchor.excerpt_id; + let end_anchor = snapshot.buffer_snapshot().anchor_before( + start_anchor.to_offset(&snapshot.buffer_snapshot()) + content_len + 1usize, + ); + + let crease = if let MentionUri::File { abs_path } = &mention_uri + && let Some(extension) = abs_path.extension() + && let Some(extension) = extension.to_str() + && Img::extensions().contains(&extension) + && !extension.contains("svg") + { + let Some(project_path) = project + .read(cx) + .project_path_for_absolute_path(&abs_path, cx) + else { + log::error!("project path not found"); + return Task::ready(()); + }; + let image_task = 
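// `MentionSet` takes over the role of the removed `ContextStore`: it maps each
// mention crease in the prompt editor (a `CreaseId`) to its `MentionUri` plus a
// shared `MentionTask` that resolves to the mention's content (`Mention::Text`
// with tracked buffers, `Mention::Image`, or `Mention::Link`). An editor
// subscription prunes entries whose crease anchors become invalid after edits,
// and `contents()` awaits all remaining tasks, optionally expanding directory
// mentions into full file contents.
//
// A hedged usage sketch (surrounding variables are illustrative):
//
//     // Resolve every mention currently attached to the prompt; the returned
//     // task yields a map of CreaseId -> (MentionUri, Mention).
//     let contents = mention_set.update(cx, |set, cx| set.contents(false, cx));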
project.update(cx, |project, cx| project.open_image(project_path, cx)); + let image = cx + .spawn(async move |_, cx| { + let image = image_task.await.map_err(|e| e.to_string())?; + let image = image + .update(cx, |image, _| image.image.clone()) + .map_err(|e| e.to_string())?; + Ok(image) + }) + .shared(); + insert_crease_for_mention( + excerpt_id, + start, + content_len, + mention_uri.name().into(), + IconName::Image.path().into(), + Some(image), + self.editor.clone(), + window, + cx, + ) + } else { + insert_crease_for_mention( + excerpt_id, + start, + content_len, + crease_text, + mention_uri.icon_path(cx), + None, + self.editor.clone(), + window, + cx, + ) + }; + let Some((crease_id, tx)) = crease else { + return Task::ready(()); + }; + + let task = match mention_uri.clone() { + MentionUri::Fetch { url } => { + self.confirm_mention_for_fetch(url, workspace.read(cx).client().http_client(), cx) + } + MentionUri::Directory { .. } => Task::ready(Ok(Mention::Link)), + MentionUri::Thread { id, .. } => self.confirm_mention_for_thread(id, cx), + MentionUri::TextThread { path, .. } => self.confirm_mention_for_text_thread(path, cx), + MentionUri::File { abs_path } => { + self.confirm_mention_for_file(abs_path, supports_images, cx) + } + MentionUri::Symbol { + abs_path, + line_range, + .. + } => self.confirm_mention_for_symbol(abs_path, line_range, cx), + MentionUri::Rule { id, .. } => self.confirm_mention_for_rule(id, cx), + MentionUri::PastedImage => { + debug_panic!("pasted image URI should not be included in completions"); + Task::ready(Err(anyhow!( + "pasted imaged URI should not be included in completions" + ))) + } + MentionUri::Selection { .. } => { + debug_panic!("unexpected selection URI"); + Task::ready(Err(anyhow!("unexpected selection URI"))) + } + }; + let task = cx + .spawn(async move |_, _| task.await.map_err(|e| e.to_string())) + .shared(); + self.mentions.insert(crease_id, (mention_uri, task.clone())); + + // Notify the user if we failed to load the mentioned context + cx.spawn_in(window, async move |this, cx| { + let result = task.await.notify_async_err(cx); + drop(tx); + if result.is_none() { + this.update(cx, |this, cx| { + this.editor.update(cx, |editor, cx| { + // Remove mention + editor.edit([(start_anchor..end_anchor, "")], cx); + }); + this.mentions.remove(&crease_id); + }) + .ok(); + } + }) + } + + pub fn confirm_mention_for_file( + &self, + abs_path: PathBuf, + supports_images: bool, + cx: &mut Context, + ) -> Task> { + let Some(project) = self.project.upgrade() else { + return Task::ready(Err(anyhow!("project not found"))); + }; + + let Some(project_path) = project + .read(cx) + .project_path_for_absolute_path(&abs_path, cx) + else { + return Task::ready(Err(anyhow!("project path not found"))); + }; + let extension = abs_path + .extension() + .and_then(OsStr::to_str) + .unwrap_or_default(); + + if Img::extensions().contains(&extension) && !extension.contains("svg") { + if !supports_images { + return Task::ready(Err(anyhow!("This model does not support images yet"))); + } + let task = project.update(cx, |project, cx| project.open_image(project_path, cx)); + return cx.spawn(async move |_, cx| { + let image = task.await?; + let image = image.update(cx, |image, _| image.image.clone())?; + let format = image.format; + let image = cx + .update(|cx| LanguageModelImage::from_image(image, cx))? 
+ .await; + if let Some(image) = image { + Ok(Mention::Image(MentionImage { + data: image.source, + format, + })) + } else { + Err(anyhow!("Failed to convert image")) + } + }); + } + + let buffer = project.update(cx, |project, cx| project.open_buffer(project_path, cx)); + cx.spawn(async move |_, cx| { + let buffer = buffer.await?; + let buffer_content = outline::get_buffer_content_or_outline( + buffer.clone(), + Some(&abs_path.to_string_lossy()), + &cx, + ) + .await?; + + Ok(Mention::Text { + content: buffer_content.text, + tracked_buffers: vec![buffer], + }) + }) + } + + fn confirm_mention_for_fetch( + &self, + url: url::Url, + http_client: Arc, + cx: &mut Context, + ) -> Task> { + cx.background_executor().spawn(async move { + let content = fetch_url_content(http_client, url.to_string()).await?; + Ok(Mention::Text { + content, + tracked_buffers: Vec::new(), + }) + }) + } + + fn confirm_mention_for_symbol( + &self, + abs_path: PathBuf, + line_range: RangeInclusive, + cx: &mut Context, + ) -> Task> { + let Some(project) = self.project.upgrade() else { + return Task::ready(Err(anyhow!("project not found"))); + }; + let Some(project_path) = project + .read(cx) + .project_path_for_absolute_path(&abs_path, cx) + else { + return Task::ready(Err(anyhow!("project path not found"))); + }; + let buffer = project.update(cx, |project, cx| project.open_buffer(project_path, cx)); + cx.spawn(async move |_, cx| { + let buffer = buffer.await?; + let mention = buffer.update(cx, |buffer, cx| { + let start = Point::new(*line_range.start(), 0).min(buffer.max_point()); + let end = Point::new(*line_range.end() + 1, 0).min(buffer.max_point()); + let content = buffer.text_for_range(start..end).collect(); + Mention::Text { + content, + tracked_buffers: vec![cx.entity()], + } + })?; + anyhow::Ok(mention) + }) + } + + fn confirm_mention_for_rule( + &mut self, + id: PromptId, + cx: &mut Context, + ) -> Task> { + let Some(prompt_store) = self.prompt_store.as_ref() else { + return Task::ready(Err(anyhow!("Missing prompt store"))); + }; + let prompt = prompt_store.read(cx).load(id, cx); + cx.spawn(async move |_, _| { + let prompt = prompt.await?; + Ok(Mention::Text { + content: prompt, + tracked_buffers: Vec::new(), + }) + }) + } + + pub fn confirm_mention_for_selection( + &mut self, + source_range: Range, + selections: Vec<(Entity, Range, Range)>, + window: &mut Window, + cx: &mut Context, + ) { + let Some(project) = self.project.upgrade() else { + return; + }; + + let snapshot = self.editor.read(cx).buffer().read(cx).snapshot(cx); + let Some(start) = snapshot.as_singleton_anchor(source_range.start) else { + return; + }; + + let offset = start.to_offset(&snapshot); + + for (buffer, selection_range, range_to_fold) in selections { + let range = snapshot.anchor_after(offset + range_to_fold.start) + ..snapshot.anchor_after(offset + range_to_fold.end); + + let abs_path = buffer + .read(cx) + .project_path(cx) + .and_then(|project_path| project.read(cx).absolute_path(&project_path, cx)); + let snapshot = buffer.read(cx).snapshot(); + + let text = snapshot + .text_for_range(selection_range.clone()) + .collect::(); + let point_range = selection_range.to_point(&snapshot); + let line_range = point_range.start.row..=point_range.end.row; + + let uri = MentionUri::Selection { + abs_path: abs_path.clone(), + line_range: line_range.clone(), + }; + let crease = crease_for_mention( + selection_name(abs_path.as_deref(), &line_range).into(), + uri.icon_path(cx), + range, + self.editor.downgrade(), + ); + + let crease_id = 
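// Each confirmed "@" mention (see `confirm_mention_completion` above) gets a
// folded crease plus a loader task chosen by its `MentionUri` variant: file,
// symbol, thread, text thread, rule, or fetch. If the loader fails, the
// inserted mention text is deleted again and the entry is removed from the
// set, so stale pills never linger in the prompt. Selection mentions, handled
// here, are the one synchronous case: their text is captured up front and
// stored as an already-resolved `Task::ready(..)`.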
self.editor.update(cx, |editor, cx| { + let crease_ids = editor.insert_creases(vec![crease.clone()], cx); + editor.fold_creases(vec![crease], false, window, cx); + crease_ids.first().copied().unwrap() + }); + + self.mentions.insert( + crease_id, + ( + uri, + Task::ready(Ok(Mention::Text { + content: text, + tracked_buffers: vec![buffer], + })) + .shared(), + ), + ); + } + + // Take this explanation with a grain of salt but, with creases being + // inserted, GPUI's recomputes the editor layout in the next frames, so + // directly calling `editor.request_autoscroll` wouldn't work as + // expected. We're leveraging `cx.on_next_frame` to wait 2 frames and + // ensure that the layout has been recalculated so that the autoscroll + // request actually shows the cursor's new position. + let editor = self.editor.clone(); + cx.on_next_frame(window, move |_, window, cx| { + cx.on_next_frame(window, move |_, _, cx| { + editor.update(cx, |editor, cx| { + editor.request_autoscroll(Autoscroll::fit(), cx) + }); + }); + }); + } + + fn confirm_mention_for_thread( + &mut self, + id: acp::SessionId, + cx: &mut Context, + ) -> Task> { + let Some(project) = self.project.upgrade() else { + return Task::ready(Err(anyhow!("project not found"))); + }; + + let server = Rc::new(agent::NativeAgentServer::new( + project.read(cx).fs().clone(), + self.history_store.clone(), + )); + let delegate = AgentServerDelegate::new( + project.read(cx).agent_server_store().clone(), + project.clone(), + None, + None, + ); + let connection = server.connect(None, delegate, cx); + cx.spawn(async move |_, cx| { + let (agent, _) = connection.await?; + let agent = agent.downcast::().unwrap(); + let summary = agent + .0 + .update(cx, |agent, cx| agent.thread_summary(id, cx))? + .await?; + anyhow::Ok(Mention::Text { + content: summary.to_string(), + tracked_buffers: Vec::new(), + }) + }) + } + + fn confirm_mention_for_text_thread( + &mut self, + path: PathBuf, + cx: &mut Context, + ) -> Task> { + let text_thread_task = self.history_store.update(cx, |store, cx| { + store.load_text_thread(path.as_path().into(), cx) + }); + cx.spawn(async move |_, cx| { + let text_thread = text_thread_task.await?; + let xml = text_thread.update(cx, |text_thread, cx| text_thread.to_xml(cx))?; + Ok(Mention::Text { + content: xml, + tracked_buffers: Vec::new(), + }) + }) + } +} + +pub(crate) fn paste_images_as_context( + editor: Entity, + mention_set: Entity, + window: &mut Window, + cx: &mut App, +) -> Option> { + let clipboard = cx.read_from_clipboard()?; + Some(window.spawn(cx, async move |cx| { + use itertools::Itertools; + let (mut images, paths) = clipboard + .into_entries() + .filter_map(|entry| match entry { + ClipboardEntry::Image(image) => Some(Either::Left(image)), + ClipboardEntry::ExternalPaths(paths) => Some(Either::Right(paths)), + _ => None, + }) + .partition_map::, Vec<_>, _, _, _>(std::convert::identity); + + if !paths.is_empty() { + images.extend( + cx.background_spawn(async move { + let mut images = vec![]; + for path in paths.into_iter().flat_map(|paths| paths.paths().to_owned()) { + let Ok(content) = async_fs::read(path).await else { + continue; + }; + let Ok(format) = image::guess_format(&content) else { + continue; + }; + images.push(gpui::Image::from_bytes( + match format { + image::ImageFormat::Png => gpui::ImageFormat::Png, + image::ImageFormat::Jpeg => gpui::ImageFormat::Jpeg, + image::ImageFormat::WebP => gpui::ImageFormat::Webp, + image::ImageFormat::Gif => gpui::ImageFormat::Gif, + image::ImageFormat::Bmp => gpui::ImageFormat::Bmp, + 
image::ImageFormat::Tiff => gpui::ImageFormat::Tiff, + image::ImageFormat::Ico => gpui::ImageFormat::Ico, + _ => continue, + }, + content, + )); + } + images + }) + .await, + ); + } + + if images.is_empty() { + return; + } + + let replacement_text = MentionUri::PastedImage.as_link().to_string(); + cx.update(|_window, cx| { + cx.stop_propagation(); + }) + .ok(); + for image in images { + let Ok((excerpt_id, text_anchor, multibuffer_anchor)) = + editor.update_in(cx, |message_editor, window, cx| { + let snapshot = message_editor.snapshot(window, cx); + let (excerpt_id, _, buffer_snapshot) = + snapshot.buffer_snapshot().as_singleton().unwrap(); + + let text_anchor = buffer_snapshot.anchor_before(buffer_snapshot.len()); + let multibuffer_anchor = snapshot + .buffer_snapshot() + .anchor_in_excerpt(*excerpt_id, text_anchor); + message_editor.edit( + [( + multi_buffer::Anchor::max()..multi_buffer::Anchor::max(), + format!("{replacement_text} "), + )], + cx, + ); + (*excerpt_id, text_anchor, multibuffer_anchor) + }) + else { + break; + }; + + let content_len = replacement_text.len(); + let Some(start_anchor) = multibuffer_anchor else { + continue; + }; + let Ok(end_anchor) = editor.update(cx, |editor, cx| { + let snapshot = editor.buffer().read(cx).snapshot(cx); + snapshot.anchor_before(start_anchor.to_offset(&snapshot) + content_len) + }) else { + continue; + }; + let image = Arc::new(image); + let Ok(Some((crease_id, tx))) = cx.update(|window, cx| { + insert_crease_for_mention( + excerpt_id, + text_anchor, + content_len, + MentionUri::PastedImage.name().into(), + IconName::Image.path().into(), + Some(Task::ready(Ok(image.clone())).shared()), + editor.clone(), + window, + cx, + ) + }) else { + continue; + }; + let task = cx + .spawn(async move |cx| { + let format = image.format; + let image = cx + .update(|_, cx| LanguageModelImage::from_image(image, cx)) + .map_err(|e| e.to_string())? 
+ .await; + drop(tx); + if let Some(image) = image { + Ok(Mention::Image(MentionImage { + data: image.source, + format, + })) + } else { + Err("Failed to convert image".into()) + } + }) + .shared(); + + mention_set + .update(cx, |mention_set, _cx| { + mention_set.insert_mention(crease_id, MentionUri::PastedImage, task.clone()) + }) + .ok(); + + if task.await.notify_async_err(cx).is_none() { + editor + .update(cx, |editor, cx| { + editor.edit([(start_anchor..end_anchor, "")], cx); + }) + .ok(); + mention_set + .update(cx, |mention_set, _cx| { + mention_set.remove_mention(&crease_id) + }) + .ok(); + } + } + })) +} + +pub(crate) fn insert_crease_for_mention( + excerpt_id: ExcerptId, + anchor: text::Anchor, + content_len: usize, + crease_label: SharedString, + crease_icon: SharedString, + // abs_path: Option>, + image: Option, String>>>>, + editor: Entity, + window: &mut Window, + cx: &mut App, +) -> Option<(CreaseId, postage::barrier::Sender)> { + let (tx, rx) = postage::barrier::channel(); + + let crease_id = editor.update(cx, |editor, cx| { + let snapshot = editor.buffer().read(cx).snapshot(cx); + + let start = snapshot.anchor_in_excerpt(excerpt_id, anchor)?; + + let start = start.bias_right(&snapshot); + let end = snapshot.anchor_before(start.to_offset(&snapshot) + content_len); + + let placeholder = FoldPlaceholder { + render: render_mention_fold_button( + crease_label.clone(), + crease_icon.clone(), + start..end, + rx, + image, + cx.weak_entity(), + cx, + ), + merge_adjacent: false, + ..Default::default() + }; + + let crease = Crease::Inline { + range: start..end, + placeholder, + render_toggle: None, + render_trailer: None, + metadata: Some(CreaseMetadata { + label: crease_label, + icon_path: crease_icon, + }), + }; + + let ids = editor.insert_creases(vec![crease.clone()], cx); + editor.fold_creases(vec![crease], false, window, cx); + + Some(ids[0]) + })?; + + Some((crease_id, tx)) +} + +pub(crate) fn crease_for_mention( + label: SharedString, + icon_path: SharedString, + range: Range, + editor_entity: WeakEntity, +) -> Crease { + let placeholder = FoldPlaceholder { + render: render_fold_icon_button(icon_path.clone(), label.clone(), editor_entity), + merge_adjacent: false, + ..Default::default() + }; + + let render_trailer = move |_row, _unfold, _window: &mut Window, _cx: &mut App| Empty.into_any(); + + Crease::inline(range, placeholder, fold_toggle("mention"), render_trailer) + .with_metadata(CreaseMetadata { icon_path, label }) +} + +fn render_fold_icon_button( + icon_path: SharedString, + label: SharedString, + editor: WeakEntity, +) -> Arc, &mut App) -> AnyElement> { + Arc::new({ + move |fold_id, fold_range, cx| { + let is_in_text_selection = editor + .update(cx, |editor, cx| editor.is_range_selected(&fold_range, cx)) + .unwrap_or_default(); + + ButtonLike::new(fold_id) + .style(ButtonStyle::Filled) + .selected_style(ButtonStyle::Tinted(TintColor::Accent)) + .toggle_state(is_in_text_selection) + .child( + h_flex() + .gap_1() + .child( + Icon::from_path(icon_path.clone()) + .size(IconSize::XSmall) + .color(Color::Muted), + ) + .child( + Label::new(label.clone()) + .size(LabelSize::Small) + .buffer_font(cx) + .single_line(), + ), + ) + .into_any_element() + } + }) +} + +fn fold_toggle( + name: &'static str, +) -> impl Fn( + MultiBufferRow, + bool, + Arc, + &mut Window, + &mut App, +) -> AnyElement { + move |row, is_folded, fold, _window, _cx| { + Disclosure::new((name, row.0 as u64), !is_folded) + .toggle_state(is_folded) + .on_click(move |_e, window, cx| fold(!is_folded, window, cx)) 
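// `insert_crease_for_mention` (above) folds the freshly inserted mention text
// behind a pill-style `FoldPlaceholder` and returns the `CreaseId` together
// with a `postage::barrier::Sender`. The paired receiver lives inside the
// rendered pill (`LoadingContext`, below): until the sender is dropped, the
// label pulses with a loading animation, and image mentions additionally get a
// hover preview once their task resolves. Callers drop the sender exactly when
// the mention's content has finished loading, as `confirm_mention_completion`
// and `paste_images_as_context` do above.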
+ .into_any_element() + } +} + +fn full_mention_for_directory( + project: &Entity, + abs_path: &Path, + cx: &mut App, +) -> Task> { + fn collect_files_in_path(worktree: &Worktree, path: &RelPath) -> Vec<(Arc, String)> { + let mut files = Vec::new(); + + for entry in worktree.child_entries(path) { + if entry.is_dir() { + files.extend(collect_files_in_path(worktree, &entry.path)); + } else if entry.is_file() { + files.push(( + entry.path.clone(), + worktree + .full_path(&entry.path) + .to_string_lossy() + .to_string(), + )); + } + } + + files + } + + let Some(project_path) = project + .read(cx) + .project_path_for_absolute_path(&abs_path, cx) + else { + return Task::ready(Err(anyhow!("project path not found"))); + }; + let Some(entry) = project.read(cx).entry_for_path(&project_path, cx) else { + return Task::ready(Err(anyhow!("project entry not found"))); + }; + let directory_path = entry.path.clone(); + let worktree_id = project_path.worktree_id; + let Some(worktree) = project.read(cx).worktree_for_id(worktree_id, cx) else { + return Task::ready(Err(anyhow!("worktree not found"))); + }; + let project = project.clone(); + cx.spawn(async move |cx| { + let file_paths = worktree.read_with(cx, |worktree, _cx| { + collect_files_in_path(worktree, &directory_path) + })?; + let descendants_future = cx.update(|cx| { + futures::future::join_all(file_paths.into_iter().map(|(worktree_path, full_path)| { + let rel_path = worktree_path + .strip_prefix(&directory_path) + .log_err() + .map_or_else(|| worktree_path.clone(), |rel_path| rel_path.into()); + + let open_task = project.update(cx, |project, cx| { + project.buffer_store().update(cx, |buffer_store, cx| { + let project_path = ProjectPath { + worktree_id, + path: worktree_path, + }; + buffer_store.open_buffer(project_path, cx) + }) + }); + + cx.spawn(async move |cx| { + let buffer = open_task.await.log_err()?; + let buffer_content = outline::get_buffer_content_or_outline( + buffer.clone(), + Some(&full_path), + &cx, + ) + .await + .ok()?; + + Some((rel_path, full_path, buffer_content.text, buffer)) + }) + })) + })?; + + let contents = cx + .background_spawn(async move { + let (contents, tracked_buffers) = descendants_future + .await + .into_iter() + .flatten() + .map(|(rel_path, full_path, rope, buffer)| { + ((rel_path, full_path, rope), buffer) + }) + .unzip(); + Mention::Text { + content: render_directory_contents(contents), + tracked_buffers, + } + }) + .await; + anyhow::Ok(contents) + }) +} + +fn render_directory_contents(entries: Vec<(Arc, String, String)>) -> String { + let mut output = String::new(); + for (_relative_path, full_path, content) in entries { + let fence = codeblock_fence_for_path(Some(&full_path), None); + write!(output, "\n{fence}\n{content}\n```").unwrap(); + } + output +} + +fn render_mention_fold_button( + label: SharedString, + icon: SharedString, + range: Range, + mut loading_finished: postage::barrier::Receiver, + image_task: Option, String>>>>, + editor: WeakEntity, + cx: &mut App, +) -> Arc, &mut App) -> AnyElement> { + let loading = cx.new(|cx| { + let loading = cx.spawn(async move |this, cx| { + loading_finished.recv().await; + this.update(cx, |this: &mut LoadingContext, cx| { + this.loading = None; + cx.notify(); + }) + .ok(); + }); + LoadingContext { + id: cx.entity_id(), + label, + icon, + range, + editor, + loading: Some(loading), + image: image_task.clone(), + } + }); + Arc::new(move |_fold_id, _fold_range, _cx| loading.clone().into_any_element()) +} + +struct LoadingContext { + id: EntityId, + label: SharedString, 
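// Directory mentions stay cheap until a request is actually built: confirming
// one only records `Mention::Link`, and `contents(true, ..)` later calls
// `full_mention_for_directory`, which walks the worktree entry, opens each
// descendant buffer, and concatenates per-file content (or an outline) as
// fenced code blocks via `codeblock_fence_for_path`.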
+ icon: SharedString, + range: Range, + editor: WeakEntity, + loading: Option>, + image: Option, String>>>>, +} + +impl Render for LoadingContext { + fn render(&mut self, _window: &mut Window, cx: &mut Context) -> impl IntoElement { + let is_in_text_selection = self + .editor + .update(cx, |editor, cx| editor.is_range_selected(&self.range, cx)) + .unwrap_or_default(); + ButtonLike::new(("loading-context", self.id)) + .style(ButtonStyle::Filled) + .selected_style(ButtonStyle::Tinted(TintColor::Accent)) + .toggle_state(is_in_text_selection) + .when_some(self.image.clone(), |el, image_task| { + el.hoverable_tooltip(move |_, cx| { + let image = image_task.peek().cloned().transpose().ok().flatten(); + let image_task = image_task.clone(); + cx.new::(|cx| ImageHover { + image, + _task: cx.spawn(async move |this, cx| { + if let Ok(image) = image_task.clone().await { + this.update(cx, |this, cx| { + if this.image.replace(image).is_none() { + cx.notify(); + } + }) + .ok(); + } + }), + }) + .into() + }) + }) + .child( + h_flex() + .gap_1() + .child( + Icon::from_path(self.icon.clone()) + .size(IconSize::XSmall) + .color(Color::Muted), + ) + .child( + Label::new(self.label.clone()) + .size(LabelSize::Small) + .buffer_font(cx) + .single_line(), + ) + .map(|el| { + if self.loading.is_some() { + el.with_animation( + "loading-context-crease", + Animation::new(Duration::from_secs(2)) + .repeat() + .with_easing(pulsating_between(0.4, 0.8)), + |label, delta| label.opacity(delta), + ) + .into_any() + } else { + el.into_any() + } + }), + ) + } +} + +struct ImageHover { + image: Option>, + _task: Task<()>, +} + +impl Render for ImageHover { + fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { + if let Some(image) = self.image.clone() { + gpui::img(image).max_w_96().max_h_96().into_any_element() + } else { + gpui::Empty.into_any_element() + } + } +} + +async fn fetch_url_content(http_client: Arc, url: String) -> Result { + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy)] + enum ContentType { + Html, + Plaintext, + Json, + } + use html_to_markdown::{TagHandler, convert_html_to_markdown, markdown}; + + let url = if !url.starts_with("https://") && !url.starts_with("http://") { + format!("https://{url}") + } else { + url + }; + + let mut response = http_client.get(&url, AsyncBody::default(), true).await?; + let mut body = Vec::new(); + response + .body_mut() + .read_to_end(&mut body) + .await + .context("error reading response body")?; + + if response.status().is_client_error() { + let text = String::from_utf8_lossy(body.as_slice()); + anyhow::bail!( + "status error {}, response: {text:?}", + response.status().as_u16() + ); + } + + let Some(content_type) = response.headers().get("content-type") else { + anyhow::bail!("missing Content-Type header"); + }; + let content_type = content_type + .to_str() + .context("invalid Content-Type header")?; + let content_type = match content_type { + "text/html" => ContentType::Html, + "text/plain" => ContentType::Plaintext, + "application/json" => ContentType::Json, + _ => ContentType::Html, + }; + + match content_type { + ContentType::Html => { + let mut handlers: Vec = vec![ + Rc::new(RefCell::new(markdown::WebpageChromeRemover)), + Rc::new(RefCell::new(markdown::ParagraphHandler)), + Rc::new(RefCell::new(markdown::HeadingHandler)), + Rc::new(RefCell::new(markdown::ListHandler)), + Rc::new(RefCell::new(markdown::TableHandler::new())), + Rc::new(RefCell::new(markdown::StyledTextHandler)), + ]; + if url.contains("wikipedia.org") 
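// `fetch_url_content` normalizes scheme-less URLs to https, bails on 4xx
// responses, and converts the body based on Content-Type: HTML runs through
// `html_to_markdown` with a handler stack (Wikipedia pages get the dedicated
// chrome, infobox, and code handlers in the branch below), plain text is
// returned as-is, and JSON is pretty-printed inside a fenced ```json block.
// Unknown content types fall back to the HTML path.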
{ + use html_to_markdown::structure::wikipedia; + + handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaChromeRemover))); + handlers.push(Rc::new(RefCell::new(wikipedia::WikipediaInfoboxHandler))); + handlers.push(Rc::new( + RefCell::new(wikipedia::WikipediaCodeHandler::new()), + )); + } else { + handlers.push(Rc::new(RefCell::new(markdown::CodeHandler))); + } + convert_html_to_markdown(&body[..], &mut handlers) + } + ContentType::Plaintext => Ok(std::str::from_utf8(&body)?.to_owned()), + ContentType::Json => { + let json: serde_json::Value = serde_json::from_slice(&body)?; + + Ok(format!( + "```json\n{}\n```", + serde_json::to_string_pretty(&json)? + )) + } + } +} diff --git a/crates/agent_ui/src/terminal_inline_assistant.rs b/crates/agent_ui/src/terminal_inline_assistant.rs index 9e653dcce1dcf1487af9998662b57ea4f998c7de..c6da11a35af22c4052cd580e58c896e19a1faf78 100644 --- a/crates/agent_ui/src/terminal_inline_assistant.rs +++ b/crates/agent_ui/src/terminal_inline_assistant.rs @@ -1,6 +1,5 @@ use crate::{ context::load_context, - context_store::ContextStore, inline_prompt_editor::{ CodegenStatus, PromptEditor, PromptEditorEvent, TerminalInlineAssistId, }, @@ -73,8 +72,8 @@ impl TerminalInlineAssistant { terminal_view: &Entity, workspace: WeakEntity, project: WeakEntity, + thread_store: Entity, prompt_store: Option>, - thread_store: Option>, initial_prompt: Option, window: &mut Window, cx: &mut App, @@ -87,7 +86,6 @@ impl TerminalInlineAssistant { cx, ) }); - let context_store = cx.new(|_cx| ContextStore::new(project)); let codegen = cx.new(|_| TerminalCodegen::new(terminal, self.telemetry.clone())); let prompt_editor = cx.new(|cx| { @@ -97,10 +95,10 @@ impl TerminalInlineAssistant { prompt_buffer.clone(), codegen, self.fs.clone(), - context_store.clone(), - workspace.clone(), thread_store.clone(), - prompt_store.as_ref().map(|s| s.downgrade()), + prompt_store.clone(), + project.clone(), + workspace.clone(), window, cx, ) @@ -119,8 +117,6 @@ impl TerminalInlineAssistant { terminal_view, prompt_editor, workspace.clone(), - context_store, - prompt_store, window, cx, ); @@ -227,6 +223,10 @@ impl TerminalInlineAssistant { assist_id: TerminalInlineAssistId, cx: &mut App, ) -> Result> { + let ConfiguredModel { model, .. } = LanguageModelRegistry::read_global(cx) + .inline_assistant_model() + .context("No inline assistant model")?; + let assist = self.assists.get(&assist_id).context("invalid assist")?; let shell = std::env::var("SHELL").ok(); @@ -243,35 +243,20 @@ impl TerminalInlineAssistant { .ok() .unwrap_or_default(); + let prompt_editor = assist.prompt_editor.clone().context("invalid assist")?; + let prompt = self.prompt_builder.generate_terminal_assistant_prompt( - &assist - .prompt_editor - .clone() - .context("invalid assist")? - .read(cx) - .prompt(cx), + &prompt_editor.read(cx).prompt(cx), shell.as_deref(), working_directory.as_deref(), &latest_output, )?; - let contexts = assist - .context_store - .read(cx) - .context() - .cloned() - .collect::>(); - let context_load_task = assist.workspace.update(cx, |workspace, cx| { - let project = workspace.project(); - load_context(contexts, project, &assist.prompt_store, cx) - })?; - - let ConfiguredModel { model, .. 
} = LanguageModelRegistry::read_global(cx) - .inline_assistant_model() - .context("No inline assistant model")?; - let temperature = AgentSettings::temperature_for_model(&model, cx); + let mention_set = prompt_editor.read(cx).mention_set().clone(); + let load_context_task = load_context(&mention_set, cx); + Ok(cx.background_spawn(async move { let mut request_message = LanguageModelRequestMessage { role: Role::User, @@ -279,9 +264,9 @@ impl TerminalInlineAssistant { cache: false, }; - context_load_task - .await - .add_to_request_message(&mut request_message); + if let Some(context) = load_context_task.await { + context.add_to_request_message(&mut request_message); + } request_message.content.push(prompt.into()); @@ -409,8 +394,6 @@ struct TerminalInlineAssist { prompt_editor: Option>>, codegen: Entity, workspace: WeakEntity, - context_store: Entity, - prompt_store: Option>, _subscriptions: Vec, } @@ -420,8 +403,6 @@ impl TerminalInlineAssist { terminal: &Entity, prompt_editor: Entity>, workspace: WeakEntity, - context_store: Entity, - prompt_store: Option>, window: &mut Window, cx: &mut App, ) -> Self { @@ -431,8 +412,6 @@ impl TerminalInlineAssist { prompt_editor: Some(prompt_editor.clone()), codegen: codegen.clone(), workspace, - context_store, - prompt_store, _subscriptions: vec![ window.subscribe(&prompt_editor, cx, |prompt_editor, event, window, cx| { TerminalInlineAssistant::update_global(cx, |this, cx| { diff --git a/crates/agent_ui/src/ui.rs b/crates/agent_ui/src/ui.rs index f556f8eece8efef77f4a6c286fee032cbfcb42df..e604df416e2725a6f1b7bff8eed883a8cc36e184 100644 --- a/crates/agent_ui/src/ui.rs +++ b/crates/agent_ui/src/ui.rs @@ -2,7 +2,6 @@ mod acp_onboarding_modal; mod agent_notification; mod burn_mode_tooltip; mod claude_code_onboarding_modal; -mod context_pill; mod end_trial_upsell; mod hold_for_default; mod onboarding_modal; @@ -13,7 +12,6 @@ pub use acp_onboarding_modal::*; pub use agent_notification::*; pub use burn_mode_tooltip::*; pub use claude_code_onboarding_modal::*; -pub use context_pill::*; pub use end_trial_upsell::*; pub use hold_for_default::*; pub use onboarding_modal::*; diff --git a/crates/agent_ui/src/ui/context_pill.rs b/crates/agent_ui/src/ui/context_pill.rs deleted file mode 100644 index 89bf618a16d3fb8e7abc5afaf34ee6e8bb43ab67..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/ui/context_pill.rs +++ /dev/null @@ -1,858 +0,0 @@ -use std::{ops::Range, path::Path, rc::Rc, sync::Arc, time::Duration}; - -use file_icons::FileIcons; -use futures::FutureExt as _; -use gpui::{ - Animation, AnimationExt as _, AnyView, ClickEvent, Entity, Image, MouseButton, Task, - pulsating_between, -}; -use language_model::LanguageModelImage; -use project::Project; -use prompt_store::PromptStore; -use rope::Point; -use ui::{IconButtonShape, Tooltip, prelude::*, tooltip_container}; -use util::paths::PathStyle; - -use crate::context::{ - AgentContextHandle, ContextId, ContextKind, DirectoryContextHandle, FetchedUrlContext, - FileContextHandle, ImageContext, ImageStatus, RulesContextHandle, SelectionContextHandle, - SymbolContextHandle, TextThreadContextHandle, ThreadContextHandle, -}; - -#[derive(IntoElement)] -pub enum ContextPill { - Added { - context: AddedContext, - dupe_name: bool, - focused: bool, - on_click: Option>, - on_remove: Option>, - }, - Suggested { - name: SharedString, - icon_path: Option, - kind: ContextKind, - focused: bool, - on_click: Option>, - }, -} - -impl ContextPill { - pub fn added( - context: AddedContext, - dupe_name: bool, - focused: 
bool, - on_remove: Option>, - ) -> Self { - Self::Added { - context, - dupe_name, - on_remove, - focused, - on_click: None, - } - } - - pub fn suggested( - name: SharedString, - icon_path: Option, - kind: ContextKind, - focused: bool, - ) -> Self { - Self::Suggested { - name, - icon_path, - kind, - focused, - on_click: None, - } - } - - pub fn on_click(mut self, listener: Rc) -> Self { - match &mut self { - ContextPill::Added { on_click, .. } => { - *on_click = Some(listener); - } - ContextPill::Suggested { on_click, .. } => { - *on_click = Some(listener); - } - } - self - } - - pub fn id(&self) -> ElementId { - match self { - Self::Added { context, .. } => context.handle.element_id("context-pill".into()), - Self::Suggested { .. } => "suggested-context-pill".into(), - } - } - - pub fn icon(&self) -> Icon { - match self { - Self::Suggested { - icon_path: Some(icon_path), - .. - } => Icon::from_path(icon_path), - Self::Suggested { kind, .. } => Icon::new(kind.icon()), - Self::Added { context, .. } => context.icon(), - } - } -} - -impl RenderOnce for ContextPill { - fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { - let color = cx.theme().colors(); - - let base_pill = h_flex() - .id(self.id()) - .pl_1() - .pb(px(1.)) - .border_1() - .rounded_sm() - .gap_1() - .child(self.icon().size(IconSize::XSmall).color(Color::Muted)); - - match &self { - ContextPill::Added { - context, - dupe_name, - on_remove, - focused, - on_click, - } => { - let status_is_error = matches!(context.status, ContextStatus::Error { .. }); - let status_is_warning = matches!(context.status, ContextStatus::Warning { .. }); - - base_pill - .pr(if on_remove.is_some() { px(2.) } else { px(4.) }) - .map(|pill| { - if status_is_error { - pill.bg(cx.theme().status().error_background) - .border_color(cx.theme().status().error_border) - } else if status_is_warning { - pill.bg(cx.theme().status().warning_background) - .border_color(cx.theme().status().warning_border) - } else if *focused { - pill.bg(color.element_background) - .border_color(color.border_focused) - } else { - pill.bg(color.element_background) - .border_color(color.border.opacity(0.5)) - } - }) - .child( - h_flex() - .id("context-data") - .gap_1() - .child( - div().max_w_64().child( - Label::new(context.name.clone()) - .size(LabelSize::Small) - .truncate(), - ), - ) - .when_some(context.parent.as_ref(), |element, parent_name| { - if *dupe_name { - element.child( - Label::new(parent_name.clone()) - .size(LabelSize::XSmall) - .color(Color::Muted), - ) - } else { - element - } - }) - .when_some(context.tooltip.as_ref(), |element, tooltip| { - element.tooltip(Tooltip::text(tooltip.clone())) - }) - .map(|element| match &context.status { - ContextStatus::Ready => element - .when_some( - context.render_hover.as_ref(), - |element, render_hover| { - let render_hover = render_hover.clone(); - element.hoverable_tooltip(move |window, cx| { - render_hover(window, cx) - }) - }, - ) - .into_any(), - ContextStatus::Loading { message } => element - .tooltip(ui::Tooltip::text(message.clone())) - .with_animation( - "pulsating-ctx-pill", - Animation::new(Duration::from_secs(2)) - .repeat() - .with_easing(pulsating_between(0.4, 0.8)), - |label, delta| label.opacity(delta), - ) - .into_any_element(), - ContextStatus::Warning { message } - | ContextStatus::Error { message } => element - .tooltip(ui::Tooltip::text(message.clone())) - .into_any_element(), - }), - ) - .when_some(on_remove.as_ref(), |element, on_remove| { - element.child( - IconButton::new( - 
context.handle.element_id("remove".into()), - IconName::Close, - ) - .shape(IconButtonShape::Square) - .icon_size(IconSize::XSmall) - .tooltip(Tooltip::text("Remove Context")) - .on_click({ - let on_remove = on_remove.clone(); - move |event, window, cx| on_remove(event, window, cx) - }), - ) - }) - .when_some(on_click.as_ref(), |element, on_click| { - let on_click = on_click.clone(); - element.cursor_pointer().on_click(move |event, window, cx| { - on_click(event, window, cx); - cx.stop_propagation(); - }) - }) - .into_any_element() - } - ContextPill::Suggested { - name, - icon_path: _, - kind: _, - focused, - on_click, - } => base_pill - .cursor_pointer() - .pr_1() - .border_dashed() - .map(|pill| { - if *focused { - pill.border_color(color.border_focused) - .bg(color.element_background.opacity(0.5)) - } else { - pill.border_color(color.border) - } - }) - .hover(|style| style.bg(color.element_hover.opacity(0.5))) - .child( - div().max_w_64().child( - Label::new(name.clone()) - .size(LabelSize::Small) - .color(Color::Muted) - .truncate(), - ), - ) - .tooltip(|_window, cx| { - Tooltip::with_meta("Suggested Context", None, "Click to add it", cx) - }) - .when_some(on_click.as_ref(), |element, on_click| { - let on_click = on_click.clone(); - element.on_click(move |event, window, cx| { - on_click(event, window, cx); - cx.stop_propagation(); - }) - }) - .into_any(), - } - } -} - -pub enum ContextStatus { - Ready, - Loading { message: SharedString }, - Error { message: SharedString }, - Warning { message: SharedString }, -} - -#[derive(RegisterComponent)] -pub struct AddedContext { - pub handle: AgentContextHandle, - pub kind: ContextKind, - pub name: SharedString, - pub parent: Option, - pub tooltip: Option, - pub icon_path: Option, - pub status: ContextStatus, - pub render_hover: Option AnyView + 'static>>, -} - -impl AddedContext { - pub fn icon(&self) -> Icon { - match &self.status { - ContextStatus::Warning { .. } => Icon::new(IconName::Warning).color(Color::Warning), - ContextStatus::Error { .. } => Icon::new(IconName::XCircle).color(Color::Error), - _ => { - if let Some(icon_path) = &self.icon_path { - Icon::from_path(icon_path) - } else { - Icon::new(self.kind.icon()) - } - } - } - } - /// Creates an `AddedContext` by retrieving relevant details of `AgentContext`. This returns a - /// `None` if `DirectoryContext` or `RulesContext` no longer exist. - /// - /// TODO: `None` cases are unremovable from `ContextStore` and so are a very minor memory leak. 
- pub fn new_pending( - handle: AgentContextHandle, - prompt_store: Option<&Entity>, - project: &Project, - model: Option<&Arc>, - cx: &App, - ) -> Option { - match handle { - AgentContextHandle::File(handle) => { - Self::pending_file(handle, project.path_style(cx), cx) - } - AgentContextHandle::Directory(handle) => Self::pending_directory(handle, project, cx), - AgentContextHandle::Symbol(handle) => { - Self::pending_symbol(handle, project.path_style(cx), cx) - } - AgentContextHandle::Selection(handle) => { - Self::pending_selection(handle, project.path_style(cx), cx) - } - AgentContextHandle::FetchedUrl(handle) => Some(Self::fetched_url(handle)), - AgentContextHandle::Thread(handle) => Some(Self::pending_thread(handle, cx)), - AgentContextHandle::TextThread(handle) => Some(Self::pending_text_thread(handle, cx)), - AgentContextHandle::Rules(handle) => Self::pending_rules(handle, prompt_store, cx), - AgentContextHandle::Image(handle) => { - Some(Self::image(handle, model, project.path_style(cx), cx)) - } - } - } - - fn pending_file( - handle: FileContextHandle, - path_style: PathStyle, - cx: &App, - ) -> Option { - let full_path = handle - .buffer - .read(cx) - .file()? - .full_path(cx) - .to_string_lossy() - .to_string(); - Some(Self::file(handle, &full_path, path_style, cx)) - } - - fn file( - handle: FileContextHandle, - full_path: &str, - path_style: PathStyle, - cx: &App, - ) -> AddedContext { - let (name, parent) = extract_file_name_and_directory_from_full_path(full_path, path_style); - AddedContext { - kind: ContextKind::File, - name, - parent, - tooltip: Some(SharedString::new(full_path)), - icon_path: FileIcons::get_icon(Path::new(full_path), cx), - status: ContextStatus::Ready, - render_hover: None, - handle: AgentContextHandle::File(handle), - } - } - - fn pending_directory( - handle: DirectoryContextHandle, - project: &Project, - cx: &App, - ) -> Option { - let worktree = project.worktree_for_entry(handle.entry_id, cx)?.read(cx); - let entry = worktree.entry_for_id(handle.entry_id)?; - let full_path = worktree - .full_path(&entry.path) - .to_string_lossy() - .to_string(); - Some(Self::directory(handle, &full_path, project.path_style(cx))) - } - - fn directory( - handle: DirectoryContextHandle, - full_path: &str, - path_style: PathStyle, - ) -> AddedContext { - let (name, parent) = extract_file_name_and_directory_from_full_path(full_path, path_style); - AddedContext { - kind: ContextKind::Directory, - name, - parent, - tooltip: Some(SharedString::new(full_path)), - icon_path: None, - status: ContextStatus::Ready, - render_hover: None, - handle: AgentContextHandle::Directory(handle), - } - } - - fn pending_symbol( - handle: SymbolContextHandle, - path_style: PathStyle, - cx: &App, - ) -> Option { - let excerpt = ContextFileExcerpt::new( - &handle.full_path(cx)?.to_string_lossy(), - handle.enclosing_line_range(cx), - path_style, - cx, - ); - Some(AddedContext { - kind: ContextKind::Symbol, - name: handle.symbol.clone(), - parent: Some(excerpt.file_name_and_range.clone()), - tooltip: None, - icon_path: None, - status: ContextStatus::Ready, - render_hover: { - let handle = handle.clone(); - Some(Rc::new(move |_, cx| { - excerpt.hover_view(handle.text(cx), cx).into() - })) - }, - handle: AgentContextHandle::Symbol(handle), - }) - } - - fn pending_selection( - handle: SelectionContextHandle, - path_style: PathStyle, - cx: &App, - ) -> Option { - let excerpt = ContextFileExcerpt::new( - &handle.full_path(cx)?.to_string_lossy(), - handle.line_range(cx), - path_style, - cx, - ); - 
Some(AddedContext { - kind: ContextKind::Selection, - name: excerpt.file_name_and_range.clone(), - parent: excerpt.parent_name.clone(), - tooltip: None, - icon_path: excerpt.icon_path.clone(), - status: ContextStatus::Ready, - render_hover: { - let handle = handle.clone(); - Some(Rc::new(move |_, cx| { - excerpt.hover_view(handle.text(cx), cx).into() - })) - }, - handle: AgentContextHandle::Selection(handle), - }) - } - - fn fetched_url(context: FetchedUrlContext) -> AddedContext { - AddedContext { - kind: ContextKind::FetchedUrl, - name: context.url.clone(), - parent: None, - tooltip: None, - icon_path: None, - status: ContextStatus::Ready, - render_hover: None, - handle: AgentContextHandle::FetchedUrl(context), - } - } - - fn pending_thread(handle: ThreadContextHandle, cx: &App) -> AddedContext { - AddedContext { - kind: ContextKind::Thread, - name: handle.title(cx), - parent: None, - tooltip: None, - icon_path: None, - status: if handle.thread.read(cx).is_generating_summary() { - ContextStatus::Loading { - message: "Summarizing…".into(), - } - } else { - ContextStatus::Ready - }, - render_hover: { - let thread = handle.thread.clone(); - Some(Rc::new(move |_, cx| { - let text = thread - .update(cx, |thread, cx| thread.summary(cx)) - .now_or_never() - .flatten() - .unwrap_or_else(|| SharedString::from(thread.read(cx).to_markdown())); - ContextPillHover::new_text(text, cx).into() - })) - }, - handle: AgentContextHandle::Thread(handle), - } - } - - fn pending_text_thread(handle: TextThreadContextHandle, cx: &App) -> AddedContext { - AddedContext { - kind: ContextKind::TextThread, - name: handle.title(cx), - parent: None, - tooltip: None, - icon_path: None, - status: ContextStatus::Ready, - render_hover: { - let text_thread = handle.text_thread.clone(); - Some(Rc::new(move |_, cx| { - let text = text_thread.read(cx).to_xml(cx); - ContextPillHover::new_text(text.into(), cx).into() - })) - }, - handle: AgentContextHandle::TextThread(handle), - } - } - - fn pending_rules( - handle: RulesContextHandle, - prompt_store: Option<&Entity>, - cx: &App, - ) -> Option { - let title = prompt_store - .as_ref()? - .read(cx) - .metadata(handle.prompt_id.into())? 
- .title - .unwrap_or_else(|| "Unnamed Rule".into()); - Some(AddedContext { - kind: ContextKind::Rules, - name: title, - parent: None, - tooltip: None, - icon_path: None, - status: ContextStatus::Ready, - render_hover: None, - handle: AgentContextHandle::Rules(handle), - }) - } - - fn image( - context: ImageContext, - model: Option<&Arc>, - path_style: PathStyle, - cx: &App, - ) -> AddedContext { - let (name, parent, icon_path) = if let Some(full_path) = context.full_path.as_ref() { - let (name, parent) = - extract_file_name_and_directory_from_full_path(full_path, path_style); - let icon_path = FileIcons::get_icon(Path::new(full_path), cx); - (name, parent, icon_path) - } else { - ("Image".into(), None, None) - }; - - let status = match context.status(model) { - ImageStatus::Loading => ContextStatus::Loading { - message: "Loading…".into(), - }, - ImageStatus::Error => ContextStatus::Error { - message: "Failed to load Image".into(), - }, - ImageStatus::Warning => ContextStatus::Warning { - message: format!( - "{} doesn't support attaching Images as Context", - model.map(|m| m.name().0).unwrap_or_else(|| "Model".into()) - ) - .into(), - }, - ImageStatus::Ready => ContextStatus::Ready, - }; - - AddedContext { - kind: ContextKind::Image, - name, - parent, - tooltip: None, - icon_path, - status, - render_hover: Some(Rc::new({ - let image = context.original_image.clone(); - move |_, cx| { - let image = image.clone(); - ContextPillHover::new(cx, move |_, _| { - gpui::img(image.clone()) - .max_w_96() - .max_h_96() - .into_any_element() - }) - .into() - } - })), - handle: AgentContextHandle::Image(context), - } - } -} - -fn extract_file_name_and_directory_from_full_path( - path: &str, - path_style: PathStyle, -) -> (SharedString, Option) { - let (parent, file_name) = path_style.split(path); - let parent = parent.and_then(|parent| { - let parent = parent.trim_end_matches(path_style.separator()); - let (_, parent) = path_style.split(parent); - if parent.is_empty() { - None - } else { - Some(SharedString::new(parent)) - } - }); - (SharedString::new(file_name), parent) -} - -#[derive(Debug, Clone)] -struct ContextFileExcerpt { - pub file_name_and_range: SharedString, - pub full_path_and_range: SharedString, - pub parent_name: Option, - pub icon_path: Option, -} - -impl ContextFileExcerpt { - pub fn new(full_path: &str, line_range: Range, path_style: PathStyle, cx: &App) -> Self { - let (parent, file_name) = path_style.split(full_path); - let line_range_text = format!(" ({}-{})", line_range.start.row + 1, line_range.end.row + 1); - let mut full_path_and_range = full_path.to_owned(); - full_path_and_range.push_str(&line_range_text); - let mut file_name_and_range = file_name.to_owned(); - file_name_and_range.push_str(&line_range_text); - - let parent_name = parent.and_then(|parent| { - let parent = parent.trim_end_matches(path_style.separator()); - let (_, parent) = path_style.split(parent); - if parent.is_empty() { - None - } else { - Some(SharedString::new(parent)) - } - }); - - let icon_path = FileIcons::get_icon(Path::new(full_path), cx); - - ContextFileExcerpt { - file_name_and_range: file_name_and_range.into(), - full_path_and_range: full_path_and_range.into(), - parent_name, - icon_path, - } - } - - fn hover_view(&self, text: SharedString, cx: &mut App) -> Entity { - let icon_path = self.icon_path.clone(); - let full_path_and_range = self.full_path_and_range.clone(); - ContextPillHover::new(cx, move |_, cx| { - v_flex() - .child( - h_flex() - .gap_0p5() - .w_full() - .max_w_full() - .border_b_1() - 
.border_color(cx.theme().colors().border.opacity(0.6)) - .children( - icon_path - .clone() - .map(Icon::from_path) - .map(|icon| icon.color(Color::Muted).size(IconSize::XSmall)), - ) - .child( - // TODO: make this truncate on the left. - Label::new(full_path_and_range.clone()) - .size(LabelSize::Small) - .ml_1(), - ), - ) - .child( - div() - .id("context-pill-hover-contents") - .overflow_scroll() - .max_w_128() - .max_h_96() - .child(Label::new(text.clone()).buffer_font(cx)), - ) - .into_any_element() - }) - } -} - -struct ContextPillHover { - render_hover: Box AnyElement>, -} - -impl ContextPillHover { - fn new( - cx: &mut App, - render_hover: impl Fn(&mut Window, &mut App) -> AnyElement + 'static, - ) -> Entity { - cx.new(|_| Self { - render_hover: Box::new(render_hover), - }) - } - - fn new_text(content: SharedString, cx: &mut App) -> Entity { - Self::new(cx, move |_, _| { - div() - .id("context-pill-hover-contents") - .overflow_scroll() - .max_w_128() - .max_h_96() - .child(content.clone()) - .into_any_element() - }) - } -} - -impl Render for ContextPillHover { - fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { - tooltip_container(cx, move |this, cx| { - this.occlude() - .on_mouse_move(|_, _, cx| cx.stop_propagation()) - .on_mouse_down(MouseButton::Left, |_, _, cx| cx.stop_propagation()) - .child((self.render_hover)(window, cx)) - }) - } -} - -impl Component for AddedContext { - fn scope() -> ComponentScope { - ComponentScope::Agent - } - - fn sort_name() -> &'static str { - "AddedContext" - } - - fn preview(_window: &mut Window, cx: &mut App) -> Option { - let mut next_context_id = ContextId::zero(); - let image_ready = ( - "Ready", - AddedContext::image( - ImageContext { - context_id: next_context_id.post_inc(), - project_path: None, - full_path: None, - original_image: Arc::new(Image::empty()), - image_task: Task::ready(Some(LanguageModelImage::empty())).shared(), - }, - None, - PathStyle::local(), - cx, - ), - ); - - let image_loading = ( - "Loading", - AddedContext::image( - ImageContext { - context_id: next_context_id.post_inc(), - project_path: None, - full_path: None, - original_image: Arc::new(Image::empty()), - image_task: cx - .background_spawn(async move { - smol::Timer::after(Duration::from_secs(60 * 5)).await; - Some(LanguageModelImage::empty()) - }) - .shared(), - }, - None, - PathStyle::local(), - cx, - ), - ); - - let image_error = ( - "Error", - AddedContext::image( - ImageContext { - context_id: next_context_id.post_inc(), - project_path: None, - full_path: None, - original_image: Arc::new(Image::empty()), - image_task: Task::ready(None).shared(), - }, - None, - PathStyle::local(), - cx, - ), - ); - - Some( - v_flex() - .gap_6() - .children( - vec![image_ready, image_loading, image_error] - .into_iter() - .map(|(text, context)| { - single_example( - text, - ContextPill::added(context, false, false, None).into_any_element(), - ) - }), - ) - .into_any(), - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use gpui::App; - use language_model::{LanguageModel, fake_provider::FakeLanguageModel}; - use std::sync::Arc; - - #[gpui::test] - fn test_image_context_warning_for_unsupported_model(cx: &mut App) { - let model: Arc = Arc::new(FakeLanguageModel::default()); - assert!(!model.supports_images()); - - let image_context = ImageContext { - context_id: ContextId::zero(), - project_path: None, - original_image: Arc::new(Image::empty()), - image_task: Task::ready(Some(LanguageModelImage::empty())).shared(), - full_path: None, - }; - - let 
added_context = - AddedContext::image(image_context, Some(&model), PathStyle::local(), cx); - - assert!(matches!( - added_context.status, - ContextStatus::Warning { .. } - )); - - assert!(matches!(added_context.kind, ContextKind::Image)); - assert_eq!(added_context.name.as_ref(), "Image"); - assert!(added_context.parent.is_none()); - assert!(added_context.icon_path.is_none()); - } - - #[gpui::test] - fn test_image_context_ready_for_no_model(cx: &mut App) { - let image_context = ImageContext { - context_id: ContextId::zero(), - project_path: None, - original_image: Arc::new(Image::empty()), - image_task: Task::ready(Some(LanguageModelImage::empty())).shared(), - full_path: None, - }; - - let added_context = AddedContext::image(image_context, None, PathStyle::local(), cx); - - assert!( - matches!(added_context.status, ContextStatus::Ready), - "Expected ready status when no model provided" - ); - - assert!(matches!(added_context.kind, ContextKind::Image)); - assert_eq!(added_context.name.as_ref(), "Image"); - assert!(added_context.parent.is_none()); - assert!(added_context.icon_path.is_none()); - } -} From 6b9c2b0363c3c13f7e57f9682ac011e62a802879 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Fri, 21 Nov 2025 14:22:49 -0300 Subject: [PATCH 0294/1030] zeta2: Improve jump outside UI (#43262) Still a prototype UI but a bit more noticeable :) Release Notes: - N/A --- crates/editor/src/editor.rs | 77 ++++++++++++++++++++++++++++++------- 1 file changed, 64 insertions(+), 13 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 30c03c1a8481003aad991c3acf4c6be38bf4b8d5..d4c09e06cbe2349ec759f0546049c462bf95b0a8 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -8814,23 +8814,13 @@ impl Editor { cx, ), EditPrediction::MoveOutside { snapshot, .. } => { - let file_name = snapshot - .file() - .map(|file| file.file_name(cx)) - .unwrap_or("untitled"); let mut element = self - .render_edit_prediction_line_popover( - format!("Jump to {file_name}"), - Some(IconName::ZedPredict), - window, - cx, - ) + .render_edit_prediction_jump_outside_popover(snapshot, window, cx) .into_any(); let size = element.layout_as_root(AvailableSpace::min_size(), window, cx); - let origin_x = text_bounds.size.width / 2. - size.width / 2.; - let origin_y = text_bounds.size.height - size.height - px(30.); - let origin = text_bounds.origin + gpui::Point::new(origin_x, origin_y); + let origin_x = text_bounds.size.width - size.width - px(30.); + let origin = text_bounds.origin + gpui::Point::new(origin_x, px(16.)); element.prepaint_at(origin, window, cx); Some((element, origin)) @@ -9395,6 +9385,67 @@ impl Editor { }) } + fn render_edit_prediction_jump_outside_popover( + &self, + snapshot: &BufferSnapshot, + window: &mut Window, + cx: &mut App, + ) -> Stateful
{ + let keybind = self.render_edit_prediction_accept_keybind(window, cx); + let has_keybind = keybind.is_some(); + + let file_name = snapshot + .file() + .map(|file| SharedString::new(file.file_name(cx))) + .unwrap_or(SharedString::new_static("untitled")); + + h_flex() + .id("ep-jump-outside-popover") + .py_1() + .px_2() + .gap_1() + .rounded_md() + .border_1() + .bg(Self::edit_prediction_line_popover_bg_color(cx)) + .border_color(Self::edit_prediction_callout_popover_border_color(cx)) + .shadow_xs() + .when(!has_keybind, |el| { + let status_colors = cx.theme().status(); + + el.bg(status_colors.error_background) + .border_color(status_colors.error.opacity(0.6)) + .pl_2() + .child(Icon::new(IconName::ZedPredictError).color(Color::Error)) + .cursor_default() + .hoverable_tooltip(move |_window, cx| { + cx.new(|_| MissingEditPredictionKeybindingTooltip).into() + }) + }) + .children(keybind) + .child( + Label::new(file_name) + .size(LabelSize::Small) + .buffer_font(cx) + .when(!has_keybind, |el| { + el.color(cx.theme().status().error.into()).strikethrough() + }), + ) + .when(!has_keybind, |el| { + el.child( + h_flex().ml_1().child( + Icon::new(IconName::Info) + .size(IconSize::Small) + .color(cx.theme().status().error.into()), + ), + ) + }) + .child( + div() + .mt(px(1.5)) + .child(Icon::new(IconName::ArrowUpRight).size(IconSize::Small)), + ) + } + fn edit_prediction_line_popover_bg_color(cx: &App) -> Hsla { let accent_color = cx.theme().colors().text_accent; let editor_bg_color = cx.theme().colors().editor_background; From 0492255d7b85cd370f037b993772dad97ac4001b Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Fri, 21 Nov 2025 13:30:02 -0500 Subject: [PATCH 0295/1030] Make community champions public (#43271) Release Notes: - N/A --- .../community_champion_auto_labeler.yml | 60 +++++++++++++++++-- 1 file changed, 56 insertions(+), 4 deletions(-) diff --git a/.github/workflows/community_champion_auto_labeler.yml b/.github/workflows/community_champion_auto_labeler.yml index c525bf4738f888b5ca84230982ff1f4f5da2db2f..8d991c8eb35a98e642d98e1ab3b1b9d30c2a003c 100644 --- a/.github/workflows/community_champion_auto_labeler.yml +++ b/.github/workflows/community_champion_auto_labeler.yml @@ -13,13 +13,65 @@ jobs: steps: - name: Check if author is a community champion and apply label uses: actions/github-script@v7 + env: + COMMUNITY_CHAMPIONS: | + 0x2CA + 5brian + 5herlocked + abdelq + afgomez + AidanV + akbxr + AlvaroParker + artemevsevev + bajrangCoder + bcomnes + Be-ing + blopker + bobbymannino + CharlesChen0823 + chbk + cppcoffee + davewa + ddoemonn + djsauble + fantacell + findrakecil + gko + huacnlee + imumesh18 + jacobtread + jansol + jeffreyguenther + jenslys + jongretar + lemorage + lnay + marcocondrache + marius851000 + mikebronner + ognevny + RemcoSmitsDev + romaninsh + Simek + someone13574 + sourcefrog + suxiaoshao + Takk8IS + tidely + timvermeulen + valentinegb + versecafe + vitallium + warrenjokinen + ya7010 + Zertsov with: script: | - const communityChampionBody = `${{ secrets.COMMUNITY_CHAMPIONS }}`; - - const communityChampions = communityChampionBody + const communityChampions = process.env.COMMUNITY_CHAMPIONS .split('\n') - .map(handle => handle.trim().toLowerCase()); + .map(handle => handle.trim().toLowerCase()) + .filter(handle => handle.length > 0); let author; if (context.eventName === 'issues') { From e76b485de3cb85978d5e5300f004a65b4f7ff968 Mon Sep 17 00:00:00 2001 From: Dave Waggoner Date: Fri, 21 Nov 2025 11:01:06 -0800 Subject: [PATCH 0296/1030] terminal: New settings 
for path hyperlink regexes (#40305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes: - #12338 - #40202 1. Adds two new settings which allow customizing the set of regexes used to identify path hyperlinks in terminal 1. Fixes path hyperlinks for paths containing unicode emoji and punctuation, for example, `mojo.🔥` 1. Fixes path hyperlinks for Windows verbatim paths, for example, `\\?\C:\Over\here.rs`. 1. Improves path hyperlink performance, especially for terminals with a lot of content 1. Replaces existing custom hard-coded default path hyperlink parsing logic with a set of customizable default regexes ## New settings (from default.json) ### terminal.path_hyperlink_regexes Regexes used to identify paths for hyperlink navigation. Supports optional named capture groups `path`, `line`, `column`, and `link`. If none of these are present, the entire match is the hyperlink target. If `path` is present, it is the hyperlink target, along with `line` and `column` if present. `link` may be used to customize what text in terminal is part of the hyperlink. If `link` is not present, the text of the entire match is used. If `line` and `column` are not present, the default built-in line and column suffix processing is used which parses `line:column` and `(line,column)` variants. The default value handles Python diagnostics and common path, line, column syntaxes. This can be extended or replaced to handle specific scenarios. For example, to enable support for hyperlinking paths which contain spaces in rust output, ``` [ "\\s+(-->|:::|at) (?(?.+?))(:$|$)", "\\s+(Compiling|Checking|Documenting) [^(]+\\((?(?.+))\\)" ], ``` could be used. Processing stops at the first regex with a match, even if no link is produced which is the case when the cursor is not over the hyperlinked text. For best performance it is recommended to order regexes from most common to least common. For readability and documentation, each regex may be an array of strings which are collected into one multi-line regex string for use in terminal path hyperlink detection. ### terminal.path_hyperlink_timeout_ms Timeout for hover and Cmd-click path hyperlink discovery in milliseconds. Specifying a timeout of `0` will disable path hyperlinking in terminal. ## Performance This PR fixes terminal to only search the hovered line for hyperlinks and adds a benchmark. Before this fix, hyperlink detection grows linearly with terminal content, with this fix it is proportional only to the hovered line. The gains come from replacing `visible_regex_match_iter`, which searched all visible lines, with code that only searches the line hovered on (including if the line is wrapped). Local benchmark timings (terminal with 500 lines of content): ||main|this PR|Δ| |-|-|-:|-| | cargo_hyperlink_benchmark | 1.4 ms | 13 µs | -99.0% | | rust_hyperlink_benchmark | 1.2 ms | 11 µs | -99.1% | | ls_hyperlink_benchmark | 1.3 ms | 7 µs | -99.5% | Release Notes: - terminal: New settings to allow customizing the set of regexes used to identify path hyperlinks in terminal - terminal: Fixed terminal path hyperlinks for paths containing unicode punctuation and emoji, e.g. 
mojo.🔥 - terminal: Fixed path hyperlinks for Windows verbatim paths, for example, `\\?\C:\Over\here.rs` - terminal: Improved terminal hyperlink performance, especially for terminals with a lot of content visible --- Cargo.lock | 4 +- assets/settings/default.json | 54 +- crates/project/src/terminals.rs | 4 + .../settings/src/settings_content/terminal.rs | 42 + crates/settings/src/vscode_import.rs | 2 + crates/terminal/Cargo.toml | 7 +- crates/terminal/src/terminal.rs | 23 +- crates/terminal/src/terminal_hyperlinks.rs | 850 ++++++++++++------ crates/terminal/src/terminal_settings.rs | 16 +- docs/src/configuring-zed.md | 47 + 10 files changed, 766 insertions(+), 283 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9917862e72ba3f63e20b2c7305902a85dc0f3191..bfce6ab287c81852f558ea064097443c1131d9a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17072,16 +17072,17 @@ dependencies = [ "alacritty_terminal", "anyhow", "collections", + "fancy-regex 0.14.0", "futures 0.3.31", "gpui", "itertools 0.14.0", "libc", "log", "rand 0.9.2", - "regex", "release_channel", "schemars", "serde", + "serde_json", "settings", "smol", "sysinfo 0.37.2", @@ -17091,6 +17092,7 @@ dependencies = [ "url", "urlencoding", "util", + "util_macros", "windows 0.61.3", ] diff --git a/assets/settings/default.json b/assets/settings/default.json index 9a6146f75b6a3b1d16d64bd1d7b1e7aab9e992dd..9b289bdf088be12ec6970f81ddd7edfd55aedc66 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1585,7 +1585,59 @@ // // Most terminal themes have APCA values of 40-70. // A value of 45 preserves colorful themes while ensuring legibility. - "minimum_contrast": 45 + "minimum_contrast": 45, + // Regexes used to identify paths for hyperlink navigation. Supports optional named capture + // groups `path`, `line`, `column`, and `link`. If none of these are present, the entire match + // is the hyperlink target. If `path` is present, it is the hyperlink target, along with `line` + // and `column` if present. `link` may be used to customize what text in terminal is part of the + // hyperlink. If `link` is not present, the text of the entire match is used. If `line` and + // `column` are not present, the default built-in line and column suffix processing is used + // which parses `line:column` and `(line,column)` variants. The default value handles Python + // diagnostics and common path, line, column syntaxes. This can be extended or replaced to + // handle specific scenarios. For example, to enable support for hyperlinking paths which + // contain spaces in rust output, + // + // [ + // "\\s+(-->|:::|at) (?(?.+?))(:$|$)", + // "\\s+(Compiling|Checking|Documenting) [^(]+\\((?(?.+))\\)" + // ], + // + // could be used. Processing stops at the first regex with a match, even if no link is + // produced which is the case when the cursor is not over the hyperlinked text. For best + // performance it is recommended to order regexes from most common to least common. For + // readability and documentation, each regex may be an array of strings which are collected + // into one multi-line regex string for use in terminal path hyperlink detection. 
+ "path_hyperlink_regexes": [ + // Python-style diagnostics + "File \"(?[^\"]+)\", line (?[0-9]+)", + // Common path syntax with optional line, column, description, trailing punctuation, or + // surrounding symbols or quotes + [ + "(?x)", + "# optionally starts with 0-2 opening prefix symbols", + "[({\\[<]{0,2}", + "# which may be followed by an opening quote", + "(?[\"'`])?", + "# `path` is the shortest sequence of any non-space character", + "(?(?[^ ]+?", + " # which may end with a line and optionally a column,", + " (?:+[0-9]+(:[0-9]+)?|:?\\([0-9]+([,:][0-9]+)?\\))?", + "))", + "# which must be followed by a matching quote", + "(?()\\k)", + "# and optionally a single closing symbol", + "[)}\\]>]?", + "# if line/column matched, may be followed by a description", + "(?():[^ 0-9][^ ]*)?", + "# which may be followed by trailing punctuation", + "[.,:)}\\]>]*", + "# and always includes trailing whitespace or end of line", + "([ ]+|$)" + ] + ], + // Timeout for hover and Cmd-click path hyperlink discovery in milliseconds. Specifying a + // timeout of `0` will disable path hyperlinking in terminal. + "path_hyperlink_timeout_ms": 1 }, "code_actions_on_format": {}, // Settings related to running tasks. diff --git a/crates/project/src/terminals.rs b/crates/project/src/terminals.rs index ef21c97f8178181493968c984e6534772eac9beb..81172f57744ac3d03532a263e70a483496db24f6 100644 --- a/crates/project/src/terminals.rs +++ b/crates/project/src/terminals.rs @@ -240,6 +240,8 @@ impl Project { settings.cursor_shape, settings.alternate_scroll, settings.max_scroll_history_lines, + settings.path_hyperlink_regexes, + settings.path_hyperlink_timeout_ms, is_via_remote, cx.entity_id().as_u64(), Some(completion_tx), @@ -369,6 +371,8 @@ impl Project { settings.cursor_shape, settings.alternate_scroll, settings.max_scroll_history_lines, + settings.path_hyperlink_regexes, + settings.path_hyperlink_timeout_ms, is_via_remote, cx.entity_id().as_u64(), None, diff --git a/crates/settings/src/settings_content/terminal.rs b/crates/settings/src/settings_content/terminal.rs index c75b986bb817752d2f3ce64db52af2ad61a1c58d..c54ebe2d1c57af6e0fe51c765a5529cc4b1d4d7f 100644 --- a/crates/settings/src/settings_content/terminal.rs +++ b/crates/settings/src/settings_content/terminal.rs @@ -29,6 +29,41 @@ pub struct ProjectTerminalSettingsContent { /// /// Default: on pub detect_venv: Option, + /// Regexes used to identify paths for hyperlink navigation. 
+ /// + /// Default: [ + /// // Python-style diagnostics + /// "File \"(?[^\"]+)\", line (?[0-9]+)", + /// // Common path syntax with optional line, column, description, trailing punctuation, or + /// // surrounding symbols or quotes + /// [ + /// "(?x)", + /// "# optionally starts with 0-2 opening prefix symbols", + /// "[({\\[<]{0,2}", + /// "# which may be followed by an opening quote", + /// "(?[\"'`])?", + /// "# `path` is the shortest sequence of any non-space character", + /// "(?(?[^ ]+?", + /// " # which may end with a line and optionally a column,", + /// " (?:+[0-9]+(:[0-9]+)?|:?\\([0-9]+([,:][0-9]+)?\\))?", + /// "))", + /// "# which must be followed by a matching quote", + /// "(?()\\k)", + /// "# and optionally a single closing symbol", + /// "[)}\\]>]?", + /// "# if line/column matched, may be followed by a description", + /// "(?():[^ 0-9][^ ]*)?", + /// "# which may be followed by trailing punctuation", + /// "[.,:)}\\]>]*", + /// "# and always includes trailing whitespace or end of line", + /// "([ ]+|$)" + /// ] + /// ] + pub path_hyperlink_regexes: Option>, + /// Timeout for hover and Cmd-click path hyperlink discovery in milliseconds. + /// + /// Default: 1 + pub path_hyperlink_timeout_ms: Option, } #[with_fallible_options] @@ -412,6 +447,13 @@ impl VenvSettings { } } +#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, JsonSchema, MergeFrom)] +#[serde(untagged)] +pub enum PathHyperlinkRegex { + SingleLine(String), + MultiLine(Vec), +} + #[derive( Copy, Clone, diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index f5df817dcd0f4ae02bea3934eaaaf042a02bdbc1..4d893011d49d2094614c6e06918ecf6e8fade774 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -772,6 +772,8 @@ impl VsCodeSettings { working_directory: None, env, detect_venv: None, + path_hyperlink_regexes: None, + path_hyperlink_timeout_ms: None, } } diff --git a/crates/terminal/Cargo.toml b/crates/terminal/Cargo.toml index 0dc7338e04b79e2a50effbea180dccf1587c66b1..dac9db190dbd0864142a1d429b69db17b4ae25e9 100644 --- a/crates/terminal/Cargo.toml +++ b/crates/terminal/Cargo.toml @@ -25,6 +25,7 @@ anyhow.workspace = true collections.workspace = true futures.workspace = true gpui.workspace = true +itertools.workspace = true libc.workspace = true log.workspace = true release_channel.workspace = true @@ -37,9 +38,8 @@ task.workspace = true theme.workspace = true thiserror.workspace = true util.workspace = true -regex.workspace = true +fancy-regex.workspace = true urlencoding.workspace = true -itertools.workspace = true [target.'cfg(windows)'.dependencies] windows.workspace = true @@ -47,4 +47,7 @@ windows.workspace = true [dev-dependencies] gpui = { workspace = true, features = ["test-support"] } rand.workspace = true +serde_json.workspace = true +settings = { workspace = true, features = ["test-support"] } url.workspace = true +util_macros.workspace = true diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 3c71a7f0e1a483f1e27fe52170bbabbe6129b974..69b6be5f249b811273aed8ecd96ed82493a3596a 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -374,7 +374,7 @@ impl TerminalBuilder { scroll_px: px(0.), next_link_id: 0, selection_phase: SelectionPhase::Ended, - hyperlink_regex_searches: RegexSearches::new(), + hyperlink_regex_searches: RegexSearches::default(), vi_mode_enabled: false, is_remote_terminal: false, last_mouse_move_time: Instant::now(), @@ -388,6 +388,8 @@ impl 
TerminalBuilder { cursor_shape, alternate_scroll, max_scroll_history_lines, + path_hyperlink_regexes: Vec::default(), + path_hyperlink_timeout_ms: 0, window_id, }, child_exited: None, @@ -408,6 +410,8 @@ impl TerminalBuilder { cursor_shape: CursorShape, alternate_scroll: AlternateScroll, max_scroll_history_lines: Option, + path_hyperlink_regexes: Vec, + path_hyperlink_timeout_ms: u64, is_remote_terminal: bool, window_id: u64, completion_tx: Option>>, @@ -592,7 +596,10 @@ impl TerminalBuilder { scroll_px: px(0.), next_link_id: 0, selection_phase: SelectionPhase::Ended, - hyperlink_regex_searches: RegexSearches::new(), + hyperlink_regex_searches: RegexSearches::new( + &path_hyperlink_regexes, + path_hyperlink_timeout_ms, + ), vi_mode_enabled: false, is_remote_terminal, last_mouse_move_time: Instant::now(), @@ -606,6 +613,8 @@ impl TerminalBuilder { cursor_shape, alternate_scroll, max_scroll_history_lines, + path_hyperlink_regexes, + path_hyperlink_timeout_ms, window_id, }, child_exited: None, @@ -838,6 +847,8 @@ struct CopyTemplate { cursor_shape: CursorShape, alternate_scroll: AlternateScroll, max_scroll_history_lines: Option, + path_hyperlink_regexes: Vec, + path_hyperlink_timeout_ms: u64, window_id: u64, } @@ -2163,6 +2174,8 @@ impl Terminal { self.template.cursor_shape, self.template.alternate_scroll, self.template.max_scroll_history_lines, + self.template.path_hyperlink_regexes.clone(), + self.template.path_hyperlink_timeout_ms, self.is_remote_terminal, self.template.window_id, None, @@ -2404,6 +2417,8 @@ mod tests { CursorShape::default(), AlternateScroll::On, None, + vec![], + 0, false, 0, Some(completion_tx), @@ -2452,6 +2467,8 @@ mod tests { CursorShape::default(), AlternateScroll::On, None, + vec![], + 0, false, 0, Some(completion_tx), @@ -2527,6 +2544,8 @@ mod tests { CursorShape::default(), AlternateScroll::On, None, + Vec::new(), + 0, false, 0, Some(completion_tx), diff --git a/crates/terminal/src/terminal_hyperlinks.rs b/crates/terminal/src/terminal_hyperlinks.rs index d3b50674204884e168d8cee39110a5b05ce13f54..94e8d1716ff6c71efcf444b068b77adc946b9a7c 100644 --- a/crates/terminal/src/terminal_hyperlinks.rs +++ b/crates/terminal/src/terminal_hyperlinks.rs @@ -2,45 +2,64 @@ use alacritty_terminal::{ Term, event::EventListener, grid::Dimensions, - index::{Boundary, Column, Direction as AlacDirection, Line, Point as AlacPoint}, - term::search::{Match, RegexIter, RegexSearch}, + index::{Boundary, Column, Direction as AlacDirection, Point as AlacPoint}, + term::{ + cell::Flags, + search::{Match, RegexIter, RegexSearch}, + }, +}; +use fancy_regex::Regex; +use log::{info, warn}; +use std::{ + ops::{Index, Range}, + time::{Duration, Instant}, }; -use regex::Regex; -use std::{ops::Index, sync::LazyLock}; const URL_REGEX: &str = r#"(ipfs:|ipns:|magnet:|mailto:|gemini://|gopher://|https://|http://|news:|file://|git://|ssh:|ftp://)[^\u{0000}-\u{001F}\u{007F}-\u{009F}<>"\s{-}\^⟨⟩`']+"#; -// Optional suffix matches MSBuild diagnostic suffixes for path parsing in PathLikeWithPosition -// https://learn.microsoft.com/en-us/visualstudio/msbuild/msbuild-diagnostic-format-for-tasks -const WORD_REGEX: &str = - r#"[\$\+\w.\[\]:/\\@\-~()]+(?:\((?:\d+|\d+,\d+)\))|[\$\+\w.\[\]:/\\@\-~()]+"#; - -const PYTHON_FILE_LINE_REGEX: &str = r#"File "(?P[^"]+)", line (?P\d+)"#; - -static PYTHON_FILE_LINE_MATCHER: LazyLock = - LazyLock::new(|| Regex::new(PYTHON_FILE_LINE_REGEX).unwrap()); - -fn python_extract_path_and_line(input: &str) -> Option<(&str, u32)> { - if let Some(captures) = 
PYTHON_FILE_LINE_MATCHER.captures(input) { - let path_part = captures.name("file")?.as_str(); - - let line_number: u32 = captures.name("line")?.as_str().parse().ok()?; - return Some((path_part, line_number)); - } - None -} +const WIDE_CHAR_SPACERS: Flags = + Flags::from_bits(Flags::LEADING_WIDE_CHAR_SPACER.bits() | Flags::WIDE_CHAR_SPACER.bits()) + .unwrap(); pub(super) struct RegexSearches { url_regex: RegexSearch, - word_regex: RegexSearch, - python_file_line_regex: RegexSearch, + path_hyperlink_regexes: Vec, + path_hyperlink_timeout: Duration, } +impl Default for RegexSearches { + fn default() -> Self { + Self { + url_regex: RegexSearch::new(URL_REGEX).unwrap(), + path_hyperlink_regexes: Vec::default(), + path_hyperlink_timeout: Duration::default(), + } + } +} impl RegexSearches { - pub(super) fn new() -> Self { + pub(super) fn new( + path_hyperlink_regexes: impl IntoIterator>, + path_hyperlink_timeout_ms: u64, + ) -> Self { Self { url_regex: RegexSearch::new(URL_REGEX).unwrap(), - word_regex: RegexSearch::new(WORD_REGEX).unwrap(), - python_file_line_regex: RegexSearch::new(PYTHON_FILE_LINE_REGEX).unwrap(), + path_hyperlink_regexes: path_hyperlink_regexes + .into_iter() + .filter_map(|regex| { + Regex::new(regex.as_ref()) + .inspect_err(|error| { + warn!( + concat!( + "Ignoring path hyperlink regex specified in ", + "`terminal.path_hyperlink_regexes`:\n\n\t{}\n\nError: {}", + ), + regex.as_ref(), + error + ); + }) + .ok() + }) + .collect(), + path_hyperlink_timeout: Duration::from_millis(path_hyperlink_timeout_ms), } } } @@ -77,76 +96,32 @@ pub(super) fn find_from_grid_point( let url_match = min_index..=max_index; Some((url, true, url_match)) - } else if let Some(url_match) = regex_match_at(term, point, &mut regex_searches.url_regex) { - let url = term.bounds_to_string(*url_match.start(), *url_match.end()); - let (sanitized_url, sanitized_match) = sanitize_url_punctuation(url, url_match, term); - Some((sanitized_url, true, sanitized_match)) - } else if let Some(python_match) = - regex_match_at(term, point, &mut regex_searches.python_file_line_regex) - { - let matching_line = term.bounds_to_string(*python_match.start(), *python_match.end()); - python_extract_path_and_line(&matching_line).map(|(file_path, line_number)| { - (format!("{file_path}:{line_number}"), false, python_match) - }) - } else if let Some(word_match) = regex_match_at(term, point, &mut regex_searches.word_regex) { - let file_path = term.bounds_to_string(*word_match.start(), *word_match.end()); - - let (sanitized_match, sanitized_word) = 'sanitize: { - let mut word_match = word_match; - let mut file_path = file_path; - - if is_path_surrounded_by_common_symbols(&file_path) { - word_match = Match::new( - word_match.start().add(term, Boundary::Grid, 1), - word_match.end().sub(term, Boundary::Grid, 1), - ); - file_path = file_path[1..file_path.len() - 1].to_owned(); - } - - while file_path.ends_with(':') { - file_path.pop(); - word_match = Match::new( - *word_match.start(), - word_match.end().sub(term, Boundary::Grid, 1), - ); - } - let mut colon_count = 0; - for c in file_path.chars() { - if c == ':' { - colon_count += 1; - } - } - // strip trailing comment after colon in case of - // file/at/path.rs:row:column:description or error message - // so that the file path is `file/at/path.rs:row:column` - if colon_count > 2 { - let last_index = file_path.rfind(':').unwrap(); - let prev_is_digit = last_index > 0 - && file_path - .chars() - .nth(last_index - 1) - .is_some_and(|c| c.is_ascii_digit()); - let next_is_digit = last_index 
< file_path.len() - 1 - && file_path - .chars() - .nth(last_index + 1) - .is_none_or(|c| c.is_ascii_digit()); - if prev_is_digit && !next_is_digit { - let stripped_len = file_path.len() - last_index; - word_match = Match::new( - *word_match.start(), - word_match.end().sub(term, Boundary::Grid, stripped_len), - ); - file_path = file_path[0..last_index].to_owned(); - } - } - - break 'sanitize (word_match, file_path); - }; - - Some((sanitized_word, false, sanitized_match)) } else { - None + let (line_start, line_end) = (term.line_search_left(point), term.line_search_right(point)); + if let Some((url, url_match)) = RegexIter::new( + line_start, + line_end, + AlacDirection::Right, + term, + &mut regex_searches.url_regex, + ) + .find(|rm| rm.contains(&point)) + .map(|url_match| { + let url = term.bounds_to_string(*url_match.start(), *url_match.end()); + sanitize_url_punctuation(url, url_match, term) + }) { + Some((url, true, url_match)) + } else { + path_match( + &term, + line_start, + line_end, + point, + &mut regex_searches.path_hyperlink_regexes, + regex_searches.path_hyperlink_timeout, + ) + .map(|(path, path_match)| (path, false, path_match)) + } }; found_word.map(|(maybe_url_or_path, is_url, word_match)| { @@ -222,58 +197,171 @@ fn sanitize_url_punctuation( } } -fn is_path_surrounded_by_common_symbols(path: &str) -> bool { - // Avoid detecting `[]` or `()` strings as paths, surrounded by common symbols - path.len() > 2 - // The rest of the brackets and various quotes cannot be matched by the [`WORD_REGEX`] hence not checked for. - && (path.starts_with('[') && path.ends_with(']') - || path.starts_with('(') && path.ends_with(')')) -} +fn path_match( + term: &Term, + line_start: AlacPoint, + line_end: AlacPoint, + hovered: AlacPoint, + path_hyperlink_regexes: &mut Vec, + path_hyperlink_timeout: Duration, +) -> Option<(String, Match)> { + if path_hyperlink_regexes.is_empty() || path_hyperlink_timeout.as_millis() == 0 { + return None; + } -/// Based on alacritty/src/display/hint.rs > regex_match_at -/// Retrieve the match, if the specified point is inside the content matching the regex. -fn regex_match_at(term: &Term, point: AlacPoint, regex: &mut RegexSearch) -> Option { - visible_regex_match_iter(term, regex).find(|rm| rm.contains(&point)) -} + let search_start_time = Instant::now(); + + let timed_out = || { + let elapsed_time = Instant::now().saturating_duration_since(search_start_time); + (elapsed_time > path_hyperlink_timeout) + .then_some((elapsed_time.as_millis(), path_hyperlink_timeout.as_millis())) + }; + + // This used to be: `let line = term.bounds_to_string(line_start, line_end)`, however, that + // api compresses tab characters into a single space, whereas we require a cell accurate + // string representation of the line. The below algorithm does this, but seems a bit odd. + // Maybe there is a clean api for doing this, but I couldn't find it. 
+ let mut line = String::with_capacity( + (line_end.line.0 - line_start.line.0 + 1) as usize * term.grid().columns(), + ); + line.push(term.grid()[line_start].c); + for cell in term.grid().iter_from(line_start) { + if cell.point > line_end { + break; + } + + if !cell.flags.intersects(WIDE_CHAR_SPACERS) { + line.push(match cell.c { + '\t' => ' ', + c @ _ => c, + }); + } + } + let line = line.trim_ascii_end(); + + let found_from_range = |path_range: Range, + link_range: Range, + position: Option<(u32, Option)>| { + let advance_point_by_str = |mut point: AlacPoint, s: &str| { + for _ in s.chars() { + point = term + .expand_wide(point, AlacDirection::Right) + .add(term, Boundary::Grid, 1); + } + + // There does not appear to be an alacritty api that is + // "move to start of current wide char", so we have to do it ourselves. + let flags = term.grid().index(point).flags; + if flags.contains(Flags::LEADING_WIDE_CHAR_SPACER) { + AlacPoint::new(point.line + 1, Column(0)) + } else if flags.contains(Flags::WIDE_CHAR_SPACER) { + AlacPoint::new(point.line, point.column - 1) + } else { + point + } + }; + + let link_start = advance_point_by_str(line_start, &line[..link_range.start]); + let link_end = advance_point_by_str(link_start, &line[link_range]); + let link_match = link_start + ..=term + .expand_wide(link_end, AlacDirection::Left) + .sub(term, Boundary::Grid, 1); + + Some(( + { + let mut path = line[path_range].to_string(); + position.inspect(|(line, column)| { + path += &format!(":{line}"); + column.inspect(|column| path += &format!(":{column}")); + }); + path + }, + link_match, + )) + }; + + for regex in path_hyperlink_regexes { + let mut path_found = false; + + for captures in regex.captures_iter(&line) { + let captures = match captures { + Ok(captures) => captures, + Err(error) => { + warn!("Error '{error}' searching for path hyperlinks in line: {line}"); + info!( + "Skipping match from path hyperlinks with regex: {}", + regex.as_str() + ); + continue; + } + }; + + let match_range = captures.get(0).unwrap().range(); + let (path_range, line_column) = if let Some(path) = captures.name("path") { + let parse = |name: &str| { + captures + .name(name) + .and_then(|capture| capture.as_str().parse().ok()) + }; + + ( + path.range(), + parse("line").map(|line| (line, parse("column"))), + ) + } else { + (match_range.clone(), None) + }; + let link_range = captures + .name("link") + .map_or(match_range, |link| link.range()); + let found = found_from_range(path_range, link_range, line_column); + + if let Some(found) = found { + path_found = true; + if found.1.contains(&hovered) { + return Some(found); + } + } + } + + if path_found { + return None; + } + + if let Some((timed_out_ms, timeout_ms)) = timed_out() { + warn!("Timed out processing path hyperlink regexes after {timed_out_ms}ms"); + info!("{timeout_ms}ms time out specified in `terminal.path_hyperlink_timeout_ms`"); + return None; + } + } -/// Copied from alacritty/src/display/hint.rs: -/// Iterate over all visible regex matches. 
-fn visible_regex_match_iter<'a, T>( - term: &'a Term, - regex: &'a mut RegexSearch, -) -> impl Iterator + 'a { - const MAX_SEARCH_LINES: usize = 100; - - let viewport_start = Line(-(term.grid().display_offset() as i32)); - let viewport_end = viewport_start + term.bottommost_line(); - let mut start = term.line_search_left(AlacPoint::new(viewport_start, Column(0))); - let mut end = term.line_search_right(AlacPoint::new(viewport_end, Column(0))); - start.line = start.line.max(viewport_start - MAX_SEARCH_LINES); - end.line = end.line.min(viewport_end + MAX_SEARCH_LINES); - - RegexIter::new(start, end, AlacDirection::Right, term, regex) - .skip_while(move |rm| rm.end().line < viewport_start) - .take_while(move |rm| rm.start().line <= viewport_end) + None } #[cfg(test)] mod tests { + use crate::terminal_settings::TerminalSettings; + use super::*; use alacritty_terminal::{ event::VoidListener, - index::{Boundary, Point as AlacPoint}, + grid::Dimensions, + index::{Boundary, Column, Line, Point as AlacPoint}, term::{Config, cell::Flags, test::TermSize}, vte::ansi::Handler, }; - use std::{cell::RefCell, ops::RangeInclusive, path::PathBuf}; + use fancy_regex::Regex; + use settings::{self, Settings, SettingsContent}; + use std::{cell::RefCell, ops::RangeInclusive, path::PathBuf, rc::Rc}; use url::Url; use util::paths::PathWithPosition; fn re_test(re: &str, hay: &str, expected: Vec<&str>) { - let results: Vec<_> = regex::Regex::new(re) + let results: Vec<_> = Regex::new(re) .unwrap() .find_iter(hay) - .map(|m| m.as_str()) + .map(|m| m.unwrap().as_str()) .collect(); assert_eq!(results, expected); } @@ -376,78 +464,6 @@ mod tests { } } - #[test] - fn test_word_regex() { - re_test( - WORD_REGEX, - "hello, world! \"What\" is this?", - vec!["hello", "world", "What", "is", "this"], - ); - } - - #[test] - fn test_word_regex_with_linenum() { - // filename(line) and filename(line,col) as used in MSBuild output - // should be considered a single "word", even though comma is - // usually a word separator - re_test(WORD_REGEX, "a Main.cs(20) b", vec!["a", "Main.cs(20)", "b"]); - re_test( - WORD_REGEX, - "Main.cs(20,5) Error desc", - vec!["Main.cs(20,5)", "Error", "desc"], - ); - // filename:line:col is a popular format for unix tools - re_test( - WORD_REGEX, - "a Main.cs:20:5 b", - vec!["a", "Main.cs:20:5", "b"], - ); - // Some tools output "filename:line:col:message", which currently isn't - // handled correctly, but might be in the future - re_test( - WORD_REGEX, - "Main.cs:20:5:Error desc", - vec!["Main.cs:20:5:Error", "desc"], - ); - } - - #[test] - fn test_python_file_line_regex() { - re_test( - PYTHON_FILE_LINE_REGEX, - "hay File \"/zed/bad_py.py\", line 8 stack", - vec!["File \"/zed/bad_py.py\", line 8"], - ); - re_test(PYTHON_FILE_LINE_REGEX, "unrelated", vec![]); - } - - #[test] - fn test_python_file_line() { - let inputs: Vec<(&str, Option<(&str, u32)>)> = vec![ - ( - "File \"/zed/bad_py.py\", line 8", - Some(("/zed/bad_py.py", 8u32)), - ), - ("File \"path/to/zed/bad_py.py\"", None), - ("unrelated", None), - ("", None), - ]; - let actual = inputs - .iter() - .map(|input| python_extract_path_and_line(input.0)) - .collect::>(); - let expected = inputs.iter().map(|(_, output)| *output).collect::>(); - assert_eq!(actual, expected); - } - - // We use custom columns in many tests to workaround this issue by ensuring a wrapped - // line never ends on a wide char: - // - // - // - // This issue was recently fixed, as soon as we update to a version containing the fix we - // can remove all the custom columns 
from these tests. - // macro_rules! test_hyperlink { ($($lines:expr),+; $hyperlink_kind:ident) => { { use crate::terminal_hyperlinks::tests::line_cells_count; @@ -458,21 +474,28 @@ mod tests { test_lines.iter().copied() .map(line_cells_count) .fold((0, 0), |state, cells| (state.0 + cells, cmp::max(state.1, cells))); - - test_hyperlink!( + let contains_tab_char = test_lines.iter().copied() + .map(str::chars).flatten().find(|&c| c == '\t'); + let columns = if contains_tab_char.is_some() { + // This avoids tabs at end of lines causing whitespace-eating line wraps... + vec![longest_line_cells + 1] + } else { // Alacritty has issues with 2 columns, use 3 as the minimum for now. - [3, longest_line_cells / 2, longest_line_cells + 1]; + vec![3, longest_line_cells / 2, longest_line_cells + 1] + }; + test_hyperlink!( + columns; total_cells; test_lines.iter().copied(); $hyperlink_kind ) } }; - ([ $($columns:expr),+ ]; $total_cells:expr; $lines:expr; $hyperlink_kind:ident) => { { + ($columns:expr; $total_cells:expr; $lines:expr; $hyperlink_kind:ident) => { { use crate::terminal_hyperlinks::tests::{ test_hyperlink, HyperlinkKind }; let source_location = format!("{}:{}", std::file!(), std::line!()); - for columns in vec![ $($columns),+] { + for columns in $columns { test_hyperlink(columns, $total_cells, $lines, HyperlinkKind::$hyperlink_kind, &source_location); } @@ -522,24 +545,80 @@ mod tests { test_path!("‹«/test/cool.rs»:«4»:«👉2»›:"); test_path!("‹«/👉test/cool.rs»(«4»,«2»)›:"); test_path!("‹«/test/cool.rs»(«4»,«2»👉)›:"); + test_path!("‹«/👉test/cool.rs»:(«4»,«2»)›:"); + test_path!("‹«/test/cool.rs»:(«4»,«2»👉)›:"); + test_path!("‹«/👉test/cool.rs»:(«4»:«2»)›:"); + test_path!("‹«/test/cool.rs»:(«4»:«2»👉)›:"); + test_path!("/test/cool.rs:4:2👉:", "What is this?"); + test_path!("/test/cool.rs(4,2)👉:", "What is this?"); // path, line, column, and description - test_path!("‹«/test/cool.rs»:«4»:«2»›👉:Error!"); - test_path!("‹«/test/cool.rs»:«4»:«2»›:👉Error!"); + test_path!("/test/cool.rs:4:2👉:Error!"); + test_path!("/test/cool.rs:4:2:👉Error!"); + test_path!("‹«/test/co👉ol.rs»:«4»:«2»›:Error!"); test_path!("‹«/test/co👉ol.rs»(«4»,«2»)›:Error!"); // Cargo output - test_path!(" Compiling Cool 👉(‹«/test/Cool»›)"); + test_path!(" Compiling Cool 👉(/test/Cool)"); test_path!(" Compiling Cool (‹«/👉test/Cool»›)"); - test_path!(" Compiling Cool (‹«/test/Cool»›👉)"); + test_path!(" Compiling Cool (/test/Cool👉)"); // Python test_path!("‹«awe👉some.py»›"); test_path!(" ‹F👉ile \"«/awesome.py»\", line «42»›: Wat?"); - test_path!(" ‹File \"«/awe👉some.py»\", line «42»›: Wat?"); + test_path!(" ‹File \"«/awe👉some.py»\", line «42»›"); test_path!(" ‹File \"«/awesome.py»👉\", line «42»›: Wat?"); - test_path!(" ‹File \"«/awesome.py»\", line «4👉2»›: Wat?"); + test_path!(" ‹File \"«/awesome.py»\", line «4👉2»›"); + } + + #[test] + fn simple_with_descriptions() { + // path, line, column and description + test_path!("‹«/👉test/cool.rs»:«4»:«2»›:例Desc例例例"); + test_path!("‹«/test/cool.rs»:«4»:«👉2»›:例Desc例例例"); + test_path!("/test/cool.rs:4:2:例Desc例👉例例"); + test_path!("‹«/👉test/cool.rs»(«4»,«2»)›:例Desc例例例"); + test_path!("‹«/test/cool.rs»(«4»👉,«2»)›:例Desc例例例"); + test_path!("/test/cool.rs(4,2):例Desc例👉例例"); + + // path, line, column and description w/extra colons + test_path!("‹«/👉test/cool.rs»:«4»:«2»›::例Desc例例例"); + test_path!("‹«/test/cool.rs»:«4»:«👉2»›::例Desc例例例"); + test_path!("/test/cool.rs:4:2::例Desc例👉例例"); + test_path!("‹«/👉test/cool.rs»(«4»,«2»)›::例Desc例例例"); + test_path!("‹«/test/cool.rs»(«4»,«2»👉)›::例Desc例例例"); + 
test_path!("/test/cool.rs(4,2)::例Desc例👉例例"); + } + + #[test] + fn multiple_same_line() { + test_path!("‹«/👉test/cool.rs»› /test/cool.rs"); + test_path!("/test/cool.rs ‹«/👉test/cool.rs»›"); + + test_path!( + "‹«🦀 multiple_👉same_line 🦀» 🚣«4» 🏛️«2»›: 🦀 multiple_same_line 🦀 🚣4 🏛️2:" + ); + test_path!( + "🦀 multiple_same_line 🦀 🚣4 🏛️2 ‹«🦀 multiple_👉same_line 🦀» 🚣«4» 🏛️«2»›:" + ); + + // ls output (tab separated) + test_path!( + "‹«Carg👉o.toml»›\t\texperiments\t\tnotebooks\t\trust-toolchain.toml\ttooling" + ); + test_path!( + "Cargo.toml\t\t‹«exper👉iments»›\t\tnotebooks\t\trust-toolchain.toml\ttooling" + ); + test_path!( + "Cargo.toml\t\texperiments\t\t‹«note👉books»›\t\trust-toolchain.toml\ttooling" + ); + test_path!( + "Cargo.toml\t\texperiments\t\tnotebooks\t\t‹«rust-t👉oolchain.toml»›\ttooling" + ); + test_path!( + "Cargo.toml\t\texperiments\t\tnotebooks\t\trust-toolchain.toml\t‹«too👉ling»›" + ); } #[test] @@ -555,6 +634,7 @@ mod tests { test_path!("‹«/test/co👉ol.rs»::«42»›"); test_path!("‹«/test/co👉ol.rs»::«42»›:"); test_path!("‹«/test/co👉ol.rs:4:2»(«1»,«618»)›"); + test_path!("‹«/test/co👉ol.rs:4:2»(«1»,«618»)›:"); test_path!("‹«/test/co👉ol.rs»(«1»,«618»)›::"); } @@ -570,7 +650,58 @@ mod tests { test_path!("<‹«/test/co👉ol.rs»:«4»›>"); test_path!("[\"‹«/test/co👉ol.rs»:«4»›\"]"); - test_path!("'(‹«/test/co👉ol.rs»:«4»›)'"); + test_path!("'‹«(/test/co👉ol.rs:4)»›'"); + + test_path!("\"‹«/test/co👉ol.rs»:«4»:«2»›\""); + test_path!("'‹«/test/co👉ol.rs»:«4»:«2»›'"); + test_path!("`‹«/test/co👉ol.rs»:«4»:«2»›`"); + + test_path!("[‹«/test/co👉ol.rs»:«4»:«2»›]"); + test_path!("(‹«/test/co👉ol.rs»:«4»:«2»›)"); + test_path!("{‹«/test/co👉ol.rs»:«4»:«2»›}"); + test_path!("<‹«/test/co👉ol.rs»:«4»:«2»›>"); + + test_path!("[\"‹«/test/co👉ol.rs»:«4»:«2»›\"]"); + + test_path!("\"‹«/test/co👉ol.rs»(«4»)›\""); + test_path!("'‹«/test/co👉ol.rs»(«4»)›'"); + test_path!("`‹«/test/co👉ol.rs»(«4»)›`"); + + test_path!("[‹«/test/co👉ol.rs»(«4»)›]"); + test_path!("(‹«/test/co👉ol.rs»(«4»)›)"); + test_path!("{‹«/test/co👉ol.rs»(«4»)›}"); + test_path!("<‹«/test/co👉ol.rs»(«4»)›>"); + + test_path!("[\"‹«/test/co👉ol.rs»(«4»)›\"]"); + + test_path!("\"‹«/test/co👉ol.rs»(«4»,«2»)›\""); + test_path!("'‹«/test/co👉ol.rs»(«4»,«2»)›'"); + test_path!("`‹«/test/co👉ol.rs»(«4»,«2»)›`"); + + test_path!("[‹«/test/co👉ol.rs»(«4»,«2»)›]"); + test_path!("(‹«/test/co👉ol.rs»(«4»,«2»)›)"); + test_path!("{‹«/test/co👉ol.rs»(«4»,«2»)›}"); + test_path!("<‹«/test/co👉ol.rs»(«4»,«2»)›>"); + + test_path!("[\"‹«/test/co👉ol.rs»(«4»,«2»)›\"]"); + + // Imbalanced + test_path!("([‹«/test/co👉ol.rs»:«4»›] was here...)"); + test_path!("[Here's <‹«/test/co👉ol.rs»:«4»›>]"); + test_path!("('‹«/test/co👉ol.rs»:«4»›' was here...)"); + test_path!("[Here's `‹«/test/co👉ol.rs»:«4»›`]"); + } + + #[test] + fn trailing_punctuation() { + test_path!("‹«/test/co👉ol.rs»›:,.."); + test_path!("/test/cool.rs:,👉.."); + test_path!("‹«/test/co👉ol.rs»:«4»›:,"); + test_path!("/test/cool.rs:4:👉,"); + test_path!("[\"‹«/test/co👉ol.rs»:«4»›\"]:,"); + test_path!("'‹«(/test/co👉ol.rs:4),,»›'.."); + test_path!("('‹«/test/co👉ol.rs»:«4»›'::: was here...)"); + test_path!("[Here's <‹«/test/co👉ol.rs»:«4»›>]::: "); } #[test] @@ -585,6 +716,20 @@ mod tests { test_path!(" Compiling Cool (‹«/👉例/Cool»›)"); test_path!(" Compiling Cool (‹«/例👈/Cool»›)"); + test_path!(" Compiling Cool (‹«/👉例/Cool Spaces»›)"); + test_path!(" Compiling Cool (‹«/例👈/Cool Spaces»›)"); + test_path!(" Compiling Cool (‹«/👉例/Cool Spaces»:«4»:«2»›)"); + test_path!(" Compiling Cool (‹«/例👈/Cool Spaces»(«4»,«2»)›)"); + + test_path!(" --> ‹«/👉例/Cool 
Spaces»›"); + test_path!(" ::: ‹«/例👈/Cool Spaces»›"); + test_path!(" --> ‹«/👉例/Cool Spaces»:«4»:«2»›"); + test_path!(" ::: ‹«/例👈/Cool Spaces»(«4»,«2»)›"); + test_path!(" panicked at ‹«/👉例/Cool Spaces»:«4»:«2»›:"); + test_path!(" panicked at ‹«/例👈/Cool Spaces»(«4»,«2»)›:"); + test_path!(" at ‹«/👉例/Cool Spaces»:«4»:«2»›"); + test_path!(" at ‹«/例👈/Cool Spaces»(«4»,«2»)›"); + // Python test_path!("‹«👉例wesome.py»›"); test_path!("‹«例👈wesome.py»›"); @@ -624,7 +769,14 @@ mod tests { } #[test] - #[should_panic(expected = "No hyperlink found")] + // + fn issue_12338_regex() { + // Issue #12338 + test_path!(".rw-r--r-- 0 staff 05-27 14:03 ‹«'test file 👉1.txt'»›"); + test_path!(".rw-r--r-- 0 staff 05-27 14:03 ‹«👉'test file 1.txt'»›"); + } + + #[test] // fn issue_12338() { // Issue #12338 @@ -658,30 +810,48 @@ mod tests { test_path!(" ‹File \"«/🏃👈wesome.🔥»\", line «42»›: Wat?"); } + #[test] + // + fn issue_40202() { + // Elixir + test_path!("[‹«lib/blitz_apex_👉server/stats/aggregate_rank_stats.ex»:«35»›: BlitzApexServer.Stats.AggregateRankStats.update/2] + 1 #=> 1"); + } + + #[test] + // + fn issue_28194() { + test_path!( + "‹«test/c👉ontrollers/template_items_controller_test.rb»:«20»›:in 'block (2 levels) in '" + ); + test_path!( + "test/controllers/template_items_controller_test.rb:19:i👉n 'block in '" + ); + } + #[test] #[cfg_attr( not(target_os = "windows"), should_panic( - expected = "Path = «test/controllers/template_items_controller_test.rb», line = 20, at grid cells (0, 0)..=(17, 1)" + expected = "Path = «/test/cool.rs:4:NotDesc», at grid cells (0, 1)..=(7, 2)" ) )] #[cfg_attr( target_os = "windows", should_panic( - expected = r#"Path = «test\\controllers\\template_items_controller_test.rb», line = 20, at grid cells (0, 0)..=(17, 1)"# + expected = r#"Path = «C:\\test\\cool.rs:4:NotDesc», at grid cells (0, 1)..=(8, 1)"# ) )] - // - // - // #28194 was closed, but the link includes the description part (":in" here), which - // seems wrong... - fn issue_28194() { - test_path!( - "‹«test/c👉ontrollers/template_items_controller_test.rb»:«20»›:in 'block (2 levels) in '" - ); - test_path!( - "‹«test/controllers/template_items_controller_test.rb»:«19»›:i👉n 'block in '" - ); + // PathWithPosition::parse_str considers "/test/co👉ol.rs:4:NotDesc" invalid input, but + // still succeeds and truncates the part after the position. Ideally this would be + // parsed as the path "/test/co👉ol.rs:4:NotDesc" with no position. 
+ fn path_with_position_parse_str() { + test_path!("`‹«/test/co👉ol.rs:4:NotDesc»›`"); + test_path!("<‹«/test/co👉ol.rs:4:NotDesc»›>"); + + test_path!("'‹«(/test/co👉ol.rs:4:2)»›'"); + test_path!("'‹«(/test/co👉ol.rs(4))»›'"); + test_path!("'‹«(/test/co👉ol.rs(4,2))»›'"); } } @@ -715,35 +885,38 @@ mod tests { test_path!("‹«/👉test/cool.rs(1,618033988749)»›"); } - #[test] - #[should_panic(expected = "Path = «»")] - fn colon_suffix_succeeds_in_finding_an_empty_maybe_path() { - test_path!("‹«/test/cool.rs»:«4»:«2»›👉:", "What is this?"); - test_path!("‹«/test/cool.rs»(«4»,«2»)›👉:", "What is this?"); - } - #[test] #[cfg_attr( not(target_os = "windows"), - should_panic(expected = "Path = «/test/cool.rs»") + should_panic(expected = "Path = «/te:st/co:ol.r:s:4:2::::::»") )] #[cfg_attr( target_os = "windows", - should_panic(expected = r#"Path = «C:\\test\\cool.rs»"#) + should_panic(expected = r#"Path = «C:\\te:st\\co:ol.r:s:4:2::::::»"#) )] fn many_trailing_colons_should_be_parsed_as_part_of_the_path() { - test_path!("‹«/test/cool.rs:::👉:»›"); test_path!("‹«/te:st/👉co:ol.r:s:4:2::::::»›"); + test_path!("/test/cool.rs:::👉:"); } } - #[cfg(target_os = "windows")] mod windows { // Lots of fun to be had with long file paths (verbatim) and UNC paths on Windows. // See // See // See + #[test] + fn default_prompts() { + // Windows command prompt + test_path!(r#"‹«C:\Users\someone\👉test»›>"#); + test_path!(r#"C:\Users\someone\test👉>"#); + + // Windows PowerShell + test_path!(r#"PS ‹«C:\Users\someone\👉test\cool.rs»›>"#); + test_path!(r#"PS C:\Users\someone\test\cool.rs👉>"#); + } + #[test] fn unc() { test_path!(r#"‹«\\server\share\👉test\cool.rs»›"#); @@ -752,24 +925,116 @@ mod tests { mod issues { #[test] - #[should_panic( - expected = r#"Path = «C:\\test\\cool.rs», at grid cells (0, 0)..=(6, 0)"# - )] fn issue_verbatim() { test_path!(r#"‹«\\?\C:\👉test\cool.rs»›"#); test_path!(r#"‹«\\?\C:\test\cool👉.rs»›"#); } #[test] - #[should_panic( - expected = r#"Path = «\\\\server\\share\\test\\cool.rs», at grid cells (0, 0)..=(10, 2)"# - )] fn issue_verbatim_unc() { test_path!(r#"‹«\\?\UNC\server\share\👉test\cool.rs»›"#); test_path!(r#"‹«\\?\UNC\server\share\test\cool👉.rs»›"#); } } } + + mod perf { + use super::super::*; + use crate::TerminalSettings; + use alacritty_terminal::{ + event::VoidListener, + grid::Dimensions, + index::{Column, Point as AlacPoint}, + term::test::mock_term, + term::{Term, search::Match}, + }; + use settings::{self, Settings, SettingsContent}; + use std::{cell::RefCell, rc::Rc}; + use util_macros::perf; + + fn build_test_term(line: &str) -> (Term, AlacPoint) { + let content = line.repeat(500); + let term = mock_term(&content); + let point = AlacPoint::new( + term.grid().bottommost_line() - 1, + Column(term.grid().last_column().0 / 2), + ); + + (term, point) + } + + #[perf] + pub fn cargo_hyperlink_benchmark() { + const LINE: &str = " Compiling terminal v0.1.0 (/Hyperlinks/Bench/Source/zed-hyperlinks/crates/terminal)\r\n"; + thread_local! { + static TEST_TERM_AND_POINT: (Term, AlacPoint) = + build_test_term(LINE); + } + TEST_TERM_AND_POINT.with(|(term, point)| { + assert!( + find_from_grid_point_bench(term, *point).is_some(), + "Hyperlink should have been found" + ); + }); + } + + #[perf] + pub fn rust_hyperlink_benchmark() { + const LINE: &str = " --> /Hyperlinks/Bench/Source/zed-hyperlinks/crates/terminal/terminal.rs:1000:42\r\n"; + thread_local! 
{ + static TEST_TERM_AND_POINT: (Term, AlacPoint) = + build_test_term(LINE); + } + TEST_TERM_AND_POINT.with(|(term, point)| { + assert!( + find_from_grid_point_bench(term, *point).is_some(), + "Hyperlink should have been found" + ); + }); + } + + #[perf] + pub fn ls_hyperlink_benchmark() { + const LINE: &str = "Cargo.toml experiments notebooks rust-toolchain.toml tooling\r\n"; + thread_local! { + static TEST_TERM_AND_POINT: (Term, AlacPoint) = + build_test_term(LINE); + } + TEST_TERM_AND_POINT.with(|(term, point)| { + assert!( + find_from_grid_point_bench(term, *point).is_some(), + "Hyperlink should have been found" + ); + }); + } + + pub fn find_from_grid_point_bench( + term: &Term, + point: AlacPoint, + ) -> Option<(String, bool, Match)> { + const PATH_HYPERLINK_TIMEOUT_MS: u64 = 1000; + + thread_local! { + static TEST_REGEX_SEARCHES: RefCell = + RefCell::new({ + let default_settings_content: Rc = + settings::parse_json_with_comments(&settings::default_settings()) + .unwrap(); + let default_terminal_settings = + TerminalSettings::from_settings(&default_settings_content); + + RegexSearches::new( + &default_terminal_settings.path_hyperlink_regexes, + PATH_HYPERLINK_TIMEOUT_MS + ) + }); + } + + TEST_REGEX_SEARCHES.with(|regex_searches| { + find_from_grid_point(&term, point, &mut regex_searches.borrow_mut()) + }) + } + } } mod file_iri { @@ -821,11 +1086,12 @@ mod tests { } // See https://en.wikipedia.org/wiki/File_URI_scheme + // https://github.com/zed-industries/zed/issues/39189 #[test] #[should_panic( expected = r#"Path = «C:\\test\\cool\\index.rs», at grid cells (0, 0)..=(9, 1)"# )] - fn issue_absolute_file_iri() { + fn issue_39189() { test_file_iri!("file:///C:/test/cool/index.rs"); test_file_iri!("file:///C:/test/cool/"); } @@ -981,7 +1247,7 @@ mod tests { let mut point = cursor.point; if !cursor.input_needs_wrap { - point.column -= 1; + point = point.sub(term, Boundary::Grid, 1); } if grid.index(point).flags.contains(Flags::WIDE_CHAR_SPACER) { @@ -1007,6 +1273,13 @@ mod tests { } } + fn process_input(term: &mut Term, c: char) { + match c { + '\t' => term.put_tab(1), + c @ _ => term.input(c), + } + } + let mut hovered_grid_point: Option = None; let mut hyperlink_match = AlacPoint::default()..=AlacPoint::default(); let mut iri_or_path = String::default(); @@ -1098,9 +1371,9 @@ mod tests { term.input('C'); prev_input_point = prev_input_point_from_term(&term); term.input(':'); - term.input(c); + process_input(&mut term, c); } else { - term.input(c); + process_input(&mut term, c); prev_input_point = prev_input_point_from_term(&term); } @@ -1130,15 +1403,6 @@ mod tests { iri_or_path = path.to_string_lossy().into_owned(); } - if cfg!(windows) { - // Handle verbatim and UNC paths for Windows - if let Some(stripped) = iri_or_path.strip_prefix(r#"\\?\UNC\"#) { - iri_or_path = format!(r#"\\{stripped}"#); - } else if let Some(stripped) = iri_or_path.strip_prefix(r#"\\?\"#) { - iri_or_path = stripped.to_string(); - } - } - let hovered_grid_point = hovered_grid_point.expect("Missing hovered point (👉 or 👈)"); let hovered_char = term.grid().index(hovered_grid_point).c; ( @@ -1161,6 +1425,7 @@ mod tests { match c { // Fullwidth unicode characters used in tests '例' | '🏃' | '🦀' | '🔥' => 2, + '\t' => 8, // it's really 0-8, use the max always _ => 1, } } @@ -1283,11 +1548,9 @@ mod tests { let mut marker_header_row = String::new(); for index in 0..self.term.columns() { let remainder = index % 10; - first_header_row.push_str( - &(index > 0 && remainder == 0) - .then_some((index / 10).to_string()) - 
.unwrap_or(" ".into()), - ); + if index > 0 && remainder == 0 { + first_header_row.push_str(&format!("{:>10}", (index / 10))); + } second_header_row += &remainder.to_string(); if index == self.expected_hyperlink.hovered_grid_point.column.0 { marker_header_row.push('↓'); @@ -1296,16 +1559,20 @@ mod tests { } } - result += &format!("\n [{}]\n", first_header_row); + let remainder = (self.term.columns() - 1) % 10; + if remainder != 0 { + first_header_row.push_str(&" ".repeat(remainder)); + } + + result += &format!("\n [ {}]\n", first_header_row); result += &format!(" [{}]\n", second_header_row); result += &format!(" {}", marker_header_row); - let spacers: Flags = Flags::LEADING_WIDE_CHAR_SPACER | Flags::WIDE_CHAR_SPACER; for cell in self .term .renderable_content() .display_iter - .filter(|cell| !cell.flags.intersects(spacers)) + .filter(|cell| !cell.flags.intersects(WIDE_CHAR_SPACERS)) { if cell.point.column.0 == 0 { let prefix = @@ -1317,7 +1584,10 @@ mod tests { result += &format!("\n{prefix}[{:>3}] ", cell.point.line.to_string()); } - result.push(cell.c); + match cell.c { + '\t' => result.push(' '), + c @ _ => result.push(c), + } } result @@ -1331,8 +1601,34 @@ mod tests { hyperlink_kind: HyperlinkKind, source_location: &str, ) { + const CARGO_DIR_REGEX: &str = + r#"\s+(Compiling|Checking|Documenting) [^(]+\((?(?.+))\)"#; + const RUST_DIAGNOSTIC_REGEX: &str = r#"\s+(-->|:::|at) (?(?.+?))(:$|$)"#; + const ISSUE_12338_REGEX: &str = + r#"[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} (?(?.+))"#; + const MULTIPLE_SAME_LINE_REGEX: &str = + r#"(?(?🦀 multiple_same_line 🦀) 🚣(?[0-9]+) 🏛(?[0-9]+)):"#; + const PATH_HYPERLINK_TIMEOUT_MS: u64 = 1000; + thread_local! { - static TEST_REGEX_SEARCHES: RefCell = RefCell::new(RegexSearches::new()); + static TEST_REGEX_SEARCHES: RefCell = + RefCell::new({ + let default_settings_content: Rc = + settings::parse_json_with_comments(&settings::default_settings()).unwrap(); + let default_terminal_settings = TerminalSettings::from_settings(&default_settings_content); + + RegexSearches::new([ + RUST_DIAGNOSTIC_REGEX, + CARGO_DIR_REGEX, + ISSUE_12338_REGEX, + MULTIPLE_SAME_LINE_REGEX, + ] + .into_iter() + .chain(default_terminal_settings.path_hyperlink_regexes + .iter() + .map(AsRef::as_ref)), + PATH_HYPERLINK_TIMEOUT_MS) + }); } let term_size = TermSize::new(columns, total_cells / columns + 2); @@ -1357,12 +1653,16 @@ mod tests { Some((hyperlink_word, true, hyperlink_match)) => { check_hyperlink_match.check_iri_and_match(hyperlink_word, &hyperlink_match); } - _ => { - assert!( - false, - "No hyperlink found\n at {source_location}:\n{}", - check_hyperlink_match.format_renderable_content() - ) + None => { + if expected_hyperlink.hyperlink_match.start() + != expected_hyperlink.hyperlink_match.end() + { + assert!( + false, + "No hyperlink found\n at {source_location}:\n{}", + check_hyperlink_match.format_renderable_content() + ) + } } } } diff --git a/crates/terminal/src/terminal_settings.rs b/crates/terminal/src/terminal_settings.rs index 0c6f03832c939a1d0ad4431932d9ce4ea3d7f57f..3b3070c6f680452b43d398786fa2a705a06d3404 100644 --- a/crates/terminal/src/terminal_settings.rs +++ b/crates/terminal/src/terminal_settings.rs @@ -9,8 +9,8 @@ use serde::{Deserialize, Serialize}; pub use settings::AlternateScroll; use settings::{ - RegisterSetting, ShowScrollbar, TerminalBlink, TerminalDockPosition, TerminalLineHeight, - VenvSettings, WorkingDirectory, merge_from::MergeFrom, + PathHyperlinkRegex, RegisterSetting, ShowScrollbar, TerminalBlink, TerminalDockPosition, + TerminalLineHeight, 
VenvSettings, WorkingDirectory, merge_from::MergeFrom, }; use task::Shell; use theme::FontFamilyName; @@ -47,6 +47,8 @@ pub struct TerminalSettings { pub toolbar: Toolbar, pub scrollbar: ScrollbarSettings, pub minimum_contrast: f32, + pub path_hyperlink_regexes: Vec, + pub path_hyperlink_timeout_ms: u64, } #[derive(Copy, Clone, Debug, Serialize, Deserialize, JsonSchema, PartialEq, Eq)] @@ -116,6 +118,16 @@ impl settings::Settings for TerminalSettings { show: user_content.scrollbar.unwrap().show, }, minimum_contrast: user_content.minimum_contrast.unwrap(), + path_hyperlink_regexes: project_content + .path_hyperlink_regexes + .unwrap() + .into_iter() + .map(|regex| match regex { + PathHyperlinkRegex::SingleLine(regex) => regex, + PathHyperlinkRegex::MultiLine(regex) => regex.join("\n"), + }) + .collect(), + path_hyperlink_timeout_ms: project_content.path_hyperlink_timeout_ms.unwrap(), } } } diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index a3e24506c46054940dc13a52a4ba82cb233c6604..6edcafb3d8f275047ba953cdf6644604709f7f22 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4115,6 +4115,53 @@ Example command to set the title: `echo -e "\e]2;New Title\007";` } ``` +### Terminal: Path Hyperlink Regexes + +- Description: Regexes used to identify path hyperlinks. The regexes can be specified in two forms - a single regex string, or an array of strings (which will be collected into a single multi-line regex string). +- Setting: `path_hyperlink_regexes` +- Default: + +```json [settings] +{ + "terminal": { + "path_hyperlink_regexes": [ + // Python-style diagnostics + "File \"(?[^\"]+)\", line (?[0-9]+)", + // Common path syntax with optional line, column, description, trailing punctuation, or + // surrounding symbols or quotes + [ + "(?x)", + "# optionally starts with 0-2 opening prefix symbols", + "[({\\[<]{0,2}", + "# which may be followed by an opening quote", + "(?[\"'`])?", + "# `path` is the shortest sequence of any non-space character", + "(?(?[^ ]+?", + " # which may end with a line and optionally a column,", + " (?:+[0-9]+(:[0-9]+)?|:?\\([0-9]+([,:][0-9]+)?\\))?", + "))", + "# which must be followed by a matching quote", + "(?()\\k)", + "# and optionally a single closing symbol", + "[)}\\]>]?", + "# if line/column matched, may be followed by a description", + "(?():[^ 0-9][^ ]*)?", + "# which may be followed by trailing punctuation", + "[.,:)}\\]>]*", + "# and always includes trailing whitespace or end of line", + "([ ]+|$)" + ] + ] + } +} +``` + +### Terminal: Path Hyperlink Timeout (ms) + +- Description: Maximum time to search for a path hyperlink. When set to 0, path hyperlinks are disabled. +- Setting: `path_hyperlink_timeout_ms` +- Default: `1` + ## REPL - Description: Repl settings. From a04b3d80c82ba4aa6321a4c3986e9dbcf78c87fa Mon Sep 17 00:00:00 2001 From: Be Date: Fri, 21 Nov 2025 13:55:44 -0600 Subject: [PATCH 0297/1030] gpui: Fall back to client-side decorations on Wayland if SSD not supported (#39313) It is optional for Wayland servers to support server-side decorations. In particular, GNOME chooses to not implement SSD (https://gitlab.gnome.org/GNOME/mutter/-/issues/217). So, even if the application requests SSD, it must draw client-side decorations unless the application receives a response from the server confirming the request for SSD. 
Before, when the user requested SSD for Zed but the Wayland server did not
support it, no server-side decorations (window titlebar) were drawn, and Zed
did not draw the window minimize, maximize, and close buttons either. This
fixes Zed so it always draws the window control buttons if the Wayland server
does not support SSD.

Before on GNOME Wayland with SSD requested: (screenshot)

After on GNOME Wayland with SSD requested: (screenshot)

Release Notes:

- Fixed window control buttons not showing in GNOME Wayland when SSD requested

---
 .../gpui/src/platform/linux/wayland/window.rs | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/crates/gpui/src/platform/linux/wayland/window.rs b/crates/gpui/src/platform/linux/wayland/window.rs
index c02d1f3bc3d0d1ecf7589ae959f8c9b0e3f0fde5..3334ae28a31927b2150e79fc513855fa699c55ba 100644
--- a/crates/gpui/src/platform/linux/wayland/window.rs
+++ b/crates/gpui/src/platform/linux/wayland/window.rs
@@ -1270,10 +1270,21 @@ impl PlatformWindow for WaylandWindow {
 
     fn request_decorations(&self, decorations: WindowDecorations) {
         let mut state = self.borrow_mut();
-        state.decorations = decorations;
-        if let Some(decoration) = state.surface_state.decoration() {
-            decoration.set_mode(decorations.to_xdg());
-            update_window(state);
+        match state.surface_state.decoration().as_ref() {
+            Some(decoration) => {
+                decoration.set_mode(decorations.to_xdg());
+                state.decorations = decorations;
+                update_window(state);
+            }
+            None => {
+                if matches!(decorations, WindowDecorations::Server) {
+                    log::info!(
+                        "Server-side decorations requested, but the Wayland server does not support them. Falling back to client-side decorations."
+                    );
+                }
+                state.decorations = WindowDecorations::Client;
+                update_window(state);
+            }
         }
     }
 
From dfa102c5ae2b69e7f4e0faa8c24c9b229faa4eac Mon Sep 17 00:00:00 2001
From: Be
Date: Fri, 21 Nov 2025 13:56:00 -0600
Subject: [PATCH 0298/1030] Add setting for enabling server-side decorations
 (#39250)

Previously, this was controllable via the undocumented ZED_WINDOW_DECORATIONS
environment variable (added in #13866). Using an environment variable for this
is inconvenient because it requires users to set that environment variable
somehow before starting Zed, such as in the .desktop file or persistently in
their shell. Controlling this via a Zed setting is more convenient.

This does not modify the design of the titlebar in any way. It only moves the
existing option from an environment variable to a Zed setting.
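Judging from the `default.json` change below, the new option is a top-level
`"window_decorations"` setting that accepts `"client"` (the default) or
`"server"`, and Zed has to be restarted for the change to take effect.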
Fixes #14165 Client-side decorations (default): image Server-side decorations in KDE Plasma: image Release Notes: - Changed option for Wayland server-side decorations from an environment variable to settings.json field --------- Co-authored-by: Conrad Irwin --- assets/settings/default.json | 10 ++++++++ crates/rules_library/src/rules_library.rs | 7 ++++-- .../src/settings_content/workspace.rs | 25 +++++++++++++++++++ crates/settings/src/vscode_import.rs | 1 + crates/settings_ui/src/page_data.rs | 15 +++++++++++ crates/settings_ui/src/settings_ui.rs | 1 + crates/workspace/src/workspace_settings.rs | 2 ++ crates/zed/src/zed.rs | 5 +++- 8 files changed, 63 insertions(+), 3 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index 9b289bdf088be12ec6970f81ddd7edfd55aedc66..ba79f0ccbcca3837d94adc49ddbc9c53b3ae0a5f 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -175,6 +175,16 @@ // // Default: true "zoomed_padding": true, + // What draws Zed's window decorations (titlebar): + // 1. Client application (Zed) draws its own window decorations + // "client" + // 2. Display server draws the window decorations. Not supported by GNOME Wayland. + // "server" + // + // This requires restarting Zed for changes to take effect. + // + // Default: "client" + "window_decorations": "client", // Whether to use the system provided dialogs for Open and Save As. // When set to false, Zed will use the built-in keyboard-first pickers. "use_system_path_prompts": true, diff --git a/crates/rules_library/src/rules_library.rs b/crates/rules_library/src/rules_library.rs index 207a9841e41bf35e1f63bb00b0c62073c1cf0224..b5b664f6e5c91e2a4f3760b3ad34c3b055bb2df7 100644 --- a/crates/rules_library/src/rules_library.rs +++ b/crates/rules_library/src/rules_library.rs @@ -25,7 +25,7 @@ use ui::{ Divider, KeyBinding, ListItem, ListItemSpacing, ListSubHeader, Render, Tooltip, prelude::*, }; use util::{ResultExt, TryFutureExt}; -use workspace::{Workspace, client_side_decorations}; +use workspace::{Workspace, WorkspaceSettings, client_side_decorations}; use zed_actions::assistant::InlineAssist; use prompt_store::*; @@ -122,7 +122,10 @@ pub fn open_rules_library( let window_decorations = match std::env::var("ZED_WINDOW_DECORATIONS") { Ok(val) if val == "server" => gpui::WindowDecorations::Server, Ok(val) if val == "client" => gpui::WindowDecorations::Client, - _ => gpui::WindowDecorations::Client, + _ => match WorkspaceSettings::get_global(cx).window_decorations { + settings::WindowDecorations::Server => gpui::WindowDecorations::Server, + settings::WindowDecorations::Client => gpui::WindowDecorations::Client, + }, }; cx.open_window( WindowOptions { diff --git a/crates/settings/src/settings_content/workspace.rs b/crates/settings/src/settings_content/workspace.rs index f078c873179d2b50893e608bc51e609be9850a12..088d478e464bd0f4e9a92419440c16576005fc95 100644 --- a/crates/settings/src/settings_content/workspace.rs +++ b/crates/settings/src/settings_content/workspace.rs @@ -109,6 +109,9 @@ pub struct WorkspaceSettingsContent { /// /// Default: true pub zoomed_padding: Option, + /// What draws window decorations/titlebar, the client application (Zed) or display server + /// Default: client + pub window_decorations: Option, } #[with_fallible_options] @@ -290,6 +293,28 @@ pub enum BottomDockLayout { RightAligned, } +#[derive( + Copy, + Clone, + Default, + Debug, + Serialize, + Deserialize, + PartialEq, + JsonSchema, + MergeFrom, + strum::VariantArray, + strum::VariantNames, +)] 
+#[serde(rename_all = "snake_case")] +pub enum WindowDecorations { + /// Zed draws its own window decorations/titlebar (client-side decoration) + #[default] + Client, + /// Show system's window titlebar (server-side decoration; not supported by GNOME Wayland) + Server, +} + #[derive( Copy, Clone, diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 4d893011d49d2094614c6e06918ecf6e8fade774..22081727d8ff767b861a776f0a821e3b4a8d5fdf 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -843,6 +843,7 @@ impl VsCodeSettings { resize_all_panels_in_dock: None, restore_on_file_reopen: self.read_bool("workbench.editor.restoreViewState"), restore_on_startup: None, + window_decorations: None, show_call_status_icon: None, use_system_path_prompts: self.read_bool("files.simpleDialog.enable"), use_system_prompts: None, diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index 76874c2ad9594cd9955cbe759c458fe9cf007c2e..edd488f419eeee0a7074a95697d9615317891a4d 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -3264,6 +3264,21 @@ pub(crate) fn settings_data(cx: &App) -> Vec { metadata: None, files: USER, }), + SettingsPageItem::SettingItem(SettingItem { + title: "Window Decorations", + description: "(Linux only) whether Zed or your compositor should draw window decorations.", + field: Box::new(SettingField { + json_path: Some("window_decorations"), + pick: |settings_content| { + settings_content.workspace.window_decorations.as_ref() + }, + write: |settings_content, value| { + settings_content.workspace.window_decorations = value; + }, + }), + metadata: None, + files: USER, + }), SettingsPageItem::SectionHeader("Pane Modifiers"), SettingsPageItem::SettingItem(SettingItem { title: "Inactive Opacity", diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index ef8cf4928665113a72d97b804931295d6181dde4..4f29945edb2e212e3638db60213dde082a41baf6 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -513,6 +513,7 @@ fn init_renderers(cx: &mut App) { .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) .add_basic_renderer::(render_dropdown) + .add_basic_renderer::(render_dropdown) // please semicolon stay on next line ; } diff --git a/crates/workspace/src/workspace_settings.rs b/crates/workspace/src/workspace_settings.rs index 24958df7c6d5d36fee243022d700ccf56a570a19..4ce0394fe5fdc74754c1147138cb33c67e076d88 100644 --- a/crates/workspace/src/workspace_settings.rs +++ b/crates/workspace/src/workspace_settings.rs @@ -31,6 +31,7 @@ pub struct WorkspaceSettings { pub close_on_file_delete: bool, pub use_system_window_tabs: bool, pub zoomed_padding: bool, + pub window_decorations: settings::WindowDecorations, } #[derive(Copy, Clone, PartialEq, Debug, Default)] @@ -105,6 +106,7 @@ impl Settings for WorkspaceSettings { close_on_file_delete: workspace.close_on_file_delete.unwrap(), use_system_window_tabs: workspace.use_system_window_tabs.unwrap(), zoomed_padding: workspace.zoomed_padding.unwrap(), + window_decorations: workspace.window_decorations.unwrap(), } } } diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index df46794bb833320f2793fdb798df735fc72c8b3f..be38b3f0952d5ccab6d9d729d77f3fce1e407a4d 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -307,7 +307,10 @@ pub fn build_window_options(display_uuid: Option, cx: &mut App) -> 
WindowO let window_decorations = match std::env::var("ZED_WINDOW_DECORATIONS") { Ok(val) if val == "server" => gpui::WindowDecorations::Server, Ok(val) if val == "client" => gpui::WindowDecorations::Client, - _ => gpui::WindowDecorations::Client, + _ => match WorkspaceSettings::get_global(cx).window_decorations { + settings::WindowDecorations::Server => gpui::WindowDecorations::Server, + settings::WindowDecorations::Client => gpui::WindowDecorations::Client, + }, }; let use_system_window_tabs = WorkspaceSettings::get_global(cx).use_system_window_tabs; From 279b76d44051db300de6c3e2606ea128646f6937 Mon Sep 17 00:00:00 2001 From: Conrad Irwin Date: Fri, 21 Nov 2025 13:29:08 -0700 Subject: [PATCH 0299/1030] Retry sentry uploads (#43267) We see internal server errors occasionally; and it's very annoying to have to re-run the entire step Release Notes: - N/A --- script/bundle-linux | 17 ++++++++++++++--- script/bundle-mac | 17 +++++++++++++++-- script/bundle-windows.ps1 | 15 ++++++++++++++- 3 files changed, 43 insertions(+), 6 deletions(-) diff --git a/script/bundle-linux b/script/bundle-linux index dee60f01e6d1d0ba2624284a3e44c50b35a885c7..4f5c9f6e7eeb9875346d172dd8d0d2d5d0c2bd27 100755 --- a/script/bundle-linux +++ b/script/bundle-linux @@ -92,9 +92,20 @@ else echo "Uploading zed debug symbols to sentry..." # note: this uploads the unstripped binary which is needed because it contains # .eh_frame data for stack unwinding. see https://github.com/getsentry/symbolic/issues/783 - sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev \ - "${target_dir}/${target_triple}"/release/zed \ - "${target_dir}/${remote_server_triple}"/release/remote_server + for attempt in 1 2 3; do + echo "Attempting sentry upload (attempt $attempt/3)..." + if sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev \ + "${target_dir}/${target_triple}"/release/zed \ + "${target_dir}/${remote_server_triple}"/release/remote_server; then + echo "Sentry upload successful on attempt $attempt" + break + else + echo "Sentry upload failed on attempt $attempt" + if [ $attempt -eq 3 ]; then + echo "All sentry upload attempts failed" + fi + fi + done else echo "missing SENTRY_AUTH_TOKEN. skipping sentry upload." fi diff --git a/script/bundle-mac b/script/bundle-mac index 5ee6590a0c656cb56bc5ea091ca844d26b13e9e3..c6c925f073600336f4aa3114a732609481ade26e 100755 --- a/script/bundle-mac +++ b/script/bundle-mac @@ -300,8 +300,21 @@ function upload_debug_symbols() { # note: this uploads the unstripped binary which is needed because it contains # .eh_frame data for stack unwinding. see https://github.com/getsentry/symbolic/issues/783 sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev \ - "target/${target_triple}/${target_dir}/zed.dwarf" \ - "target/${target_triple}/${target_dir}/remote_server.dwarf" + # Try uploading up to 3 times + for attempt in 1 2 3; do + echo "Sentry upload attempt $attempt..." + if sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev \ + "target/${target_triple}/${target_dir}/zed.dwarf" \ + "target/${target_triple}/${target_dir}/remote_server.dwarf"; then + break + else + echo "Sentry upload failed on attempt $attempt" + if [ $attempt -eq 3 ]; then + echo "All sentry upload attempts failed" + exit 1 + fi + fi + done else echo "missing SENTRY_AUTH_TOKEN. skipping sentry upload." 
fi diff --git a/script/bundle-windows.ps1 b/script/bundle-windows.ps1 index a9f5eafcc670ad6d3f36eeee92c7e05fe80fb8af..48114a970ff272d52c9927c65788c65dc1a5c7e7 100644 --- a/script/bundle-windows.ps1 +++ b/script/bundle-windows.ps1 @@ -147,7 +147,20 @@ function UploadToSentry { return } Write-Output "Uploading zed debug symbols to sentry..." - sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev $CargoOutDir + for ($i = 1; $i -le 3; $i++) { + try { + sentry-cli debug-files upload --include-sources --wait -p zed -o zed-dev $CargoOutDir + break + } + catch { + Write-Output "Sentry upload attempt $i failed: $_" + if ($i -eq 3) { + Write-Output "All sentry upload attempts failed" + throw + } + Start-Sleep -Seconds 2 + } + } } function MakeAppx { From d07193cdf2b88dd0835342c2c69f707a890cbd21 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 22 Nov 2025 00:33:32 +0100 Subject: [PATCH 0300/1030] git: Handle git pre-commit hooks separately (#43285) We now run git pre-commit hooks before we commit. This ensures we don't run into timeout issues with askpass delegate and report invalid error to the user. Closes #43157 Release Notes: - Fixed long running pre-commit hooks causing committing from Zed to fail. Co-authored-by: Cole Miller --- crates/collab/src/rpc.rs | 1 + crates/fs/src/fake_git_repo.rs | 10 ++++++- crates/git/src/git.rs | 25 ++++++++++++++++ crates/git/src/repository.rs | 29 ++++++++++++++++++- crates/project/src/git_store.rs | 51 +++++++++++++++++++++++++++++++-- crates/proto/proto/git.proto | 10 +++++++ crates/proto/proto/zed.proto | 4 ++- crates/proto/src/proto.rs | 3 ++ 8 files changed, 128 insertions(+), 5 deletions(-) diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index f73631bb19c80a463ed38b78031dd0fe4d452681..a3bd93db5fc177a60bb450b469335dc6b9e6ce3d 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -453,6 +453,7 @@ impl Server { .add_request_handler(forward_mutating_project_request::) .add_request_handler(forward_mutating_project_request::) .add_request_handler(forward_mutating_project_request::) + .add_request_handler(forward_mutating_project_request::) .add_request_handler(forward_mutating_project_request::) .add_request_handler(forward_read_only_project_request::) .add_request_handler(forward_read_only_project_request::) diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index 97cd13d185817453c369356bdc60cbc1517bf1e1..c9a41243aa641318026db208d78a64429cfeb1ab 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -3,7 +3,7 @@ use anyhow::{Context as _, Result, bail}; use collections::{HashMap, HashSet}; use futures::future::{self, BoxFuture, join_all}; use git::{ - Oid, + Oid, RunHook, blame::Blame, repository::{ AskPassDelegate, Branch, CommitDetails, CommitOptions, FetchOptions, GitRepository, @@ -532,6 +532,14 @@ impl GitRepository for FakeGitRepository { unimplemented!() } + fn run_hook( + &self, + _hook: RunHook, + _env: Arc>, + ) -> BoxFuture<'_, Result<()>> { + unimplemented!() + } + fn push( &self, _branch: String, diff --git a/crates/git/src/git.rs b/crates/git/src/git.rs index 50a1e1234ba3caeff729d37b6fa3022336b54e96..4dc2f0a8a93cec82da4df4d3b4431dbf6f4d3862 100644 --- a/crates/git/src/git.rs +++ b/crates/git/src/git.rs @@ -225,3 +225,28 @@ impl From for usize { u64::from_ne_bytes(u64_bytes) as usize } } + +#[repr(i32)] +#[derive(Copy, Clone, Debug)] +pub enum RunHook { + PreCommit, +} + +impl RunHook { + pub fn as_str(&self) -> &str { + match self { + 
Self::PreCommit => "pre-commit", + } + } + + pub fn to_proto(&self) -> i32 { + *self as i32 + } + + pub fn from_proto(value: i32) -> Option { + match value { + 0 => Some(Self::PreCommit), + _ => None, + } + } +} diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 2c9189962492daa75dba86e9e2ebd247ad85254e..9beb3d838382d9267afdb081211647139f85b75e 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -1,7 +1,7 @@ use crate::commit::parse_git_diff_name_status; use crate::stash::GitStash; use crate::status::{DiffTreeType, GitStatus, StatusCode, TreeDiff}; -use crate::{Oid, SHORT_SHA_LENGTH}; +use crate::{Oid, RunHook, SHORT_SHA_LENGTH}; use anyhow::{Context as _, Result, anyhow, bail}; use collections::HashMap; use futures::future::BoxFuture; @@ -485,6 +485,12 @@ pub trait GitRepository: Send + Sync { env: Arc>, ) -> BoxFuture<'_, Result<()>>; + fn run_hook( + &self, + hook: RunHook, + env: Arc>, + ) -> BoxFuture<'_, Result<()>>; + fn commit( &self, message: SharedString, @@ -1643,6 +1649,7 @@ impl GitRepository for RealGitRepository { .args(["commit", "--quiet", "-m"]) .arg(&message.to_string()) .arg("--cleanup=strip") + .arg("--no-verify") .stdout(smol::process::Stdio::piped()) .stderr(smol::process::Stdio::piped()); @@ -2037,6 +2044,26 @@ impl GitRepository for RealGitRepository { }) .boxed() } + + fn run_hook( + &self, + hook: RunHook, + env: Arc>, + ) -> BoxFuture<'_, Result<()>> { + let working_directory = self.working_directory(); + let git_binary_path = self.any_git_binary_path.clone(); + let executor = self.executor.clone(); + self.executor + .spawn(async move { + let working_directory = working_directory?; + let git = GitBinary::new(git_binary_path, working_directory, executor) + .envs(HashMap::clone(&env)); + git.run(&["hook", "run", "--ignore-missing", hook.as_str()]) + .await?; + Ok(()) + }) + .boxed() + } } fn git_status_args(path_prefixes: &[RepoPath]) -> Vec { diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 40ef0daa29390e229ab03eb840c39900163d4b6a..bde9261fa28b8ed0d6c6a79fd02b90177e52a98e 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -25,7 +25,7 @@ use futures::{ stream::FuturesOrdered, }; use git::{ - BuildPermalinkParams, GitHostingProviderRegistry, Oid, + BuildPermalinkParams, GitHostingProviderRegistry, Oid, RunHook, blame::Blame, parse_git_remote_url, repository::{ @@ -433,6 +433,7 @@ impl GitStore { client.add_entity_request_handler(Self::handle_stash_apply); client.add_entity_request_handler(Self::handle_stash_drop); client.add_entity_request_handler(Self::handle_commit); + client.add_entity_request_handler(Self::handle_run_hook); client.add_entity_request_handler(Self::handle_reset); client.add_entity_request_handler(Self::handle_show); client.add_entity_request_handler(Self::handle_load_commit_diff); @@ -1982,6 +1983,22 @@ impl GitStore { Ok(proto::Ack {}) } + async fn handle_run_hook( + this: Entity, + envelope: TypedEnvelope, + mut cx: AsyncApp, + ) -> Result { + let repository_id = RepositoryId::from_proto(envelope.payload.repository_id); + let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?; + let hook = RunHook::from_proto(envelope.payload.hook).context("invalid hook")?; + repository_handle + .update(&mut cx, |repository_handle, cx| { + repository_handle.run_hook(hook, cx) + })? 
+ .await??; + Ok(proto::Ack {}) + } + async fn handle_commit( this: Entity, envelope: TypedEnvelope, @@ -4262,19 +4279,49 @@ impl Repository { }) } + pub fn run_hook(&mut self, hook: RunHook, _cx: &mut App) -> oneshot::Receiver> { + let id = self.id; + self.send_job( + Some(format!("git hook {}", hook.as_str()).into()), + move |git_repo, _cx| async move { + match git_repo { + RepositoryState::Local { + backend, + environment, + } => backend.run_hook(hook, environment.clone()).await, + RepositoryState::Remote { project_id, client } => { + client + .request(proto::RunGitHook { + project_id: project_id.0, + repository_id: id.to_proto(), + hook: hook.to_proto(), + }) + .await?; + + Ok(()) + } + } + }, + ) + } + pub fn commit( &mut self, message: SharedString, name_and_email: Option<(SharedString, SharedString)>, options: CommitOptions, askpass: AskPassDelegate, - _cx: &mut App, + cx: &mut App, ) -> oneshot::Receiver> { let id = self.id; let askpass_delegates = self.askpass_delegates.clone(); let askpass_id = util::post_inc(&mut self.latest_askpass_id); + let rx = self.run_hook(RunHook::PreCommit, cx); + self.send_job(Some("git commit".into()), move |git_repo, _cx| async move { + rx.await??; + match git_repo { RepositoryState::Local { backend, diff --git a/crates/proto/proto/git.proto b/crates/proto/proto/git.proto index efbd7f616f9e75c4e0409f4dc73c67f9eb1836e0..07fab2065b98994547121f19bab8f0fda50b2a59 100644 --- a/crates/proto/proto/git.proto +++ b/crates/proto/proto/git.proto @@ -531,3 +531,13 @@ message GitCreateWorktree { string directory = 4; optional string commit = 5; } + +message RunGitHook { + enum GitHook { + PRE_COMMIT = 0; + } + + uint64 project_id = 1; + uint64 repository_id = 2; + GitHook hook = 3; +} diff --git a/crates/proto/proto/zed.proto b/crates/proto/proto/zed.proto index 6ecea916ca5143ecd75678cd2e21587087f67b51..7a2c86887fcd0ffa8f64e5362568b7bd0e12ec7b 100644 --- a/crates/proto/proto/zed.proto +++ b/crates/proto/proto/zed.proto @@ -437,7 +437,9 @@ message Envelope { OpenImageResponse open_image_response = 392; CreateImageForPeer create_image_for_peer = 393; - ExternalExtensionAgentsUpdated external_extension_agents_updated = 394; // current max + ExternalExtensionAgentsUpdated external_extension_agents_updated = 394; + + RunGitHook run_git_hook = 395; // current max } reserved 87 to 88; diff --git a/crates/proto/src/proto.rs b/crates/proto/src/proto.rs index fa6af5c3899da3519ce13d772bdc61fb78194d19..bcffe5ae01616c469bde2e730feff3e8e777e572 100644 --- a/crates/proto/src/proto.rs +++ b/crates/proto/src/proto.rs @@ -49,6 +49,7 @@ messages!( (ChannelMessageUpdate, Foreground), (CloseBuffer, Foreground), (Commit, Background), + (RunGitHook, Background), (CopyProjectEntry, Foreground), (CreateBufferForPeer, Foreground), (CreateImageForPeer, Foreground), @@ -349,6 +350,7 @@ request_messages!( (Call, Ack), (CancelCall, Ack), (Commit, Ack), + (RunGitHook, Ack), (CopyProjectEntry, ProjectEntryResponse), (CreateChannel, CreateChannelResponse), (CreateProjectEntry, ProjectEntryResponse), @@ -547,6 +549,7 @@ entity_messages!( BufferSaved, CloseBuffer, Commit, + RunGitHook, GetColorPresentation, CopyProjectEntry, CreateBufferForPeer, From de58a496efd3379dd787326c678b931ffff4213f Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 21 Nov 2025 16:10:26 -0800 Subject: [PATCH 0301/1030] Fix a bug where Anthropic completions would not work on nightly (#43287) Follow up to: https://github.com/zed-industries/zed/pull/43185/files Release Notes: - N/A Co-authored-by: Michael --- 
crates/anthropic/src/anthropic.rs | 64 +++++++------------------------ 1 file changed, 14 insertions(+), 50 deletions(-) diff --git a/crates/anthropic/src/anthropic.rs b/crates/anthropic/src/anthropic.rs index fd665e07dd7515198ee6d65cbb2b0ee69e75dce5..d4f89808379b0bf10c8f3eaa22484b61fd8c26f1 100644 --- a/crates/anthropic/src/anthropic.rs +++ b/crates/anthropic/src/anthropic.rs @@ -393,7 +393,7 @@ impl Model { } } - pub fn beta_headers(&self) -> String { + pub fn beta_headers(&self) -> Option { let mut headers = vec![]; match self { @@ -415,7 +415,11 @@ impl Model { _ => {} } - headers.join(",") + if headers.is_empty() { + None + } else { + Some(headers.join(",")) + } } pub fn tool_model_id(&self) -> &str { @@ -431,56 +435,12 @@ impl Model { } } -pub async fn complete( - client: &dyn HttpClient, - api_url: &str, - api_key: &str, - request: Request, - beta_headers: String, -) -> Result { - let uri = format!("{api_url}/v1/messages"); - let request_builder = HttpRequest::builder() - .method(Method::POST) - .uri(uri) - .header("Anthropic-Version", "2023-06-01") - .header("Anthropic-Beta", beta_headers) - .header("X-Api-Key", api_key.trim()) - .header("Content-Type", "application/json"); - - let serialized_request = - serde_json::to_string(&request).map_err(AnthropicError::SerializeRequest)?; - let request = request_builder - .body(AsyncBody::from(serialized_request)) - .map_err(AnthropicError::BuildRequestBody)?; - - let mut response = client - .send(request) - .await - .map_err(AnthropicError::HttpSend)?; - let status_code = response.status(); - let mut body = String::new(); - response - .body_mut() - .read_to_string(&mut body) - .await - .map_err(AnthropicError::ReadResponse)?; - - if status_code.is_success() { - Ok(serde_json::from_str(&body).map_err(AnthropicError::DeserializeResponse)?) 
- } else { - Err(AnthropicError::HttpResponseError { - status_code, - message: body, - }) - } -} - pub async fn stream_completion( client: &dyn HttpClient, api_url: &str, api_key: &str, request: Request, - beta_headers: String, + beta_headers: Option, ) -> Result>, AnthropicError> { stream_completion_with_rate_limit_info(client, api_url, api_key, request, beta_headers) .await @@ -578,7 +538,7 @@ pub async fn stream_completion_with_rate_limit_info( api_url: &str, api_key: &str, request: Request, - beta_headers: String, + beta_headers: Option, ) -> Result< ( BoxStream<'static, Result>, @@ -592,13 +552,17 @@ pub async fn stream_completion_with_rate_limit_info( }; let uri = format!("{api_url}/v1/messages"); - let request_builder = HttpRequest::builder() + let mut request_builder = HttpRequest::builder() .method(Method::POST) .uri(uri) .header("Anthropic-Version", "2023-06-01") - .header("Anthropic-Beta", beta_headers) .header("X-Api-Key", api_key.trim()) .header("Content-Type", "application/json"); + + if let Some(beta_headers) = beta_headers { + request_builder = request_builder.header("Anthropic-Beta", beta_headers); + } + let serialized_request = serde_json::to_string(&request).map_err(AnthropicError::SerializeRequest)?; let request = request_builder From 8e2c0c3a0c856084687887f895596f8e6d366414 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Sat, 22 Nov 2025 01:46:53 +0100 Subject: [PATCH 0302/1030] askpass: Fix double command ampersand in powershell script (#43289) Fixes https://github.com/zed-industries/zed/issues/42618 / https://github.com/zed-industries/zed/issues/43109 Release Notes: - N/A --- crates/askpass/src/askpass.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index 0974409477d452958df13893e316845a919723c5..25db3144ccb10b9cac1b8d8555ea9924e193468c 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -374,7 +374,7 @@ fn generate_askpass_script( Ok(format!( r#" $ErrorActionPreference = 'Stop'; - ($args -join [char]0) | & {askpass_program} --askpass={askpass_socket} 2> $null + ($args -join [char]0) | {askpass_program} --askpass={askpass_socket} 2> $null "#, )) } From 4376eb8217d7c37d2d254ab3bbc0c7af4a7b0993 Mon Sep 17 00:00:00 2001 From: Cole Miller Date: Fri, 21 Nov 2025 20:45:01 -0500 Subject: [PATCH 0303/1030] Disable flaky `test_git_status_postprocessing` test (#43293) Release Notes: - N/A --- crates/project/src/project_tests.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index d4a3068e856e1eaf0aff19754d81141956ca8fcf..cf13879471969f2fd459c473e72bd7e115589799 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8453,6 +8453,7 @@ async fn test_git_repository_status(cx: &mut gpui::TestAppContext) { } #[gpui::test] +#[ignore] async fn test_git_status_postprocessing(cx: &mut gpui::TestAppContext) { init_test(cx); cx.executor().allow_parking(); From bfe141ea79aa4984028934067ba75c48d99136ae Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Fri, 21 Nov 2025 18:23:52 -0800 Subject: [PATCH 0304/1030] Fix wsl path parsing (#43295) Closes #40286 Release Notes: - N/A --------- Co-authored-by: John Tur --- crates/cli/src/main.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index a16f5a3bab9849ee93abac4e2eccb602698b65de..335e75ac4f5e43e63159fb26018849d8e0a22ced 100644 --- 
a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -178,14 +178,13 @@ fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { .arg("--distribution") .arg(distro_name) .arg("--exec") - .arg("wslpath") - .arg("-m") + .arg("realpath") + .arg("-s") .arg(&source.path) .output()?; let result = String::from_utf8_lossy(&output.stdout); - let prefix = format!("//wsl.localhost/{}", distro_name); - source.path = Path::new(result.trim().strip_prefix(&prefix).unwrap_or(&result)).to_owned(); + source.path = Path::new(result.trim()).to_owned(); Ok(source.to_string(|path| path.to_string_lossy().into_owned())) } From ab0527b3908bb4f941be849523643f0371839e50 Mon Sep 17 00:00:00 2001 From: Marco Mihai Condrache <52580954+marcocondrache@users.noreply.github.com> Date: Sat, 22 Nov 2025 11:21:01 +0100 Subject: [PATCH 0305/1030] gpui: Fix documentation of window methods (#43315) Closes #43313 Release Notes: - N/A Signed-off-by: Marco Mihai Condrache <52580954+marcocondrache@users.noreply.github.com> --- crates/gpui/src/window.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/window.rs b/crates/gpui/src/window.rs index 215a9423482925ee093a93af896e6cd00872aba6..1b317e1ee30daa8e9dfa71a0efe0eba526f9cbbf 100644 --- a/crates/gpui/src/window.rs +++ b/crates/gpui/src/window.rs @@ -2434,7 +2434,7 @@ impl Window { } /// Updates the cursor style at the platform level. This method should only be called - /// during the prepaint phase of element drawing. + /// during the paint phase of element drawing. pub fn set_cursor_style(&mut self, style: CursorStyle, hitbox: &Hitbox) { self.invalidator.debug_assert_paint(); self.next_frame.cursor_styles.push(CursorStyleRequest { @@ -2445,7 +2445,7 @@ impl Window { /// Updates the cursor style for the entire window at the platform level. A cursor /// style using this method will have precedence over any cursor style set using - /// `set_cursor_style`. This method should only be called during the prepaint + /// `set_cursor_style`. This method should only be called during the paint /// phase of element drawing. pub fn set_window_cursor_style(&mut self, style: CursorStyle) { self.invalidator.debug_assert_paint(); From 10eba0bd5f79286fa27eb105b38992ba838fd66f Mon Sep 17 00:00:00 2001 From: Liffindra Angga Zaaldian <3760093+findrakecil@users.noreply.github.com> Date: Sat, 22 Nov 2025 17:21:56 +0700 Subject: [PATCH 0306/1030] Update JavaScript default language server (#43316) As stated in [TypeScript Language Server documentation](https://zed.dev/docs/languages/typescript#language-servers), JavaScript uses `vtsls` as the default language server. Release Notes: - N/A --- docs/src/languages/javascript.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/src/languages/javascript.md b/docs/src/languages/javascript.md index f8ba2f18e7e8b18479ecca00f0f7771751cb7d09..1b87dac5553f0dc44153d4706be1dd4bd2e341d5 100644 --- a/docs/src/languages/javascript.md +++ b/docs/src/languages/javascript.md @@ -3,7 +3,8 @@ JavaScript support is available natively in Zed. 
- Tree-sitter: [tree-sitter/tree-sitter-javascript](https://github.com/tree-sitter/tree-sitter-javascript) -- Language Server: [typescript-language-server/typescript-language-server](https://github.com/typescript-language-server/typescript-language-server) +- Language Server: [yioneko/vtsls](https://github.com/yioneko/vtsls) +- Alternate Language Server: [typescript-language-server/typescript-language-server](https://github.com/typescript-language-server/typescript-language-server) - Debug Adapter: [vscode-js-debug](https://github.com/microsoft/vscode-js-debug) ## Code formatting From 5b23a4ad7b1212ccf5c8247e9493859733fa1ab9 Mon Sep 17 00:00:00 2001 From: warrenjokinen <110791849+warrenjokinen@users.noreply.github.com> Date: Sat, 22 Nov 2025 13:11:41 -0700 Subject: [PATCH 0307/1030] docs: Fix minor typo in docker.md (#43334) Updated wording (added a missing word) for reporting issues in Dockerfile extension documentation. Closes #ISSUE N/A Release Notes: - N/A --- docs/src/languages/docker.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/src/languages/docker.md b/docs/src/languages/docker.md index e09613fa9abc427b279e1f5af8db0c8d0a102076..f003a575d2b0cc91da1d0bc9636a22619b8d2902 100644 --- a/docs/src/languages/docker.md +++ b/docs/src/languages/docker.md @@ -10,7 +10,7 @@ Docker `compose.yaml` language support in Zed is provided by the [Docker Compose ## Dockerfile -`Dockerfile` language support in Zed is provided by the [Dockerfile extension](https://github.com/d1y/dockerfile.zed). Please issues to: [https://github.com/d1y/dockerfile.zed/issues](https://github.com/d1y/dockerfile.zed/issues). +`Dockerfile` language support in Zed is provided by the [Dockerfile extension](https://github.com/d1y/dockerfile.zed). Please report issues to: [https://github.com/d1y/dockerfile.zed/issues](https://github.com/d1y/dockerfile.zed/issues). - Tree-sitter: [camdencheek/tree-sitter-dockerfile](https://github.com/camdencheek/tree-sitter-dockerfile) - Language Server: [rcjsuen/dockerfile-language-server](https://github.com/rcjsuen/dockerfile-language-server) From 7a5851e1558d7e0f4a064a51438db966b297a27f Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Sat, 22 Nov 2025 22:26:26 -0300 Subject: [PATCH 0308/1030] ui: Remove `CheckboxWithLabel` and improve `Switch` and `Checkbox` (#43343) This PR finally removes the `CheckboxWithLabel` component, which is not fully needed given the `Checkbox` can take a `label` method. Then, took advantage of the opportunity to add more methods with regards to label customization (position, size, and color) in both the `Checkbox` and `Switch` components. 
Release Notes: - N/A --- .../src/collab_panel/channel_modal.rs | 24 +- crates/debugger_ui/src/new_process_modal.rs | 23 +- crates/ui/src/components/toggle.rs | 288 +++++++----------- crates/workspace/src/theme_preview.rs | 6 +- 4 files changed, 144 insertions(+), 197 deletions(-) diff --git a/crates/collab_ui/src/collab_panel/channel_modal.rs b/crates/collab_ui/src/collab_panel/channel_modal.rs index e558835dbaf0e34e2efa1b4f64fd8f6cb96016c5..9d882562cab710f562145087e5c38474fda4808b 100644 --- a/crates/collab_ui/src/collab_panel/channel_modal.rs +++ b/crates/collab_ui/src/collab_panel/channel_modal.rs @@ -10,7 +10,7 @@ use gpui::{ }; use picker::{Picker, PickerDelegate}; use std::sync::Arc; -use ui::{Avatar, CheckboxWithLabel, ContextMenu, ListItem, ListItemSpacing, prelude::*}; +use ui::{Avatar, Checkbox, ContextMenu, ListItem, ListItemSpacing, prelude::*}; use util::TryFutureExt; use workspace::{ModalView, notifications::DetachAndPromptErr}; @@ -165,16 +165,18 @@ impl Render for ChannelModal { .h(rems_from_px(22.)) .justify_between() .line_height(rems(1.25)) - .child(CheckboxWithLabel::new( - "is-public", - Label::new("Public").size(LabelSize::Small), - if visibility == ChannelVisibility::Public { - ui::ToggleState::Selected - } else { - ui::ToggleState::Unselected - }, - cx.listener(Self::set_channel_visibility), - )) + .child( + Checkbox::new( + "is-public", + if visibility == ChannelVisibility::Public { + ui::ToggleState::Selected + } else { + ui::ToggleState::Unselected + }, + ) + .label("Public") + .on_click(cx.listener(Self::set_channel_visibility)), + ) .children( Some( Button::new("copy-link", "Copy Link") diff --git a/crates/debugger_ui/src/new_process_modal.rs b/crates/debugger_ui/src/new_process_modal.rs index c343110b47527adc0f8d4e3e097a5f769b80682c..4460284fadc8f43c86400d1f47d3e9d68c42b39b 100644 --- a/crates/debugger_ui/src/new_process_modal.rs +++ b/crates/debugger_ui/src/new_process_modal.rs @@ -25,9 +25,9 @@ use settings::Settings; use task::{DebugScenario, RevealTarget, VariableName, ZedDebugConfig}; use theme::ThemeSettings; use ui::{ - CheckboxWithLabel, ContextMenu, DropdownMenu, FluentBuilder, IconWithIndicator, Indicator, - KeyBinding, ListItem, ListItemSpacing, ToggleButtonGroup, ToggleButtonSimple, ToggleState, - Tooltip, prelude::*, + ContextMenu, DropdownMenu, FluentBuilder, IconWithIndicator, Indicator, KeyBinding, ListItem, + ListItemSpacing, Switch, SwitchLabelPosition, ToggleButtonGroup, ToggleButtonSimple, + ToggleState, Tooltip, prelude::*, }; use util::{ResultExt, debug_panic, rel_path::RelPath, shell::ShellKind}; use workspace::{ModalView, Workspace, notifications::DetachAndPromptErr, pane}; @@ -910,13 +910,12 @@ impl ConfigureMode { .child(render_editor(&self.cwd, window, cx)), ) .child( - CheckboxWithLabel::new( - "debugger-stop-on-entry", - Label::new("Stop on Entry") - .size(LabelSize::Small) - .color(Color::Muted), - self.stop_on_entry, - { + Switch::new("debugger-stop-on-entry", self.stop_on_entry) + .label("Stop on Entry") + .label_position(SwitchLabelPosition::Start) + .label_size(LabelSize::Default) + .color(ui::SwitchColor::Accent) + .on_click({ let this = cx.weak_entity(); move |state, _, cx| { this.update(cx, |this, _| { @@ -924,9 +923,7 @@ impl ConfigureMode { }) .ok(); } - }, - ) - .checkbox_position(ui::IconPosition::End), + }), ) } } diff --git a/crates/ui/src/components/toggle.rs b/crates/ui/src/components/toggle.rs index ab66b71996d6c7b64d0d3867ab73bd9727816316..a41dce6c61de1cabdbccee1478afe143feee4987 100644 --- 
a/crates/ui/src/components/toggle.rs +++ b/crates/ui/src/components/toggle.rs @@ -1,7 +1,8 @@ use gpui::{ - AnyElement, AnyView, ClickEvent, ElementId, Hsla, IntoElement, Styled, Window, div, hsla, - prelude::*, + AnyElement, AnyView, ClickEvent, ElementId, Hsla, IntoElement, KeybindingKeystroke, Keystroke, + Styled, Window, div, hsla, prelude::*, }; +use settings::KeybindSource; use std::{rc::Rc, sync::Arc}; use crate::utils::is_light; @@ -50,6 +51,8 @@ pub struct Checkbox { style: ToggleStyle, tooltip: Option AnyView>>, label: Option, + label_size: LabelSize, + label_color: Color, } impl Checkbox { @@ -64,6 +67,8 @@ impl Checkbox { style: ToggleStyle::default(), tooltip: None, label: None, + label_size: LabelSize::Default, + label_color: Color::Muted, placeholder: false, } } @@ -128,6 +133,16 @@ impl Checkbox { self.label = Some(label.into()); self } + + pub fn label_size(mut self, size: LabelSize) -> Self { + self.label_size = size; + self + } + + pub fn label_color(mut self, color: Color) -> Self { + self.label_color = color; + self + } } impl Checkbox { @@ -155,7 +170,6 @@ impl Checkbox { } } - /// container size pub fn container_size() -> Pixels { px(20.0) } @@ -169,6 +183,7 @@ impl RenderOnce for Checkbox { } else { Color::Selected }; + let icon = match self.toggle_state { ToggleState::Selected => { if self.placeholder { @@ -232,6 +247,7 @@ impl RenderOnce for Checkbox { h_flex() .id(self.id) + .cursor_pointer() .gap(DynamicSpacing::Base06.rems(cx)) .child(checkbox) .when_some( @@ -242,110 +258,15 @@ impl RenderOnce for Checkbox { }) }, ) - // TODO: Allow label size to be different from default. - // TODO: Allow label color to be different from muted. .when_some(self.label, |this, label| { - this.child(Label::new(label).color(Color::Muted)) - }) - .when_some(self.tooltip, |this, tooltip| { - this.tooltip(move |window, cx| tooltip(window, cx)) - }) - } -} - -/// A [`Checkbox`] that has a [`Label`]. -#[derive(IntoElement, RegisterComponent)] -pub struct CheckboxWithLabel { - id: ElementId, - label: Label, - checked: ToggleState, - on_click: Arc, - filled: bool, - style: ToggleStyle, - checkbox_position: IconPosition, -} - -// TODO: Remove `CheckboxWithLabel` now that `label` is a method of `Checkbox`. -impl CheckboxWithLabel { - /// Creates a checkbox with an attached label. - pub fn new( - id: impl Into, - label: Label, - checked: ToggleState, - on_click: impl Fn(&ToggleState, &mut Window, &mut App) + 'static, - ) -> Self { - Self { - id: id.into(), - label, - checked, - on_click: Arc::new(on_click), - filled: false, - style: ToggleStyle::default(), - checkbox_position: IconPosition::Start, - } - } - - /// Sets the style of the checkbox using the specified [`ToggleStyle`]. - pub fn style(mut self, style: ToggleStyle) -> Self { - self.style = style; - self - } - - /// Match the style of the checkbox to the current elevation using [`ToggleStyle::ElevationBased`]. - pub fn elevation(mut self, elevation: ElevationIndex) -> Self { - self.style = ToggleStyle::ElevationBased(elevation); - self - } - - /// Sets the `fill` setting of the checkbox, indicating whether it should be filled. 
- pub fn fill(mut self) -> Self { - self.filled = true; - self - } - - pub fn checkbox_position(mut self, position: IconPosition) -> Self { - self.checkbox_position = position; - self - } -} - -impl RenderOnce for CheckboxWithLabel { - fn render(self, _window: &mut Window, cx: &mut App) -> impl IntoElement { - h_flex() - .gap(DynamicSpacing::Base08.rems(cx)) - .when(self.checkbox_position == IconPosition::Start, |this| { this.child( - Checkbox::new(self.id.clone(), self.checked) - .style(self.style.clone()) - .when(self.filled, Checkbox::fill) - .on_click({ - let on_click = self.on_click.clone(); - move |checked, window, cx| { - (on_click)(checked, window, cx); - } - }), + Label::new(label) + .color(self.label_color) + .size(self.label_size), ) }) - .child( - div() - .id(SharedString::from(format!("{}-label", self.id))) - .on_click({ - let on_click = self.on_click.clone(); - move |_event, window, cx| { - (on_click)(&self.checked.inverse(), window, cx); - } - }) - .child(self.label), - ) - .when(self.checkbox_position == IconPosition::End, |this| { - this.child( - Checkbox::new(self.id.clone(), self.checked) - .style(self.style) - .when(self.filled, Checkbox::fill) - .on_click(move |checked, window, cx| { - (self.on_click)(checked, window, cx); - }), - ) + .when_some(self.tooltip, |this, tooltip| { + this.tooltip(move |window, cx| tooltip(window, cx)) }) } } @@ -412,6 +333,14 @@ impl From for Color { } } +/// Defines the color for a switch component. +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Default)] +pub enum SwitchLabelPosition { + Start, + #[default] + End, +} + /// # Switch /// /// Switches are used to represent opposite states, such as enabled or disabled. @@ -422,6 +351,9 @@ pub struct Switch { disabled: bool, on_click: Option>, label: Option, + label_position: Option, + label_size: LabelSize, + full_width: bool, key_binding: Option, color: SwitchColor, tab_index: Option, @@ -436,6 +368,9 @@ impl Switch { disabled: false, on_click: None, label: None, + label_position: None, + label_size: LabelSize::Small, + full_width: false, key_binding: None, color: SwitchColor::default(), tab_index: None, @@ -469,6 +404,24 @@ impl Switch { self } + pub fn label_position( + mut self, + label_position: impl Into>, + ) -> Self { + self.label_position = label_position.into(); + self + } + + pub fn label_size(mut self, size: LabelSize) -> Self { + self.label_size = size; + self + } + + pub fn full_width(mut self, full_width: bool) -> Self { + self.full_width = full_width; + self + } + /// Display the keybinding that triggers the switch action. 
pub fn key_binding(mut self, key_binding: impl Into>) -> Self { self.key_binding = key_binding.into(); @@ -503,6 +456,7 @@ impl RenderOnce for Switch { }; let group_id = format!("switch_group_{:?}", self.id); + let label = self.label; let switch = div() .id((self.id.clone(), "switch")) @@ -555,9 +509,27 @@ impl RenderOnce for Switch { h_flex() .id(self.id) - .gap(DynamicSpacing::Base06.rems(cx)) .cursor_pointer() + .gap(DynamicSpacing::Base06.rems(cx)) + .when(self.full_width, |this| this.w_full().justify_between()) + .when( + self.label_position == Some(SwitchLabelPosition::Start), + |this| { + this.when_some(label.clone(), |this, label| { + this.child(Label::new(label).size(self.label_size)) + }) + }, + ) .child(switch) + .when( + self.label_position == Some(SwitchLabelPosition::End), + |this| { + this.when_some(label, |this, label| { + this.child(Label::new(label).size(self.label_size)) + }) + }, + ) + .children(self.key_binding) .when_some( self.on_click.filter(|_| !self.disabled), |this, on_click| { @@ -566,10 +538,6 @@ impl RenderOnce for Switch { }) }, ) - .when_some(self.label, |this, label| { - this.child(Label::new(label).size(LabelSize::Small)) - }) - .children(self.key_binding) } } @@ -1070,75 +1038,55 @@ impl Component for Switch { "With Label", vec![ single_example( - "Label", - Switch::new("switch_with_label", ToggleState::Selected) + "Start Label", + Switch::new("switch_with_label_start", ToggleState::Selected) + .label("Always save on quit") + .label_position(SwitchLabelPosition::Start) + .into_any_element(), + ), + single_example( + "End Label", + Switch::new("switch_with_label_end", ToggleState::Selected) .label("Always save on quit") + .label_position(SwitchLabelPosition::End) + .into_any_element(), + ), + single_example( + "Default Size Label", + Switch::new( + "switch_with_label_default_size", + ToggleState::Selected, + ) + .label("Always save on quit") + .label_size(LabelSize::Default) + .into_any_element(), + ), + single_example( + "Small Size Label", + Switch::new("switch_with_label_small_size", ToggleState::Selected) + .label("Always save on quit") + .label_size(LabelSize::Small) .into_any_element(), ), - // TODO: Where did theme_preview_keybinding go? 
- // single_example( - // "Keybinding", - // Switch::new("switch_with_keybinding", ToggleState::Selected) - // .key_binding(theme_preview_keybinding("cmd-shift-e")) - // .into_any_element(), - // ), ], ), + example_group_with_title( + "With Keybinding", + vec![single_example( + "Keybinding", + Switch::new("switch_with_keybinding", ToggleState::Selected) + .key_binding(Some(KeyBinding::from_keystrokes( + vec![KeybindingKeystroke::from_keystroke( + Keystroke::parse("cmd-s").unwrap(), + )] + .into(), + KeybindSource::Base, + ))) + .into_any_element(), + )], + ), ]) .into_any_element(), ) } } - -impl Component for CheckboxWithLabel { - fn scope() -> ComponentScope { - ComponentScope::Input - } - - fn description() -> Option<&'static str> { - Some("A checkbox component with an attached label") - } - - fn preview(_window: &mut Window, _cx: &mut App) -> Option { - Some( - v_flex() - .gap_6() - .children(vec![example_group_with_title( - "States", - vec![ - single_example( - "Unselected", - CheckboxWithLabel::new( - "checkbox_with_label_unselected", - Label::new("Always save on quit"), - ToggleState::Unselected, - |_, _, _| {}, - ) - .into_any_element(), - ), - single_example( - "Indeterminate", - CheckboxWithLabel::new( - "checkbox_with_label_indeterminate", - Label::new("Always save on quit"), - ToggleState::Indeterminate, - |_, _, _| {}, - ) - .into_any_element(), - ), - single_example( - "Selected", - CheckboxWithLabel::new( - "checkbox_with_label_selected", - Label::new("Always save on quit"), - ToggleState::Selected, - |_, _, _| {}, - ) - .into_any_element(), - ), - ], - )]) - .into_any_element(), - ) - } -} diff --git a/crates/workspace/src/theme_preview.rs b/crates/workspace/src/theme_preview.rs index 94a280b4da1283178201898bd3e8c2c71e5f0b1f..f978da706b7476d04bf656ed63faf5bd38b83d20 100644 --- a/crates/workspace/src/theme_preview.rs +++ b/crates/workspace/src/theme_preview.rs @@ -6,9 +6,9 @@ use strum::IntoEnumIterator; use theme::all_theme_colors; use ui::{ AudioStatus, Avatar, AvatarAudioStatusIndicator, AvatarAvailabilityIndicator, ButtonLike, - Checkbox, CheckboxWithLabel, CollaboratorAvailability, ContentGroup, DecoratedIcon, - ElevationIndex, Facepile, IconDecoration, Indicator, KeybindingHint, Switch, TintColor, - Tooltip, prelude::*, utils::calculate_contrast_ratio, + Checkbox, CollaboratorAvailability, ContentGroup, DecoratedIcon, ElevationIndex, Facepile, + IconDecoration, Indicator, KeybindingHint, Switch, TintColor, Tooltip, prelude::*, + utils::calculate_contrast_ratio, }; use crate::{Item, Workspace}; From d9498b4b555b86eb7e6b7b47ec6b5549561d7367 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Sat, 22 Nov 2025 23:28:34 -0300 Subject: [PATCH 0309/1030] debugger_ui: Improve some elements of the UI (#43344) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - In the launch tab of the new session mode, I've switched it to use the `InputField` component instead given that had all that we needed already. Allows for removing a good chunk of editor-related code - Also in the launch tab, added support for keyboard navigation between all of the elements there (dropdown, inputs, and switch component) - Added some simple an empty state treatment for the breakpoint column when there are none set https://github.com/user-attachments/assets/a441aa8a-360b-4e38-839f-786315a8a235 Release Notes: - debugger: Made the input elements within the launch tab in the new session modal keyboard navigable˙. 
--- Cargo.lock | 1 + crates/debugger_ui/Cargo.toml | 1 + crates/debugger_ui/src/debugger_panel.rs | 33 +++-- crates/debugger_ui/src/new_process_modal.rs | 133 +++++++----------- .../src/session/running/breakpoint_list.rs | 1 - crates/ui_input/src/input_field.rs | 12 +- 6 files changed, 88 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bfce6ab287c81852f558ea064097443c1131d9a7..bea1424b704eb9e3bc07ddfa2e7b6de817687a72 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4770,6 +4770,7 @@ dependencies = [ "tree-sitter-go", "tree-sitter-json", "ui", + "ui_input", "unindent", "util", "workspace", diff --git a/crates/debugger_ui/Cargo.toml b/crates/debugger_ui/Cargo.toml index c1a0657c0ed93508acb330a98dc6d1c1ee91c570..325bcc300ae637ab46c36b7a3e7875e197f7d3d2 100644 --- a/crates/debugger_ui/Cargo.toml +++ b/crates/debugger_ui/Cargo.toml @@ -70,6 +70,7 @@ theme.workspace = true tree-sitter-json.workspace = true tree-sitter.workspace = true ui.workspace = true +ui_input.workspace = true unindent = { workspace = true, optional = true } util.workspace = true workspace.workspace = true diff --git a/crates/debugger_ui/src/debugger_panel.rs b/crates/debugger_ui/src/debugger_panel.rs index f2f8b5be32c696c61a50c71090f130fcf34ef271..0a5dc744cde6ba053a2c5a5100538100a7d6a49b 100644 --- a/crates/debugger_ui/src/debugger_panel.rs +++ b/crates/debugger_ui/src/debugger_panel.rs @@ -1692,7 +1692,7 @@ impl Render for DebugPanel { .child( Button::new("spawn-new-session-empty-state", "New Session") .icon(IconName::Plus) - .icon_size(IconSize::XSmall) + .icon_size(IconSize::Small) .icon_color(Color::Muted) .icon_position(IconPosition::Start) .on_click(|_, window, cx| { @@ -1702,8 +1702,7 @@ impl Render for DebugPanel { .child( Button::new("edit-debug-settings", "Edit debug.json") .icon(IconName::Code) - .icon_size(IconSize::XSmall) - .color(Color::Muted) + .icon_size(IconSize::Small) .icon_color(Color::Muted) .icon_position(IconPosition::Start) .on_click(|_, window, cx| { @@ -1716,8 +1715,7 @@ impl Render for DebugPanel { .child( Button::new("open-debugger-docs", "Debugger Docs") .icon(IconName::Book) - .color(Color::Muted) - .icon_size(IconSize::XSmall) + .icon_size(IconSize::Small) .icon_color(Color::Muted) .icon_position(IconPosition::Start) .on_click(|_, _, cx| cx.open_url("https://zed.dev/docs/debugger")), @@ -1728,8 +1726,7 @@ impl Render for DebugPanel { "Debugger Extensions", ) .icon(IconName::Blocks) - .color(Color::Muted) - .icon_size(IconSize::XSmall) + .icon_size(IconSize::Small) .icon_color(Color::Muted) .icon_position(IconPosition::Start) .on_click(|_, window, cx| { @@ -1746,6 +1743,15 @@ impl Render for DebugPanel { }), ); + let has_breakpoints = self + .project + .read(cx) + .breakpoint_store() + .read(cx) + .all_source_breakpoints(cx) + .values() + .any(|breakpoints| !breakpoints.is_empty()); + let breakpoint_list = v_flex() .group("base-breakpoint-list") .when_else( @@ -1769,7 +1775,18 @@ impl Render for DebugPanel { ), ), ) - .child(self.breakpoint_list.clone()); + .when(has_breakpoints, |this| { + this.child(self.breakpoint_list.clone()) + }) + .when(!has_breakpoints, |this| { + this.child( + v_flex().size_full().items_center().justify_center().child( + Label::new("No Breakpoints Set") + .size(LabelSize::Small) + .color(Color::Muted), + ), + ) + }); this.child( v_flex() diff --git a/crates/debugger_ui/src/new_process_modal.rs b/crates/debugger_ui/src/new_process_modal.rs index 4460284fadc8f43c86400d1f47d3e9d68c42b39b..40187cef9cc55cb4192a3cea773f42dca15a2571 100644 --- 
a/crates/debugger_ui/src/new_process_modal.rs +++ b/crates/debugger_ui/src/new_process_modal.rs @@ -12,23 +12,22 @@ use tasks_ui::{TaskOverrides, TasksModal}; use dap::{ DapRegistry, DebugRequest, TelemetrySpawnLocation, adapters::DebugAdapterName, send_telemetry, }; -use editor::{Editor, EditorElement, EditorStyle}; +use editor::Editor; use fuzzy::{StringMatch, StringMatchCandidate}; use gpui::{ Action, App, AppContext, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, - KeyContext, Render, Subscription, Task, TextStyle, WeakEntity, + KeyContext, Render, Subscription, Task, WeakEntity, }; use itertools::Itertools as _; use picker::{Picker, PickerDelegate, highlighted_match_with_paths::HighlightedMatch}; use project::{DebugScenarioContext, Project, TaskContexts, TaskSourceKind, task_store::TaskStore}; -use settings::Settings; use task::{DebugScenario, RevealTarget, VariableName, ZedDebugConfig}; -use theme::ThemeSettings; use ui::{ ContextMenu, DropdownMenu, FluentBuilder, IconWithIndicator, Indicator, KeyBinding, ListItem, ListItemSpacing, Switch, SwitchLabelPosition, ToggleButtonGroup, ToggleButtonSimple, ToggleState, Tooltip, prelude::*, }; +use ui_input::InputField; use util::{ResultExt, debug_panic, rel_path::RelPath, shell::ShellKind}; use workspace::{ModalView, Workspace, notifications::DetachAndPromptErr, pane}; @@ -448,7 +447,7 @@ impl NewProcessModal { &mut self, window: &mut Window, cx: &mut Context, - ) -> ui::DropdownMenu { + ) -> DropdownMenu { let workspace = self.workspace.clone(); let weak = cx.weak_entity(); let active_buffer = self.task_contexts(cx).and_then(|tc| { @@ -508,6 +507,13 @@ impl NewProcessModal { menu }), ) + .style(ui::DropdownStyle::Outlined) + .tab_index(0) + .attach(gpui::Corner::BottomLeft) + .offset(gpui::Point { + x: px(0.0), + y: px(2.0), + }) } } @@ -540,44 +546,6 @@ impl Focusable for NewProcessMode { } } -fn render_editor(editor: &Entity, window: &mut Window, cx: &App) -> impl IntoElement { - let settings = ThemeSettings::get_global(cx); - let theme = cx.theme(); - - let text_style = TextStyle { - color: cx.theme().colors().text, - font_family: settings.buffer_font.family.clone(), - font_features: settings.buffer_font.features.clone(), - font_size: settings.buffer_font_size(cx).into(), - font_weight: settings.buffer_font.weight, - line_height: relative(settings.buffer_line_height.value()), - background_color: Some(theme.colors().editor_background), - ..Default::default() - }; - - let element = EditorElement::new( - editor, - EditorStyle { - background: theme.colors().editor_background, - local_player: theme.players().local(), - text: text_style, - ..Default::default() - }, - ); - - div() - .rounded_md() - .p_1() - .border_1() - .border_color(theme.colors().border_variant) - .when( - editor.focus_handle(cx).contains_focused(window, cx), - |this| this.border_color(theme.colors().border_focused), - ) - .child(element) - .bg(theme.colors().editor_background) -} - impl Render for NewProcessModal { fn render( &mut self, @@ -788,22 +756,26 @@ impl RenderOnce for AttachMode { #[derive(Clone)] pub(super) struct ConfigureMode { - program: Entity, - cwd: Entity, + program: Entity, + cwd: Entity, stop_on_entry: ToggleState, save_to_debug_json: ToggleState, } impl ConfigureMode { pub(super) fn new(window: &mut Window, cx: &mut App) -> Entity { - let program = cx.new(|cx| Editor::single_line(window, cx)); - program.update(cx, |this, cx| { - this.set_placeholder_text("ENV=Zed ~/bin/program --option", window, cx); + let program = cx.new(|cx| { + 
InputField::new(window, cx, "ENV=Zed ~/bin/program --option") + .label("Program") + .tab_stop(true) + .tab_index(1) }); - let cwd = cx.new(|cx| Editor::single_line(window, cx)); - cwd.update(cx, |this, cx| { - this.set_placeholder_text("Ex: $ZED_WORKTREE_ROOT", window, cx); + let cwd = cx.new(|cx| { + InputField::new(window, cx, "Ex: $ZED_WORKTREE_ROOT") + .label("Working Directory") + .tab_stop(true) + .tab_index(2) }); cx.new(|_| Self { @@ -815,9 +787,9 @@ impl ConfigureMode { } fn load(&mut self, cwd: PathBuf, window: &mut Window, cx: &mut App) { - self.cwd.update(cx, |editor, cx| { - if editor.is_empty(cx) { - editor.set_text(cwd.to_string_lossy(), window, cx); + self.cwd.update(cx, |input_field, cx| { + if input_field.is_empty(cx) { + input_field.set_text(cwd.to_string_lossy(), window, cx); } }); } @@ -868,49 +840,44 @@ impl ConfigureMode { } } + fn on_tab(&mut self, _: &menu::SelectNext, window: &mut Window, _: &mut Context) { + window.focus_next(); + } + + fn on_tab_prev( + &mut self, + _: &menu::SelectPrevious, + window: &mut Window, + _: &mut Context, + ) { + window.focus_prev(); + } + fn render( &mut self, adapter_menu: DropdownMenu, - window: &mut Window, + _: &mut Window, cx: &mut ui::Context, ) -> impl IntoElement { v_flex() + .tab_group() + .track_focus(&self.program.focus_handle(cx)) + .on_action(cx.listener(Self::on_tab)) + .on_action(cx.listener(Self::on_tab_prev)) .p_2() .w_full() - .gap_2() - .track_focus(&self.program.focus_handle(cx)) + .gap_3() .child( h_flex() - .gap_2() - .child( - Label::new("Debugger") - .size(LabelSize::Small) - .color(Color::Muted), - ) + .gap_1() + .child(Label::new("Debugger:").color(Color::Muted)) .child(adapter_menu), ) - .child( - v_flex() - .gap_0p5() - .child( - Label::new("Program") - .size(LabelSize::Small) - .color(Color::Muted), - ) - .child(render_editor(&self.program, window, cx)), - ) - .child( - v_flex() - .gap_0p5() - .child( - Label::new("Working Directory") - .size(LabelSize::Small) - .color(Color::Muted), - ) - .child(render_editor(&self.cwd, window, cx)), - ) + .child(self.program.clone()) + .child(self.cwd.clone()) .child( Switch::new("debugger-stop-on-entry", self.stop_on_entry) + .tab_index(3_isize) .label("Stop on Entry") .label_position(SwitchLabelPosition::Start) .label_size(LabelSize::Default) diff --git a/crates/debugger_ui/src/session/running/breakpoint_list.rs b/crates/debugger_ui/src/session/running/breakpoint_list.rs index 0a02a5a8e4197bf6b959a592b6e3d3da92c00846..ca50f67c9236d19a9f04f327091eb383ab72e122 100644 --- a/crates/debugger_ui/src/session/running/breakpoint_list.rs +++ b/crates/debugger_ui/src/session/running/breakpoint_list.rs @@ -1407,7 +1407,6 @@ impl RenderOnce for BreakpointOptionsStrip { h_flex() .gap_px() - .mr_3() // Space to avoid overlapping with the scrollbar .justify_end() .when(has_logs || self.is_selected, |this| { this.child( diff --git a/crates/ui_input/src/input_field.rs b/crates/ui_input/src/input_field.rs index 9e8c519ca9acc68c0d968f099f62ad336ee0754a..2bae8c172dcecbc94aa591297831c4f43279197b 100644 --- a/crates/ui_input/src/input_field.rs +++ b/crates/ui_input/src/input_field.rs @@ -120,6 +120,11 @@ impl InputField { self.editor().read(cx).text(cx) } + pub fn clear(&self, window: &mut Window, cx: &mut App) { + self.editor() + .update(cx, |editor, cx| editor.clear(window, cx)) + } + pub fn set_text(&self, text: impl Into>, window: &mut Window, cx: &mut App) { self.editor() .update(cx, |editor, cx| editor.set_text(text, window, cx)) @@ -127,7 +132,8 @@ impl InputField { } impl Render for 
InputField { - fn render(&mut self, _: &mut Window, cx: &mut Context) -> impl IntoElement { + fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { + let editor = self.editor.clone(); let settings = ThemeSettings::get_global(cx); let theme_color = cx.theme().colors(); @@ -206,6 +212,10 @@ impl Render for InputField { .bg(style.background_color) .border_1() .border_color(style.border_color) + .when( + editor.focus_handle(cx).contains_focused(window, cx), + |this| this.border_color(theme_color.border_focused), + ) .when_some(self.start_icon, |this, icon| { this.gap_1() .child(Icon::new(icon).size(IconSize::Small).color(Color::Muted)) From f57f4cd3607e8298ef5f1b29929df2db0185d826 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Sun, 23 Nov 2025 00:33:18 -0300 Subject: [PATCH 0310/1030] agent_ui: Display footer for model selector when in Zed agent (#43294) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds back the footer with the "Configure" button in the model selector, but only when seeing it from the Zed agent (or inline assistant/text threads). I had removed it a while back because the "Configure" button, which takes you to the agent panel settings view, didn't make much sense when coming from an external agent, given there's nothing model-wise you can configure from Zed (at least yet) for an external agent. This also makes the button in the footer a bit nicer by making it full width and displaying a keybinding, so that you can do the whole "trigger model selector → go to settings view" flow entirely with the keyboard. Screenshot 2025-11-21 at 10  38@2x Release Notes: - N/A --- crates/acp_thread/src/connection.rs | 5 ++ crates/agent/src/agent.rs | 4 ++ crates/agent_ui/src/acp/model_selector.rs | 47 +++++++++++++++++-- .../src/acp/model_selector_popover.rs | 12 ++++- .../manage_profiles_modal.rs | 1 + crates/agent_ui/src/agent_model_selector.rs | 3 ++ .../agent_ui/src/language_model_selector.rs | 33 ++++++++----- crates/agent_ui/src/text_thread_editor.rs | 3 ++ 8 files changed, 91 insertions(+), 17 deletions(-) diff --git a/crates/acp_thread/src/connection.rs b/crates/acp_thread/src/connection.rs index 63ca65f22725c54476048542c90f5f5efcfd23ca..80bec0ee9d351711bdf435cfe63eb99eb1e499e3 100644 --- a/crates/acp_thread/src/connection.rs +++ b/crates/acp_thread/src/connection.rs @@ -197,6 +197,11 @@ pub trait AgentModelSelector: 'static { fn watch(&self, _cx: &mut App) -> Option> { None } + + /// Returns whether the model picker should render a footer.
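/// The default is `false`, so the footer stays hidden for external agents; Zed's native
/// agent overrides this to `true` (see `NativeAgentModelSelector` below), which lets the
/// picker show the full-width "Configure" button in its `render_footer`.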
+ fn should_render_footer(&self) -> bool { + false + } } #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/crates/agent/src/agent.rs b/crates/agent/src/agent.rs index a85c01bb225ab58a372a8b09fb07e7bc155a7aeb..404cd6549e5786b92c49379918346b83fcc0e0c1 100644 --- a/crates/agent/src/agent.rs +++ b/crates/agent/src/agent.rs @@ -961,6 +961,10 @@ impl acp_thread::AgentModelSelector for NativeAgentModelSelector { fn watch(&self, cx: &mut App) -> Option> { Some(self.connection.0.read(cx).models.watch()) } + + fn should_render_footer(&self) -> bool { + true + } } impl acp_thread::AgentConnection for NativeAgentConnection { diff --git a/crates/agent_ui/src/acp/model_selector.rs b/crates/agent_ui/src/acp/model_selector.rs index c60a3b6cb61970caba02df82506848b6efa90cc1..8a0c3c9df90e73d0ef00ecd7232115729dd35347 100644 --- a/crates/agent_ui/src/acp/model_selector.rs +++ b/crates/agent_ui/src/acp/model_selector.rs @@ -7,14 +7,17 @@ use collections::IndexMap; use fs::Fs; use futures::FutureExt; use fuzzy::{StringMatchCandidate, match_strings}; -use gpui::{AsyncWindowContext, BackgroundExecutor, DismissEvent, Task, WeakEntity}; +use gpui::{ + Action, AsyncWindowContext, BackgroundExecutor, DismissEvent, FocusHandle, Task, WeakEntity, +}; use ordered_float::OrderedFloat; use picker::{Picker, PickerDelegate}; use ui::{ - DocumentationAside, DocumentationEdge, DocumentationSide, IntoElement, ListItem, + DocumentationAside, DocumentationEdge, DocumentationSide, IntoElement, KeyBinding, ListItem, ListItemSpacing, prelude::*, }; use util::ResultExt; +use zed_actions::agent::OpenSettings; use crate::ui::HoldForDefault; @@ -24,10 +27,12 @@ pub fn acp_model_selector( selector: Rc, agent_server: Rc, fs: Arc, + focus_handle: FocusHandle, window: &mut Window, cx: &mut Context, ) -> AcpModelSelector { - let delegate = AcpModelPickerDelegate::new(selector, agent_server, fs, window, cx); + let delegate = + AcpModelPickerDelegate::new(selector, agent_server, fs, focus_handle, window, cx); Picker::list(delegate, window, cx) .show_scrollbar(true) .width(rems(20.)) @@ -49,6 +54,7 @@ pub struct AcpModelPickerDelegate { selected_description: Option<(usize, SharedString, bool)>, selected_model: Option, _refresh_models_task: Task<()>, + focus_handle: FocusHandle, } impl AcpModelPickerDelegate { @@ -56,6 +62,7 @@ impl AcpModelPickerDelegate { selector: Rc, agent_server: Rc, fs: Arc, + focus_handle: FocusHandle, window: &mut Window, cx: &mut Context, ) -> Self { @@ -104,6 +111,7 @@ impl AcpModelPickerDelegate { selected_index: 0, selected_description: None, _refresh_models_task: refresh_models_task, + focus_handle, } } @@ -331,6 +339,39 @@ impl PickerDelegate for AcpModelPickerDelegate { ) }) } + + fn render_footer( + &self, + _window: &mut Window, + cx: &mut Context>, + ) -> Option { + let focus_handle = self.focus_handle.clone(); + + if !self.selector.should_render_footer() { + return None; + } + + Some( + h_flex() + .w_full() + .p_1p5() + .border_t_1() + .border_color(cx.theme().colors().border_variant) + .child( + Button::new("configure", "Configure") + .full_width() + .style(ButtonStyle::Outlined) + .key_binding( + KeyBinding::for_action_in(&OpenSettings, &focus_handle, cx) + .map(|kb| kb.size(rems_from_px(12.))), + ) + .on_click(|_, window, cx| { + window.dispatch_action(OpenSettings.boxed_clone(), cx); + }), + ) + .into_any(), + ) + } } fn info_list_to_picker_entries( diff --git a/crates/agent_ui/src/acp/model_selector_popover.rs b/crates/agent_ui/src/acp/model_selector_popover.rs index 
04e7e06a85aadf7c7fb1b69bfcaf81ec6ff6bf89..e2393c11bd6c23b79397abf274fb6539c0c7063f 100644 --- a/crates/agent_ui/src/acp/model_selector_popover.rs +++ b/crates/agent_ui/src/acp/model_selector_popover.rs @@ -30,8 +30,18 @@ impl AcpModelSelectorPopover { window: &mut Window, cx: &mut Context, ) -> Self { + let focus_handle_clone = focus_handle.clone(); Self { - selector: cx.new(move |cx| acp_model_selector(selector, agent_server, fs, window, cx)), + selector: cx.new(move |cx| { + acp_model_selector( + selector, + agent_server, + fs, + focus_handle_clone.clone(), + window, + cx, + ) + }), menu_handle, focus_handle, } diff --git a/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs b/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs index 210cf5f5dd6612855b32e358a2d3ec38e8259373..7e03dc46b704c22b4665bbce0f3b818134b56634 100644 --- a/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs +++ b/crates/agent_ui/src/agent_configuration/manage_profiles_modal.rs @@ -253,6 +253,7 @@ impl ManageProfilesModal { }); }, false, // Do not use popover styles for the model picker + self.focus_handle.clone(), window, cx, ) diff --git a/crates/agent_ui/src/agent_model_selector.rs b/crates/agent_ui/src/agent_model_selector.rs index 900ca0b683670a30b3353655d17c2ef79cd5523b..43982cdda7bd887b8fd9970e836090a0e549ae11 100644 --- a/crates/agent_ui/src/agent_model_selector.rs +++ b/crates/agent_ui/src/agent_model_selector.rs @@ -25,6 +25,8 @@ impl AgentModelSelector { window: &mut Window, cx: &mut Context, ) -> Self { + let focus_handle_clone = focus_handle.clone(); + Self { selector: cx.new(move |cx| { let fs = fs.clone(); @@ -48,6 +50,7 @@ impl AgentModelSelector { } }, true, // Use popover styles for picker + focus_handle_clone, window, cx, ) diff --git a/crates/agent_ui/src/language_model_selector.rs b/crates/agent_ui/src/language_model_selector.rs index 996e6a19828c741adbf6f8f824470f9a66c2f049..5b5a4513c6dca32e985c966e07ad84e84fc9a872 100644 --- a/crates/agent_ui/src/language_model_selector.rs +++ b/crates/agent_ui/src/language_model_selector.rs @@ -2,14 +2,17 @@ use std::{cmp::Reverse, sync::Arc}; use collections::IndexMap; use fuzzy::{StringMatch, StringMatchCandidate, match_strings}; -use gpui::{Action, AnyElement, App, BackgroundExecutor, DismissEvent, Subscription, Task}; +use gpui::{ + Action, AnyElement, App, BackgroundExecutor, DismissEvent, FocusHandle, Subscription, Task, +}; use language_model::{ AuthenticateError, ConfiguredModel, LanguageModel, LanguageModelProviderId, LanguageModelRegistry, }; use ordered_float::OrderedFloat; use picker::{Picker, PickerDelegate}; -use ui::{ListItem, ListItemSpacing, prelude::*}; +use ui::{KeyBinding, ListItem, ListItemSpacing, prelude::*}; +use zed_actions::agent::OpenSettings; type OnModelChanged = Arc, &mut App) + 'static>; type GetActiveModel = Arc Option + 'static>; @@ -20,6 +23,7 @@ pub fn language_model_selector( get_active_model: impl Fn(&App) -> Option + 'static, on_model_changed: impl Fn(Arc, &mut App) + 'static, popover_styles: bool, + focus_handle: FocusHandle, window: &mut Window, cx: &mut Context, ) -> LanguageModelSelector { @@ -27,6 +31,7 @@ pub fn language_model_selector( get_active_model, on_model_changed, popover_styles, + focus_handle, window, cx, ); @@ -88,6 +93,7 @@ pub struct LanguageModelPickerDelegate { _authenticate_all_providers_task: Task<()>, _subscriptions: Vec, popover_styles: bool, + focus_handle: FocusHandle, } impl LanguageModelPickerDelegate { @@ -95,6 +101,7 @@ impl LanguageModelPickerDelegate { 
get_active_model: impl Fn(&App) -> Option + 'static, on_model_changed: impl Fn(Arc, &mut App) + 'static, popover_styles: bool, + focus_handle: FocusHandle, window: &mut Window, cx: &mut Context>, ) -> Self { @@ -128,6 +135,7 @@ impl LanguageModelPickerDelegate { }, )], popover_styles, + focus_handle, } } @@ -521,6 +529,8 @@ impl PickerDelegate for LanguageModelPickerDelegate { _window: &mut Window, cx: &mut Context>, ) -> Option { + let focus_handle = self.focus_handle.clone(); + if !self.popover_styles { return None; } @@ -528,22 +538,19 @@ impl PickerDelegate for LanguageModelPickerDelegate { Some( h_flex() .w_full() + .p_1p5() .border_t_1() .border_color(cx.theme().colors().border_variant) - .p_1() - .gap_4() - .justify_between() .child( Button::new("configure", "Configure") - .icon(IconName::Settings) - .icon_size(IconSize::Small) - .icon_color(Color::Muted) - .icon_position(IconPosition::Start) + .full_width() + .style(ButtonStyle::Outlined) + .key_binding( + KeyBinding::for_action_in(&OpenSettings, &focus_handle, cx) + .map(|kb| kb.size(rems_from_px(12.))), + ) .on_click(|_, window, cx| { - window.dispatch_action( - zed_actions::agent::OpenSettings.boxed_clone(), - cx, - ); + window.dispatch_action(OpenSettings.boxed_clone(), cx); }), ) .into_any(), diff --git a/crates/agent_ui/src/text_thread_editor.rs b/crates/agent_ui/src/text_thread_editor.rs index 2a3c7e10318da78729f35476da872a0651c4a145..8c245a0675a03e65efeaf3e92bc3b7a5062fdd53 100644 --- a/crates/agent_ui/src/text_thread_editor.rs +++ b/crates/agent_ui/src/text_thread_editor.rs @@ -280,6 +280,8 @@ impl TextThreadEditor { .thought_process_output_sections() .to_vec(); let slash_commands = text_thread.read(cx).slash_commands().clone(); + let focus_handle = editor.read(cx).focus_handle(cx); + let mut this = Self { text_thread, slash_commands, @@ -315,6 +317,7 @@ impl TextThreadEditor { }); }, true, // Use popover styles for picker + focus_handle, window, cx, ) From 41c61900d1562107076944ccb993b866fadb7167 Mon Sep 17 00:00:00 2001 From: John Tur Date: Sun, 23 Nov 2025 03:21:43 -0500 Subject: [PATCH 0311/1030] Fix labels for GitHub issue templates (#43348) Release Notes: - N/A --- .github/ISSUE_TEMPLATE/01_bug_ai.yml | 2 +- .github/ISSUE_TEMPLATE/04_bug_debugger.yml | 2 +- .github/ISSUE_TEMPLATE/06_bug_git.yml | 2 +- .github/ISSUE_TEMPLATE/07_bug_windows.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/01_bug_ai.yml b/.github/ISSUE_TEMPLATE/01_bug_ai.yml index 36e9036b7840fb536bfc47f1fd9b8359c6736b61..8d977f4f7ae603f0b2ed991ce6dabee25e045f06 100644 --- a/.github/ISSUE_TEMPLATE/01_bug_ai.yml +++ b/.github/ISSUE_TEMPLATE/01_bug_ai.yml @@ -1,7 +1,7 @@ name: Bug Report (AI) description: Zed Agent Panel Bugs type: "Bug" -labels: ["ai"] +labels: ["area:ai"] title: "AI: " body: - type: textarea diff --git a/.github/ISSUE_TEMPLATE/04_bug_debugger.yml b/.github/ISSUE_TEMPLATE/04_bug_debugger.yml index 8361de5c22fe27a8ea2dd9597fdcbcbf6cd9661e..3191227371a544b3ad6232c204b85afd5c6a238a 100644 --- a/.github/ISSUE_TEMPLATE/04_bug_debugger.yml +++ b/.github/ISSUE_TEMPLATE/04_bug_debugger.yml @@ -1,7 +1,7 @@ name: Bug Report (Debugger) description: Zed Debugger-Related Bugs type: "Bug" -labels: ["debugger"] +labels: ["area:debugger"] title: "Debugger: " body: - type: textarea diff --git a/.github/ISSUE_TEMPLATE/06_bug_git.yml b/.github/ISSUE_TEMPLATE/06_bug_git.yml index ec6276df3a21b8df6fde5e6f49d868dee329d864..e0b6a6750879bf8de41a942e1057b9e4d5cdcc91 100644 --- a/.github/ISSUE_TEMPLATE/06_bug_git.yml 
+++ b/.github/ISSUE_TEMPLATE/06_bug_git.yml @@ -1,7 +1,7 @@ name: Bug Report (Git) description: Zed Git Related Bugs type: "Bug" -labels: ["git"] +labels: ["area:integrations/git"] title: "Git: " body: - type: textarea diff --git a/.github/ISSUE_TEMPLATE/07_bug_windows.yml b/.github/ISSUE_TEMPLATE/07_bug_windows.yml index b4bda930dc81c13224956e0e2cb75ecb26f9e2f5..7f48890eb961aa35dbae70cc22829ba6cb0e2a77 100644 --- a/.github/ISSUE_TEMPLATE/07_bug_windows.yml +++ b/.github/ISSUE_TEMPLATE/07_bug_windows.yml @@ -1,7 +1,7 @@ name: Bug Report (Windows) description: Zed Windows Related Bugs type: "Bug" -labels: ["windows"] +labels: ["platform:windows"] title: "Windows: " body: - type: textarea From 06e03a41aa86bf898cfc5a44d4480404757e23a1 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Sun, 23 Nov 2025 13:24:01 +0100 Subject: [PATCH 0312/1030] terminal_view: Reuse editor's blink manager (#43351) Release Notes: - N/A *or* Added/Fixed/Improved ... --- Cargo.lock | 1 - crates/editor/src/blink_manager.rs | 19 ++- crates/editor/src/editor.rs | 8 +- crates/terminal_view/Cargo.toml | 1 - crates/terminal_view/src/terminal_view.rs | 137 +++++++++++----------- 5 files changed, 89 insertions(+), 77 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bea1424b704eb9e3bc07ddfa2e7b6de817687a72..e41e973041bfe99dec6258e910e1108a842c0748 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17135,7 +17135,6 @@ dependencies = [ "serde_json", "settings", "shellexpand 2.1.2", - "smol", "task", "terminal", "theme", diff --git a/crates/editor/src/blink_manager.rs b/crates/editor/src/blink_manager.rs index 9c2b911f1b068d5d8cc14c3875af08033f34bc66..d99cf6a7d59d40383e572f4638b17edbf3d0da53 100644 --- a/crates/editor/src/blink_manager.rs +++ b/crates/editor/src/blink_manager.rs @@ -1,20 +1,28 @@ -use crate::EditorSettings; use gpui::Context; -use settings::Settings; use settings::SettingsStore; use smol::Timer; use std::time::Duration; +use ui::App; pub struct BlinkManager { blink_interval: Duration, blink_epoch: usize, + /// Whether the blinking is paused. blinking_paused: bool, + /// Whether the cursor should be visibly rendered or not. visible: bool, + /// Whether the blinking currently enabled. enabled: bool, + /// Whether the blinking is enabled in the settings. + blink_enabled_in_settings: fn(&App) -> bool, } impl BlinkManager { - pub fn new(blink_interval: Duration, cx: &mut Context) -> Self { + pub fn new( + blink_interval: Duration, + blink_enabled_in_settings: fn(&App) -> bool, + cx: &mut Context, + ) -> Self { // Make sure we blink the cursors if the setting is re-enabled cx.observe_global::(move |this, cx| { this.blink_cursors(this.blink_epoch, cx) @@ -27,6 +35,7 @@ impl BlinkManager { blinking_paused: false, visible: true, enabled: false, + blink_enabled_in_settings, } } @@ -55,7 +64,7 @@ impl BlinkManager { } fn blink_cursors(&mut self, epoch: usize, cx: &mut Context) { - if EditorSettings::get_global(cx).cursor_blink { + if (self.blink_enabled_in_settings)(cx) { if epoch == self.blink_epoch && self.enabled && !self.blinking_paused { self.visible = !self.visible; cx.notify(); @@ -83,6 +92,7 @@ impl BlinkManager { } } + /// Enable the blinking of the cursor. pub fn enable(&mut self, cx: &mut Context) { if self.enabled { return; @@ -95,6 +105,7 @@ impl BlinkManager { self.blink_cursors(self.blink_epoch, cx); } + /// Disable the blinking of the cursor. 
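/// Disabling also marks the cursor as not visible (`visible = false`) until blinking is
/// enabled again.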
pub fn disable(&mut self, _cx: &mut Context) { self.visible = false; self.enabled = false; diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index d4c09e06cbe2349ec759f0546049c462bf95b0a8..e8d0011a98d4126f6baf542589145c9d1ddc8dda 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -12,7 +12,7 @@ //! //! If you're looking to improve Vim mode, you should check out Vim crate that wraps Editor and overrides its behavior. pub mod actions; -mod blink_manager; +pub mod blink_manager; mod bracket_colorization; mod clangd_ext; pub mod code_context_menus; @@ -1888,7 +1888,11 @@ impl Editor { let selections = SelectionsCollection::new(); let blink_manager = cx.new(|cx| { - let mut blink_manager = BlinkManager::new(CURSOR_BLINK_INTERVAL, cx); + let mut blink_manager = BlinkManager::new( + CURSOR_BLINK_INTERVAL, + |cx| EditorSettings::get_global(cx).cursor_blink, + cx, + ); if is_minimap { blink_manager.disable(cx); } diff --git a/crates/terminal_view/Cargo.toml b/crates/terminal_view/Cargo.toml index 1800562e2fd262d040ef957b402cc650681956a5..eadd00bcbbd7a5469638c2b85d2eb4f1a65b9475 100644 --- a/crates/terminal_view/Cargo.toml +++ b/crates/terminal_view/Cargo.toml @@ -39,7 +39,6 @@ serde.workspace = true serde_json.workspace = true settings.workspace = true shellexpand.workspace = true -smol.workspace = true terminal.workspace = true theme.workspace = true ui.workspace = true diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 66e6c605f9b560dc36db3dde16e84c2ee8c0c5b5..7c8bda83be864353c35b0317efc8599456dca6e5 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -7,7 +7,7 @@ mod terminal_slash_command; pub mod terminal_tab_tooltip; use assistant_slash_command::SlashCommandRegistry; -use editor::{EditorSettings, actions::SelectAll}; +use editor::{EditorSettings, actions::SelectAll, blink_manager::BlinkManager}; use gpui::{ Action, AnyElement, App, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, KeyContext, KeyDownEvent, Keystroke, MouseButton, MouseDownEvent, Pixels, Render, @@ -51,7 +51,6 @@ use workspace::{ use serde::Deserialize; use settings::{Settings, SettingsStore, TerminalBlink, WorkingDirectory}; -use smol::Timer; use zed_actions::assistant::InlineAssist; use std::{ @@ -126,12 +125,10 @@ pub struct TerminalView { has_bell: bool, context_menu: Option<(Entity, gpui::Point, Subscription)>, cursor_shape: CursorShape, - blink_state: bool, + blink_manager: Entity, mode: TerminalMode, blinking_terminal_enabled: bool, cwd_serialized: bool, - blinking_paused: bool, - blink_epoch: usize, hover: Option, hover_tooltip_update: Task<()>, workspace_id: Option, @@ -237,6 +234,25 @@ impl TerminalView { let scroll_handle = TerminalScrollHandle::new(terminal.read(cx)); + let blink_manager = cx.new(|cx| { + BlinkManager::new( + CURSOR_BLINK_INTERVAL, + |cx| { + !matches!( + TerminalSettings::get_global(cx).blinking, + TerminalBlink::Off + ) + }, + cx, + ) + }); + + let _subscriptions = vec![ + focus_in, + focus_out, + cx.observe(&blink_manager, |_, _, cx| cx.notify()), + cx.observe_global::(Self::settings_changed), + ]; Self { terminal, workspace: workspace_handle, @@ -245,10 +261,8 @@ impl TerminalView { focus_handle, context_menu: None, cursor_shape, - blink_state: true, + blink_manager, blinking_terminal_enabled: false, - blinking_paused: false, - blink_epoch: 0, hover: None, hover_tooltip_update: Task::ready(()), mode: TerminalMode::Standalone, @@ 
-259,11 +273,7 @@ impl TerminalView { scroll_handle, cwd_serialized: false, ime_state: None, - _subscriptions: vec![ - focus_in, - focus_out, - cx.observe_global::(Self::settings_changed), - ], + _subscriptions, _terminal_subscriptions: terminal_subscriptions, } } @@ -424,6 +434,11 @@ impl TerminalView { let breadcrumb_visibility_changed = self.show_breadcrumbs != settings.toolbar.breadcrumbs; self.show_breadcrumbs = settings.toolbar.breadcrumbs; + let should_blink = match settings.blinking { + TerminalBlink::Off => false, + TerminalBlink::On => true, + TerminalBlink::TerminalControlled => self.blinking_terminal_enabled, + }; let new_cursor_shape = settings.cursor_shape; let old_cursor_shape = self.cursor_shape; if old_cursor_shape != new_cursor_shape { @@ -433,6 +448,15 @@ impl TerminalView { }); } + self.blink_manager.update( + cx, + if should_blink { + BlinkManager::enable + } else { + BlinkManager::disable + }, + ); + if breadcrumb_visibility_changed { cx.emit(ItemEvent::UpdateBreadcrumbs); } @@ -610,9 +634,8 @@ impl TerminalView { } pub fn should_show_cursor(&self, focused: bool, cx: &mut Context) -> bool { - //Don't blink the cursor when not focused, blinking is disabled, or paused + // Always show cursor when not focused or in special modes if !focused - || self.blinking_paused || self .terminal .read(cx) @@ -623,45 +646,17 @@ impl TerminalView { return true; } + // When focused, check blinking settings and blink manager state match TerminalSettings::get_global(cx).blinking { - //If the user requested to never blink, don't blink it. TerminalBlink::Off => true, - //If the terminal is controlling it, check terminal mode - TerminalBlink::TerminalControlled => { - !self.blinking_terminal_enabled || self.blink_state + TerminalBlink::On | TerminalBlink::TerminalControlled => { + self.blink_manager.read(cx).visible() } - TerminalBlink::On => self.blink_state, - } - } - - fn blink_cursors(&mut self, epoch: usize, window: &mut Window, cx: &mut Context) { - if epoch == self.blink_epoch && !self.blinking_paused { - self.blink_state = !self.blink_state; - cx.notify(); - - let epoch = self.next_blink_epoch(); - cx.spawn_in(window, async move |this, cx| { - Timer::after(CURSOR_BLINK_INTERVAL).await; - this.update_in(cx, |this, window, cx| this.blink_cursors(epoch, window, cx)) - .ok(); - }) - .detach(); } } - pub fn pause_cursor_blinking(&mut self, window: &mut Window, cx: &mut Context) { - self.blink_state = true; - cx.notify(); - - let epoch = self.next_blink_epoch(); - cx.spawn_in(window, async move |this, cx| { - Timer::after(CURSOR_BLINK_INTERVAL).await; - this.update_in(cx, |this, window, cx| { - this.resume_cursor_blinking(epoch, window, cx) - }) - .ok(); - }) - .detach(); + pub fn pause_cursor_blinking(&mut self, _window: &mut Window, cx: &mut Context) { + self.blink_manager.update(cx, BlinkManager::pause_blinking); } pub fn terminal(&self) -> &Entity { @@ -685,23 +680,6 @@ impl TerminalView { cx.notify(); } - fn next_blink_epoch(&mut self) -> usize { - self.blink_epoch += 1; - self.blink_epoch - } - - fn resume_cursor_blinking( - &mut self, - epoch: usize, - window: &mut Window, - cx: &mut Context, - ) { - if epoch == self.blink_epoch { - self.blinking_paused = false; - self.blink_cursors(epoch, window, cx); - } - } - ///Attempt to paste the clipboard into the terminal fn copy(&mut self, _: &Copy, _: &mut Window, cx: &mut Context) { self.terminal.update(cx, |term, _| term.copy(None)); @@ -893,11 +871,21 @@ fn subscribe_for_terminal_events( } Event::BlinkChanged(blinking) => { + 
terminal_view.blinking_terminal_enabled = *blinking; + + // If in terminal-controlled mode and focused, update blink manager if matches!( TerminalSettings::get_global(cx).blinking, TerminalBlink::TerminalControlled - ) { - terminal_view.blinking_terminal_enabled = *blinking; + ) && terminal_view.focus_handle.is_focused(window) + { + terminal_view.blink_manager.update(cx, |manager, cx| { + if *blinking { + manager.enable(cx); + } else { + manager.disable(cx); + } + }); } } @@ -1023,12 +1011,23 @@ impl TerminalView { terminal.set_cursor_shape(self.cursor_shape); terminal.focus_in(); }); - self.blink_cursors(self.blink_epoch, window, cx); + + let should_blink = match TerminalSettings::get_global(cx).blinking { + TerminalBlink::Off => false, + TerminalBlink::On => true, + TerminalBlink::TerminalControlled => self.blinking_terminal_enabled, + }; + + if should_blink { + self.blink_manager.update(cx, BlinkManager::enable); + } + window.invalidate_character_coordinates(); cx.notify(); } - fn focus_out(&mut self, _: &mut Window, cx: &mut Context) { + fn focus_out(&mut self, _window: &mut Window, cx: &mut Context) { + self.blink_manager.update(cx, BlinkManager::disable); self.terminal.update(cx, |terminal, _| { terminal.focus_out(); terminal.set_cursor_shape(CursorShape::Hollow); From 1f03fc62db65a3f81cec3d84cf8b1237cf5d3d79 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Sun, 23 Nov 2025 21:05:43 +0530 Subject: [PATCH 0313/1030] editor: Fix tab tooltips not showing file path for remote files (#43359) Closes #42344 Release Notes: - Fixed editor tab tooltips not showing file path for remote files Here's the before/after, tested both local and remote: https://github.com/user-attachments/assets/2768a0f8-e35b-4eff-aa95-d0decb51ec78 --- crates/editor/src/items.rs | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index e7342f8a5834ded137eaea3ae367442ab99821fd..157ad84d053b9125dfd59243098deb680be7b264 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -23,7 +23,7 @@ use language::{ use lsp::DiagnosticSeverity; use multi_buffer::MultiBufferOffset; use project::{ - Project, ProjectItem as _, ProjectPath, lsp_store::FormatTrigger, + File, Project, ProjectItem as _, ProjectPath, lsp_store::FormatTrigger, project_settings::ProjectSettings, search::SearchQuery, }; use rpc::proto::{self, update_view}; @@ -645,18 +645,20 @@ impl Item for Editor { } fn tab_tooltip_text(&self, cx: &App) -> Option { - let file_path = self - .buffer() - .read(cx) - .as_singleton()? + self.buffer() .read(cx) - .file() - .and_then(|f| f.as_local())? - .abs_path(cx); - - let file_path = file_path.compact().to_string_lossy().into_owned(); - - Some(file_path.into()) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()) + .and_then(|file| File::from_dyn(Some(file))) + .map(|file| { + file.worktree + .read(cx) + .absolutize(&file.path) + .compact() + .to_string_lossy() + .into_owned() + .into() + }) } fn telemetry_event_text(&self) -> Option<&'static str> { From da143c55274ea7f8fda1f43c2a82461bb27003e1 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Sun, 23 Nov 2025 18:26:07 +0100 Subject: [PATCH 0314/1030] Fix inline assist panic (#43364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes a panic that was introduced in #42633. Repro steps: 1. Open the inline assistant and mention a file in the prompt 2. Run the inline assistant 3. 
Remove the mention and insert a different one 4. 💥 This happened because the mention set still held a reference to the old editor, since we create a new one in `PromptEditor::unlink`. Also removes the unused `crates/agent_ui/src/context_picker/completion_provider.rs` file, which the previous PR should have removed but mistakenly left in place. Release Notes: - N/A --- crates/agent_ui/src/acp/message_editor.rs | 29 +- crates/agent_ui/src/completion_provider.rs | 97 +- .../src/context_picker/completion_provider.rs | 1701 ----------------- crates/agent_ui/src/inline_prompt_editor.rs | 64 +- crates/agent_ui/src/mention_set.rs | 50 +- 5 files changed, 138 insertions(+), 1803 deletions(-) delete mode 100644 crates/agent_ui/src/context_picker/completion_provider.rs diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 9fbdbb04986294aa319c04cb5d76de63f4a758ab..169220a3614bf2d74d24a9638f87b9613a556bd6 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -15,8 +15,9 @@ use anyhow::{Result, anyhow}; use collections::HashSet; use editor::{ Addon, AnchorRangeExt, ContextMenuOptions, ContextMenuPlacement, Editor, EditorElement, - EditorEvent, EditorMode, EditorStyle, Inlay, MultiBuffer, MultiBufferOffset, ToOffset, - actions::Paste, code_context_menus::CodeContextMenu, scroll::Autoscroll, + EditorEvent, EditorMode, EditorStyle, Inlay, MultiBuffer, MultiBufferOffset, + MultiBufferSnapshot, ToOffset, actions::Paste, code_context_menus::CodeContextMenu, + scroll::Autoscroll, }; use futures::{FutureExt as _, future::join_all}; use gpui::{ @@ -133,18 +134,16 @@ impl MessageEditor { editor.register_addon(MessageEditorAddon::new()); editor }); - let mention_set = cx.new(|cx| { + let mention_set = cx.new(|_cx| { MentionSet::new( - editor.clone(), project.downgrade(), history_store.clone(), prompt_store.clone(), - window, - cx, ) }); let completion_provider = Rc::new(PromptCompletionProvider::new( cx.entity(), + editor.downgrade(), mention_set.clone(), history_store.clone(), prompt_store.clone(), @@ -166,14 +165,18 @@ let mut has_hint = false; let mut subscriptions = Vec::new(); subscriptions.push(cx.subscribe_in(&editor, window, { move |this, editor, event, window, cx| { if let EditorEvent::Edited { ..
} = event && !editor.read(cx).read_only(cx) { editor.update(cx, |editor, cx| { + let snapshot = editor.snapshot(window, cx); + this.mention_set + .update(cx, |mention_set, _cx| mention_set.remove_invalid(&snapshot)); + let new_hints = this - .command_hint(editor.buffer(), cx) + .command_hint(snapshot.buffer()) .into_iter() .collect::>(); let has_new_hint = !new_hints.is_empty(); @@ -206,13 +209,12 @@ impl MessageEditor { } } - fn command_hint(&self, buffer: &Entity, cx: &App) -> Option { + fn command_hint(&self, snapshot: &MultiBufferSnapshot) -> Option { let available_commands = self.available_commands.borrow(); if available_commands.is_empty() { return None; } - let snapshot = buffer.read(cx).snapshot(cx); let parsed_command = SlashCommandCompletion::try_parse(&snapshot.text(), 0)?; if parsed_command.argument.is_some() { return None; @@ -286,6 +288,7 @@ impl MessageEditor { content_len, uri, supports_images, + self.editor.clone(), &workspace, window, cx, @@ -480,7 +483,7 @@ impl MessageEditor { editor.remove_creases( self.mention_set.update(cx, |mention_set, _cx| { mention_set - .remove_all() + .clear() .map(|(crease_id, _)| crease_id) .collect::>() }), @@ -628,6 +631,7 @@ impl MessageEditor { content_len, uri, supports_images, + self.editor.clone(), &workspace, window, cx, @@ -659,6 +663,7 @@ impl MessageEditor { PromptCompletionProvider::>::completion_for_action( PromptContextAction::AddSelections, anchor..anchor, + self.editor.downgrade(), self.mention_set.downgrade(), &workspace, cx, diff --git a/crates/agent_ui/src/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs index 4e813570a42b9d7fee3f4ea5ef9ad6dafe1cc80e..61ce313cb0c0c6ed91a08aa07544e766de5c581a 100644 --- a/crates/agent_ui/src/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -181,6 +181,7 @@ pub trait PromptCompletionProviderDelegate: Send + Sync + 'static { pub struct PromptCompletionProvider { source: Arc, + editor: WeakEntity, mention_set: Entity, history_store: Entity, prompt_store: Option>, @@ -190,6 +191,7 @@ pub struct PromptCompletionProvider { impl PromptCompletionProvider { pub fn new( source: T, + editor: WeakEntity, mention_set: Entity, history_store: Entity, prompt_store: Option>, @@ -197,6 +199,7 @@ impl PromptCompletionProvider { ) -> Self { Self { source: Arc::new(source), + editor, mention_set, workspace, history_store, @@ -207,6 +210,7 @@ impl PromptCompletionProvider { fn completion_for_entry( entry: PromptContextEntry, source_range: Range, + editor: WeakEntity, mention_set: WeakEntity, workspace: &Entity, cx: &mut App, @@ -227,9 +231,14 @@ impl PromptCompletionProvider { // inserted confirm: Some(Arc::new(|_, _, _| true)), }), - PromptContextEntry::Action(action) => { - Self::completion_for_action(action, source_range, mention_set, workspace, cx) - } + PromptContextEntry::Action(action) => Self::completion_for_action( + action, + source_range, + editor, + mention_set, + workspace, + cx, + ), } } @@ -238,6 +247,7 @@ impl PromptCompletionProvider { source_range: Range, recent: bool, source: Arc, + editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, cx: &mut App, @@ -269,6 +279,7 @@ impl PromptCompletionProvider { new_text_len - 1, uri, source, + editor, mention_set, workspace, )), @@ -279,6 +290,7 @@ impl PromptCompletionProvider { rule: RulesContextEntry, source_range: Range, source: Arc, + editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, cx: &mut App, @@ -306,6 +318,7 @@ impl PromptCompletionProvider { new_text_len - 1, uri, source, + 
editor, mention_set, workspace, )), @@ -319,6 +332,7 @@ impl PromptCompletionProvider { is_directory: bool, source_range: Range, source: Arc, + editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, project: Entity, @@ -364,6 +378,7 @@ impl PromptCompletionProvider { new_text_len - 1, uri, source, + editor, mention_set, workspace, )), @@ -374,6 +389,7 @@ impl PromptCompletionProvider { symbol: Symbol, source_range: Range, source: Arc, + editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, cx: &mut App, @@ -425,6 +441,7 @@ impl PromptCompletionProvider { new_text_len - 1, uri, source, + editor, mention_set, workspace, )), @@ -435,6 +452,7 @@ impl PromptCompletionProvider { source_range: Range, url_to_fetch: SharedString, source: Arc, + editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, cx: &mut App, @@ -463,6 +481,7 @@ impl PromptCompletionProvider { new_text.len() - 1, mention_uri, source, + editor, mention_set, workspace, )), @@ -472,6 +491,7 @@ impl PromptCompletionProvider { pub(crate) fn completion_for_action( action: PromptContextAction, source_range: Range, + editor: WeakEntity, mention_set: WeakEntity, workspace: &Entity, cx: &mut App, @@ -496,20 +516,24 @@ impl PromptCompletionProvider { let callback = Arc::new({ let source_range = source_range.clone(); move |_, window: &mut Window, cx: &mut App| { + let editor = editor.clone(); let selections = selections.clone(); let mention_set = mention_set.clone(); let source_range = source_range.clone(); window.defer(cx, move |window, cx| { - mention_set - .update(cx, |store, cx| { - store.confirm_mention_for_selection( - source_range, - selections, - window, - cx, - ) - }) - .ok(); + if let Some(editor) = editor.upgrade() { + mention_set + .update(cx, |store, cx| { + store.confirm_mention_for_selection( + source_range, + selections, + editor, + window, + cx, + ) + }) + .ok(); + } }); false } @@ -853,6 +877,7 @@ impl CompletionProvider for PromptCompletio ..snapshot.anchor_after(state.source_range().end); let source = self.source.clone(); + let editor = self.editor.clone(); let mention_set = self.mention_set.downgrade(); match state { ContextCompletion::SlashCommand(SlashCommandCompletion { @@ -955,6 +980,7 @@ impl CompletionProvider for PromptCompletio mat.is_dir, source_range.clone(), source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), project.clone(), @@ -967,6 +993,7 @@ impl CompletionProvider for PromptCompletio symbol, source_range.clone(), source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), cx, @@ -978,6 +1005,7 @@ impl CompletionProvider for PromptCompletio source_range.clone(), false, source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), cx, @@ -988,6 +1016,7 @@ impl CompletionProvider for PromptCompletio source_range.clone(), true, source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), cx, @@ -997,6 +1026,7 @@ impl CompletionProvider for PromptCompletio user_rules, source_range.clone(), source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), cx, @@ -1006,6 +1036,7 @@ impl CompletionProvider for PromptCompletio source_range.clone(), url, source.clone(), + editor.clone(), mention_set.clone(), workspace.clone(), cx, @@ -1015,6 +1046,7 @@ impl CompletionProvider for PromptCompletio Self::completion_for_entry( entry, source_range.clone(), + editor.clone(), mention_set.clone(), &workspace, cx, @@ -1091,33 +1123,38 @@ fn confirm_completion_callback( content_len: usize, mention_uri: MentionUri, source: Arc, + 
editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, ) -> Arc bool + Send + Sync> { Arc::new(move |_, window, cx| { let source = source.clone(); + let editor = editor.clone(); let mention_set = mention_set.clone(); let crease_text = crease_text.clone(); let mention_uri = mention_uri.clone(); let workspace = workspace.clone(); window.defer(cx, move |window, cx| { - mention_set - .clone() - .update(cx, |mention_set, cx| { - mention_set - .confirm_mention_completion( - crease_text, - start, - content_len, - mention_uri, - source.supports_images(cx), - &workspace, - window, - cx, - ) - .detach(); - }) - .ok(); + if let Some(editor) = editor.upgrade() { + mention_set + .clone() + .update(cx, |mention_set, cx| { + mention_set + .confirm_mention_completion( + crease_text, + start, + content_len, + mention_uri, + source.supports_images(cx), + editor, + &workspace, + window, + cx, + ) + .detach(); + }) + .ok(); + } }); false }) diff --git a/crates/agent_ui/src/context_picker/completion_provider.rs b/crates/agent_ui/src/context_picker/completion_provider.rs deleted file mode 100644 index 60e27b305437003b99326da29137727faaaf5c7c..0000000000000000000000000000000000000000 --- a/crates/agent_ui/src/context_picker/completion_provider.rs +++ /dev/null @@ -1,1701 +0,0 @@ -use std::ops::Range; -use std::path::{Path, PathBuf}; -use std::sync::Arc; -use std::sync::atomic::AtomicBool; - -use agent::{HistoryEntry, HistoryStore}; -use anyhow::Result; -use editor::{CompletionProvider, Editor, ExcerptId, ToOffset as _}; -use file_icons::FileIcons; -use fuzzy::{StringMatch, StringMatchCandidate}; -use gpui::{App, Entity, Task, WeakEntity}; -use http_client::HttpClientWithUrl; -use itertools::Itertools; -use language::{Buffer, CodeLabel, CodeLabelBuilder, HighlightId}; -use lsp::CompletionContext; -use project::lsp_store::SymbolLocation; -use project::{ - Completion, CompletionDisplayOptions, CompletionIntent, CompletionResponse, Project, - ProjectPath, Symbol, WorktreeId, -}; -use prompt_store::PromptStore; -use rope::Point; -use text::{Anchor, OffsetRangeExt, ToPoint}; -use ui::prelude::*; -use util::ResultExt as _; -use util::paths::PathStyle; -use util::rel_path::RelPath; -use workspace::Workspace; - -use crate::{ - context::{AgentContextHandle, AgentContextKey, RULES_ICON}, - context_store::ContextStore, -}; - -use super::fetch_context_picker::fetch_url_content; -use super::file_context_picker::{FileMatch, search_files}; -use super::rules_context_picker::{RulesContextEntry, search_rules}; -use super::symbol_context_picker::SymbolMatch; -use super::symbol_context_picker::search_symbols; -use super::thread_context_picker::search_threads; -use super::{ - ContextPickerAction, ContextPickerEntry, ContextPickerMode, MentionLink, RecentEntry, - available_context_picker_entries, recent_context_picker_entries_with_store, selection_ranges, -}; -use crate::inline_prompt_editor::ContextCreasesAddon; - -pub(crate) enum Match { - File(FileMatch), - Symbol(SymbolMatch), - Thread(HistoryEntry), - RecentThread(HistoryEntry), - Fetch(SharedString), - Rules(RulesContextEntry), - Entry(EntryMatch), -} - -pub struct EntryMatch { - mat: Option, - entry: ContextPickerEntry, -} - -impl Match { - pub fn score(&self) -> f64 { - match self { - Match::File(file) => file.mat.score, - Match::Entry(mode) => mode.mat.as_ref().map(|mat| mat.score).unwrap_or(1.), - Match::Thread(_) => 1., - Match::RecentThread(_) => 1., - Match::Symbol(_) => 1., - Match::Fetch(_) => 1., - Match::Rules(_) => 1., - } - } -} - -fn search( - mode: 
Option, - query: String, - cancellation_flag: Arc, - recent_entries: Vec, - prompt_store: Option>, - thread_store: Option>, - workspace: Entity, - cx: &mut App, -) -> Task> { - match mode { - Some(ContextPickerMode::File) => { - let search_files_task = search_files(query, cancellation_flag, &workspace, cx); - cx.background_spawn(async move { - search_files_task - .await - .into_iter() - .map(Match::File) - .collect() - }) - } - - Some(ContextPickerMode::Symbol) => { - let search_symbols_task = search_symbols(query, cancellation_flag, &workspace, cx); - cx.background_spawn(async move { - search_symbols_task - .await - .into_iter() - .map(Match::Symbol) - .collect() - }) - } - - Some(ContextPickerMode::Thread) => { - if let Some(thread_store) = thread_store.as_ref().and_then(|t| t.upgrade()) { - let search_threads_task = - search_threads(query, cancellation_flag, &thread_store, cx); - cx.background_spawn(async move { - search_threads_task - .await - .into_iter() - .map(Match::Thread) - .collect() - }) - } else { - Task::ready(Vec::new()) - } - } - - Some(ContextPickerMode::Fetch) => { - if !query.is_empty() { - Task::ready(vec![Match::Fetch(query.into())]) - } else { - Task::ready(Vec::new()) - } - } - - Some(ContextPickerMode::Rules) => { - if let Some(prompt_store) = prompt_store.as_ref().and_then(|p| p.upgrade()) { - let search_rules_task = search_rules(query, cancellation_flag, &prompt_store, cx); - cx.background_spawn(async move { - search_rules_task - .await - .into_iter() - .map(Match::Rules) - .collect::>() - }) - } else { - Task::ready(Vec::new()) - } - } - - None => { - if query.is_empty() { - let mut matches = recent_entries - .into_iter() - .map(|entry| match entry { - super::RecentEntry::File { - project_path, - path_prefix, - } => Match::File(FileMatch { - mat: fuzzy::PathMatch { - score: 1., - positions: Vec::new(), - worktree_id: project_path.worktree_id.to_usize(), - path: project_path.path, - path_prefix, - is_dir: false, - distance_to_relative_ancestor: 0, - }, - is_recent: true, - }), - super::RecentEntry::Thread(entry) => Match::RecentThread(entry), - }) - .collect::>(); - - matches.extend( - available_context_picker_entries(&prompt_store, &thread_store, &workspace, cx) - .into_iter() - .map(|mode| { - Match::Entry(EntryMatch { - entry: mode, - mat: None, - }) - }), - ); - - Task::ready(matches) - } else { - let executor = cx.background_executor().clone(); - - let search_files_task = - search_files(query.clone(), cancellation_flag, &workspace, cx); - - let entries = - available_context_picker_entries(&prompt_store, &thread_store, &workspace, cx); - let entry_candidates = entries - .iter() - .enumerate() - .map(|(ix, entry)| StringMatchCandidate::new(ix, entry.keyword())) - .collect::>(); - - cx.background_spawn(async move { - let mut matches = search_files_task - .await - .into_iter() - .map(Match::File) - .collect::>(); - - let entry_matches = fuzzy::match_strings( - &entry_candidates, - &query, - false, - true, - 100, - &Arc::new(AtomicBool::default()), - executor, - ) - .await; - - matches.extend(entry_matches.into_iter().map(|mat| { - Match::Entry(EntryMatch { - entry: entries[mat.candidate_id], - mat: Some(mat), - }) - })); - - matches.sort_by(|a, b| { - b.score() - .partial_cmp(&a.score()) - .unwrap_or(std::cmp::Ordering::Equal) - }); - - matches - }) - } - } - } -} - -pub struct ContextPickerCompletionProvider { - workspace: WeakEntity, - context_store: WeakEntity, - thread_store: Option>, - prompt_store: Option>, - editor: WeakEntity, - excluded_buffer: Option>, 
-} - -impl ContextPickerCompletionProvider { - pub fn new( - workspace: WeakEntity, - context_store: WeakEntity, - thread_store: Option>, - prompt_store: Option>, - editor: WeakEntity, - exclude_buffer: Option>, - ) -> Self { - Self { - workspace, - context_store, - thread_store, - prompt_store, - editor, - excluded_buffer: exclude_buffer, - } - } - - fn completion_for_entry( - entry: ContextPickerEntry, - excerpt_id: ExcerptId, - source_range: Range, - editor: Entity, - context_store: Entity, - workspace: &Entity, - cx: &mut App, - ) -> Option { - match entry { - ContextPickerEntry::Mode(mode) => Some(Completion { - replace_range: source_range, - new_text: format!("@{} ", mode.keyword()), - label: CodeLabel::plain(mode.label().to_string(), None), - icon_path: Some(mode.icon().path().into()), - documentation: None, - source: project::CompletionSource::Custom, - match_start: None, - snippet_deduplication_key: None, - insert_text_mode: None, - // This ensures that when a user accepts this completion, the - // completion menu will still be shown after "@category " is - // inserted - confirm: Some(Arc::new(|_, _, _| true)), - }), - ContextPickerEntry::Action(action) => { - let (new_text, on_action) = match action { - ContextPickerAction::AddSelections => { - let selections = selection_ranges(workspace, cx); - - let selection_infos = selections - .iter() - .map(|(buffer, range)| { - let full_path = buffer - .read(cx) - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| PathBuf::from("untitled")); - let file_name = full_path - .file_name() - .unwrap_or_default() - .to_string_lossy() - .to_string(); - let line_range = range.to_point(&buffer.read(cx).snapshot()); - - let link = MentionLink::for_selection( - &file_name, - &full_path.to_string_lossy(), - line_range.start.row as usize..line_range.end.row as usize, - ); - (file_name, link, line_range) - }) - .collect::>(); - - let new_text = format!( - "{} ", - selection_infos.iter().map(|(_, link, _)| link).join(" ") - ); - - let callback = Arc::new({ - move |_, window: &mut Window, cx: &mut App| { - context_store.update(cx, |context_store, cx| { - for (buffer, range) in &selections { - context_store.add_selection( - buffer.clone(), - range.clone(), - cx, - ); - } - }); - - let editor = editor.clone(); - let selection_infos = selection_infos.clone(); - window.defer(cx, move |window, cx| { - let mut current_offset = 0; - for (file_name, link, line_range) in selection_infos.iter() { - let snapshot = - editor.read(cx).buffer().read(cx).snapshot(cx); - let Some(start) = snapshot - .anchor_in_excerpt(excerpt_id, source_range.start) - else { - return; - }; - - let offset = start.to_offset(&snapshot) + current_offset; - let text_len = link.len(); - - let range = snapshot.anchor_after(offset) - ..snapshot.anchor_after(offset + text_len); - - let crease = super::crease_for_mention( - format!( - "{} ({}-{})", - file_name, - line_range.start.row + 1, - line_range.end.row + 1 - ) - .into(), - IconName::Reader.path().into(), - range, - editor.downgrade(), - ); - - editor.update(cx, |editor, cx| { - editor.insert_creases(vec![crease.clone()], cx); - editor.fold_creases(vec![crease], false, window, cx); - }); - - current_offset += text_len + 1; - } - }); - - false - } - }); - - (new_text, callback) - } - }; - - Some(Completion { - replace_range: source_range.clone(), - new_text, - label: CodeLabel::plain(action.label().to_string(), None), - icon_path: Some(action.icon().path().into()), - documentation: None, - source: project::CompletionSource::Custom, 
- match_start: None, - snippet_deduplication_key: None, - insert_text_mode: None, - // This ensures that when a user accepts this completion, the - // completion menu will still be shown after "@category " is - // inserted - confirm: Some(on_action), - }) - } - } - } - - fn completion_for_thread( - thread_entry: HistoryEntry, - excerpt_id: ExcerptId, - source_range: Range, - recent: bool, - editor: Entity, - context_store: Entity, - thread_store: Entity, - project: Entity, - ) -> Completion { - let icon_for_completion = if recent { - IconName::HistoryRerun - } else { - IconName::Thread - }; - let new_text = format!("{} ", MentionLink::for_thread(&thread_entry)); - let new_text_len = new_text.len(); - Completion { - replace_range: source_range.clone(), - new_text, - label: CodeLabel::plain(thread_entry.title().to_string(), None), - match_start: None, - snippet_deduplication_key: None, - documentation: None, - insert_text_mode: None, - source: project::CompletionSource::Custom, - icon_path: Some(icon_for_completion.path().into()), - confirm: Some(confirm_completion_callback( - IconName::Thread.path().into(), - thread_entry.title().clone(), - excerpt_id, - source_range.start, - new_text_len - 1, - editor, - context_store.clone(), - move |window, cx| match &thread_entry { - HistoryEntry::AcpThread(thread) => { - let context_store = context_store.clone(); - let load_thread_task = agent::load_agent_thread( - thread.id.clone(), - thread_store.clone(), - project.clone(), - cx, - ); - window.spawn::<_, Option<_>>(cx, async move |cx| { - let thread = load_thread_task.await.log_err()?; - let context = context_store - .update(cx, |context_store, cx| { - context_store.add_thread(thread, false, cx) - }) - .ok()??; - Some(context) - }) - } - HistoryEntry::TextThread(thread) => { - let path = thread.path.clone(); - let context_store = context_store.clone(); - let thread_store = thread_store.clone(); - cx.spawn::<_, Option<_>>(async move |cx| { - let thread = thread_store - .update(cx, |store, cx| store.load_text_thread(path, cx)) - .ok()? 
- .await - .log_err()?; - let context = context_store - .update(cx, |context_store, cx| { - context_store.add_text_thread(thread, false, cx) - }) - .ok()??; - Some(context) - }) - } - }, - )), - } - } - - fn completion_for_rules( - rules: RulesContextEntry, - excerpt_id: ExcerptId, - source_range: Range, - editor: Entity, - context_store: Entity, - ) -> Completion { - let new_text = format!("{} ", MentionLink::for_rule(&rules)); - let new_text_len = new_text.len(); - Completion { - replace_range: source_range.clone(), - new_text, - label: CodeLabel::plain(rules.title.to_string(), None), - match_start: None, - snippet_deduplication_key: None, - documentation: None, - insert_text_mode: None, - source: project::CompletionSource::Custom, - icon_path: Some(RULES_ICON.path().into()), - confirm: Some(confirm_completion_callback( - RULES_ICON.path().into(), - rules.title.clone(), - excerpt_id, - source_range.start, - new_text_len - 1, - editor, - context_store.clone(), - move |_, cx| { - let user_prompt_id = rules.prompt_id; - let context = context_store.update(cx, |context_store, cx| { - context_store.add_rules(user_prompt_id, false, cx) - }); - Task::ready(context) - }, - )), - } - } - - fn completion_for_fetch( - source_range: Range, - url_to_fetch: SharedString, - excerpt_id: ExcerptId, - editor: Entity, - context_store: Entity, - http_client: Arc, - ) -> Completion { - let new_text = format!("{} ", MentionLink::for_fetch(&url_to_fetch)); - let new_text_len = new_text.len(); - Completion { - replace_range: source_range.clone(), - new_text, - label: CodeLabel::plain(url_to_fetch.to_string(), None), - documentation: None, - source: project::CompletionSource::Custom, - icon_path: Some(IconName::ToolWeb.path().into()), - match_start: None, - snippet_deduplication_key: None, - insert_text_mode: None, - confirm: Some(confirm_completion_callback( - IconName::ToolWeb.path().into(), - url_to_fetch.clone(), - excerpt_id, - source_range.start, - new_text_len - 1, - editor, - context_store.clone(), - move |_, cx| { - let context_store = context_store.clone(); - let http_client = http_client.clone(); - let url_to_fetch = url_to_fetch.clone(); - cx.spawn(async move |cx| { - if let Some(context) = context_store - .read_with(cx, |context_store, _| { - context_store.get_url_context(url_to_fetch.clone()) - }) - .ok()? 
- { - return Some(context); - } - let content = cx - .background_spawn(fetch_url_content( - http_client, - url_to_fetch.to_string(), - )) - .await - .log_err()?; - context_store - .update(cx, |context_store, cx| { - context_store.add_fetched_url(url_to_fetch.to_string(), content, cx) - }) - .ok() - }) - }, - )), - } - } - - fn completion_for_path( - project_path: ProjectPath, - path_prefix: &RelPath, - is_recent: bool, - is_directory: bool, - excerpt_id: ExcerptId, - source_range: Range, - path_style: PathStyle, - editor: Entity, - context_store: Entity, - cx: &App, - ) -> Completion { - let (file_name, directory) = super::file_context_picker::extract_file_name_and_directory( - &project_path.path, - path_prefix, - path_style, - ); - - let label = - build_code_label_for_full_path(&file_name, directory.as_ref().map(|s| s.as_ref()), cx); - let full_path = if let Some(directory) = directory { - format!("{}{}", directory, file_name) - } else { - file_name.to_string() - }; - - let path = Path::new(&full_path); - let crease_icon_path = if is_directory { - FileIcons::get_folder_icon(false, path, cx) - .unwrap_or_else(|| IconName::Folder.path().into()) - } else { - FileIcons::get_icon(path, cx).unwrap_or_else(|| IconName::File.path().into()) - }; - let completion_icon_path = if is_recent { - IconName::HistoryRerun.path().into() - } else { - crease_icon_path.clone() - }; - - let new_text = format!("{} ", MentionLink::for_file(&file_name, &full_path)); - let new_text_len = new_text.len(); - Completion { - replace_range: source_range.clone(), - new_text, - label, - documentation: None, - source: project::CompletionSource::Custom, - icon_path: Some(completion_icon_path), - match_start: None, - snippet_deduplication_key: None, - insert_text_mode: None, - confirm: Some(confirm_completion_callback( - crease_icon_path, - file_name, - excerpt_id, - source_range.start, - new_text_len - 1, - editor, - context_store.clone(), - move |_, cx| { - if is_directory { - Task::ready( - context_store - .update(cx, |context_store, cx| { - context_store.add_directory(&project_path, false, cx) - }) - .log_err() - .flatten(), - ) - } else { - let result = context_store.update(cx, |context_store, cx| { - context_store.add_file_from_path(project_path.clone(), false, cx) - }); - cx.spawn(async move |_| result.await.log_err().flatten()) - } - }, - )), - } - } - - fn completion_for_symbol( - symbol: Symbol, - excerpt_id: ExcerptId, - source_range: Range, - editor: Entity, - context_store: Entity, - workspace: Entity, - cx: &mut App, - ) -> Option { - let path_style = workspace.read(cx).path_style(cx); - let SymbolLocation::InProject(symbol_path) = &symbol.path else { - return None; - }; - let _path_prefix = workspace - .read(cx) - .project() - .read(cx) - .worktree_for_id(symbol_path.worktree_id, cx)?; - let path_prefix = RelPath::empty(); - - let (file_name, directory) = super::file_context_picker::extract_file_name_and_directory( - &symbol_path.path, - path_prefix, - path_style, - ); - let full_path = if let Some(directory) = directory { - format!("{}{}", directory, file_name) - } else { - file_name.to_string() - }; - - let comment_id = cx.theme().syntax().highlight_id("comment").map(HighlightId); - let mut label = CodeLabelBuilder::default(); - label.push_str(&symbol.name, None); - label.push_str(" ", None); - label.push_str(&file_name, comment_id); - label.push_str(&format!(" L{}", symbol.range.start.0.row + 1), comment_id); - - let new_text = format!("{} ", MentionLink::for_symbol(&symbol.name, &full_path)); - let 
new_text_len = new_text.len(); - Some(Completion { - replace_range: source_range.clone(), - new_text, - label: label.build(), - documentation: None, - source: project::CompletionSource::Custom, - icon_path: Some(IconName::Code.path().into()), - match_start: None, - snippet_deduplication_key: None, - insert_text_mode: None, - confirm: Some(confirm_completion_callback( - IconName::Code.path().into(), - symbol.name.clone().into(), - excerpt_id, - source_range.start, - new_text_len - 1, - editor, - context_store.clone(), - move |_, cx| { - let symbol = symbol.clone(); - let context_store = context_store.clone(); - let workspace = workspace.clone(); - let result = super::symbol_context_picker::add_symbol( - symbol, - false, - workspace, - context_store.downgrade(), - cx, - ); - cx.spawn(async move |_| result.await.log_err()?.0) - }, - )), - }) - } -} - -fn build_code_label_for_full_path(file_name: &str, directory: Option<&str>, cx: &App) -> CodeLabel { - let comment_id = cx.theme().syntax().highlight_id("comment").map(HighlightId); - let mut label = CodeLabelBuilder::default(); - - label.push_str(file_name, None); - label.push_str(" ", None); - - if let Some(directory) = directory { - label.push_str(directory, comment_id); - } - - label.build() -} - -impl CompletionProvider for ContextPickerCompletionProvider { - fn completions( - &self, - excerpt_id: ExcerptId, - buffer: &Entity, - buffer_position: Anchor, - _trigger: CompletionContext, - _window: &mut Window, - cx: &mut Context, - ) -> Task>> { - let snapshot = buffer.read(cx).snapshot(); - let position = buffer_position.to_point(&snapshot); - let line_start = Point::new(position.row, 0); - let offset_to_line = snapshot.point_to_offset(line_start); - let mut lines = snapshot.text_for_range(line_start..position).lines(); - let Some(line) = lines.next() else { - return Task::ready(Ok(Vec::new())); - }; - let Some(state) = MentionCompletion::try_parse(line, offset_to_line) else { - return Task::ready(Ok(Vec::new())); - }; - - let Some((workspace, context_store)) = - self.workspace.upgrade().zip(self.context_store.upgrade()) - else { - return Task::ready(Ok(Vec::new())); - }; - - let source_range = snapshot.anchor_before(state.source_range.start) - ..snapshot.anchor_after(state.source_range.end); - - let thread_store = self.thread_store.clone(); - let prompt_store = self.prompt_store.clone(); - let editor = self.editor.clone(); - let http_client = workspace.read(cx).client().http_client(); - let path_style = workspace.read(cx).path_style(cx); - - let MentionCompletion { mode, argument, .. 
} = state; - let query = argument.unwrap_or_else(|| "".to_string()); - - let excluded_path = self - .excluded_buffer - .as_ref() - .and_then(WeakEntity::upgrade) - .and_then(|b| b.read(cx).file()) - .map(|file| ProjectPath::from_file(file.as_ref(), cx)); - - let recent_entries = recent_context_picker_entries_with_store( - context_store.clone(), - thread_store.clone(), - workspace.clone(), - excluded_path.clone(), - cx, - ); - - let search_task = search( - mode, - query, - Arc::::default(), - recent_entries, - prompt_store, - thread_store.clone(), - workspace.clone(), - cx, - ); - let project = workspace.read(cx).project().downgrade(); - - cx.spawn(async move |_, cx| { - let matches = search_task.await; - let Some((editor, project)) = editor.upgrade().zip(project.upgrade()) else { - return Ok(Vec::new()); - }; - - let completions = cx.update(|cx| { - matches - .into_iter() - .filter_map(|mat| match mat { - Match::File(FileMatch { mat, is_recent }) => { - let project_path = ProjectPath { - worktree_id: WorktreeId::from_usize(mat.worktree_id), - path: mat.path.clone(), - }; - - if excluded_path.as_ref() == Some(&project_path) { - return None; - } - - // If path is empty, this means we're matching with the root directory itself - // so we use the path_prefix as the name - let path_prefix = if mat.path.is_empty() { - project - .read(cx) - .worktree_for_id(project_path.worktree_id, cx) - .map(|wt| wt.read(cx).root_name().into()) - .unwrap_or_else(|| mat.path_prefix.clone()) - } else { - mat.path_prefix.clone() - }; - - Some(Self::completion_for_path( - project_path, - &path_prefix, - is_recent, - mat.is_dir, - excerpt_id, - source_range.clone(), - path_style, - editor.clone(), - context_store.clone(), - cx, - )) - } - - Match::Symbol(SymbolMatch { symbol, .. }) => Self::completion_for_symbol( - symbol, - excerpt_id, - source_range.clone(), - editor.clone(), - context_store.clone(), - workspace.clone(), - cx, - ), - Match::Thread(thread) => { - let thread_store = thread_store.as_ref().and_then(|t| t.upgrade())?; - Some(Self::completion_for_thread( - thread, - excerpt_id, - source_range.clone(), - false, - editor.clone(), - context_store.clone(), - thread_store, - project.clone(), - )) - } - Match::RecentThread(thread) => { - let thread_store = thread_store.as_ref().and_then(|t| t.upgrade())?; - Some(Self::completion_for_thread( - thread, - excerpt_id, - source_range.clone(), - true, - editor.clone(), - context_store.clone(), - thread_store, - project.clone(), - )) - } - Match::Rules(user_rules) => Some(Self::completion_for_rules( - user_rules, - excerpt_id, - source_range.clone(), - editor.clone(), - context_store.clone(), - )), - - Match::Fetch(url) => Some(Self::completion_for_fetch( - source_range.clone(), - url, - excerpt_id, - editor.clone(), - context_store.clone(), - http_client.clone(), - )), - - Match::Entry(EntryMatch { entry, .. }) => Self::completion_for_entry( - entry, - excerpt_id, - source_range.clone(), - editor.clone(), - context_store.clone(), - &workspace, - cx, - ), - }) - .collect() - })?; - - Ok(vec![CompletionResponse { - completions, - display_options: CompletionDisplayOptions::default(), - // Since this does its own filtering (see `filter_completions()` returns false), - // there is no benefit to computing whether this set of completions is incomplete. 
- is_incomplete: true, - }]) - }) - } - - fn is_completion_trigger( - &self, - buffer: &Entity, - position: language::Anchor, - _text: &str, - _trigger_in_words: bool, - _menu_is_open: bool, - cx: &mut Context, - ) -> bool { - let buffer = buffer.read(cx); - let position = position.to_point(buffer); - let line_start = Point::new(position.row, 0); - let offset_to_line = buffer.point_to_offset(line_start); - let mut lines = buffer.text_for_range(line_start..position).lines(); - if let Some(line) = lines.next() { - MentionCompletion::try_parse(line, offset_to_line) - .map(|completion| { - completion.source_range.start <= offset_to_line + position.column as usize - && completion.source_range.end >= offset_to_line + position.column as usize - }) - .unwrap_or(false) - } else { - false - } - } - - fn sort_completions(&self) -> bool { - false - } - - fn filter_completions(&self) -> bool { - false - } -} - -fn confirm_completion_callback( - crease_icon_path: SharedString, - crease_text: SharedString, - excerpt_id: ExcerptId, - start: Anchor, - content_len: usize, - editor: Entity, - context_store: Entity, - add_context_fn: impl Fn(&mut Window, &mut App) -> Task> - + Send - + Sync - + 'static, -) -> Arc bool + Send + Sync> { - Arc::new(move |_, window, cx| { - let context = add_context_fn(window, cx); - - let crease_text = crease_text.clone(); - let crease_icon_path = crease_icon_path.clone(); - let editor = editor.clone(); - let context_store = context_store.clone(); - window.defer(cx, move |window, cx| { - let crease_id = crate::context_picker::insert_crease_for_mention( - excerpt_id, - start, - content_len, - crease_text.clone(), - crease_icon_path, - editor.clone(), - window, - cx, - ); - cx.spawn(async move |cx| { - let crease_id = crease_id?; - let context = context.await?; - editor - .update(cx, |editor, cx| { - if let Some(addon) = editor.addon_mut::() { - addon.add_creases( - &context_store, - AgentContextKey(context), - [(crease_id, crease_text)], - cx, - ); - } - }) - .ok() - }) - .detach(); - }); - false - }) -} - -#[derive(Debug, Default, PartialEq)] -struct MentionCompletion { - source_range: Range, - mode: Option, - argument: Option, -} - -impl MentionCompletion { - fn try_parse(line: &str, offset_to_line: usize) -> Option { - let last_mention_start = line.rfind('@')?; - if last_mention_start >= line.len() { - return Some(Self::default()); - } - if last_mention_start > 0 - && line - .chars() - .nth(last_mention_start - 1) - .is_some_and(|c| !c.is_whitespace()) - { - return None; - } - - let rest_of_line = &line[last_mention_start + 1..]; - - let mut mode = None; - let mut argument = None; - - let mut parts = rest_of_line.split_whitespace(); - let mut end = last_mention_start + 1; - if let Some(mode_text) = parts.next() { - end += mode_text.len(); - - if let Some(parsed_mode) = ContextPickerMode::try_from(mode_text).ok() { - mode = Some(parsed_mode); - } else { - argument = Some(mode_text.to_string()); - } - match rest_of_line[mode_text.len()..].find(|c: char| !c.is_whitespace()) { - Some(whitespace_count) => { - if let Some(argument_text) = parts.next() { - argument = Some(argument_text.to_string()); - end += whitespace_count + argument_text.len(); - } - } - None => { - // Rest of line is entirely whitespace - end += rest_of_line.len() - mode_text.len(); - } - } - } - - Some(Self { - source_range: last_mention_start + offset_to_line..end + offset_to_line, - mode, - argument, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use editor::{AnchorRangeExt, MultiBufferOffset}; - 
use gpui::{EventEmitter, FocusHandle, Focusable, TestAppContext, VisualTestContext}; - use project::{Project, ProjectPath}; - use serde_json::json; - use settings::SettingsStore; - use std::{ops::Deref, rc::Rc}; - use util::{path, rel_path::rel_path}; - use workspace::{AppState, Item}; - - #[test] - fn test_mention_completion_parse() { - assert_eq!(MentionCompletion::try_parse("Lorem Ipsum", 0), None); - - assert_eq!( - MentionCompletion::try_parse("Lorem @", 0), - Some(MentionCompletion { - source_range: 6..7, - mode: None, - argument: None, - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @file", 0), - Some(MentionCompletion { - source_range: 6..11, - mode: Some(ContextPickerMode::File), - argument: None, - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @file ", 0), - Some(MentionCompletion { - source_range: 6..12, - mode: Some(ContextPickerMode::File), - argument: None, - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @file main.rs", 0), - Some(MentionCompletion { - source_range: 6..19, - mode: Some(ContextPickerMode::File), - argument: Some("main.rs".to_string()), - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @file main.rs ", 0), - Some(MentionCompletion { - source_range: 6..19, - mode: Some(ContextPickerMode::File), - argument: Some("main.rs".to_string()), - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @file main.rs Ipsum", 0), - Some(MentionCompletion { - source_range: 6..19, - mode: Some(ContextPickerMode::File), - argument: Some("main.rs".to_string()), - }) - ); - - assert_eq!( - MentionCompletion::try_parse("Lorem @main", 0), - Some(MentionCompletion { - source_range: 6..11, - mode: None, - argument: Some("main".to_string()), - }) - ); - - assert_eq!(MentionCompletion::try_parse("test@", 0), None); - } - - struct AtMentionEditor(Entity); - - impl Item for AtMentionEditor { - type Event = (); - - fn include_in_nav_history() -> bool { - false - } - - fn tab_content_text(&self, _detail: usize, _cx: &App) -> SharedString { - "Test".into() - } - } - - impl EventEmitter<()> for AtMentionEditor {} - - impl Focusable for AtMentionEditor { - fn focus_handle(&self, cx: &App) -> FocusHandle { - self.0.read(cx).focus_handle(cx) - } - } - - impl Render for AtMentionEditor { - fn render(&mut self, _window: &mut Window, _cx: &mut Context) -> impl IntoElement { - self.0.clone().into_any_element() - } - } - - #[gpui::test] - async fn test_context_completion_provider(cx: &mut TestAppContext) { - init_test(cx); - - let app_state = cx.update(AppState::test); - - cx.update(|cx| { - editor::init(cx); - workspace::init(app_state.clone(), cx); - }); - - app_state - .fs - .as_fake() - .insert_tree( - path!("/dir"), - json!({ - "editor": "", - "a": { - "one.txt": "", - "two.txt": "", - "three.txt": "", - "four.txt": "" - }, - "b": { - "five.txt": "", - "six.txt": "", - "seven.txt": "", - "eight.txt": "", - } - }), - ) - .await; - - let project = Project::test(app_state.fs.clone(), [path!("/dir").as_ref()], cx).await; - let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); - let workspace = window.root(cx).unwrap(); - - let worktree = project.update(cx, |project, cx| { - let mut worktrees = project.worktrees(cx).collect::>(); - assert_eq!(worktrees.len(), 1); - worktrees.pop().unwrap() - }); - let worktree_id = worktree.read_with(cx, |worktree, _| worktree.id()); - - let mut cx = VisualTestContext::from_window(*window.deref(), cx); - - let paths = vec![ - rel_path("a/one.txt"), - 
rel_path("a/two.txt"), - rel_path("a/three.txt"), - rel_path("a/four.txt"), - rel_path("b/five.txt"), - rel_path("b/six.txt"), - rel_path("b/seven.txt"), - rel_path("b/eight.txt"), - ]; - - let slash = PathStyle::local().separator(); - - let mut opened_editors = Vec::new(); - for path in paths { - let buffer = workspace - .update_in(&mut cx, |workspace, window, cx| { - workspace.open_path( - ProjectPath { - worktree_id, - path: path.into(), - }, - None, - false, - window, - cx, - ) - }) - .await - .unwrap(); - opened_editors.push(buffer); - } - - let editor = workspace.update_in(&mut cx, |workspace, window, cx| { - let editor = cx.new(|cx| { - Editor::new( - editor::EditorMode::full(), - multi_buffer::MultiBuffer::build_simple("", cx), - None, - window, - cx, - ) - }); - workspace.active_pane().update(cx, |pane, cx| { - pane.add_item( - Box::new(cx.new(|_| AtMentionEditor(editor.clone()))), - true, - true, - None, - window, - cx, - ); - }); - editor - }); - - let context_store = cx.new(|_| ContextStore::new(project.downgrade())); - - let editor_entity = editor.downgrade(); - editor.update_in(&mut cx, |editor, window, cx| { - let last_opened_buffer = opened_editors.last().and_then(|editor| { - editor - .downcast::()? - .read(cx) - .buffer() - .read(cx) - .as_singleton() - .as_ref() - .map(Entity::downgrade) - }); - window.focus(&editor.focus_handle(cx)); - editor.set_completion_provider(Some(Rc::new(ContextPickerCompletionProvider::new( - workspace.downgrade(), - context_store.downgrade(), - None, - None, - editor_entity, - last_opened_buffer, - )))); - }); - - cx.simulate_input("Lorem "); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "Lorem "); - assert!(!editor.has_visible_completions_menu()); - }); - - cx.simulate_input("@"); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "Lorem @"); - assert!(editor.has_visible_completions_menu()); - assert_eq!( - current_completion_labels(editor), - &[ - format!("seven.txt b{slash}"), - format!("six.txt b{slash}"), - format!("five.txt b{slash}"), - format!("four.txt a{slash}"), - "Files & Directories".into(), - "Symbols".into(), - "Fetch".into() - ] - ); - }); - - // Select and confirm "File" - editor.update_in(&mut cx, |editor, window, cx| { - assert!(editor.has_visible_completions_menu()); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - cx.run_until_parked(); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "Lorem @file "); - assert!(editor.has_visible_completions_menu()); - }); - - cx.simulate_input("one"); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "Lorem @file one"); - assert!(editor.has_visible_completions_menu()); - assert_eq!( - current_completion_labels(editor), - vec![format!("one.txt a{slash}")] - ); - }); - - editor.update_in(&mut cx, |editor, window, cx| { - assert!(editor.has_visible_completions_menu()); - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) ") - ); - assert!(!editor.has_visible_completions_menu()); - assert_eq!( 
- fold_ranges(editor, cx), - vec![Point::new(0, 6)..Point::new(0, 33)] - ); - }); - - cx.simulate_input(" "); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) ") - ); - assert!(!editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![Point::new(0, 6)..Point::new(0, 33)] - ); - }); - - cx.simulate_input("Ipsum "); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) Ipsum "), - ); - assert!(!editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![Point::new(0, 6)..Point::new(0, 33)] - ); - }); - - cx.simulate_input("@file "); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) Ipsum @file "), - ); - assert!(editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![Point::new(0, 6)..Point::new(0, 33)] - ); - }); - - editor.update_in(&mut cx, |editor, window, cx| { - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - cx.run_until_parked(); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) Ipsum [@seven.txt](@file:b{slash}seven.txt) ") - ); - assert!(!editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![ - Point::new(0, 6)..Point::new(0, 33), - Point::new(0, 41)..Point::new(0, 72) - ] - ); - }); - - cx.simulate_input("\n@"); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) Ipsum [@seven.txt](@file:b{slash}seven.txt) \n@") - ); - assert!(editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![ - Point::new(0, 6)..Point::new(0, 33), - Point::new(0, 41)..Point::new(0, 72) - ] - ); - }); - - editor.update_in(&mut cx, |editor, window, cx| { - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - cx.run_until_parked(); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("Lorem [@one.txt](@file:a{slash}one.txt) Ipsum [@seven.txt](@file:b{slash}seven.txt) \n[@six.txt](@file:b{slash}six.txt) ") - ); - assert!(!editor.has_visible_completions_menu()); - assert_eq!( - fold_ranges(editor, cx), - vec![ - Point::new(0, 6)..Point::new(0, 33), - Point::new(0, 41)..Point::new(0, 72), - Point::new(1, 0)..Point::new(1, 27) - ] - ); - }); - } - - #[gpui::test] - async fn test_context_completion_provider_multiple_worktrees(cx: &mut TestAppContext) { - init_test(cx); - - let app_state = cx.update(AppState::test); - - cx.update(|cx| { - editor::init(cx); - workspace::init(app_state.clone(), cx); - }); - - app_state - .fs - .as_fake() - .insert_tree( - path!("/project1"), - json!({ - "a": { - "one.txt": "", - "two.txt": "", - } - }), - ) - .await; - - app_state - .fs - .as_fake() - .insert_tree( - path!("/project2"), - json!({ - "b": { - "three.txt": "", - "four.txt": "", - } - }), - ) - .await; - - let project = Project::test( - app_state.fs.clone(), - [path!("/project1").as_ref(), path!("/project2").as_ref()], - cx, - ) - .await; - let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); - let workspace = window.root(cx).unwrap(); - - let worktrees = project.update(cx, |project, cx| { - let worktrees = 
project.worktrees(cx).collect::>(); - assert_eq!(worktrees.len(), 2); - worktrees - }); - - let mut cx = VisualTestContext::from_window(*window.deref(), cx); - let slash = PathStyle::local().separator(); - - for (worktree_idx, paths) in [ - vec![rel_path("a/one.txt"), rel_path("a/two.txt")], - vec![rel_path("b/three.txt"), rel_path("b/four.txt")], - ] - .iter() - .enumerate() - { - let worktree_id = worktrees[worktree_idx].read_with(&cx, |wt, _| wt.id()); - for path in paths { - workspace - .update_in(&mut cx, |workspace, window, cx| { - workspace.open_path( - ProjectPath { - worktree_id, - path: (*path).into(), - }, - None, - false, - window, - cx, - ) - }) - .await - .unwrap(); - } - } - - let editor = workspace.update_in(&mut cx, |workspace, window, cx| { - let editor = cx.new(|cx| { - Editor::new( - editor::EditorMode::full(), - multi_buffer::MultiBuffer::build_simple("", cx), - None, - window, - cx, - ) - }); - workspace.active_pane().update(cx, |pane, cx| { - pane.add_item( - Box::new(cx.new(|_| AtMentionEditor(editor.clone()))), - true, - true, - None, - window, - cx, - ); - }); - editor - }); - - let context_store = cx.new(|_| ContextStore::new(project.downgrade())); - - let editor_entity = editor.downgrade(); - editor.update_in(&mut cx, |editor, window, cx| { - window.focus(&editor.focus_handle(cx)); - editor.set_completion_provider(Some(Rc::new(ContextPickerCompletionProvider::new( - workspace.downgrade(), - context_store.downgrade(), - None, - None, - editor_entity, - None, - )))); - }); - - cx.simulate_input("@"); - - // With multiple worktrees, we should see the project name as prefix - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "@"); - assert!(editor.has_visible_completions_menu()); - let labels = current_completion_labels(editor); - - assert!( - labels.contains(&format!("four.txt project2{slash}b{slash}")), - "Expected 'four.txt project2{slash}b{slash}' in labels: {:?}", - labels - ); - assert!( - labels.contains(&format!("three.txt project2{slash}b{slash}")), - "Expected 'three.txt project2{slash}b{slash}' in labels: {:?}", - labels - ); - }); - - editor.update_in(&mut cx, |editor, window, cx| { - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.context_menu_next(&editor::actions::ContextMenuNext, window, cx); - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - cx.run_until_parked(); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "@file "); - assert!(editor.has_visible_completions_menu()); - }); - - cx.simulate_input("one"); - - editor.update(&mut cx, |editor, cx| { - assert_eq!(editor.text(cx), "@file one"); - assert!(editor.has_visible_completions_menu()); - assert_eq!( - current_completion_labels(editor), - vec![format!("one.txt project1{slash}a{slash}")] - ); - }); - - editor.update_in(&mut cx, |editor, window, cx| { - editor.confirm_completion(&editor::actions::ConfirmCompletion::default(), window, cx); - }); - - editor.update(&mut cx, |editor, cx| { - assert_eq!( - editor.text(cx), - format!("[@one.txt](@file:project1{slash}a{slash}one.txt) ") - ); - assert!(!editor.has_visible_completions_menu()); - }); - } - - fn fold_ranges(editor: &Editor, cx: &mut App) -> Vec> { - let snapshot = editor.buffer().read(cx).snapshot(cx); - editor.display_map.update(cx, |display_map, cx| { - display_map - 
.snapshot(cx) - .folds_in_range(MultiBufferOffset(0)..snapshot.len()) - .map(|fold| fold.range.to_point(&snapshot)) - .collect() - }) - } - - fn current_completion_labels(editor: &Editor) -> Vec { - let completions = editor.current_completions().expect("Missing completions"); - completions - .into_iter() - .map(|completion| completion.label.text) - .collect::>() - } - - pub(crate) fn init_test(cx: &mut TestAppContext) { - cx.update(|cx| { - let store = SettingsStore::test(cx); - cx.set_global(store); - theme::init(theme::LoadThemes::JustBase, cx); - }); - } -} diff --git a/crates/agent_ui/src/inline_prompt_editor.rs b/crates/agent_ui/src/inline_prompt_editor.rs index 7cd7a9d58a71effa18612234f9f718f794c99c06..b9e8d9ada230ba497ffcd4e577d3312dd440e604 100644 --- a/crates/agent_ui/src/inline_prompt_editor.rs +++ b/crates/agent_ui/src/inline_prompt_editor.rs @@ -1,8 +1,8 @@ use agent::HistoryStore; -use collections::VecDeque; +use collections::{HashMap, VecDeque}; use editor::actions::Paste; use editor::code_context_menus::CodeContextMenu; -use editor::display_map::EditorMargins; +use editor::display_map::{CreaseId, EditorMargins}; use editor::{AnchorRangeExt as _, MultiBufferOffset, ToOffset as _}; use editor::{ ContextMenuOptions, Editor, EditorElement, EditorEvent, EditorMode, EditorStyle, MultiBuffer, @@ -226,9 +226,10 @@ impl PromptEditor { } fn assign_completion_provider(&mut self, cx: &mut Context) { - self.editor.update(cx, |editor, _cx| { + self.editor.update(cx, |editor, cx| { editor.set_completion_provider(Some(Rc::new(PromptCompletionProvider::new( PromptEditorCompletionProviderDelegate, + cx.weak_entity(), self.mention_set.clone(), self.history_store.clone(), self.prompt_store.clone(), @@ -253,18 +254,35 @@ impl PromptEditor { extract_message_creases(editor, &self.mention_set, window, cx) }); let focus = self.editor.focus_handle(cx).contains_focused(window, cx); + let mut creases = vec![]; self.editor = cx.new(|cx| { let mut editor = Editor::auto_height(1, Self::MAX_LINES as usize, window, cx); editor.set_soft_wrap_mode(language::language_settings::SoftWrap::EditorWidth, cx); editor.set_placeholder_text("Add a prompt…", window, cx); editor.set_text(prompt, window, cx); - insert_message_creases(&mut editor, &existing_creases, window, cx); + creases = insert_message_creases(&mut editor, &existing_creases, window, cx); if focus { window.focus(&editor.focus_handle(cx)); } editor }); + + self.mention_set.update(cx, |mention_set, _cx| { + debug_assert_eq!( + creases.len(), + mention_set.creases().len(), + "Missing creases" + ); + + let mentions = mention_set + .clear() + .zip(creases) + .map(|((_, value), id)| (id, value)) + .collect::>(); + mention_set.set_mentions(mentions); + }); + self.assign_completion_provider(cx); self.subscribe_to_editor(window, cx); } @@ -304,13 +322,18 @@ impl PromptEditor { fn handle_prompt_editor_events( &mut self, - _: &Entity, + editor: &Entity, event: &EditorEvent, window: &mut Window, cx: &mut Context, ) { match event { EditorEvent::Edited { .. 
} => { + let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); + + self.mention_set + .update(cx, |mention_set, _cx| mention_set.remove_invalid(&snapshot)); + if let Some(workspace) = window.root::().flatten() { workspace.update(cx, |workspace, cx| { let is_via_ssh = workspace.project().read(cx).is_via_remote_server(); @@ -321,7 +344,7 @@ impl PromptEditor { .log_edit_event("inline assist", is_via_ssh); }); } - let prompt = self.editor.read(cx).text(cx); + let prompt = snapshot.text(); if self .prompt_history_ix .is_none_or(|ix| self.prompt_history[ix] != prompt) @@ -848,16 +871,8 @@ impl PromptEditor { editor }); - let mention_set = cx.new(|cx| { - MentionSet::new( - prompt_editor.clone(), - project, - history_store.clone(), - prompt_store.clone(), - window, - cx, - ) - }); + let mention_set = + cx.new(|_cx| MentionSet::new(project, history_store.clone(), prompt_store.clone())); let model_selector_menu_handle = PopoverMenuHandle::default(); @@ -999,16 +1014,8 @@ impl PromptEditor { editor }); - let mention_set = cx.new(|cx| { - MentionSet::new( - prompt_editor.clone(), - project, - history_store.clone(), - prompt_store.clone(), - window, - cx, - ) - }); + let mention_set = + cx.new(|_cx| MentionSet::new(project, history_store.clone(), prompt_store.clone())); let model_selector_menu_handle = PopoverMenuHandle::default(); @@ -1203,7 +1210,7 @@ fn insert_message_creases( message_creases: &[MessageCrease], window: &mut Window, cx: &mut Context<'_, Editor>, -) { +) -> Vec { let buffer_snapshot = editor.buffer().read(cx).snapshot(cx); let creases = message_creases .iter() @@ -1218,6 +1225,7 @@ fn insert_message_creases( ) }) .collect::>(); - editor.insert_creases(creases.clone(), cx); + let ids = editor.insert_creases(creases.clone(), cx); editor.fold_creases(creases, false, window, cx); + ids } diff --git a/crates/agent_ui/src/mention_set.rs b/crates/agent_ui/src/mention_set.rs index 156e62949ae425532dcb897754928011ed2bd8a6..eee28bbfb2d36ce8f41e64cafd2e8f24b504f97f 100644 --- a/crates/agent_ui/src/mention_set.rs +++ b/crates/agent_ui/src/mention_set.rs @@ -6,14 +6,14 @@ use anyhow::{Context as _, Result, anyhow}; use assistant_slash_commands::codeblock_fence_for_path; use collections::{HashMap, HashSet}; use editor::{ - Anchor, Editor, EditorEvent, EditorSnapshot, ExcerptId, FoldPlaceholder, ToOffset, + Anchor, Editor, EditorSnapshot, ExcerptId, FoldPlaceholder, ToOffset, display_map::{Crease, CreaseId, CreaseMetadata, FoldId}, scroll::Autoscroll, }; use futures::{AsyncReadExt as _, FutureExt as _, future::Shared}; use gpui::{ Animation, AnimationExt as _, AppContext, ClipboardEntry, Context, Empty, Entity, EntityId, - Image, ImageFormat, Img, SharedString, Subscription, Task, WeakEntity, pulsating_between, + Image, ImageFormat, Img, SharedString, Task, WeakEntity, pulsating_between, }; use http_client::{AsyncBody, HttpClientWithUrl}; use itertools::Either; @@ -58,40 +58,23 @@ pub struct MentionImage { } pub struct MentionSet { - editor: Entity, project: WeakEntity, history_store: Entity, prompt_store: Option>, mentions: HashMap, - _editor_subscription: Subscription, } impl MentionSet { pub fn new( - editor: Entity, project: WeakEntity, history_store: Entity, prompt_store: Option>, - window: &mut Window, - cx: &mut Context, ) -> Self { - let editor_subscription = - cx.subscribe_in(&editor, window, move |this, editor, event, window, cx| { - if let EditorEvent::Edited { .. 
} = event - && !editor.read(cx).read_only(cx) - { - let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); - this.remove_invalid(snapshot); - } - }); - Self { - editor, project, history_store, prompt_store, mentions: HashMap::default(), - _editor_subscription: editor_subscription, } } @@ -122,9 +105,9 @@ impl MentionSet { }) } - fn remove_invalid(&mut self, snapshot: EditorSnapshot) { + pub fn remove_invalid(&mut self, snapshot: &EditorSnapshot) { for (crease_id, crease) in snapshot.crease_snapshot.creases() { - if !crease.range().start.is_valid(&snapshot.buffer_snapshot()) { + if !crease.range().start.is_valid(snapshot.buffer_snapshot()) { self.mentions.remove(&crease_id); } } @@ -146,7 +129,11 @@ impl MentionSet { self.mentions.values().map(|(uri, _)| uri.clone()).collect() } - pub fn remove_all(&mut self) -> impl Iterator { + pub fn set_mentions(&mut self, mentions: HashMap) { + self.mentions = mentions; + } + + pub fn clear(&mut self) -> impl Iterator { self.mentions.drain() } @@ -157,6 +144,7 @@ impl MentionSet { content_len: usize, mention_uri: MentionUri, supports_images: bool, + editor: Entity, workspace: &Entity, window: &mut Window, cx: &mut Context, @@ -165,9 +153,7 @@ impl MentionSet { return Task::ready(()); }; - let snapshot = self - .editor - .update(cx, |editor, cx| editor.snapshot(window, cx)); + let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx)); let Some(start_anchor) = snapshot.buffer_snapshot().as_singleton_anchor(start) else { return Task::ready(()); }; @@ -206,7 +192,7 @@ impl MentionSet { mention_uri.name().into(), IconName::Image.path().into(), Some(image), - self.editor.clone(), + editor.clone(), window, cx, ) @@ -218,7 +204,7 @@ impl MentionSet { crease_text, mention_uri.icon_path(cx), None, - self.editor.clone(), + editor.clone(), window, cx, ) @@ -265,7 +251,7 @@ impl MentionSet { drop(tx); if result.is_none() { this.update(cx, |this, cx| { - this.editor.update(cx, |editor, cx| { + editor.update(cx, |editor, cx| { // Remove mention editor.edit([(start_anchor..end_anchor, "")], cx); }); @@ -405,6 +391,7 @@ impl MentionSet { &mut self, source_range: Range, selections: Vec<(Entity, Range, Range)>, + editor: Entity, window: &mut Window, cx: &mut Context, ) { @@ -412,7 +399,7 @@ impl MentionSet { return; }; - let snapshot = self.editor.read(cx).buffer().read(cx).snapshot(cx); + let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx); let Some(start) = snapshot.as_singleton_anchor(source_range.start) else { return; }; @@ -443,10 +430,10 @@ impl MentionSet { selection_name(abs_path.as_deref(), &line_range).into(), uri.icon_path(cx), range, - self.editor.downgrade(), + editor.downgrade(), ); - let crease_id = self.editor.update(cx, |editor, cx| { + let crease_id = editor.update(cx, |editor, cx| { let crease_ids = editor.insert_creases(vec![crease.clone()], cx); editor.fold_creases(vec![crease], false, window, cx); crease_ids.first().copied().unwrap() @@ -471,7 +458,6 @@ impl MentionSet { // expected. We're leveraging `cx.on_next_frame` to wait 2 frames and // ensure that the layout has been recalculated so that the autoscroll // request actually shows the cursor's new position. 
- let editor = self.editor.clone(); cx.on_next_frame(window, move |_, window, cx| { cx.on_next_frame(window, move |_, _, cx| { editor.update(cx, |editor, cx| { From 34a2e1d56b54513098ddd95edcb29f6d403145f0 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Sun, 23 Nov 2025 13:52:50 -0800 Subject: [PATCH 0315/1030] settings_ui: Don't show sh as default shell on windows (#43276) Closes #ISSUE Release Notes: - Fixed an issue in the settings UI where changing the terminal shell would set the default shell to `sh` on Windows --- crates/settings_ui/src/page_data.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index edd488f419eeee0a7074a95697d9615317891a4d..7cc5705ced9bc2267834044eff5e5def78182bc4 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -4577,6 +4577,11 @@ pub(crate) fn settings_data(cx: &App) -> Vec { .project .shell .get_or_insert_with(|| settings::Shell::default()); + let default_shell = if cfg!(target_os = "windows") { + "powershell.exe" + } else { + "sh" + }; *settings_value = match value { settings::ShellDiscriminants::System => { settings::Shell::System @@ -4585,7 +4590,7 @@ pub(crate) fn settings_data(cx: &App) -> Vec { let program = match settings_value { settings::Shell::Program(p) => p.clone(), settings::Shell::WithArguments { program, .. } => program.clone(), - _ => String::from("sh"), + _ => String::from(default_shell), }; settings::Shell::Program(program) }, @@ -4595,7 +4600,7 @@ pub(crate) fn settings_data(cx: &App) -> Vec { settings::Shell::WithArguments { program, args, title_override } => { (program.clone(), args.clone(), title_override.clone()) }, - _ => (String::from("sh"), vec![], None), + _ => (String::from(default_shell), vec![], None), }; settings::Shell::WithArguments { program, From dbcfb48198d80e6b6315dd752466279d2d8ec616 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Sun, 23 Nov 2025 19:40:33 -0300 Subject: [PATCH 0316/1030] Add mouse-based affordance to open a recent project in new window (#43373) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/zed-industries/zed/issues/31796 Screenshot 2025-11-23 at 7  39 2@2x Release Notes: - N/A --- crates/recent_projects/src/recent_projects.rs | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/crates/recent_projects/src/recent_projects.rs b/crates/recent_projects/src/recent_projects.rs index 8aab66533c59b7dae17cc7864d6a7e716d3ee948..280bf17a385db09c10c2844ac7126b3aac7adafb 100644 --- a/crates/recent_projects/src/recent_projects.rs +++ b/crates/recent_projects/src/recent_projects.rs @@ -544,6 +544,32 @@ impl PickerDelegate for RecentProjectsDelegate { paths, }; + let secondary_actions = h_flex() + .gap_px() + .child( + IconButton::new("open_new_window", IconName::ArrowUpRight) + .icon_size(IconSize::XSmall) + .tooltip(Tooltip::text("Open Project in New Window")) + .on_click(cx.listener(move |this, _event, window, cx| { + cx.stop_propagation(); + window.prevent_default(); + this.delegate.set_selected_index(ix, window, cx); + this.delegate.confirm(true, window, cx); + })), + ) + .child( + IconButton::new("delete", IconName::Close) + .icon_size(IconSize::Small) + .tooltip(Tooltip::text("Delete from Recent Projects")) + .on_click(cx.listener(move |this, _event, window, cx| { + cx.stop_propagation(); + window.prevent_default(); + + 
this.delegate.delete_recent_project(ix, window, cx) + })), + ) + .into_any_element(); + Some( ListItem::new(ix) .toggle_state(selected) @@ -577,24 +603,10 @@ impl PickerDelegate for RecentProjectsDelegate { }), ) .map(|el| { - let delete_button = div() - .child( - IconButton::new("delete", IconName::Close) - .icon_size(IconSize::Small) - .on_click(cx.listener(move |this, _event, window, cx| { - cx.stop_propagation(); - window.prevent_default(); - - this.delegate.delete_recent_project(ix, window, cx) - })) - .tooltip(Tooltip::text("Delete from Recent Projects...")), - ) - .into_any_element(); - if self.selected_index() == ix { - el.end_slot::(delete_button) + el.end_slot(secondary_actions) } else { - el.end_hover_slot::(delete_button) + el.end_hover_slot(secondary_actions) } }) .tooltip(move |_, cx| { From 07b6686411133a614f61547a5795741058d1e977 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 24 Nov 2025 00:11:46 -0300 Subject: [PATCH 0317/1030] docs: Improve edit prediction page (#43379) This PR improves the edit prediction page particularly by adding information about pricing and plans, which wasn't at all mentioned here before, _and_ by including a section with a keybinding example demonstrating how to always use just `tab` to always accept edit predictions. Release Notes: - N/A --- docs/src/ai/edit-prediction.md | 59 ++++++++++++++++++++++++++-------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/docs/src/ai/edit-prediction.md b/docs/src/ai/edit-prediction.md index fd073d558ea9f3a2027c5a705320f8554fef7c78..feef6d36d29eca4157254cc4c209f4a614a927de 100644 --- a/docs/src/ai/edit-prediction.md +++ b/docs/src/ai/edit-prediction.md @@ -1,19 +1,30 @@ # Edit Prediction -Edit Prediction is Zed's mechanism for predicting the code you want to write through AI. +Edit Prediction is Zed's LLM mechanism for predicting the code you want to write. Each keystroke sends a new request to the edit prediction provider, which returns individual or multi-line suggestions that can be quickly accepted by pressing `tab`. -The default provider is [Zeta, a proprietary open source and open dataset model](https://huggingface.co/zed-industries/zeta), which [requires being signed into Zed](../authentication.md#what-features-require-signing-in). -Alternatively, you can also use [other providers](#other-providers) like GitHub Copilot and Codestral. +The default provider is [Zeta, a proprietary open source and open dataset model](https://huggingface.co/zed-industries/zeta), but you can also use [other providers](#other-providers) like GitHub Copilot, Supermaven, and Codestral. ## Configuring Zeta -Zed's Edit Prediction was initially introduced via a banner on the title bar. -Clicking on it would take you to a modal with a button ("Enable Edit Prediction") that sets `zed` as your `edit_prediction_provider`. +To use Zeta, the only thing you need to do is [to sign in](../authentication.md#what-features-require-signing-in). +After doing that, you should already see predictions as you type on your files. -![Onboarding banner and modal](https://zed.dev/img/edit-prediction/docs.webp) +You can confirm that Zeta is properly configured either by verifying whether you have the following code in your `settings.json`: -But, if you haven't come across the banner, Zed's Edit Prediction is the default edit prediction provider and you should see it right away in your status bar. 
+```json [settings] +"features": { + "edit_prediction_provider": "zed" +}, +``` + +Or you can also look for a little Z icon in the right of your status bar at the bottom. + +### Pricing and Plans + +From just signing in, while in Zed's free plan, you get 2,000 Zeta-powered edit predictions per month. +But you can get _**unlimited edit predictions**_ by upgrading to [the Pro plan](../ai/plans-and-usage.md). +More information can be found in [Zed's pricing page](https://zed.dev/pricing). ### Switching Modes {#switching-modes} @@ -34,6 +45,8 @@ Or directly via the UI through the status bar menu: ![Edit Prediction status bar menu, with the modes toggle.](https://zed.dev/img/edit-prediction/status-bar-menu.webp) +> Note that edit prediction modes work with any prediction provider. + ### Conflict With Other `tab` Actions {#edit-predictions-conflict} By default, when `tab` would normally perform a different action, Zed requires a modifier key to accept predictions: @@ -47,8 +60,6 @@ On Linux, `alt-tab` is often used by the window manager for switching windows, s {#action editor::AcceptPartialEditPrediction} ({#kb editor::AcceptPartialEditPrediction}) can be used to accept the current edit prediction up to the next word boundary. -See the [Configuring GitHub Copilot](#github-copilot) and [Configuring Supermaven](#supermaven) sections below for configuration of other providers. Only text insertions at the current cursor are supported for these providers, whereas the Zeta model provides multiple predictions including deletions. - ## Configuring Edit Prediction Keybindings {#edit-predictions-keybinding} By default, `tab` is used to accept edit predictions. You can use another keybinding by inserting this in your keymap: @@ -63,7 +74,8 @@ By default, `tab` is used to accept edit predictions. You can use another keybin } ``` -When there's a [conflict with the `tab` key](#edit-predictions-conflict), Zed uses a different context to accept keybindings (`edit_prediction_conflict`). If you want to use a different one, you can insert this in your keymap: +When there's a [conflict with the `tab` key](#edit-predictions-conflict), Zed uses a different key context to accept keybindings (`edit_prediction_conflict`). +If you want to use a different one, you can insert this in your keymap: ```json [settings] { @@ -76,7 +88,8 @@ When there's a [conflict with the `tab` key](#edit-predictions-conflict), Zed us If your keybinding contains a modifier (`ctrl` in the example above), it will also be used to preview the edit prediction and temporarily hide the language server completion menu. -You can also bind this action to keybind without a modifier. In that case, Zed will use the default modifier (`alt`) to preview the edit prediction. +You can also bind this action to keybind without a modifier. +In that case, Zed will use the default modifier (`alt`) to preview the edit prediction. ```json [settings] { @@ -101,9 +114,26 @@ To maintain the use of the modifier key for accepting predictions when there is } ``` +### Keybinding Example: Always Use Tab + +If you want to use `tab` to always accept edit predictions, you can use the following keybinding: + +```json [keymap] +{ + "context": "Editor && edit_prediction_conflict && showing_completions", + "bindings": { + "tab": "editor::AcceptEditPrediction" + } +} +``` + +This will make `tab` work to accept edit predictions _even when_ you're also seeing language server completions. +That means that you need to rely on `enter` for accepting the latter. 
+ ### Keybinding Example: Always Use Alt-Tab -The keybinding example below causes `alt-tab` to always be used instead of sometimes using `tab`. You might want this in order to have just one keybinding to use for accepting edit predictions, since the behavior of `tab` varies based on context. +The keybinding example below causes `alt-tab` to always be used instead of sometimes using `tab`. +You might want this in order to have just one (alternative) keybinding to use for accepting edit predictions, since the behavior of `tab` varies based on context. ```json [keymap] { @@ -127,7 +157,7 @@ The keybinding example below causes `alt-tab` to always be used instead of somet }, ``` -If `"vim_mode": true` is set within `settings.json`, then additional bindings are needed after the above to return `tab` to its original behavior: +If you are using [Vim mode](../vim.md), then additional bindings are needed after the above to return `tab` to its original behavior: ```json [keymap] { @@ -146,7 +176,8 @@ If `"vim_mode": true` is set within `settings.json`, then additional bindings ar ### Keybinding Example: Displaying Tab and Alt-Tab on Linux -While `tab` and `alt-tab` are supported on Linux, `alt-l` is displayed instead. If your window manager does not reserve `alt-tab`, and you would prefer to use `tab` and `alt-tab`, include these bindings in `keymap.json`: +While `tab` and `alt-tab` are supported on Linux, `alt-l` is displayed instead. +If your window manager does not reserve `alt-tab`, and you would prefer to use `tab` and `alt-tab`, include these bindings in `keymap.json`: ```json [keymap] { From 06f8e355979b708999c28b871988e0f277e2a250 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 24 Nov 2025 00:12:04 -0300 Subject: [PATCH 0318/1030] agent_ui: Make thread markdown editable (#43377) This PR makes the thread markdown editable. This refers to the "open thread as markdown" feature, where you previously could only read. One benefit of this move is that it makes a bit more obvious that you can `cmd-s` to save the markdown, allowing you to store the content of a given thread. You could already do this before, but due to it being editable now, you see the tab with a dirty indicator, which communicates that better. Release Notes: - agent: Made the thread markdown editable. --- crates/agent_ui/src/acp/thread_view.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 780cff2e78fd4441fc451ddb3bc93b66a940c6c8..93b6f749cd9891cd25cfc659a02d6808a9cc4ccb 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -4775,7 +4775,7 @@ impl AcpThreadView { buffer.update(cx, |buffer, cx| { buffer.set_text(markdown, cx); buffer.set_language(Some(markdown_language), cx); - buffer.set_capability(language::Capability::ReadOnly, cx); + buffer.set_capability(language::Capability::ReadWrite, cx); })?; workspace.update_in(cx, |workspace, window, cx| { From 48e113a90e42e6277ac289ca74a76d8ea54dc2c9 Mon Sep 17 00:00:00 2001 From: Ulysse Buonomo Date: Mon, 24 Nov 2025 10:30:19 +0100 Subject: [PATCH 0319/1030] cli: Allow opening non-existent paths (#43250) Changes are made to `parse_path_with_position`: we try to get the canonical, existing parts of a path, then append the non-existing parts. 
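As an illustration of that approach (not the patch itself — the authoritative change is to `parse_path_with_position` in the diff below, and the helper name here is invented for the example), a minimal standalone sketch looks like:

```rust
use std::env;
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};

/// Canonicalize the longest existing ancestor of `path`, then re-append the
/// trailing components that do not exist yet, yielding an absolute path.
fn canonicalize_allowing_missing(path: &Path) -> io::Result<PathBuf> {
    let mut existing = path.to_path_buf();
    let mut missing: Vec<OsString> = Vec::new();
    loop {
        // Once we reach a prefix that exists on disk, canonicalize it and
        // stack the missing components back on top of it.
        if let Ok(root) = existing.canonicalize() {
            return Ok(missing.iter().rev().fold(root, |acc, part| acc.join(part)));
        }
        match existing.file_name().map(|name| name.to_owned()) {
            Some(name) => {
                missing.push(name);
                existing.pop();
            }
            // A relative path with no existing prefix is resolved against the
            // current working directory instead.
            None => {
                let cwd = env::current_dir()?;
                return Ok(missing.iter().rev().fold(cwd, |acc, part| acc.join(part)));
            }
        }
    }
}
```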
Closes #4441 Release Notes: - Added the possibility to open a non-existing path using `zed` CLI ``` zed path/to/non/existing/file.txt ``` Co-authored-by: Syed Sadiq Ali --- Cargo.lock | 1 + crates/cli/Cargo.toml | 4 + crates/cli/src/main.rs | 187 ++++++++++++++++++++++++++++++++++++----- crates/zlog/README.md | 15 ++++ script/debug-cli | 2 +- 5 files changed, 185 insertions(+), 24 deletions(-) create mode 100644 crates/zlog/README.md diff --git a/Cargo.lock b/Cargo.lock index e41e973041bfe99dec6258e910e1108a842c0748..3eee0204971ef09650a7b54988542a1f5d4c59ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3086,6 +3086,7 @@ dependencies = [ "rayon", "release_channel", "serde", + "serde_json", "tempfile", "util", "windows 0.61.3", diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml index 54f7ec4f5315a6529579353f3aa489925534d4ba..63e99a3ed25fad919e1a86a3a1917e3617ac2737 100644 --- a/crates/cli/Cargo.toml +++ b/crates/cli/Cargo.toml @@ -34,6 +34,10 @@ util.workspace = true tempfile.workspace = true rayon.workspace = true +[dev-dependencies] +serde_json.workspace = true +util = { workspace = true, features = ["test-support"] } + [target.'cfg(any(target_os = "linux", target_os = "freebsd"))'.dependencies] exec.workspace = true fork.workspace = true diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index 335e75ac4f5e43e63159fb26018849d8e0a22ced..7dd8a3253c9a0c8440d9342e5c0b3fd19e7f9828 100644 --- a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -129,32 +129,173 @@ struct Args { askpass: Option, } +/// Parses a path containing a position (e.g. `path:line:column`) +/// and returns its canonicalized string representation. +/// +/// If a part of path doesn't exist, it will canonicalize the +/// existing part and append the non-existing part. +/// +/// This method must return an absolute path, as many zed +/// crates assume absolute paths. fn parse_path_with_position(argument_str: &str) -> anyhow::Result { - let canonicalized = match Path::new(argument_str).canonicalize() { - Ok(existing_path) => PathWithPosition::from_path(existing_path), - Err(_) => { - let path = PathWithPosition::parse_str(argument_str); + match Path::new(argument_str).canonicalize() { + Ok(existing_path) => Ok(PathWithPosition::from_path(existing_path)), + Err(_) => PathWithPosition::parse_str(argument_str).map_path(|mut path| { let curdir = env::current_dir().context("retrieving current directory")?; - path.map_path(|path| match fs::canonicalize(&path) { - Ok(path) => Ok(path), - Err(e) => { - if let Some(mut parent) = path.parent() { - if parent == Path::new("") { - parent = &curdir - } - match fs::canonicalize(parent) { - Ok(parent) => Ok(parent.join(path.file_name().unwrap())), - Err(_) => Err(e), - } - } else { - Err(e) - } + let mut children = Vec::new(); + let root; + loop { + // canonicalize handles './', and '/'. + if let Ok(canonicalized) = fs::canonicalize(&path) { + root = canonicalized; + break; } - }) - } - .with_context(|| format!("parsing as path with position {argument_str}"))?, - }; - Ok(canonicalized.to_string(|path| path.to_string_lossy().into_owned())) + // The comparison to `curdir` is just a shortcut + // since we know it is canonical. The other one + // is if `argument_str` is a string that starts + // with a name (e.g. "foo/bar"). + if path == curdir || path == Path::new("") { + root = curdir; + break; + } + children.push( + path.file_name() + .with_context(|| format!("parsing as path with position {argument_str}"))? 
+ .to_owned(), + ); + if !path.pop() { + unreachable!("parsing as path with position {argument_str}"); + } + } + Ok(children.iter().rev().fold(root, |mut path, child| { + path.push(child); + path + })) + }), + } + .map(|path_with_pos| path_with_pos.to_string(|path| path.to_string_lossy().into_owned())) +} + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + use util::path; + use util::paths::SanitizedPath; + use util::test::TempTree; + + macro_rules! assert_path_eq { + ($left:expr, $right:expr) => { + assert_eq!( + SanitizedPath::new(Path::new(&$left)), + SanitizedPath::new(Path::new(&$right)) + ) + }; + } + + fn cwd() -> PathBuf { + env::current_dir().unwrap() + } + + static CWD_LOCK: Mutex<()> = Mutex::new(()); + + fn with_cwd(path: &Path, f: impl FnOnce() -> anyhow::Result) -> anyhow::Result { + let _lock = CWD_LOCK.lock(); + let old_cwd = cwd(); + env::set_current_dir(path)?; + let result = f(); + env::set_current_dir(old_cwd)?; + result + } + + #[test] + fn test_parse_non_existing_path() { + // Absolute path + let result = parse_path_with_position(path!("/non/existing/path.txt")).unwrap(); + assert_path_eq!(result, path!("/non/existing/path.txt")); + + // Absolute path in cwd + let path = cwd().join(path!("non/existing/path.txt")); + let expected = path.to_string_lossy().to_string(); + let result = parse_path_with_position(&expected).unwrap(); + assert_path_eq!(result, expected); + + // Relative path + let result = parse_path_with_position(path!("non/existing/path.txt")).unwrap(); + assert_path_eq!(result, expected) + } + + #[test] + fn test_parse_existing_path() { + let temp_tree = TempTree::new(json!({ + "file.txt": "", + })); + let file_path = temp_tree.path().join("file.txt"); + let expected = file_path.to_string_lossy().to_string(); + + // Absolute path + let result = parse_path_with_position(file_path.to_str().unwrap()).unwrap(); + assert_path_eq!(result, expected); + + // Relative path + let result = with_cwd(temp_tree.path(), || parse_path_with_position("file.txt")).unwrap(); + assert_path_eq!(result, expected); + } + + // NOTE: + // While POSIX symbolic links are somewhat supported on Windows, they are an opt in by the user, and thus + // we assume that they are not supported out of the box. 
+ #[cfg(not(windows))] + #[test] + fn test_parse_symlink_file() { + let temp_tree = TempTree::new(json!({ + "target.txt": "", + })); + let target_path = temp_tree.path().join("target.txt"); + let symlink_path = temp_tree.path().join("symlink.txt"); + std::os::unix::fs::symlink(&target_path, &symlink_path).unwrap(); + + // Absolute path + let result = parse_path_with_position(symlink_path.to_str().unwrap()).unwrap(); + assert_eq!(result, target_path.to_string_lossy()); + + // Relative path + let result = + with_cwd(temp_tree.path(), || parse_path_with_position("symlink.txt")).unwrap(); + assert_eq!(result, target_path.to_string_lossy()); + } + + #[cfg(not(windows))] + #[test] + fn test_parse_symlink_dir() { + let temp_tree = TempTree::new(json!({ + "some": { + "dir": { // symlink target + "ec": { + "tory": { + "file.txt": "", + }}}}})); + + let target_file_path = temp_tree.path().join("some/dir/ec/tory/file.txt"); + let expected = target_file_path.to_string_lossy(); + + let dir_path = temp_tree.path().join("some/dir"); + let symlink_path = temp_tree.path().join("symlink"); + std::os::unix::fs::symlink(&dir_path, &symlink_path).unwrap(); + + // Absolute path + let result = + parse_path_with_position(symlink_path.join("ec/tory/file.txt").to_str().unwrap()) + .unwrap(); + assert_eq!(result, expected); + + // Relative path + let result = with_cwd(temp_tree.path(), || { + parse_path_with_position("symlink/ec/tory/file.txt") + }) + .unwrap(); + assert_eq!(result, expected); + } } fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { diff --git a/crates/zlog/README.md b/crates/zlog/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6d0fef147cb0fb300e5a4cfd3936a97d0ee111fc --- /dev/null +++ b/crates/zlog/README.md @@ -0,0 +1,15 @@ +# Zlog + +Use the `ZED_LOG` environment variable to control logging output for Zed +applications and libraries. The variable accepts a comma-separated list of +directives that specify logging levels for different modules (crates). The +general format is for instance: + +``` +ZED_LOG=info,project=debug,agent=off +``` + +- Levels can be one of: `off`/`none`, `error`, `warn`, `info`, `debug`, or + `trace`. +- You don't need to specify the global level, default is `trace` in the crate + and `info` set by `RUST_LOG` in Zed. diff --git a/script/debug-cli b/script/debug-cli index 1a40e703381441e87ab621837f4a61fa4741a6ce..65017cd4562adb00cf4f48b10edcdf2c92038b4c 100755 --- a/script/debug-cli +++ b/script/debug-cli @@ -1,3 +1,3 @@ #!/usr/bin/env bash -cargo build; cargo run -p cli -- --foreground --zed=target/debug/zed "$@" +cargo build -p zed && cargo run -p cli -- --foreground --zed=${CARGO_TARGET_DIR:-target}/debug/zed "$@" From 99277a427f3ac18d48a56fc3055dd2999f09b5fd Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 24 Nov 2025 10:33:03 +0100 Subject: [PATCH 0320/1030] miniprofiler_ui: Copy path to clipboard on click (#43280) Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/miniprofiler_ui/src/miniprofiler_ui.rs | 21 ++++++++++++------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs index b3dab02058651177fbcbc53453d8f451dcdcf8a3..5fb80b6307ba3b93b3a9c5def7b8da620fdd738c 100644 --- a/crates/miniprofiler_ui/src/miniprofiler_ui.rs +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -5,16 +5,16 @@ use std::{ }; use gpui::{ - App, AppContext, Context, Entity, Hsla, InteractiveElement, IntoElement, ParentElement, Render, - ScrollHandle, SerializedTaskTiming, StatefulInteractiveElement, Styled, Task, TaskTiming, - TitlebarOptions, WindowBounds, WindowHandle, WindowOptions, div, prelude::FluentBuilder, px, - relative, size, + App, AppContext, ClipboardItem, Context, Entity, Hsla, InteractiveElement, IntoElement, + ParentElement, Render, ScrollHandle, SerializedTaskTiming, SharedString, + StatefulInteractiveElement, Styled, Task, TaskTiming, TitlebarOptions, WindowBounds, + WindowHandle, WindowOptions, div, prelude::FluentBuilder, px, relative, size, }; use util::ResultExt; use workspace::{ Workspace, ui::{ - ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ToggleState, + ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ToggleState, Tooltip, WithScrollbar, h_flex, v_flex, }, }; @@ -184,12 +184,12 @@ impl ProfilerWindow { .1; let location = location.rsplit_once("\\").unwrap_or(("", location)).1; - let label = format!( + let label = SharedString::from(format!( "{}:{}:{}", location, item.location.line(), item.location.column() - ); + )); h_flex() .gap_2() @@ -197,10 +197,15 @@ impl ProfilerWindow { .h(px(32.0)) .child( div() + .id(label.clone()) .w(px(200.0)) .flex_shrink_0() .overflow_hidden() - .child(div().text_ellipsis().child(label)), + .child(div().text_ellipsis().child(label.clone())) + .tooltip(Tooltip::text(label.clone())) + .on_click(move |_, _, cx| { + cx.write_to_clipboard(ClipboardItem::new_string(label.to_string())) + }), ) .child( div() From 194f6c9f952e81598f749cce8931260a5f88de12 Mon Sep 17 00:00:00 2001 From: Benjamin Jurk <106487517+bnjmnjrk@users.noreply.github.com> Date: Mon, 24 Nov 2025 10:36:04 +0100 Subject: [PATCH 0321/1030] Treat `.h++` files as C++ (#42802) Release Notes: - `.h++` files are now treated as C++. --- crates/languages/src/cpp/config.toml | 2 +- crates/theme/src/icon_theme.rs | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/cpp/config.toml b/crates/languages/src/cpp/config.toml index 1a994789232e4a58f4bdb2436865c0c28b9164f0..8d85b4f2416cad7cc7935dbb657109d5f1126aa5 100644 --- a/crates/languages/src/cpp/config.toml +++ b/crates/languages/src/cpp/config.toml @@ -1,6 +1,6 @@ name = "C++" grammar = "cpp" -path_suffixes = ["cc", "hh", "cpp", "h", "hpp", "cxx", "hxx", "c++", "ipp", "inl", "ino", "ixx", "cu", "cuh", "C", "H"] +path_suffixes = ["cc", "hh", "cpp", "h", "hpp", "cxx", "hxx", "c++", "h++", "ipp", "inl", "ino", "ixx", "cu", "cuh", "C", "H"] line_comments = ["// ", "/// ", "//! 
"] decrease_indent_patterns = [ { pattern = "^\\s*\\{.*\\}?\\s*$", valid_after = ["if", "for", "while", "do", "switch", "else"] }, diff --git a/crates/theme/src/icon_theme.rs b/crates/theme/src/icon_theme.rs index c3e7f3cfbc25cc04f05cd939f74154a732f16f58..4c62dd12532e2d06424a0b620617d154e38b6372 100644 --- a/crates/theme/src/icon_theme.rs +++ b/crates/theme/src/icon_theme.rs @@ -88,7 +88,9 @@ const FILE_SUFFIXES_BY_ICON_KEY: &[(&str, &[&str])] = &[ ("coffeescript", &["coffee"]), ( "cpp", - &["c++", "cc", "cpp", "cxx", "hh", "hpp", "hxx", "inl", "ixx"], + &[ + "c++", "h++", "cc", "cpp", "cxx", "hh", "hpp", "hxx", "inl", "ixx", + ], ), ("crystal", &["cr", "ecr"]), ("csharp", &["cs"]), From 3281b9077fddb7d40278c0479b27e1fb5f2c9c0f Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 24 Nov 2025 10:43:20 +0100 Subject: [PATCH 0322/1030] agent: Fix utf8 panic in outline (#43141) Fixes ZED-3F3 Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/agent/src/outline.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/agent/src/outline.rs b/crates/agent/src/outline.rs index 0de035c34bf285d41ff20676f037abf2464213a1..40a84bc28b4402d8251e164a423a2309127c50ce 100644 --- a/crates/agent/src/outline.rs +++ b/crates/agent/src/outline.rs @@ -48,7 +48,7 @@ pub async fn get_buffer_content_or_outline( if outline_items.is_empty() { let text = buffer.read_with(cx, |buffer, _| { let snapshot = buffer.snapshot(); - let len = snapshot.len().min(1024); + let len = snapshot.len().min(snapshot.as_rope().floor_char_boundary(1024)); let content = snapshot.text_for_range(0..len).collect::(); if let Some(path) = path { format!("# First 1KB of {path} (file too large to show full content, and no outline available)\n\n{content}") @@ -178,7 +178,7 @@ mod tests { let fs = FakeFs::new(cx.executor()); let project = Project::test(fs, [], cx).await; - let content = "A".repeat(100 * 1024); // 100KB + let content = "⚡".repeat(100 * 1024); // 100KB let content_len = content.len(); let buffer = project .update(cx, |project, cx| project.create_buffer(true, cx)) @@ -194,7 +194,7 @@ mod tests { // Should contain some of the actual file content assert!( - result.text.contains("AAAAAAAAAA"), + result.text.contains("⚡⚡⚡⚡⚡⚡⚡"), "Result did not contain content subset" ); From fc11ecfa2b845ae0a24efb3976d71463077fc3fa Mon Sep 17 00:00:00 2001 From: mg <108034506+mg0x7BE@users.noreply.github.com> Date: Mon, 24 Nov 2025 10:46:15 +0100 Subject: [PATCH 0323/1030] Add Windows path for extensions (#42645) ### Description The `installing-extensions.md` guide was missing the directory path for the Windows platform. It currently only lists the paths for macOS and Linux. This PR adds the correct path for Windows users (`%LOCALAPPDATA%\zed\extensions`). Release Notes: - N/A --------- Co-authored-by: Kirill Bulatov --- docs/src/extensions/installing-extensions.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/src/extensions/installing-extensions.md b/docs/src/extensions/installing-extensions.md index 801fe5c55c0f47530e2656cd831619d1457ba13e..d9573556f0d4faeedbfdebbe72e51ad17fbfbb57 100644 --- a/docs/src/extensions/installing-extensions.md +++ b/docs/src/extensions/installing-extensions.md @@ -8,6 +8,7 @@ Here you can view the extensions that you currently have installed or search and - On macOS, extensions are installed in `~/Library/Application Support/Zed/extensions`. - On Linux, they are installed in either `$XDG_DATA_HOME/zed/extensions` or `~/.local/share/zed/extensions`. 
+- On Windows, the directory is `%LOCALAPPDATA%\Zed\extensions`. This directory contains two subdirectories: From 4b04be602051d89e05e2087e8f38096bf5c31f73 Mon Sep 17 00:00:00 2001 From: shaik-zeeshan <145040231+shaik-zeeshan@users.noreply.github.com> Date: Mon, 24 Nov 2025 15:57:42 +0530 Subject: [PATCH 0324/1030] Fix gutter hover breakpoint not updating when switching the tabs (#43163) Closes #42073 fixes hover breakpoint not disappearing from a tab when tabs are switched https://github.com/user-attachments/assets/43096d2a-cc5b-46c4-b903-5bc8c33305c5 Release Notes: - N/A --------- Co-authored-by: Finn Evers --- crates/editor/src/editor.rs | 22 ++++++++++++++++++---- crates/editor/src/element.rs | 4 ++-- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index e8d0011a98d4126f6baf542589145c9d1ddc8dda..7a3c9b8594596152800442193e9364dc1a2c8aba 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -105,10 +105,10 @@ use gpui::{ AvailableSpace, Background, Bounds, ClickEvent, ClipboardEntry, ClipboardItem, Context, DispatchPhase, Edges, Entity, EntityInputHandler, EventEmitter, FocusHandle, FocusOutEvent, Focusable, FontId, FontWeight, Global, HighlightStyle, Hsla, KeyContext, Modifiers, - MouseButton, MouseDownEvent, PaintQuad, ParentElement, Pixels, Render, ScrollHandle, - SharedString, Size, Stateful, Styled, Subscription, Task, TextStyle, TextStyleRefinement, - UTF16Selection, UnderlineStyle, UniformListScrollHandle, WeakEntity, WeakFocusHandle, Window, - div, point, prelude::*, pulsating_between, px, relative, size, + MouseButton, MouseDownEvent, MouseMoveEvent, PaintQuad, ParentElement, Pixels, Render, + ScrollHandle, SharedString, Size, Stateful, Styled, Subscription, Task, TextStyle, + TextStyleRefinement, UTF16Selection, UnderlineStyle, UniformListScrollHandle, WeakEntity, + WeakFocusHandle, Window, div, point, prelude::*, pulsating_between, px, relative, size, }; use hover_links::{HoverLink, HoveredLinkState, find_file}; use hover_popover::{HoverState, hide_hover}; @@ -22169,6 +22169,20 @@ impl Editor { ); } }); + + if let Some(position_map) = self.last_position_map.clone() { + EditorElement::mouse_moved( + self, + &MouseMoveEvent { + position: window.mouse_position(), + pressed_button: None, + modifiers: window.modifiers(), + }, + &position_map, + window, + cx, + ); + } } } diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 8801f20323338e28bb7ed62923be65db785af312..71c76c0cb3eba0e70da140191ab5eb8daa5735bc 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -1158,7 +1158,7 @@ impl EditorElement { } } - fn mouse_moved( + pub(crate) fn mouse_moved( editor: &mut Editor, event: &MouseMoveEvent, position_map: &PositionMap, @@ -1169,7 +1169,7 @@ impl EditorElement { let gutter_hitbox = &position_map.gutter_hitbox; let modifiers = event.modifiers; let text_hovered = text_hitbox.is_hovered(window); - let gutter_hovered = gutter_hitbox.is_hovered(window); + let gutter_hovered = gutter_hitbox.bounds.contains(&event.position); editor.set_gutter_hovered(gutter_hovered, cx); editor.show_mouse_cursor(cx); From d333535e768f349021a39a78c3afcc75f950f0c3 Mon Sep 17 00:00:00 2001 From: Oscar Villavicencio Date: Mon, 24 Nov 2025 02:32:44 -0800 Subject: [PATCH 0325/1030] docs: Document `git_hosting_providers` for self-hosted Git instances (#43278) Closes #38433 Document how to register self-hosted GitHub/GitLab/Bitbucket instances via git_hosting_providers 
setting so permalinks and issue links resolve. Release Notes: - Added documentation on how to register self-hosted GitHub/GitLab/Bitbucket instances via the `git_hosting_providers` setting. This ensures permalinks and issue links can be resolved for these instances. --- docs/src/configuring-zed.md | 28 ++++++++++++++++++++++++++++ docs/src/git.md | 14 ++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index 6edcafb3d8f275047ba953cdf6644604709f7f22..cf86a0a6e7a6c4426f5a55d246f93b5c51be9792 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4693,6 +4693,34 @@ See the [debugger page](./debugger.md) for more information about debugging supp - `collapse_untracked_diff`: Whether to collapse untracked files in the diff panel - `scrollbar`: When to show the scrollbar in the git panel +## Git Hosting Providers + +- Description: Register self-hosted GitHub, GitLab, or Bitbucket instances so commit hashes, issue references, and permalinks resolve to the right host. +- Setting: `git_hosting_providers` +- Default: `[]` + +**Options** + +Each entry accepts: + +- `provider`: One of `github`, `gitlab`, or `bitbucket` +- `name`: Display name for the instance +- `base_url`: Base URL, e.g. `https://git.example.corp` + +You can define these in user or project settings; project settings are merged on top of user settings. + +```json [settings] +{ + "git_hosting_providers": [ + { + "provider": "github", + "name": "BigCorp GitHub", + "base_url": "https://git.example.corp" + } + ] +} +``` + ## Outline Panel - Description: Customize outline Panel diff --git a/docs/src/git.md b/docs/src/git.md index 85781e37bc628ac493a048b5b7d16d1fbd758d72..b31d9db585e0ff8ba9a2181903037028994a099c 100644 --- a/docs/src/git.md +++ b/docs/src/git.md @@ -146,6 +146,20 @@ Zed currently supports links to the hosted versions of [SourceHut](https://sr.ht) and [Codeberg](https://codeberg.org). +For self-hosted GitHub, GitLab, or Bitbucket instances, add them to the `git_hosting_providers` setting so commit hashes and permalinks resolve to your domain: + +```json [settings] +{ + "git_hosting_providers": [ + { + "provider": "gitlab", + "name": "Corp GitLab", + "base_url": "https://git.example.corp" + } + ] +} +``` + Zed also has a Copy Permalink feature to create a permanent link to a code snippet on your Git hosting service. These links are useful for sharing a specific line or range of lines in a file at a specific commit. Trigger this action via the [Command Palette](./getting-started.md#command-palette) (search for `permalink`), From 2f46e6a43cd50e673b99c20d61c6d699ae9bcdb9 Mon Sep 17 00:00:00 2001 From: Binlogo Date: Mon, 24 Nov 2025 18:51:45 +0800 Subject: [PATCH 0326/1030] http_client: Support `GITHUB_TOKEN` env to auth GitHub requests (#42623) Closes #33903 Release Notes: - Ensured Zed reuses `GITHUB_TOKEN` env variable when querying GitHub --- Before fixing: - The `crates-lsp` extension request captured: ``` curl 'https://api.github.com/repos/MathiasPius/crates-lsp/releases' \ -H 'accept: */*' \ -H 'user-agent: Zed/0.212.3 (macos; aarch64)' \ -H 'host: api.github.com' \ ``` - `crates-lsp` extension error: ``` Language server crates-lsp: from extension "Crates LSP" version 0.2.0: status error 403, response: "{\"message\":\"API rate limit exceeded for x.x.x.x. (But here's the good news: Authenticated requests get a higher rate limit. 
Check out the documentation for more details.)\",\"documentation_url\":\"https://docs.github.com/rest/overview/resources-in-the-rest-api#rate-limiting\"}\n" ``` After fixing: ``` export GITHUB_TOKEN=$(gh auth token) cargo run ``` - The `crates-lsp` extension request captured: ``` curl 'https://api.github.com/repos/MathiasPius/crates-lsp/releases' \ -H 'authorization: Bearer gho_Nt*****************2KXLw2' \ -H 'accept: */*' \ -H 'user-agent: Zed/0.214.0 (macos; aarch64)' \ -H 'host: api.github.com' \ ``` The API rate limitation is resolved. --- This isn't a perfect solution, but it enables users to avoid the noise. --- crates/http_client/src/github.rs | 35 ++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/crates/http_client/src/github.rs b/crates/http_client/src/github.rs index 32efed8e727330d3ac1c2fb6d8ea5d57fdd66dd4..e52e2f1d2555de477cd4597826bc3bd8308faf89 100644 --- a/crates/http_client/src/github.rs +++ b/crates/http_client/src/github.rs @@ -1,10 +1,13 @@ -use crate::HttpClient; +use crate::{HttpClient, HttpRequestExt}; use anyhow::{Context as _, Result, anyhow, bail}; use futures::AsyncReadExt; +use http::Request; use serde::Deserialize; use std::sync::Arc; use url::Url; +const GITHUB_API_URL: &str = "https://api.github.com"; + pub struct GitHubLspBinaryVersion { pub name: String, pub url: String, @@ -34,12 +37,17 @@ pub async fn latest_github_release( pre_release: bool, http: Arc, ) -> anyhow::Result { + let url = format!("{GITHUB_API_URL}/repos/{repo_name_with_owner}/releases"); + + let request = Request::get(&url) + .follow_redirects(crate::RedirectPolicy::FollowAll) + .when_some(std::env::var("GITHUB_TOKEN").ok(), |builder, token| { + builder.header("Authorization", format!("Bearer {}", token)) + }) + .body(Default::default())?; + let mut response = http - .get( - format!("https://api.github.com/repos/{repo_name_with_owner}/releases").as_str(), - Default::default(), - true, - ) + .send(request) .await .context("error fetching latest release")?; @@ -91,12 +99,17 @@ pub async fn get_release_by_tag_name( tag: &str, http: Arc, ) -> anyhow::Result { + let url = format!("{GITHUB_API_URL}/repos/{repo_name_with_owner}/releases/tags/{tag}"); + + let request = Request::get(&url) + .follow_redirects(crate::RedirectPolicy::FollowAll) + .when_some(std::env::var("GITHUB_TOKEN").ok(), |builder, token| { + builder.header("Authorization", format!("Bearer {}", token)) + }) + .body(Default::default())?; + let mut response = http - .get( - &format!("https://api.github.com/repos/{repo_name_with_owner}/releases/tags/{tag}"), - Default::default(), - true, - ) + .send(request) .await .context("error fetching latest release")?; From f7772af19761e80702b54d640aa75f836ef6c788 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 24 Nov 2025 12:11:45 +0100 Subject: [PATCH 0327/1030] util: Fix invalid powershell redirection syntax used in uni shell env capture (#43390) Closes https://github.com/zed-industries/zed/issues/42869 Release Notes: - Fixed shell env sourcing not working with powershell on unix systems --- crates/util/src/shell_env.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/util/src/shell_env.rs b/crates/util/src/shell_env.rs index 02b262298795f8a50d9612f65479c0299e7e6a3e..307d127135aa642da088c206122765a80ddc3e88 100644 --- a/crates/util/src/shell_env.rs +++ b/crates/util/src/shell_env.rs @@ -55,6 +55,7 @@ async fn capture_unix( // xonsh doesn't support redirecting to stdin, and control sequences are printed to // stdout on startup 
ShellKind::Xonsh => (FD_STDERR, "o>e".to_string()), + ShellKind::PowerShell => (FD_STDIN, format!(">{}", FD_STDIN)), _ => (FD_STDIN, format!(">&{}", FD_STDIN)), // `>&0` }; From f8729f6ea0713a19a2388975b1a144ccd2a543e6 Mon Sep 17 00:00:00 2001 From: Kunall Banerjee <14703164+yeskunall@users.noreply.github.com> Date: Mon, 24 Nov 2025 06:24:05 -0500 Subject: [PATCH 0328/1030] docs: Better wording for `terminal.working_directory` setting (#43388) Initially this was just going to be a minor docs fix, but then I wondered if we could improve the copy in the editor as well. Release Notes: - N/A --- assets/settings/default.json | 2 +- crates/settings/src/settings_content/terminal.rs | 5 +++-- docs/src/configuring-zed.md | 6 +++--- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index ba79f0ccbcca3837d94adc49ddbc9c53b3ae0a5f..c8ffd31617df7d057e89329c2db70c6b6aa21e95 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1441,7 +1441,7 @@ "default_height": 320, // What working directory to use when launching the terminal. // May take 4 values: - // 1. Use the current file's project directory. Will Fallback to the + // 1. Use the current file's project directory. Fallback to the // first project directory strategy if unsuccessful // "working_directory": "current_project_directory" // 2. Use the first project in this workspace's directory diff --git a/crates/settings/src/settings_content/terminal.rs b/crates/settings/src/settings_content/terminal.rs index c54ebe2d1c57af6e0fe51c765a5529cc4b1d4d7f..cd01eb14fa5ce19b077c39b67f8bd90ac93ad35f 100644 --- a/crates/settings/src/settings_content/terminal.rs +++ b/crates/settings/src/settings_content/terminal.rs @@ -222,10 +222,11 @@ pub enum Shell { #[strum_discriminants(derive(strum::VariantArray, strum::VariantNames, strum::FromRepr))] #[serde(rename_all = "snake_case")] pub enum WorkingDirectory { - /// Use the current file's project directory. Will Fallback to the + /// Use the current file's project directory. Fallback to the /// first project directory strategy if unsuccessful. CurrentProjectDirectory, - /// Use the first project in this workspace's directory. + /// Use the first project in this workspace's directory. Fallback to using + /// this platform's home directory. FirstProjectDirectory, /// Always use this platform's home directory (if it can be found). AlwaysHome, diff --git a/docs/src/configuring-zed.md b/docs/src/configuring-zed.md index cf86a0a6e7a6c4426f5a55d246f93b5c51be9792..2d866677650a0da141f27b3c78f7ad96f9581967 100644 --- a/docs/src/configuring-zed.md +++ b/docs/src/configuring-zed.md @@ -4071,7 +4071,7 @@ Example command to set the title: `echo -e "\e]2;New Title\007";` **Options** -1. Use the current file's project directory. Will Fallback to the first project directory strategy if unsuccessful +1. Use the current file's project directory. Fallback to the first project directory strategy if unsuccessful. ```json [settings] { @@ -4081,7 +4081,7 @@ Example command to set the title: `echo -e "\e]2;New Title\007";` } ``` -2. Use the first project in this workspace's directory. Will fallback to using this platform's home directory. +2. Use the first project in this workspace's directory. Fallback to using this platform's home directory. ```json [settings] { @@ -4091,7 +4091,7 @@ Example command to set the title: `echo -e "\e]2;New Title\007";` } ``` -3. Always use this platform's home directory (if we can find it) +3. 
Always use this platform's home directory if it can be found. ```json [settings] { From a0fa5d57c1dfd028810d18ee9bfa8650196ceca3 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 24 Nov 2025 13:13:20 +0100 Subject: [PATCH 0329/1030] proto: Fix cloned errors losing all context (#43393) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/proto/src/error.rs | 2 +- crates/remote/src/transport/ssh.rs | 2 +- crates/remote/src/transport/wsl.rs | 6 ++++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/crates/proto/src/error.rs b/crates/proto/src/error.rs index c2fdfcf8204e40abe317ec1404e62eb5caf2bdac..d83b0fc499ba9dddb1d6417307fea9eaed9fdfd7 100644 --- a/crates/proto/src/error.rs +++ b/crates/proto/src/error.rs @@ -126,7 +126,7 @@ impl ErrorExt for anyhow::Error { if let Some(rpc_error) = self.downcast_ref::() { rpc_error.cloned() } else { - anyhow::anyhow!("{self}") + anyhow::anyhow!("{self:#}") } } } diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index 6244045d1eba3a3f267fbdaf1cb906c58a3d48b2..fdf0f05ae665a8a384ef6dfcf3c659ee407e7124 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -1327,7 +1327,7 @@ fn build_command( let working_dir = RemotePathBuf::new(working_dir, ssh_path_style).to_string(); // shlex will wrap the command in single quotes (''), disabling ~ expansion, - // replace with with something that works + // replace with something that works const TILDE_PREFIX: &'static str = "~/"; if working_dir.starts_with(TILDE_PREFIX) { let working_dir = working_dir.trim_start_matches("~").trim_start_matches("/"); diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index c075e4fb1213512792191cb3b5ff5eefc423b339..7d4664ab846564fa55fc59912489536b78f8fc38 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -251,11 +251,13 @@ impl WslRemoteConnection { let mkdir = self.shell_kind.prepend_command_prefix("mkdir"); self.run_wsl_command(&mkdir, &["-p", &parent]) .await - .map_err(|e| anyhow!("Failed to create directory when uploading file: {}", e))?; + .context("Failed to create directory when uploading file")?; } let t0 = Instant::now(); - let src_stat = fs::metadata(&src_path).await?; + let src_stat = fs::metadata(&src_path) + .await + .with_context(|| format!("source path does not exist: {}", src_path.display()))?; let size = src_stat.len(); log::info!( "uploading remote server to WSL {:?} ({}kb)", From 2d55c088cccbf5a29d3956dce8e607ae56bf55b4 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 24 Nov 2025 13:34:04 +0100 Subject: [PATCH 0330/1030] releases: Add build number to Nightly builds (#42990) - **Remove semantic_version crate and use semver instead** - **Update upload-nightly** Release Notes: - N/A --------- Co-authored-by: Conrad Irwin --- Cargo.lock | 38 ++++--- Cargo.toml | 5 +- crates/activity_indicator/Cargo.toml | 1 + .../src/activity_indicator.rs | 4 +- crates/agent_ui/Cargo.toml | 1 + crates/agent_ui/src/acp/entry_view_state.rs | 4 +- crates/agent_ui/src/acp/thread_view.rs | 4 +- crates/auto_update/Cargo.toml | 1 + crates/auto_update/src/auto_update.rs | 75 +++++++------- crates/channel/Cargo.toml | 1 + crates/channel/src/channel_store_tests.rs | 4 +- crates/cli/build.rs | 3 + crates/client/Cargo.toml | 1 + crates/client/src/telemetry.rs | 2 +- crates/collab/Cargo.toml | 1 - crates/collab/src/api/extensions.rs | 6 +- crates/collab/src/db.rs | 4 +- 
crates/collab/src/db/queries/extensions.rs | 9 +- crates/collab/src/rpc.rs | 12 +-- crates/collab/src/rpc/connection_pool.rs | 10 +- .../remote_editing_collaboration_tests.rs | 25 +++-- crates/collab/src/tests/test_server.rs | 5 +- crates/editor/Cargo.toml | 1 + crates/editor/benches/editor_render.rs | 2 +- crates/editor/src/editor_tests.rs | 6 +- crates/editor/src/inlays/inlay_hints.rs | 4 +- crates/eval/src/eval.rs | 13 ++- crates/extension/Cargo.toml | 2 +- crates/extension/src/extension.rs | 11 +-- crates/extension/src/extension_manifest.rs | 4 +- crates/extension_host/Cargo.toml | 2 +- .../extension_compilation_benchmark.rs | 6 +- crates/extension_host/src/extension_host.rs | 9 +- .../src/extension_store_test.rs | 4 +- crates/extension_host/src/wasm_host.rs | 15 ++- crates/extension_host/src/wasm_host/wit.rs | 11 +-- .../src/wasm_host/wit/since_v0_0_1.rs | 4 +- .../src/wasm_host/wit/since_v0_0_4.rs | 4 +- .../src/wasm_host/wit/since_v0_0_6.rs | 4 +- .../src/wasm_host/wit/since_v0_1_0.rs | 4 +- .../src/wasm_host/wit/since_v0_2_0.rs | 4 +- .../src/wasm_host/wit/since_v0_3_0.rs | 4 +- .../src/wasm_host/wit/since_v0_4_0.rs | 4 +- .../src/wasm_host/wit/since_v0_5_0.rs | 4 +- .../src/wasm_host/wit/since_v0_6_0.rs | 6 +- crates/extensions_ui/Cargo.toml | 2 +- .../src/extension_version_selector.rs | 6 +- crates/gpui/Cargo.toml | 2 +- crates/gpui/src/platform.rs | 1 - crates/gpui/src/platform/mac/platform.rs | 20 ++-- crates/language_models/Cargo.toml | 1 + crates/language_models/src/provider/cloud.rs | 9 +- crates/language_tools/Cargo.toml | 1 + .../language_tools/src/lsp_log_view_tests.rs | 4 +- crates/lsp/Cargo.toml | 1 + crates/lsp/src/lsp.rs | 4 +- crates/project/src/project_tests.rs | 4 +- crates/project_symbols/Cargo.toml | 1 + crates/project_symbols/src/project_symbols.rs | 4 +- crates/recent_projects/Cargo.toml | 1 + .../recent_projects/src/remote_connections.rs | 11 ++- crates/release_channel/Cargo.toml | 1 + crates/release_channel/src/lib.rs | 31 ++++-- crates/remote/Cargo.toml | 1 + crates/remote/src/remote_client.rs | 14 +-- crates/remote/src/transport/ssh.rs | 37 +++---- crates/remote/src/transport/wsl.rs | 23 ++--- crates/remote_server/Cargo.toml | 1 + crates/remote_server/build.rs | 3 + .../remote_server/src/remote_editing_tests.rs | 8 +- crates/remote_server/src/remote_server.rs | 12 ++- crates/remote_server/src/unix.rs | 27 +++-- crates/semantic_version/Cargo.toml | 17 ---- crates/semantic_version/LICENSE-APACHE | 1 - .../semantic_version/src/semantic_version.rs | 99 ------------------- crates/system_specs/Cargo.toml | 1 + crates/system_specs/src/system_specs.rs | 7 +- crates/telemetry_events/Cargo.toml | 2 +- .../telemetry_events/src/telemetry_events.rs | 4 +- crates/vim/Cargo.toml | 1 + crates/vim/src/test/vim_test_context.rs | 5 +- crates/zed/Cargo.toml | 1 + crates/zed/build.rs | 4 + crates/zed/src/main.rs | 3 +- crates/zed/src/zed.rs | 22 ++++- crates/zeta/Cargo.toml | 1 + crates/zeta/src/zeta.rs | 15 +-- crates/zeta2/Cargo.toml | 1 + crates/zeta2/src/zeta2.rs | 15 +-- crates/zeta_cli/src/headless.rs | 12 ++- script/upload-nightly | 6 +- 91 files changed, 372 insertions(+), 419 deletions(-) delete mode 100644 crates/semantic_version/Cargo.toml delete mode 120000 crates/semantic_version/LICENSE-APACHE delete mode 100644 crates/semantic_version/src/semantic_version.rs diff --git a/Cargo.lock b/Cargo.lock index 3eee0204971ef09650a7b54988542a1f5d4c59ce..63734b552d7475eacdb2ee3eac66371f7c029d28 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -103,6 +103,7 @@ dependencies = [ 
"project", "proto", "release_channel", + "semver", "smallvec", "ui", "util", @@ -373,6 +374,7 @@ dependencies = [ "rules_library", "schemars", "search", + "semver", "serde", "serde_json", "serde_json_lenient", @@ -1341,6 +1343,7 @@ dependencies = [ "parking_lot", "paths", "release_channel", + "semver", "serde", "serde_json", "settings", @@ -2926,6 +2929,7 @@ dependencies = [ "postage", "release_channel", "rpc", + "semver", "settings", "text", "time", @@ -3124,6 +3128,7 @@ dependencies = [ "release_channel", "rpc", "rustls-pki-types", + "semver", "serde", "serde_json", "serde_urlencoded", @@ -3408,7 +3413,6 @@ dependencies = [ "scrypt", "sea-orm", "sea-orm-macros", - "semantic_version", "semver", "serde", "serde_json", @@ -5388,6 +5392,7 @@ dependencies = [ "rope", "rpc", "schemars", + "semver", "serde", "serde_json", "settings", @@ -5850,7 +5855,7 @@ dependencies = [ "parking_lot", "pretty_assertions", "proto", - "semantic_version", + "semver", "serde", "serde_json", "task", @@ -5916,7 +5921,7 @@ dependencies = [ "release_channel", "remote", "reqwest_client", - "semantic_version", + "semver", "serde", "serde_json", "serde_json_lenient", @@ -5955,7 +5960,7 @@ dependencies = [ "picker", "project", "release_channel", - "semantic_version", + "semver", "serde", "settings", "smallvec", @@ -7327,7 +7332,7 @@ dependencies = [ "resvg", "schemars", "seahash", - "semantic_version", + "semver", "serde", "serde_json", "slotmap", @@ -8907,6 +8912,7 @@ dependencies = [ "project", "release_channel", "schemars", + "semver", "serde", "serde_json", "settings", @@ -8972,6 +8978,7 @@ dependencies = [ "project", "proto", "release_channel", + "semver", "serde_json", "settings", "theme", @@ -9487,6 +9494,7 @@ dependencies = [ "postage", "release_channel", "schemars", + "semver", "serde", "serde_json", "smol", @@ -13126,6 +13134,7 @@ dependencies = [ "picker", "project", "release_channel", + "semver", "serde_json", "settings", "theme", @@ -13775,6 +13784,7 @@ dependencies = [ "project", "release_channel", "remote", + "semver", "serde", "serde_json", "settings", @@ -13945,6 +13955,7 @@ name = "release_channel" version = "0.1.0" dependencies = [ "gpui", + "semver", ] [[package]] @@ -13965,6 +13976,7 @@ dependencies = [ "release_channel", "rpc", "schemars", + "semver", "serde", "serde_json", "settings", @@ -14029,6 +14041,7 @@ dependencies = [ "reqwest_client", "rpc", "rust-embed", + "semver", "serde", "serde_json", "settings", @@ -15150,14 +15163,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749" -[[package]] -name = "semantic_version" -version = "0.1.0" -dependencies = [ - "anyhow", - "serde", -] - [[package]] name = "semver" version = "1.0.27" @@ -16882,6 +16887,7 @@ dependencies = [ "human_bytes", "pciid-parser", "release_channel", + "semver", "serde", "sysinfo 0.37.2", ] @@ -17029,7 +17035,7 @@ dependencies = [ name = "telemetry_events" version = "0.1.0" dependencies = [ - "semantic_version", + "semver", "serde", "serde_json", ] @@ -18799,6 +18805,7 @@ dependencies = [ "release_channel", "schemars", "search", + "semver", "serde", "serde_json", "settings", @@ -21262,6 +21269,7 @@ dependencies = [ "reqwest_client", "rope", "search", + "semver", "serde", "serde_json", "session", @@ -21659,6 +21667,7 @@ dependencies = [ "release_channel", "reqwest_client", "rpc", + "semver", "serde", "serde_json", "settings", @@ -21705,6 +21714,7 @@ dependencies = [ "pretty_assertions", "project", "release_channel", + 
"semver", "serde", "serde_json", "settings", diff --git a/Cargo.toml b/Cargo.toml index abf74a8108fa06cf3c154438c13ce015719b7481..e3ba2cb817357f5733179864bc23161d01aa1123 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -147,7 +147,6 @@ members = [ "crates/rules_library", "crates/schema_generator", "crates/search", - "crates/semantic_version", "crates/session", "crates/settings", "crates/settings_json", @@ -381,7 +380,6 @@ rope = { path = "crates/rope" } rpc = { path = "crates/rpc" } rules_library = { path = "crates/rules_library" } search = { path = "crates/search" } -semantic_version = { path = "crates/semantic_version" } session = { path = "crates/session" } settings = { path = "crates/settings" } settings_json = { path = "crates/settings_json" } @@ -629,7 +627,7 @@ rustls-platform-verifier = "0.5.0" # WARNING: If you change this, you must also publish a new version of zed-scap to crates.io scap = { git = "https://github.com/zed-industries/scap", rev = "4afea48c3b002197176fb19cd0f9b180dd36eaac", default-features = false, package = "zed-scap", version = "0.0.8-zed" } schemars = { version = "1.0", features = ["indexmap2"] } -semver = "1.0" +semver = { version = "1.0", features = ["serde"] } serde = { version = "1.0.221", features = ["derive", "rc"] } serde_derive = "1.0.221" serde_json = { version = "1.0.144", features = ["preserve_order", "raw_value"] } @@ -845,7 +843,6 @@ refineable = { codegen-units = 1 } release_channel = { codegen-units = 1 } reqwest_client = { codegen-units = 1 } rich_text = { codegen-units = 1 } -semantic_version = { codegen-units = 1 } session = { codegen-units = 1 } snippet = { codegen-units = 1 } snippets_ui = { codegen-units = 1 } diff --git a/crates/activity_indicator/Cargo.toml b/crates/activity_indicator/Cargo.toml index 99ae5b5b077a14c0909737d64935220698a007c7..8587e52723b48d1495bbfbc5442bb8007aed1786 100644 --- a/crates/activity_indicator/Cargo.toml +++ b/crates/activity_indicator/Cargo.toml @@ -23,6 +23,7 @@ gpui.workspace = true language.workspace = true project.workspace = true proto.workspace = true +semver.workspace = true smallvec.workspace = true ui.workspace = true util.workspace = true diff --git a/crates/activity_indicator/src/activity_indicator.rs b/crates/activity_indicator/src/activity_indicator.rs index 5cb4e1c6153154782bf10447c13c3a9017cbcce7..b537fabc9b7102f0d9cfab42370a21983a941f19 100644 --- a/crates/activity_indicator/src/activity_indicator.rs +++ b/crates/activity_indicator/src/activity_indicator.rs @@ -925,15 +925,15 @@ impl StatusItemView for ActivityIndicator { #[cfg(test)] mod tests { - use gpui::SemanticVersion; use release_channel::AppCommitSha; + use semver::Version; use super::*; #[test] fn test_version_tooltip_message() { let message = ActivityIndicator::version_tooltip_message(&VersionCheckType::Semantic( - SemanticVersion::new(1, 0, 0), + Version::new(1, 0, 0), )); assert_eq!(message, "Version: 1.0.0"); diff --git a/crates/agent_ui/Cargo.toml b/crates/agent_ui/Cargo.toml index 17e90775b2fd386524b01ea3ab056a00273aa82f..0f52c07078f447c9d8a95312ccd96561516907a1 100644 --- a/crates/agent_ui/Cargo.toml +++ b/crates/agent_ui/Cargo.toml @@ -113,6 +113,7 @@ languages = { workspace = true, features = ["test-support"] } language_model = { workspace = true, "features" = ["test-support"] } pretty_assertions.workspace = true project = { workspace = true, features = ["test-support"] } +semver.workspace = true rand.workspace = true tree-sitter-md.workspace = true unindent.workspace = true diff --git a/crates/agent_ui/src/acp/entry_view_state.rs 
b/crates/agent_ui/src/acp/entry_view_state.rs index 60f39e47dceb6daebc72bba7e3e4a5fc70676dd6..6fb94dfb6b84826d715e9b28163e9968fc2df3b9 100644 --- a/crates/agent_ui/src/acp/entry_view_state.rs +++ b/crates/agent_ui/src/acp/entry_view_state.rs @@ -405,7 +405,7 @@ mod tests { use buffer_diff::{DiffHunkStatus, DiffHunkStatusKind}; use editor::RowInfo; use fs::FakeFs; - use gpui::{AppContext as _, SemanticVersion, TestAppContext}; + use gpui::{AppContext as _, TestAppContext}; use crate::acp::entry_view_state::EntryViewState; use multi_buffer::MultiBufferRow; @@ -539,7 +539,7 @@ mod tests { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); } } diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 93b6f749cd9891cd25cfc659a02d6808a9cc4ccb..3d387fc87c3377aed0278756b1c12644757e687d 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -6086,7 +6086,7 @@ pub(crate) mod tests { use assistant_text_thread::TextThreadStore; use editor::MultiBufferOffset; use fs::FakeFs; - use gpui::{EventEmitter, SemanticVersion, TestAppContext, VisualTestContext}; + use gpui::{EventEmitter, TestAppContext, VisualTestContext}; use project::Project; use serde_json::json; use settings::SettingsStore; @@ -6603,7 +6603,7 @@ pub(crate) mod tests { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); prompt_store::init(cx) }); } diff --git a/crates/auto_update/Cargo.toml b/crates/auto_update/Cargo.toml index ae7c869493d8ca33528800f91c446e9546c952d0..6f352fbd7b74138d29a1f4f350b4d958139f11d5 100644 --- a/crates/auto_update/Cargo.toml +++ b/crates/auto_update/Cargo.toml @@ -21,6 +21,7 @@ http_client.workspace = true log.workspace = true paths.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 010e011526ffc6d0332a5c22da107f380ff37c91..f5132ffe30e0f360e642d7796c3865bcd48cd71c 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -2,12 +2,13 @@ use anyhow::{Context as _, Result}; use client::Client; use db::kvp::KEY_VALUE_STORE; use gpui::{ - App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, Global, SemanticVersion, - Task, Window, actions, + App, AppContext as _, AsyncApp, BackgroundExecutor, Context, Entity, Global, Task, Window, + actions, }; use http_client::{HttpClient, HttpClientWithUrl}; use paths::remote_servers_dir; use release_channel::{AppCommitSha, ReleaseChannel}; +use semver::Version; use serde::{Deserialize, Serialize}; use settings::{RegisterSetting, Settings, SettingsStore}; use smol::fs::File; @@ -44,7 +45,7 @@ actions!( #[derive(Clone, Debug, PartialEq, Eq)] pub enum VersionCheckType { Sha(AppCommitSha), - Semantic(SemanticVersion), + Semantic(Version), } #[derive(Serialize, Debug)] @@ -100,7 +101,7 @@ impl AutoUpdateStatus { pub struct AutoUpdater { status: AutoUpdateStatus, - current_version: SemanticVersion, + current_version: Version, client: Arc, pending_poll: Option>>, quit_subscription: 
Option, @@ -256,7 +257,7 @@ pub fn view_release_notes(_: &ViewReleaseNotes, cx: &mut App) -> Option<()> { match release_channel { ReleaseChannel::Stable | ReleaseChannel::Preview => { let auto_updater = auto_updater.read(cx); - let current_version = auto_updater.current_version; + let current_version = auto_updater.current_version.clone(); let release_channel = release_channel.dev_name(); let path = format!("/releases/{release_channel}/{current_version}"); let url = &auto_updater.client.http_client().build_url(&path); @@ -322,7 +323,7 @@ impl AutoUpdater { cx.default_global::().0.clone() } - fn new(current_version: SemanticVersion, client: Arc, cx: &mut Context) -> Self { + fn new(current_version: Version, client: Arc, cx: &mut Context) -> Self { // On windows, executable files cannot be overwritten while they are // running, so we must wait to overwrite the application until quitting // or restarting. When quitting the app, we spawn the auto update helper @@ -400,8 +401,8 @@ impl AutoUpdater { })); } - pub fn current_version(&self) -> SemanticVersion { - self.current_version + pub fn current_version(&self) -> Version { + self.current_version.clone() } pub fn status(&self) -> AutoUpdateStatus { @@ -422,7 +423,7 @@ impl AutoUpdater { // Ok(None). pub async fn download_remote_server_release( release_channel: ReleaseChannel, - version: Option, + version: Option, os: &str, arch: &str, set_status: impl Fn(&str, &mut AsyncApp) + Send + 'static, @@ -469,7 +470,7 @@ impl AutoUpdater { pub async fn get_remote_server_release_url( channel: ReleaseChannel, - version: Option, + version: Option, os: &str, arch: &str, cx: &mut AsyncApp, @@ -491,7 +492,7 @@ impl AutoUpdater { async fn get_release_asset( this: &Entity, release_channel: ReleaseChannel, - version: Option, + version: Option, asset: &str, os: &str, arch: &str, @@ -554,7 +555,7 @@ impl AutoUpdater { this.read_with(cx, |this, cx| { ( this.client.http_client(), - this.current_version, + this.current_version.clone(), this.status.clone(), ReleaseChannel::try_global(cx).unwrap_or(ReleaseChannel::Stable), ) @@ -627,11 +628,11 @@ impl AutoUpdater { fn check_if_fetched_version_is_newer( release_channel: ReleaseChannel, app_commit_sha: Result>, - installed_version: SemanticVersion, + installed_version: Version, fetched_version: String, status: AutoUpdateStatus, ) -> Result> { - let parsed_fetched_version = fetched_version.parse::(); + let parsed_fetched_version = fetched_version.parse::(); if let AutoUpdateStatus::Updated { version, .. 
} = status { match version { @@ -708,8 +709,8 @@ impl AutoUpdater { } fn check_if_fetched_version_is_newer_non_nightly( - installed_version: SemanticVersion, - fetched_version: SemanticVersion, + installed_version: Version, + fetched_version: Version, ) -> Result> { let should_download = fetched_version > installed_version; let newer_version = should_download.then(|| VersionCheckType::Semantic(fetched_version)); @@ -1020,7 +1021,7 @@ mod tests { cx.update(|cx| { settings::init(cx); - let current_version = SemanticVersion::new(0, 100, 0); + let current_version = semver::Version::new(0, 100, 0); release_channel::init_test(current_version, ReleaseChannel::Stable, cx); let clock = Arc::new(FakeSystemClock::new()); @@ -1059,7 +1060,7 @@ mod tests { auto_updater.read_with(cx, |updater, _| { assert_eq!(updater.status(), AutoUpdateStatus::Idle); - assert_eq!(updater.current_version(), SemanticVersion::new(0, 100, 0)); + assert_eq!(updater.current_version(), semver::Version::new(0, 100, 0)); }); release_available.store(true, atomic::Ordering::SeqCst); @@ -1078,7 +1079,7 @@ mod tests { assert_eq!( status, AutoUpdateStatus::Downloading { - version: VersionCheckType::Semantic(SemanticVersion::new(0, 100, 1)) + version: VersionCheckType::Semantic(semver::Version::new(0, 100, 1)) } ); @@ -1108,7 +1109,7 @@ mod tests { assert_eq!( status, AutoUpdateStatus::Updated { - version: VersionCheckType::Semantic(SemanticVersion::new(0, 100, 1)) + version: VersionCheckType::Semantic(semver::Version::new(0, 100, 1)) } ); let will_restart = cx.expect_restart(); @@ -1122,9 +1123,9 @@ mod tests { fn test_stable_does_not_update_when_fetched_version_is_not_higher() { let release_channel = ReleaseChannel::Stable; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Idle; - let fetched_version = SemanticVersion::new(1, 0, 0); + let fetched_version = semver::Version::new(1, 0, 0); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1141,9 +1142,9 @@ mod tests { fn test_stable_does_update_when_fetched_version_is_higher() { let release_channel = ReleaseChannel::Stable; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Idle; - let fetched_version = SemanticVersion::new(1, 0, 1); + let fetched_version = semver::Version::new(1, 0, 1); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1163,11 +1164,11 @@ mod tests { fn test_stable_does_not_update_when_fetched_version_is_not_higher_than_cached() { let release_channel = ReleaseChannel::Stable; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { - version: VersionCheckType::Semantic(SemanticVersion::new(1, 0, 1)), + version: VersionCheckType::Semantic(semver::Version::new(1, 0, 1)), }; - let fetched_version = SemanticVersion::new(1, 0, 1); + let fetched_version = semver::Version::new(1, 0, 1); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1184,11 +1185,11 @@ mod tests { fn test_stable_does_update_when_fetched_version_is_higher_than_cached() { let release_channel = ReleaseChannel::Stable; let app_commit_sha = Ok(Some("a".to_string())); - 
let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { - version: VersionCheckType::Semantic(SemanticVersion::new(1, 0, 1)), + version: VersionCheckType::Semantic(semver::Version::new(1, 0, 1)), }; - let fetched_version = SemanticVersion::new(1, 0, 2); + let fetched_version = semver::Version::new(1, 0, 2); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1208,7 +1209,7 @@ mod tests { fn test_nightly_does_not_update_when_fetched_sha_is_same() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Idle; let fetched_sha = "a".to_string(); @@ -1227,7 +1228,7 @@ mod tests { fn test_nightly_does_update_when_fetched_sha_is_not_same() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Idle; let fetched_sha = "b".to_string(); @@ -1249,7 +1250,7 @@ mod tests { fn test_nightly_does_not_update_when_fetched_sha_is_same_as_cached() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; @@ -1270,7 +1271,7 @@ mod tests { fn test_nightly_does_update_when_fetched_sha_is_not_same_as_cached() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; @@ -1294,7 +1295,7 @@ mod tests { fn test_nightly_does_update_when_installed_versions_sha_cannot_be_retrieved() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(None); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Idle; let fetched_sha = "a".to_string(); @@ -1317,7 +1318,7 @@ mod tests { { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(None); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; @@ -1339,7 +1340,7 @@ mod tests { { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(None); - let installed_version = SemanticVersion::new(1, 0, 0); + let installed_version = semver::Version::new(1, 0, 0); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; diff --git a/crates/channel/Cargo.toml b/crates/channel/Cargo.toml index 43af27ac8b6f21d4e1e16c9102da3de9c0585db4..a8664da8e93738fc21241e8185a9747bd405f469 100644 --- a/crates/channel/Cargo.toml +++ b/crates/channel/Cargo.toml @@ -37,6 +37,7 @@ collections = { workspace = true, features = ["test-support"] } gpui = { workspace = true, features = ["test-support"] } rpc = { workspace = true, features 
= ["test-support"] } client = { workspace = true, features = ["test-support"] } +semver.workspace = true settings = { workspace = true, features = ["test-support"] } util = { workspace = true, features = ["test-support"] } http_client = { workspace = true, features = ["test-support"] } diff --git a/crates/channel/src/channel_store_tests.rs b/crates/channel/src/channel_store_tests.rs index c3a6e80955605be096ba9b1cdb6975b5ab2ee389..f1f9d23a99f25f14385a061c0732869c21f160f1 100644 --- a/crates/channel/src/channel_store_tests.rs +++ b/crates/channel/src/channel_store_tests.rs @@ -1,7 +1,7 @@ use super::*; use client::{Client, UserStore}; use clock::FakeSystemClock; -use gpui::{App, AppContext as _, Entity, SemanticVersion}; +use gpui::{App, AppContext as _, Entity}; use http_client::FakeHttpClient; use rpc::proto::{self}; use settings::SettingsStore; @@ -236,7 +236,7 @@ fn test_dangling_channel_paths(cx: &mut App) { fn init_test(cx: &mut App) -> Entity { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); let clock = Arc::new(FakeSystemClock::new()); let http = FakeHttpClient::with_404_response(); diff --git a/crates/cli/build.rs b/crates/cli/build.rs index 50ef631ebfbdc0628c7eacdac615a5e38811621f..a3c4bc643735d002eb1ce836cda3c86a0b99a5cb 100644 --- a/crates/cli/build.rs +++ b/crates/cli/build.rs @@ -23,4 +23,7 @@ fn main() { println!("cargo:rustc-env=ZED_COMMIT_SHA={git_sha}"); } + if let Some(build_identifier) = option_env!("GITHUB_RUN_NUMBER") { + println!("cargo:rustc-env=ZED_BUILD_ID={build_identifier}"); + } } diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 513a73be4581f3b0c8069dde831cc6811f5e045b..7149ad4f55feaae5b596a39a3dd460d71cc5daa5 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -70,6 +70,7 @@ settings = { workspace = true, features = ["test-support"] } util = { workspace = true, features = ["test-support"] } [target.'cfg(target_os = "windows")'.dependencies] +semver.workspace = true windows.workspace = true [target.'cfg(target_os = "macos")'.dependencies] diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index 018e81d3e3a9bc27670bf96f2de8896b16875b20..68b6c302fb20b1afe78a89dada745538d8150d0d 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -158,7 +158,7 @@ pub fn os_version() -> String { let mut info = unsafe { std::mem::zeroed() }; let status = unsafe { windows::Wdk::System::SystemServices::RtlGetVersion(&mut info) }; if status.is_ok() { - gpui::SemanticVersion::new( + semver::Version::new( info.dwMajorVersion as _, info.dwMinorVersion as _, info.dwBuildNumber as _, diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index c8467da7954b195c0eef09ce1bed8361d7fa2c7b..b8a4c035499d45adc494c9f8175a772d15aa96df 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -50,7 +50,6 @@ scrypt = "0.11" # sea-orm and sea-orm-macros versions must match exactly. 
sea-orm = { version = "=1.1.10", features = ["sqlx-postgres", "postgres-array", "runtime-tokio-rustls", "with-uuid"] } sea-orm-macros = "=1.1.10" -semantic_version.workspace = true semver.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/collab/src/api/extensions.rs b/crates/collab/src/api/extensions.rs index 1ace433db298be7ffd159128b54b194395ba4fe5..187b2ab279e1b99b257a61cd2841617ecefb7b3c 100644 --- a/crates/collab/src/api/extensions.rs +++ b/crates/collab/src/api/extensions.rs @@ -11,7 +11,7 @@ use axum::{ }; use collections::{BTreeSet, HashMap}; use rpc::{ExtensionApiManifest, ExtensionProvides, GetExtensionsResponse}; -use semantic_version::SemanticVersion; +use semver::Version as SemanticVersion; use serde::Deserialize; use std::str::FromStr; use std::{sync::Arc, time::Duration}; @@ -108,8 +108,8 @@ struct GetExtensionUpdatesParams { ids: String, min_schema_version: i32, max_schema_version: i32, - min_wasm_api_version: SemanticVersion, - max_wasm_api_version: SemanticVersion, + min_wasm_api_version: semver::Version, + max_wasm_api_version: semver::Version, } async fn get_extension_updates( diff --git a/crates/collab/src/db.rs b/crates/collab/src/db.rs index 1152cb97d79ef2c7df437479d79b28a5ca6d2ef7..a3eceb472cbb6c7d9686b520f0a5f3f13fa4dd90 100644 --- a/crates/collab/src/db.rs +++ b/crates/collab/src/db.rs @@ -22,7 +22,7 @@ use sea_orm::{ entity::prelude::*, sea_query::{Alias, Expr, OnConflict}, }; -use semantic_version::SemanticVersion; +use semver::Version; use serde::{Deserialize, Serialize}; use std::ops::RangeInclusive; use std::{ @@ -671,7 +671,7 @@ pub struct NewExtensionVersion { pub struct ExtensionVersionConstraints { pub schema_versions: RangeInclusive, - pub wasm_api_versions: RangeInclusive, + pub wasm_api_versions: RangeInclusive, } impl LocalSettingsKind { diff --git a/crates/collab/src/db/queries/extensions.rs b/crates/collab/src/db/queries/extensions.rs index b4dc4dd89d15fa1b80b561408f2bdc9a233094c0..729e3de99f16bd7351bb510ec24156a4fcae7c60 100644 --- a/crates/collab/src/db/queries/extensions.rs +++ b/crates/collab/src/db/queries/extensions.rs @@ -69,7 +69,7 @@ impl Database { extensions: &[extension::Model], constraints: Option<&ExtensionVersionConstraints>, tx: &DatabaseTransaction, - ) -> Result> { + ) -> Result> { let mut versions = extension_version::Entity::find() .filter( extension_version::Column::ExtensionId @@ -79,11 +79,10 @@ impl Database { .await?; let mut max_versions = - HashMap::::default(); + HashMap::::default(); while let Some(version) = versions.next().await { let version = version?; - let Some(extension_version) = SemanticVersion::from_str(&version.version).log_err() - else { + let Some(extension_version) = Version::from_str(&version.version).log_err() else { continue; }; @@ -102,7 +101,7 @@ impl Database { } if let Some(wasm_api_version) = version.wasm_api_version.as_ref() { - if let Some(version) = SemanticVersion::from_str(wasm_api_version).log_err() { + if let Some(version) = Version::from_str(wasm_api_version).log_err() { if !constraints.wasm_api_versions.contains(&version) { continue; } diff --git a/crates/collab/src/rpc.rs b/crates/collab/src/rpc.rs index a3bd93db5fc177a60bb450b469335dc6b9e6ce3d..aa77ba25bfb687b6c5cb0da84e14c843f8a2a3bc 100644 --- a/crates/collab/src/rpc.rs +++ b/crates/collab/src/rpc.rs @@ -50,7 +50,7 @@ use rpc::{ RequestMessage, ShareProject, UpdateChannelBufferCollaborators, }, }; -use semantic_version::SemanticVersion; +use semver::Version; use serde::{Serialize, Serializer}; use 
std::{ any::TypeId, @@ -985,14 +985,14 @@ impl Server { { let mut pool = self.connection_pool.lock(); - pool.add_connection(connection_id, user.id, user.admin, zed_version); + pool.add_connection(connection_id, user.id, user.admin, zed_version.clone()); self.peer.send( connection_id, build_initial_contacts_update(contacts, &pool), )?; } - if should_auto_subscribe_to_channels(zed_version) { + if should_auto_subscribe_to_channels(&zed_version) { subscribe_user_to_channels(user.id, session).await?; } @@ -1136,7 +1136,7 @@ impl Header for ProtocolVersion { } } -pub struct AppVersionHeader(SemanticVersion); +pub struct AppVersionHeader(Version); impl Header for AppVersionHeader { fn name() -> &'static HeaderName { static ZED_APP_VERSION: OnceLock = OnceLock::new(); @@ -2834,8 +2834,8 @@ async fn remove_contact( Ok(()) } -fn should_auto_subscribe_to_channels(version: ZedVersion) -> bool { - version.0.minor() < 139 +fn should_auto_subscribe_to_channels(version: &ZedVersion) -> bool { + version.0.minor < 139 } async fn subscribe_to_channels( diff --git a/crates/collab/src/rpc/connection_pool.rs b/crates/collab/src/rpc/connection_pool.rs index 417edd66d66d7479f42fb09b01c7a5d9f05a6223..b1193239163fe34a0cb5802aa398abc37d1cca42 100644 --- a/crates/collab/src/rpc/connection_pool.rs +++ b/crates/collab/src/rpc/connection_pool.rs @@ -2,7 +2,7 @@ use crate::db::{ChannelId, ChannelRole, UserId}; use anyhow::{Context as _, Result}; use collections::{BTreeMap, HashMap, HashSet}; use rpc::ConnectionId; -use semantic_version::SemanticVersion; +use semver::Version; use serde::Serialize; use std::fmt; use tracing::instrument; @@ -19,8 +19,8 @@ struct ConnectedPrincipal { connection_ids: HashSet, } -#[derive(Copy, Clone, Debug, Serialize, PartialOrd, PartialEq, Eq, Ord)] -pub struct ZedVersion(pub SemanticVersion); +#[derive(Clone, Debug, Serialize, PartialOrd, PartialEq, Eq, Ord)] +pub struct ZedVersion(pub Version); impl fmt::Display for ZedVersion { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -32,13 +32,13 @@ impl ZedVersion { pub fn can_collaborate(&self) -> bool { // v0.204.1 was the first version after the auto-update bug. // We reject any clients older than that to hope we can persuade them to upgrade. - if self.0 < SemanticVersion::new(0, 204, 1) { + if self.0 < Version::new(0, 204, 1) { return false; } // Since we hotfixed the changes to no longer connect to Collab automatically to Preview, we also need to reject // versions in the range [v0.199.0, v0.199.1]. 
- if self.0 >= SemanticVersion::new(0, 199, 0) && self.0 < SemanticVersion::new(0, 199, 2) { + if self.0 >= Version::new(0, 199, 0) && self.0 < Version::new(0, 199, 2) { return false; } diff --git a/crates/collab/src/tests/remote_editing_collaboration_tests.rs b/crates/collab/src/tests/remote_editing_collaboration_tests.rs index e5cc506bbca8b0a4a2fca972df61d373a288702c..04403de9fa0883e9d738f3d96b9b2acdf1d66967 100644 --- a/crates/collab/src/tests/remote_editing_collaboration_tests.rs +++ b/crates/collab/src/tests/remote_editing_collaboration_tests.rs @@ -7,10 +7,7 @@ use debugger_ui::debugger_panel::DebugPanel; use extension::ExtensionHostProxy; use fs::{FakeFs, Fs as _, RemoveOptions}; use futures::StreamExt as _; -use gpui::{ - AppContext as _, BackgroundExecutor, SemanticVersion, TestAppContext, UpdateGlobal as _, - VisualContext, -}; +use gpui::{AppContext as _, BackgroundExecutor, TestAppContext, UpdateGlobal as _, VisualContext}; use http_client::BlockedHttpClient; use language::{ FakeLspAdapter, Language, LanguageConfig, LanguageMatcher, LanguageRegistry, @@ -43,10 +40,10 @@ async fn test_sharing_an_ssh_remote_project( ) { let executor = cx_a.executor(); cx_a.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); let mut server = TestServer::start(executor.clone()).await; let client_a = server.create_client(cx_a, "user_a").await; @@ -211,10 +208,10 @@ async fn test_ssh_collaboration_git_branches( server_cx.set_name("server"); cx_a.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); let mut server = TestServer::start(executor.clone()).await; @@ -396,10 +393,10 @@ async fn test_ssh_collaboration_formatting_with_prettier( server_cx.set_name("server"); cx_a.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); let mut server = TestServer::start(executor.clone()).await; @@ -583,13 +580,13 @@ async fn test_remote_server_debugger( executor: BackgroundExecutor, ) { cx_a.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); command_palette_hooks::init(cx); zlog::init_test(); dap_adapters::init(cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); dap_adapters::init(cx); }); let (opts, server_ssh) = RemoteClient::fake_server(cx_a, server_cx); @@ -691,13 +688,13 @@ async fn test_slow_adapter_startup_retries( executor: BackgroundExecutor, ) { cx_a.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); command_palette_hooks::init(cx); zlog::init_test(); dap_adapters::init(cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); dap_adapters::init(cx); }); let (opts, server_ssh) = 
RemoteClient::fake_server(cx_a, server_cx); diff --git a/crates/collab/src/tests/test_server.rs b/crates/collab/src/tests/test_server.rs index 0afaccf80cae609ae3264b9b5e374964b01b6f38..959d54cf0864ccddf7273cca0276d18d4f59308b 100644 --- a/crates/collab/src/tests/test_server.rs +++ b/crates/collab/src/tests/test_server.rs @@ -31,7 +31,6 @@ use rpc::{ RECEIVE_TIMEOUT, proto::{self, ChannelRole}, }; -use semantic_version::SemanticVersion; use serde_json::json; use session::{AppSession, Session}; use settings::SettingsStore; @@ -173,7 +172,7 @@ impl TestServer { let settings = SettingsStore::test(cx); cx.set_global(settings); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); let clock = Arc::new(FakeSystemClock::new()); @@ -295,7 +294,7 @@ impl TestServer { server_conn, client_name, Principal::User(user), - ZedVersion(SemanticVersion::new(1, 0, 0)), + ZedVersion(semver::Version::new(1, 0, 0)), Some("test".to_string()), None, None, diff --git a/crates/editor/Cargo.toml b/crates/editor/Cargo.toml index 62226f5dec2aa88f0ccdb6ad59935f6bdfe6536e..bd3ac75649ce705f98f7fa1c2616dc2bcf152642 100644 --- a/crates/editor/Cargo.toml +++ b/crates/editor/Cargo.toml @@ -106,6 +106,7 @@ multi_buffer = { workspace = true, features = ["test-support"] } project = { workspace = true, features = ["test-support"] } release_channel.workspace = true rand.workspace = true +semver.workspace = true settings = { workspace = true, features = ["test-support"] } tempfile.workspace = true text = { workspace = true, features = ["test-support"] } diff --git a/crates/editor/benches/editor_render.rs b/crates/editor/benches/editor_render.rs index cb7f37810aad04f8c6a73440c7da93658224ba26..4323c6c973f3729623d8939ca89ecf3ac403bcbf 100644 --- a/crates/editor/benches/editor_render.rs +++ b/crates/editor/benches/editor_render.rs @@ -123,7 +123,7 @@ pub fn benches() { cx.set_global(store); assets::Assets.load_test_fonts(cx); theme::init(theme::LoadThemes::JustBase, cx); - // release_channel::init(SemanticVersion::default(), cx); + // release_channel::init(semver::Version::new(0,0,0), cx); editor::init(cx); }); diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 2bd1316371449d8f4b7e4c428e5e6c7c27f43457..119c7058e061406b4a75017b6c5c8717f9f250c0 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -17,8 +17,8 @@ use buffer_diff::{BufferDiff, DiffHunkSecondaryStatus, DiffHunkStatus, DiffHunkS use collections::HashMap; use futures::{StreamExt, channel::oneshot}; use gpui::{ - BackgroundExecutor, DismissEvent, Rgba, SemanticVersion, TestAppContext, UpdateGlobal, - VisualTestContext, WindowBounds, WindowOptions, div, + BackgroundExecutor, DismissEvent, Rgba, TestAppContext, UpdateGlobal, VisualTestContext, + WindowBounds, WindowOptions, div, }; use indoc::indoc; use language::{ @@ -26303,7 +26303,7 @@ pub(crate) fn init_test(cx: &mut TestAppContext, f: fn(&mut AllLanguageSettingsC let store = SettingsStore::test(cx); cx.set_global(store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); crate::init(cx); }); zlog::init_test(); diff --git a/crates/editor/src/inlays/inlay_hints.rs b/crates/editor/src/inlays/inlay_hints.rs index b30137cf7796d6c916623d100420ac34eb80224a..cd9456be7a109ce5c2535339bc153bb5434ab94f 100644 --- 
a/crates/editor/src/inlays/inlay_hints.rs +++ b/crates/editor/src/inlays/inlay_hints.rs @@ -941,7 +941,7 @@ pub mod tests { use crate::{ExcerptRange, scroll::Autoscroll}; use collections::HashSet; use futures::{StreamExt, future}; - use gpui::{AppContext as _, Context, SemanticVersion, TestAppContext, WindowHandle}; + use gpui::{AppContext as _, Context, TestAppContext, WindowHandle}; use itertools::Itertools as _; use language::language_settings::InlayHintKind; use language::{Capability, FakeLspAdapter}; @@ -4062,7 +4062,7 @@ let c = 3;"# let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); crate::init(cx); }); diff --git a/crates/eval/src/eval.rs b/crates/eval/src/eval.rs index 3166a7321782069153deb74b90e6b5b71fc99e06..80633696b7d5e655bb7db3627568b881642cf62c 100644 --- a/crates/eval/src/eval.rs +++ b/crates/eval/src/eval.rs @@ -25,7 +25,7 @@ use language_model::{ConfiguredModel, LanguageModel, LanguageModelRegistry, Sele use node_runtime::{NodeBinaryOptions, NodeRuntime}; use project::project_settings::ProjectSettings; use prompt_store::PromptBuilder; -use release_channel::AppVersion; +use release_channel::{AppCommitSha, AppVersion}; use reqwest_client::ReqwestClient; use settings::{Settings, SettingsStore}; use std::cell::RefCell; @@ -347,8 +347,15 @@ pub struct AgentAppState { } pub fn init(cx: &mut App) -> Arc { - let app_version = AppVersion::load(env!("ZED_PKG_VERSION")); - release_channel::init(app_version, cx); + let app_commit_sha = option_env!("ZED_COMMIT_SHA").map(|s| AppCommitSha::new(s.to_owned())); + + let app_version = AppVersion::load( + env!("ZED_PKG_VERSION"), + option_env!("ZED_BUILD_ID"), + app_commit_sha, + ); + + release_channel::init(app_version.clone(), cx); gpui_tokio::init(cx); let settings_store = SettingsStore::new(cx, &settings::default_settings()); diff --git a/crates/extension/Cargo.toml b/crates/extension/Cargo.toml index 09492027a1bb59770e3ac70166f042cae8e22d29..ed084a0c1c0d6ea237dd06391330d8e41917b574 100644 --- a/crates/extension/Cargo.toml +++ b/crates/extension/Cargo.toml @@ -26,7 +26,7 @@ log.workspace = true lsp.workspace = true parking_lot.workspace = true proto.workspace = true -semantic_version.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true task.workspace = true diff --git a/crates/extension/src/extension.rs b/crates/extension/src/extension.rs index bd2b37c337dcaca448e2175472ea46c126d2f9a3..88f2bea0c0c68480a2ad67f536ecf9d465a6a9ae 100644 --- a/crates/extension/src/extension.rs +++ b/crates/extension/src/extension.rs @@ -14,7 +14,7 @@ use async_trait::async_trait; use fs::normalize_path; use gpui::{App, Task}; use language::LanguageName; -use semantic_version::SemanticVersion; +use semver::Version; use task::{SpawnInTerminal, ZedDebugConfig}; use util::rel_path::RelPath; @@ -170,10 +170,7 @@ pub trait Extension: Send + Sync + 'static { ) -> Result; } -pub fn parse_wasm_extension_version( - extension_id: &str, - wasm_bytes: &[u8], -) -> Result { +pub fn parse_wasm_extension_version(extension_id: &str, wasm_bytes: &[u8]) -> Result { let mut version = None; for part in wasmparser::Parser::new(0).parse_all(wasm_bytes) { @@ -200,9 +197,9 @@ pub fn parse_wasm_extension_version( version.with_context(|| format!("extension {extension_id} has no zed:api-version section")) } -fn parse_wasm_extension_version_custom_section(data: 
&[u8]) -> Option { +fn parse_wasm_extension_version_custom_section(data: &[u8]) -> Option { if data.len() == 6 { - Some(SemanticVersion::new( + Some(Version::new( u16::from_be_bytes([data[0], data[1]]) as _, u16::from_be_bytes([data[2], data[3]]) as _, u16::from_be_bytes([data[4], data[5]]) as _, diff --git a/crates/extension/src/extension_manifest.rs b/crates/extension/src/extension_manifest.rs index 11cefa339b24f8d6707c0f683ec38b50394c6a9e..4ecdd378ca86dbee263e439e13fa4776dab9e316 100644 --- a/crates/extension/src/extension_manifest.rs +++ b/crates/extension/src/extension_manifest.rs @@ -3,7 +3,7 @@ use collections::{BTreeMap, HashMap}; use fs::Fs; use language::LanguageName; use lsp::LanguageServerName; -use semantic_version::SemanticVersion; +use semver::Version; use serde::{Deserialize, Serialize}; use std::{ ffi::OsStr, @@ -137,7 +137,7 @@ pub fn build_debug_adapter_schema_path( #[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)] pub struct LibManifestEntry { pub kind: Option, - pub version: Option, + pub version: Option, } #[derive(Clone, PartialEq, Eq, Debug, Deserialize, Serialize)] diff --git a/crates/extension_host/Cargo.toml b/crates/extension_host/Cargo.toml index 16cbd9ac0c0ef938322f2b57789c7542549a570a..328b808b1310e3402405c52ce27a8ae15c4d5ece 100644 --- a/crates/extension_host/Cargo.toml +++ b/crates/extension_host/Cargo.toml @@ -38,7 +38,7 @@ paths.workspace = true project.workspace = true remote.workspace = true release_channel.workspace = true -semantic_version.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true serde_json_lenient.workspace = true diff --git a/crates/extension_host/benches/extension_compilation_benchmark.rs b/crates/extension_host/benches/extension_compilation_benchmark.rs index 9cb57fc1fb800df3f20d277cff5c85ecddadf5ad..c3459cf116b5510bc98fd167ea32c242bd48ad9a 100644 --- a/crates/extension_host/benches/extension_compilation_benchmark.rs +++ b/crates/extension_host/benches/extension_compilation_benchmark.rs @@ -8,7 +8,7 @@ use extension::{ }; use extension_host::wasm_host::WasmHost; use fs::RealFs; -use gpui::{SemanticVersion, TestAppContext, TestDispatcher}; +use gpui::{TestAppContext, TestDispatcher}; use http_client::{FakeHttpClient, Response}; use node_runtime::NodeRuntime; use rand::{SeedableRng, rngs::StdRng}; @@ -54,7 +54,7 @@ fn init() -> TestAppContext { cx.update(|cx| { let store = SettingsStore::test(cx); cx.set_global(store); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); cx @@ -124,7 +124,7 @@ fn manifest() -> ExtensionManifest { icon_themes: Vec::new(), lib: LibManifestEntry { kind: Some(ExtensionLibraryKind::Rust), - version: Some(SemanticVersion::new(0, 1, 0)), + version: Some(semver::Version::new(0, 1, 0)), }, languages: Vec::new(), grammars: BTreeMap::default(), diff --git a/crates/extension_host/src/extension_host.rs b/crates/extension_host/src/extension_host.rs index bf28ee1acd29337f21cf290e886a353b943d3ac6..ecbe8c99f0f7e6c80065a230b9c00c5476e7b689 100644 --- a/crates/extension_host/src/extension_host.rs +++ b/crates/extension_host/src/extension_host.rs @@ -44,7 +44,7 @@ use node_runtime::NodeRuntime; use project::ContextProviderWithTasks; use release_channel::ReleaseChannel; use remote::RemoteClient; -use semantic_version::SemanticVersion; +use semver::Version; use serde::{Deserialize, Serialize}; use settings::Settings; use std::ops::RangeInclusive; @@ -98,7 +98,7 @@ pub fn is_version_compatible( 
.manifest .wasm_api_version .as_ref() - .and_then(|wasm_api_version| SemanticVersion::from_str(wasm_api_version).ok()) + .and_then(|wasm_api_version| Version::from_str(wasm_api_version).ok()) && !is_supported_wasm_api_version(release_channel, wasm_api_version) { return false; @@ -639,9 +639,8 @@ impl ExtensionStore { this.extension_index.extensions.get(&extension.id) { let installed_version = - SemanticVersion::from_str(&installed_extension.manifest.version).ok()?; - let latest_version = - SemanticVersion::from_str(&extension.manifest.version).ok()?; + Version::from_str(&installed_extension.manifest.version).ok()?; + let latest_version = Version::from_str(&extension.manifest.version).ok()?; if installed_version >= latest_version { return None; diff --git a/crates/extension_host/src/extension_store_test.rs b/crates/extension_host/src/extension_store_test.rs index 7ba368c667ef5bdd37a09b53b697a062a2a0fc8b..85a3a720ce8c62fc4317756ec264926c981864c4 100644 --- a/crates/extension_host/src/extension_store_test.rs +++ b/crates/extension_host/src/extension_store_test.rs @@ -8,7 +8,7 @@ use collections::{BTreeMap, HashSet}; use extension::ExtensionHostProxy; use fs::{FakeFs, Fs, RealFs}; use futures::{AsyncReadExt, StreamExt, io::BufReader}; -use gpui::{AppContext as _, SemanticVersion, TestAppContext}; +use gpui::{AppContext as _, TestAppContext}; use http_client::{FakeHttpClient, Response}; use language::{BinaryStatus, LanguageMatcher, LanguageName, LanguageRegistry}; use language_extension::LspAccess; @@ -866,7 +866,7 @@ fn init_test(cx: &mut TestAppContext) { cx.update(|cx| { let store = SettingsStore::test(cx); cx.set_global(store); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); extension::init(cx); theme::init(theme::LoadThemes::JustBase, cx); gpui_tokio::init(cx); diff --git a/crates/extension_host/src/wasm_host.rs b/crates/extension_host/src/wasm_host.rs index 1fe8d0117f36b25a5a83fe574006adeb7f7ef035..cecaf2039bc6dc049ece1177700a14eead3d86bc 100644 --- a/crates/extension_host/src/wasm_host.rs +++ b/crates/extension_host/src/wasm_host.rs @@ -28,7 +28,7 @@ use lsp::LanguageServerName; use moka::sync::Cache; use node_runtime::NodeRuntime; use release_channel::ReleaseChannel; -use semantic_version::SemanticVersion; +use semver::Version; use settings::Settings; use std::{ borrow::Cow, @@ -68,7 +68,7 @@ pub struct WasmExtension { pub manifest: Arc, pub work_dir: Arc, #[allow(unused)] - pub zed_api_version: SemanticVersion, + pub zed_api_version: Version, _task: Arc>>, } @@ -630,7 +630,7 @@ impl WasmHost { &executor, &mut store, this.release_channel, - zed_api_version, + zed_api_version.clone(), &component, ) .await?; @@ -713,10 +713,7 @@ impl WasmHost { } } -pub fn parse_wasm_extension_version( - extension_id: &str, - wasm_bytes: &[u8], -) -> Result { +pub fn parse_wasm_extension_version(extension_id: &str, wasm_bytes: &[u8]) -> Result { let mut version = None; for part in wasmparser::Parser::new(0).parse_all(wasm_bytes) { @@ -743,9 +740,9 @@ pub fn parse_wasm_extension_version( version.with_context(|| format!("extension {extension_id} has no zed:api-version section")) } -fn parse_wasm_extension_version_custom_section(data: &[u8]) -> Option { +fn parse_wasm_extension_version_custom_section(data: &[u8]) -> Option { if data.len() == 6 { - Some(SemanticVersion::new( + Some(Version::new( u16::from_be_bytes([data[0], data[1]]) as _, u16::from_be_bytes([data[2], data[3]]) as _, u16::from_be_bytes([data[4], data[5]]) as _, diff --git 
a/crates/extension_host/src/wasm_host/wit.rs b/crates/extension_host/src/wasm_host/wit.rs index 1f1fa49bd535ad19f4981eeed9fcdca1ba9421a9..4c88af1b0a023441b237a26e5c14f1e6f0d0102d 100644 --- a/crates/extension_host/src/wasm_host/wit.rs +++ b/crates/extension_host/src/wasm_host/wit.rs @@ -19,7 +19,7 @@ use crate::wasm_host::wit::since_v0_6_0::dap::StartDebuggingRequestArgumentsRequ use super::{WasmState, wasm_engine}; use anyhow::{Context as _, Result, anyhow}; -use semantic_version::SemanticVersion; +use semver::Version; use since_v0_6_0 as latest; use std::{ops::RangeInclusive, path::PathBuf, sync::Arc}; use wasmtime::{ @@ -54,16 +54,13 @@ fn wasi_view(state: &mut WasmState) -> &mut WasmState { } /// Returns whether the given Wasm API version is supported by the Wasm host. -pub fn is_supported_wasm_api_version( - release_channel: ReleaseChannel, - version: SemanticVersion, -) -> bool { +pub fn is_supported_wasm_api_version(release_channel: ReleaseChannel, version: Version) -> bool { wasm_api_version_range(release_channel).contains(&version) } /// Returns the Wasm API version range that is supported by the Wasm host. #[inline(always)] -pub fn wasm_api_version_range(release_channel: ReleaseChannel) -> RangeInclusive { +pub fn wasm_api_version_range(release_channel: ReleaseChannel) -> RangeInclusive { // Note: The release channel can be used to stage a new version of the extension API. let _ = release_channel; @@ -114,7 +111,7 @@ impl Extension { executor: &BackgroundExecutor, store: &mut Store, release_channel: ReleaseChannel, - version: SemanticVersion, + version: Version, component: &Component, ) -> Result { // Note: The release channel can be used to stage a new version of the extension API. diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_0_1.rs b/crates/extension_host/src/wasm_host/wit/since_v0_0_1.rs index 168dea4a22b4d836277860eebd74c19ed9d31847..17d5c00a9ad08507bbad39190fdfe5134fe77aa1 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_0_1.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_0_1.rs @@ -5,11 +5,11 @@ use anyhow::Result; use extension::{ExtensionLanguageServerProxy, WorktreeDelegate}; use gpui::BackgroundExecutor; use language::BinaryStatus; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 0, 1); +pub const MIN_VERSION: Version = Version::new(0, 0, 1); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_0_4.rs b/crates/extension_host/src/wasm_host/wit/since_v0_0_4.rs index 31f752080b6eabe8cf63e38c002fa145b061fb13..11b2e9f66187ea04983b83ace5814620e7ae7f53 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_0_4.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_0_4.rs @@ -3,11 +3,11 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::WorktreeDelegate; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 0, 4); +pub const MIN_VERSION: Version = Version::new(0, 0, 4); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_0_6.rs b/crates/extension_host/src/wasm_host/wit/since_v0_0_6.rs index 2fc29abadb2eb60d051b37e072727931aee72d69..835a2b30fbadd3d54649d075b588fd79532c5186 
100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_0_6.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_0_6.rs @@ -3,11 +3,11 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::WorktreeDelegate; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 0, 6); +pub const MIN_VERSION: Version = Version::new(0, 0, 6); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_1_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_1_0.rs index 6e6eca975d92f9c8cf5eb206f04da5fccc3f097c..a7a20f6dc7f1dbedddf34a13032887adf5b61a6e 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_1_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_1_0.rs @@ -11,7 +11,7 @@ use gpui::BackgroundExecutor; use language::LanguageName; use language::{BinaryStatus, language_settings::AllLanguageSettings}; use project::project_settings::ProjectSettings; -use semantic_version::SemanticVersion; +use semver::Version; use std::{ path::{Path, PathBuf}, sync::{Arc, OnceLock}, @@ -23,7 +23,7 @@ use wasmtime::component::{Linker, Resource}; use super::latest; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 1, 0); +pub const MIN_VERSION: Version = Version::new(0, 1, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_2_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_2_0.rs index 9475438b660d2e126ae6ca24d276795d51d4ce8b..05e3f5a4e7e2997bb40699c2ac8b7e02c71b1a77 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_2_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_2_0.rs @@ -2,13 +2,13 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::{KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate}; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; use super::latest; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 2, 0); +pub const MIN_VERSION: Version = Version::new(0, 2, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_3_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_3_0.rs index b6a75ba7dda6ded2e074a2ece35b4b3f881f1619..08393934fe365640ed4c82172a33a71381edbc54 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_3_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_3_0.rs @@ -2,13 +2,13 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::{KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate}; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; use super::latest; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 3, 0); +pub const MIN_VERSION: Version = Version::new(0, 3, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_4_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_4_0.rs index 7c8be1322f94e35ded911d64e13f5afb4bf3702c..1b2a95023b611d9366b47faeb9b3a43c81cc24e7 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_4_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_4_0.rs 
@@ -2,13 +2,13 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::{KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate}; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; use super::latest; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 4, 0); +pub const MIN_VERSION: Version = Version::new(0, 4, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_5_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_5_0.rs index 6d04663de7772e9c965cf1b88840727cfdcb4b59..23701c9d03f3dccd908a06d90d4d1fe11e74af5c 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_5_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_5_0.rs @@ -2,13 +2,13 @@ use crate::wasm_host::WasmState; use anyhow::Result; use extension::{KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate}; use gpui::BackgroundExecutor; -use semantic_version::SemanticVersion; +use semver::Version; use std::sync::{Arc, OnceLock}; use wasmtime::component::{Linker, Resource}; use super::latest; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 5, 0); +pub const MIN_VERSION: Version = Version::new(0, 5, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs index bb16ab879eac90b7a943b02f5f97dfc004167ea0..dd0548d9d182e4b81e8490476eef2420f0e6c13d 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs @@ -21,7 +21,7 @@ use futures::{FutureExt as _, io::BufReader}; use gpui::{BackgroundExecutor, SharedString}; use language::{BinaryStatus, LanguageName, language_settings::AllLanguageSettings}; use project::project_settings::ProjectSettings; -use semantic_version::SemanticVersion; +use semver::Version; use std::{ env, net::Ipv4Addr, @@ -36,8 +36,8 @@ use util::{ }; use wasmtime::component::{Linker, Resource}; -pub const MIN_VERSION: SemanticVersion = SemanticVersion::new(0, 6, 0); -pub const MAX_VERSION: SemanticVersion = SemanticVersion::new(0, 7, 0); +pub const MIN_VERSION: Version = Version::new(0, 6, 0); +pub const MAX_VERSION: Version = Version::new(0, 7, 0); wasmtime::component::bindgen!({ async: true, diff --git a/crates/extensions_ui/Cargo.toml b/crates/extensions_ui/Cargo.toml index 87c76b684725dd9f88031d70c67bff76670cdcf5..707938a9eb83209149a261fbe4c8cf9d6ca4e91e 100644 --- a/crates/extensions_ui/Cargo.toml +++ b/crates/extensions_ui/Cargo.toml @@ -28,7 +28,7 @@ num-format.workspace = true picker.workspace = true project.workspace = true release_channel.workspace = true -semantic_version.workspace = true +semver.workspace = true serde.workspace = true settings.workspace = true smallvec.workspace = true diff --git a/crates/extensions_ui/src/extension_version_selector.rs b/crates/extensions_ui/src/extension_version_selector.rs index d38c27375f6c32324d4832d308768af8473869eb..17d293da76d4831e30af8ed7d26d2df4c7706716 100644 --- a/crates/extensions_ui/src/extension_version_selector.rs +++ b/crates/extensions_ui/src/extension_version_selector.rs @@ -8,7 +8,7 @@ use fuzzy::{StringMatch, StringMatchCandidate, match_strings}; use gpui::{App, DismissEvent, Entity, EventEmitter, Focusable, Task, WeakEntity, prelude::*}; use picker::{Picker, PickerDelegate}; use release_channel::ReleaseChannel; -use 
semantic_version::SemanticVersion; +use semver::Version; use settings::update_settings_file; use ui::{HighlightedLabel, ListItem, ListItemSpacing, prelude::*}; use util::ResultExt; @@ -60,8 +60,8 @@ impl ExtensionVersionSelectorDelegate { mut extension_versions: Vec, ) -> Self { extension_versions.sort_unstable_by(|a, b| { - let a_version = SemanticVersion::from_str(&a.manifest.version); - let b_version = SemanticVersion::from_str(&b.manifest.version); + let a_version = Version::from_str(&a.manifest.version); + let b_version = Version::from_str(&b.manifest.version); match (a_version, b_version) { (Ok(a_version), Ok(b_version)) => b_version.cmp(&a_version), diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml index f8c668cbe71ccce399987cb6887abd5e9ef5cb92..4985cc07383aac56d6975fa09a410a0cee6c549d 100644 --- a/crates/gpui/Cargo.toml +++ b/crates/gpui/Cargo.toml @@ -121,7 +121,7 @@ usvg = { version = "0.45.0", default-features = false } util_macros.workspace = true schemars.workspace = true seahash = "4.1" -semantic_version.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true slotmap.workspace = true diff --git a/crates/gpui/src/platform.rs b/crates/gpui/src/platform.rs index caedf0317f21b9bceb548b31543da2f33bfac254..370043fb6b8ec7f5df251931d1363f577327caaa 100644 --- a/crates/gpui/src/platform.rs +++ b/crates/gpui/src/platform.rs @@ -76,7 +76,6 @@ pub use keystroke::*; pub(crate) use linux::*; #[cfg(target_os = "macos")] pub(crate) use mac::*; -pub use semantic_version::SemanticVersion; #[cfg(any(test, feature = "test-support"))] pub(crate) use test::*; #[cfg(target_os = "windows")] diff --git a/crates/gpui/src/platform/mac/platform.rs b/crates/gpui/src/platform/mac/platform.rs index 7f71d4f164b4974675af5f0d1df5a1f5bb34b7de..c2363afe270f973513c8ba696bf5d3f99fb92cad 100644 --- a/crates/gpui/src/platform/mac/platform.rs +++ b/crates/gpui/src/platform/mac/platform.rs @@ -9,8 +9,7 @@ use crate::{ CursorStyle, ForegroundExecutor, Image, ImageFormat, KeyContext, Keymap, MacDispatcher, MacDisplay, MacWindow, Menu, MenuItem, OsMenu, OwnedMenu, PathPromptOptions, Platform, PlatformDisplay, PlatformKeyboardLayout, PlatformKeyboardMapper, PlatformTextSystem, - PlatformWindow, Result, SemanticVersion, SystemMenuType, Task, WindowAppearance, WindowParams, - hash, + PlatformWindow, Result, SystemMenuType, Task, WindowAppearance, WindowParams, hash, }; use anyhow::{Context as _, anyhow}; use block::ConcreteBlock; @@ -47,6 +46,7 @@ use objc::{ }; use parking_lot::Mutex; use ptr::null_mut; +use semver::Version; use std::{ cell::Cell, convert::TryInto, @@ -389,7 +389,7 @@ impl MacPlatform { ns_string(key_to_native(keystroke.key()).as_ref()), ) .autorelease(); - if Self::os_version() >= SemanticVersion::new(12, 0, 0) { + if Self::os_version() >= Version::new(12, 0, 0) { let _: () = msg_send![item, setAllowsAutomaticKeyEquivalentLocalization: NO]; } item.setKeyEquivalentModifierMask_(mask); @@ -452,15 +452,15 @@ impl MacPlatform { } } - fn os_version() -> SemanticVersion { + fn os_version() -> Version { let version = unsafe { let process_info = NSProcessInfo::processInfo(nil); process_info.operatingSystemVersion() }; - SemanticVersion::new( - version.majorVersion as usize, - version.minorVersion as usize, - version.patchVersion as usize, + Version::new( + version.majorVersion, + version.minorVersion, + version.patchVersion, ) } } @@ -668,7 +668,7 @@ impl Platform for MacPlatform { // API only available post Monterey // 
https://developer.apple.com/documentation/appkit/nsworkspace/3753004-setdefaultapplicationaturl let (done_tx, done_rx) = oneshot::channel(); - if Self::os_version() < SemanticVersion::new(12, 0, 0) { + if Self::os_version() < Version::new(12, 0, 0) { return Task::ready(Err(anyhow!( "macOS 12.0 or later is required to register URL schemes" ))); @@ -812,7 +812,7 @@ impl Platform for MacPlatform { // to break that use-case than breaking `a.sql`. if chunks.len() == 3 && chunks[1].starts_with(chunks[2]) - && Self::os_version() >= SemanticVersion::new(15, 0, 0) + && Self::os_version() >= Version::new(15, 0, 0) { let new_filename = OsStr::from_bytes( &filename.as_bytes() diff --git a/crates/language_models/Cargo.toml b/crates/language_models/Cargo.toml index 7d4cd3a618d725429a3979951f04445b5a1fc8eb..6c5704312d94e2c98ff62c49d3d5b57c1b274057 100644 --- a/crates/language_models/Cargo.toml +++ b/crates/language_models/Cargo.toml @@ -46,6 +46,7 @@ open_router = { workspace = true, features = ["schemars"] } partial-json-fixer.workspace = true release_channel.workspace = true schemars.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true diff --git a/crates/language_models/src/provider/cloud.rs b/crates/language_models/src/provider/cloud.rs index a9ff767146287db25fb0b42685525fd56d29d71e..a19a427dbacb32883b1877888ec04899a2b8d427 100644 --- a/crates/language_models/src/provider/cloud.rs +++ b/crates/language_models/src/provider/cloud.rs @@ -15,9 +15,7 @@ use futures::{ AsyncBufReadExt, FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream, }; use google_ai::GoogleModelMode; -use gpui::{ - AnyElement, AnyView, App, AsyncApp, Context, Entity, SemanticVersion, Subscription, Task, -}; +use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Task}; use http_client::http::{HeaderMap, HeaderValue}; use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode}; use language_model::{ @@ -30,6 +28,7 @@ use language_model::{ }; use release_channel::AppVersion; use schemars::JsonSchema; +use semver::Version; use serde::{Deserialize, Serialize, de::DeserializeOwned}; use settings::SettingsStore; pub use settings::ZedDotDevAvailableModel as AvailableModel; @@ -384,7 +383,7 @@ impl CloudLanguageModel { async fn perform_llm_completion( client: Arc, llm_api_token: LlmApiToken, - app_version: Option, + app_version: Option, body: CompletionBody, ) -> Result { let http_client = &client.http_client(); @@ -396,7 +395,7 @@ impl CloudLanguageModel { let request = http_client::Request::builder() .method(Method::POST) .uri(http_client.build_zed_llm_url("/completions", &[])?.as_ref()) - .when_some(app_version, |builder, app_version| { + .when_some(app_version.as_ref(), |builder, app_version| { builder.header(ZED_VERSION_HEADER_NAME, app_version.to_string()) }) .header("Content-Type", "application/json") diff --git a/crates/language_tools/Cargo.toml b/crates/language_tools/Cargo.toml index d251a297d4d0fd71b9c464230e2180c0e34fdfa4..6181975a9be8a23a92acb48f74a2c17b17a8d6ff 100644 --- a/crates/language_tools/Cargo.toml +++ b/crates/language_tools/Cargo.toml @@ -39,5 +39,6 @@ zed_actions.workspace = true editor = { workspace = true, features = ["test-support"] } release_channel.workspace = true gpui = { workspace = true, features = ["test-support"] } +semver.workspace = true util = { workspace = true, features = ["test-support"] } zlog.workspace = true diff --git a/crates/language_tools/src/lsp_log_view_tests.rs 
b/crates/language_tools/src/lsp_log_view_tests.rs index 16b3ff7ed6f4ff9f5ac7ffa9aaa89ad3f1cb2932..0b4516f5d052260ac4274e9afe14d3bc1a5ef8ee 100644 --- a/crates/language_tools/src/lsp_log_view_tests.rs +++ b/crates/language_tools/src/lsp_log_view_tests.rs @@ -4,7 +4,7 @@ use crate::lsp_log_view::LogMenuItem; use super::*; use futures::StreamExt; -use gpui::{AppContext as _, SemanticVersion, TestAppContext, VisualTestContext}; +use gpui::{AppContext as _, TestAppContext, VisualTestContext}; use language::{FakeLspAdapter, Language, LanguageConfig, LanguageMatcher, tree_sitter_rust}; use lsp::LanguageServerName; use project::{ @@ -110,6 +110,6 @@ fn init_test(cx: &mut gpui::TestAppContext) { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); } diff --git a/crates/lsp/Cargo.toml b/crates/lsp/Cargo.toml index 39a86547f29c90f507bbd908f3af3a2c1a0cdec8..9533ddb600b18213de4d6e50599c62aa182b9b8a 100644 --- a/crates/lsp/Cargo.toml +++ b/crates/lsp/Cargo.toml @@ -36,5 +36,6 @@ release_channel.workspace = true async-pipe.workspace = true ctor.workspace = true gpui = { workspace = true, features = ["test-support"] } +semver.workspace = true util = { workspace = true, features = ["test-support"] } zlog.workspace = true diff --git a/crates/lsp/src/lsp.rs b/crates/lsp/src/lsp.rs index af6760a36817ed4857ad070768c645832f053ca8..05771b8ce5db870a41228f81e4aac8222b11ad53 100644 --- a/crates/lsp/src/lsp.rs +++ b/crates/lsp/src/lsp.rs @@ -1852,7 +1852,7 @@ impl FakeLanguageServer { #[cfg(test)] mod tests { use super::*; - use gpui::{SemanticVersion, TestAppContext}; + use gpui::TestAppContext; use std::str::FromStr; #[ctor::ctor] @@ -1863,7 +1863,7 @@ mod tests { #[gpui::test] async fn test_fake(cx: &mut TestAppContext) { cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); let (server, mut fake) = FakeLanguageServer::new( LanguageServerId(0), diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index cf13879471969f2fd459c473e72bd7e115589799..d42859de5d5491d4a5388d311266e22962889f35 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -20,7 +20,7 @@ use git::{ status::{StatusCode, TrackedStatus}, }; use git2::RepositoryInitOptions; -use gpui::{App, BackgroundExecutor, FutureExt, SemanticVersion, UpdateGlobal}; +use gpui::{App, BackgroundExecutor, FutureExt, UpdateGlobal}; use itertools::Itertools; use language::{ Diagnostic, DiagnosticEntry, DiagnosticEntryRef, DiagnosticSet, DiagnosticSourceKind, @@ -10346,7 +10346,7 @@ pub fn init_test(cx: &mut gpui::TestAppContext) { cx.update(|cx| { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); } diff --git a/crates/project_symbols/Cargo.toml b/crates/project_symbols/Cargo.toml index 034e95de8245c59624e7a5e6be3665eb869d8563..83e3cb587d46a5bddf1c8b30c593c18a9b131ad2 100644 --- a/crates/project_symbols/Cargo.toml +++ b/crates/project_symbols/Cargo.toml @@ -34,6 +34,7 @@ language = { workspace = true, features = ["test-support"] } lsp = { workspace = true, features = ["test-support"] } project = { workspace = true, features = ["test-support"] } release_channel.workspace = true 
+semver.workspace = true settings = { workspace = true, features = ["test-support"] } theme = { workspace = true, features = ["test-support"] } workspace = { workspace = true, features = ["test-support"] } diff --git a/crates/project_symbols/src/project_symbols.rs b/crates/project_symbols/src/project_symbols.rs index 245f43e11a22bb3980dbbd7936109db2546287d5..95926116b7b450e36e8c53e192f95dfec76f1f00 100644 --- a/crates/project_symbols/src/project_symbols.rs +++ b/crates/project_symbols/src/project_symbols.rs @@ -289,7 +289,7 @@ impl PickerDelegate for ProjectSymbolsDelegate { mod tests { use super::*; use futures::StreamExt; - use gpui::{SemanticVersion, TestAppContext, VisualContext}; + use gpui::{TestAppContext, VisualContext}; use language::{FakeLspAdapter, Language, LanguageConfig, LanguageMatcher}; use lsp::OneOf; use project::FakeFs; @@ -438,7 +438,7 @@ mod tests { let store = SettingsStore::test(cx); cx.set_global(store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); editor::init(cx); }); } diff --git a/crates/recent_projects/Cargo.toml b/crates/recent_projects/Cargo.toml index 51e55d94ea20e6c929c1424243b96b27c11ab1df..abaeafa335fd48991da46268ccd59450e908528c 100644 --- a/crates/recent_projects/Cargo.toml +++ b/crates/recent_projects/Cargo.toml @@ -32,6 +32,7 @@ picker.workspace = true project.workspace = true release_channel.workspace = true remote.workspace = true +semver.workspace = true serde.workspace = true settings.workspace = true smol.workspace = true diff --git a/crates/recent_projects/src/remote_connections.rs b/crates/recent_projects/src/remote_connections.rs index 50d7912b80d0842854c36810378c5f8abbf7a2f7..562fcccb204212fb43e0b9457b1c08bdb15c3772 100644 --- a/crates/recent_projects/src/remote_connections.rs +++ b/crates/recent_projects/src/remote_connections.rs @@ -11,8 +11,7 @@ use extension_host::ExtensionStore; use futures::channel::oneshot; use gpui::{ AnyWindowHandle, App, AsyncApp, DismissEvent, Entity, EventEmitter, Focusable, FontFeatures, - ParentElement as _, PromptLevel, Render, SemanticVersion, SharedString, Task, - TextStyleRefinement, WeakEntity, + ParentElement as _, PromptLevel, Render, SharedString, Task, TextStyleRefinement, WeakEntity, }; use language::{CursorShape, Point}; @@ -22,6 +21,7 @@ use remote::{ ConnectionIdentifier, RemoteClient, RemoteConnection, RemoteConnectionOptions, RemotePlatform, SshConnectionOptions, }; +use semver::Version; pub use settings::SshConnection; use settings::{ExtendingVec, RegisterSetting, Settings, WslConnection}; use theme::ThemeSettings; @@ -480,14 +480,14 @@ impl remote::RemoteClientDelegate for RemoteClientDelegate { &self, platform: RemotePlatform, release_channel: ReleaseChannel, - version: Option, + version: Option, cx: &mut AsyncApp, ) -> Task> { let this = self.clone(); cx.spawn(async move |cx| { AutoUpdater::download_remote_server_release( release_channel, - version, + version.clone(), platform.os, platform.arch, move |status, cx| this.set_status(Some(status), cx), @@ -498,6 +498,7 @@ impl remote::RemoteClientDelegate for RemoteClientDelegate { format!( "Downloading remote server binary (version: {}, os: {}, arch: {})", version + .as_ref() .map(|v| format!("{}", v)) .unwrap_or("unknown".to_string()), platform.os, @@ -511,7 +512,7 @@ impl remote::RemoteClientDelegate for RemoteClientDelegate { &self, platform: RemotePlatform, release_channel: ReleaseChannel, - version: Option, + version: Option, 
cx: &mut AsyncApp, ) -> Task>> { cx.spawn(async move |cx| { diff --git a/crates/release_channel/Cargo.toml b/crates/release_channel/Cargo.toml index 53ae53504579e54eb7432edeb54cfc114ef8f17e..54a8afff8c152318103d1e4065fd3714802c6074 100644 --- a/crates/release_channel/Cargo.toml +++ b/crates/release_channel/Cargo.toml @@ -10,3 +10,4 @@ workspace = true [dependencies] gpui.workspace = true +semver.workspace = true diff --git a/crates/release_channel/src/lib.rs b/crates/release_channel/src/lib.rs index c0ceafc760a5949d636dc2df3e93dc8926111417..e84bf91c1db5e891abae0aeb67089cc40b1ec009 100644 --- a/crates/release_channel/src/lib.rs +++ b/crates/release_channel/src/lib.rs @@ -4,7 +4,8 @@ use std::{env, str::FromStr, sync::LazyLock}; -use gpui::{App, Global, SemanticVersion}; +use gpui::{App, Global}; +use semver::Version; /// stable | dev | nightly | preview pub static RELEASE_CHANNEL_NAME: LazyLock = LazyLock::new(|| { @@ -70,7 +71,7 @@ impl AppCommitSha { } } -struct GlobalAppVersion(SemanticVersion); +struct GlobalAppVersion(Version); impl Global for GlobalAppVersion {} @@ -79,20 +80,32 @@ pub struct AppVersion; impl AppVersion { /// Load the app version from env. - pub fn load(pkg_version: &str) -> SemanticVersion { - if let Ok(from_env) = env::var("ZED_APP_VERSION") { + pub fn load( + pkg_version: &str, + build_id: Option<&str>, + commit_sha: Option, + ) -> Version { + let mut version: Version = if let Ok(from_env) = env::var("ZED_APP_VERSION") { from_env.parse().expect("invalid ZED_APP_VERSION") } else { pkg_version.parse().expect("invalid version in Cargo.toml") + }; + if let Some(build_id) = build_id { + version.pre = semver::Prerelease::new(&build_id).expect("Invalid build identifier"); } + if let Some(sha) = commit_sha { + version.build = semver::BuildMetadata::new(&sha.0).expect("Invalid build metadata"); + } + + version } /// Returns the global version number. - pub fn global(cx: &App) -> SemanticVersion { + pub fn global(cx: &App) -> Version { if cx.has_global::() { - cx.global::().0 + cx.global::().0.clone() } else { - SemanticVersion::default() + Version::new(0, 0, 0) } } } @@ -121,13 +134,13 @@ struct GlobalReleaseChannel(ReleaseChannel); impl Global for GlobalReleaseChannel {} /// Initializes the release channel. -pub fn init(app_version: SemanticVersion, cx: &mut App) { +pub fn init(app_version: Version, cx: &mut App) { cx.set_global(GlobalAppVersion(app_version)); cx.set_global(GlobalReleaseChannel(*RELEASE_CHANNEL)) } /// Initializes the release channel for tests that rely on fake release channel. 
-pub fn init_test(app_version: SemanticVersion, release_channel: ReleaseChannel, cx: &mut App) { +pub fn init_test(app_version: Version, release_channel: ReleaseChannel, cx: &mut App) { cx.set_global(GlobalAppVersion(app_version)); cx.set_global(GlobalReleaseChannel(release_channel)) } diff --git a/crates/remote/Cargo.toml b/crates/remote/Cargo.toml index 838723f3660558f93ac6f8066627a8b182a2cf24..07eb7d795e21c2f4b99817e301f6d8687c4aab60 100644 --- a/crates/remote/Cargo.toml +++ b/crates/remote/Cargo.toml @@ -32,6 +32,7 @@ prost.workspace = true release_channel.workspace = true rpc = { workspace = true, features = ["gpui"] } schemars.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true diff --git a/crates/remote/src/remote_client.rs b/crates/remote/src/remote_client.rs index 1c14a0e244c3f09bb8b02e4aa99bd6b282435db5..85b19ba25ca7187dfb400eb4716234bb3716ba9c 100644 --- a/crates/remote/src/remote_client.rs +++ b/crates/remote/src/remote_client.rs @@ -22,7 +22,7 @@ use futures::{ }; use gpui::{ App, AppContext as _, AsyncApp, BackgroundExecutor, BorrowAppContext, Context, Entity, - EventEmitter, FutureExt, Global, SemanticVersion, Task, WeakEntity, + EventEmitter, FutureExt, Global, Task, WeakEntity, }; use parking_lot::Mutex; @@ -31,6 +31,7 @@ use rpc::{ AnyProtoClient, ErrorExt, ProtoClient, ProtoMessageHandlerSet, RpcError, proto::{self, Envelope, EnvelopedMessage, PeerId, RequestMessage, build_typed_envelope}, }; +use semver::Version; use std::{ collections::VecDeque, fmt, @@ -71,14 +72,14 @@ pub trait RemoteClientDelegate: Send + Sync { &self, platform: RemotePlatform, release_channel: ReleaseChannel, - version: Option, + version: Option, cx: &mut AsyncApp, ) -> Task>>; fn download_server_binary_locally( &self, platform: RemotePlatform, release_channel: ReleaseChannel, - version: Option, + version: Option, cx: &mut AsyncApp, ) -> Task>; fn set_status(&self, status: Option<&str>, cx: &mut AsyncApp); @@ -1506,9 +1507,10 @@ mod fake { }, select_biased, }; - use gpui::{App, AppContext as _, AsyncApp, SemanticVersion, Task, TestAppContext}; + use gpui::{App, AppContext as _, AsyncApp, Task, TestAppContext}; use release_channel::ReleaseChannel; use rpc::proto::Envelope; + use semver::Version; use std::{path::PathBuf, sync::Arc}; use util::paths::{PathStyle, RemotePathBuf}; @@ -1663,7 +1665,7 @@ mod fake { &self, _: RemotePlatform, _: ReleaseChannel, - _: Option, + _: Option, _: &mut AsyncApp, ) -> Task> { unreachable!() @@ -1673,7 +1675,7 @@ mod fake { &self, _platform: RemotePlatform, _release_channel: ReleaseChannel, - _version: Option, + _version: Option, _cx: &mut AsyncApp, ) -> Task>> { unreachable!() diff --git a/crates/remote/src/transport/ssh.rs b/crates/remote/src/transport/ssh.rs index fdf0f05ae665a8a384ef6dfcf3c659ee407e7124..bf537a3d6715eb8492fa87b802a26a111ec402b7 100644 --- a/crates/remote/src/transport/ssh.rs +++ b/crates/remote/src/transport/ssh.rs @@ -10,11 +10,12 @@ use futures::{ channel::mpsc::{Sender, UnboundedReceiver, UnboundedSender}, select_biased, }; -use gpui::{App, AppContext as _, AsyncApp, SemanticVersion, Task}; +use gpui::{App, AppContext as _, AsyncApp, Task}; use parking_lot::Mutex; use paths::remote_server_dir_relative; -use release_channel::{AppCommitSha, AppVersion, ReleaseChannel}; +use release_channel::{AppVersion, ReleaseChannel}; use rpc::proto::Envelope; +use semver::Version; pub use settings::SshPortForwardOption; use smol::{ fs, @@ -515,15 +516,10 @@ impl SshRemoteConnection { 
ssh_default_system_shell, }; - let (release_channel, version, commit) = cx.update(|cx| { - ( - ReleaseChannel::global(cx), - AppVersion::global(cx), - AppCommitSha::try_global(cx), - ) - })?; + let (release_channel, version) = + cx.update(|cx| (ReleaseChannel::global(cx), AppVersion::global(cx)))?; this.remote_binary_path = Some( - this.ensure_server_binary(&delegate, release_channel, version, commit, cx) + this.ensure_server_binary(&delegate, release_channel, version, cx) .await?, ); @@ -534,15 +530,10 @@ impl SshRemoteConnection { &self, delegate: &Arc, release_channel: ReleaseChannel, - version: SemanticVersion, - commit: Option, + version: Version, cx: &mut AsyncApp, ) -> Result> { let version_str = match release_channel { - ReleaseChannel::Nightly => { - let commit = commit.map(|s| s.full()).unwrap_or_default(); - format!("{}-{}", version, commit) - } ReleaseChannel::Dev => "build".to_string(), _ => version.to_string(), }; @@ -609,7 +600,12 @@ impl SshRemoteConnection { ); if !self.socket.connection_options.upload_binary_over_ssh && let Some(url) = delegate - .get_download_url(self.ssh_platform, release_channel, wanted_version, cx) + .get_download_url( + self.ssh_platform, + release_channel, + wanted_version.clone(), + cx, + ) .await? { match self @@ -631,7 +627,12 @@ impl SshRemoteConnection { } let src_path = delegate - .download_server_binary_locally(self.ssh_platform, release_channel, wanted_version, cx) + .download_server_binary_locally( + self.ssh_platform, + release_channel, + wanted_version.clone(), + cx, + ) .await .context("downloading server binary locally")?; self.upload_local_server_binary(&src_path, &tmp_path_gz, delegate, cx) diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index 7d4664ab846564fa55fc59912489536b78f8fc38..9fdf14d9fed6e6caf108171e292d4c2f33709ce7 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -6,9 +6,10 @@ use anyhow::{Context, Result, anyhow, bail}; use async_trait::async_trait; use collections::HashMap; use futures::channel::mpsc::{Sender, UnboundedReceiver, UnboundedSender}; -use gpui::{App, AppContext as _, AsyncApp, SemanticVersion, Task}; -use release_channel::{AppCommitSha, AppVersion, ReleaseChannel}; +use gpui::{App, AppContext as _, AsyncApp, Task}; +use release_channel::{AppVersion, ReleaseChannel}; use rpc::proto::Envelope; +use semver::Version; use smol::{fs, process}; use std::{ ffi::OsStr, @@ -62,13 +63,8 @@ impl WslRemoteConnection { connection_options.distro_name, connection_options.user ); - let (release_channel, version, commit) = cx.update(|cx| { - ( - ReleaseChannel::global(cx), - AppVersion::global(cx), - AppCommitSha::try_global(cx), - ) - })?; + let (release_channel, version) = + cx.update(|cx| (ReleaseChannel::global(cx), AppVersion::global(cx)))?; let mut this = Self { connection_options, @@ -94,7 +90,7 @@ impl WslRemoteConnection { .context("failed detecting platform")?; log::info!("Remote platform discovered: {:?}", this.platform); this.remote_binary_path = Some( - this.ensure_server_binary(&delegate, release_channel, version, commit, cx) + this.ensure_server_binary(&delegate, release_channel, version, cx) .await .context("failed ensuring server binary")?, ); @@ -157,15 +153,10 @@ impl WslRemoteConnection { &self, delegate: &Arc, release_channel: ReleaseChannel, - version: SemanticVersion, - commit: Option, + version: Version, cx: &mut AsyncApp, ) -> Result> { let version_str = match release_channel { - ReleaseChannel::Nightly => { - let commit = 
commit.map(|s| s.full()).unwrap_or_default(); - format!("{}-{}", version, commit) - } ReleaseChannel::Dev => "build".to_string(), _ => version.to_string(), }; diff --git a/crates/remote_server/Cargo.toml b/crates/remote_server/Cargo.toml index ba64f7aec9ee0a3759c2943e42b0f19742d905c1..114dc777c1d518fc2bcbc6aaff5a4b9aa7b68a1d 100644 --- a/crates/remote_server/Cargo.toml +++ b/crates/remote_server/Cargo.toml @@ -55,6 +55,7 @@ remote.workspace = true reqwest_client.workspace = true rpc.workspace = true rust-embed = { workspace = true, optional = true, features = ["debug-embed"] } +semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true diff --git a/crates/remote_server/build.rs b/crates/remote_server/build.rs index f0852a1b0a2560540ff4e1b7a9e91aaffcc9b228..3ad13d3d6e51ded17d717693e9e2b0a9cdc7213f 100644 --- a/crates/remote_server/build.rs +++ b/crates/remote_server/build.rs @@ -28,4 +28,7 @@ fn main() { println!("cargo:rustc-env=ZED_COMMIT_SHA={git_sha}"); } + if let Some(build_identifier) = option_env!("GITHUB_RUN_NUMBER") { + println!("cargo:rustc-env=ZED_BUILD_ID={build_identifier}"); + } } diff --git a/crates/remote_server/src/remote_editing_tests.rs b/crates/remote_server/src/remote_editing_tests.rs index 4b931edb9e63443c6cf23756e737e015c291741c..1e6ecddb5f2599a0ded0180f3afd3df0f197f037 100644 --- a/crates/remote_server/src/remote_editing_tests.rs +++ b/crates/remote_server/src/remote_editing_tests.rs @@ -11,7 +11,7 @@ use prompt_store::ProjectContext; use extension::ExtensionHostProxy; use fs::{FakeFs, Fs}; -use gpui::{AppContext as _, Entity, SemanticVersion, SharedString, TestAppContext}; +use gpui::{AppContext as _, Entity, SharedString, TestAppContext}; use http_client::{BlockedHttpClient, FakeHttpClient}; use language::{ Buffer, FakeLspAdapter, LanguageConfig, LanguageMatcher, LanguageRegistry, LineEnding, @@ -1503,7 +1503,7 @@ async fn test_remote_git_diffs_when_recv_update_repository_delay( let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); theme::init(theme::LoadThemes::JustBase, cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); editor::init(cx); }); @@ -1910,10 +1910,10 @@ pub async fn init_test( ) -> (Entity, Entity) { let server_fs = server_fs.clone(); cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); server_cx.update(|cx| { - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(semver::Version::new(0, 0, 0), cx); }); init_logger(); diff --git a/crates/remote_server/src/remote_server.rs b/crates/remote_server/src/remote_server.rs index c14a4828ac28890ad1711882045b83cee02104a9..98e8744b11114c8ccfcfcac86cca3ca6f9418610 100644 --- a/crates/remote_server/src/remote_server.rs +++ b/crates/remote_server/src/remote_server.rs @@ -71,10 +71,14 @@ pub fn run(command: Commands) -> anyhow::Result<()> { println!("{}", env!("ZED_PKG_VERSION")) } ReleaseChannel::Nightly | ReleaseChannel::Dev => { - println!( - "{}", - option_env!("ZED_COMMIT_SHA").unwrap_or(release_channel.dev_name()) - ) + let commit_sha = + option_env!("ZED_COMMIT_SHA").unwrap_or(release_channel.dev_name()); + let build_id = option_env!("ZED_BUILD_ID"); + if let Some(build_id) = build_id { + println!("{}+{}", build_id, commit_sha) + } else { + println!("{commit_sha}"); + } } }; Ok(()) diff --git a/crates/remote_server/src/unix.rs b/crates/remote_server/src/unix.rs index 
c631d47b8c2cea5d2ed74cd6ce8bd2956c3fbb1a..0407539a4c131d92202e3177cc95137062b039ec 100644 --- a/crates/remote_server/src/unix.rs +++ b/crates/remote_server/src/unix.rs @@ -9,7 +9,7 @@ use fs::{Fs, RealFs}; use futures::channel::{mpsc, oneshot}; use futures::{AsyncRead, AsyncWrite, AsyncWriteExt, FutureExt, SinkExt, select, select_biased}; use git::GitHostingProviderRegistry; -use gpui::{App, AppContext as _, Context, Entity, SemanticVersion, UpdateGlobal as _}; +use gpui::{App, AppContext as _, Context, Entity, UpdateGlobal as _}; use gpui_tokio::Tokio; use http_client::{Url, read_proxy_from_env}; use language::LanguageRegistry; @@ -19,7 +19,7 @@ use project::project_settings::ProjectSettings; use util::command::new_smol_command; use proto::CrashReport; -use release_channel::{AppVersion, RELEASE_CHANNEL, ReleaseChannel}; +use release_channel::{AppCommitSha, AppVersion, RELEASE_CHANNEL, ReleaseChannel}; use remote::RemoteClient; use remote::{ json_log::LogRecord, @@ -48,10 +48,16 @@ use std::{ }; use thiserror::Error; -pub static VERSION: LazyLock<&str> = LazyLock::new(|| match *RELEASE_CHANNEL { - ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION"), +pub static VERSION: LazyLock = LazyLock::new(|| match *RELEASE_CHANNEL { + ReleaseChannel::Stable | ReleaseChannel::Preview => env!("ZED_PKG_VERSION").to_owned(), ReleaseChannel::Nightly | ReleaseChannel::Dev => { - option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha") + let commit_sha = option_env!("ZED_COMMIT_SHA").unwrap_or("missing-zed-commit-sha"); + let build_identifier = option_env!("ZED_BUILD_ID"); + if let Some(build_id) = build_identifier { + format!("{build_id}+{commit_sha}") + } else { + commit_sha.to_owned() + } } }); @@ -390,7 +396,12 @@ pub fn execute_run( let git_hosting_provider_registry = Arc::new(GitHostingProviderRegistry::new()); app.run(move |cx| { settings::init(cx); - let app_version = AppVersion::load(env!("ZED_PKG_VERSION")); + let app_commit_sha = option_env!("ZED_COMMIT_SHA").map(|s| AppCommitSha::new(s.to_owned())); + let app_version = AppVersion::load( + env!("ZED_PKG_VERSION"), + option_env!("ZED_BUILD_ID"), + app_commit_sha, + ); release_channel::init(app_version, cx); gpui_tokio::init(cx); @@ -1002,9 +1013,9 @@ fn cleanup_old_binaries() -> Result<()> { } fn is_new_version(version: &str) -> bool { - SemanticVersion::from_str(version) + semver::Version::from_str(version) .ok() - .zip(SemanticVersion::from_str(env!("ZED_PKG_VERSION")).ok()) + .zip(semver::Version::from_str(env!("ZED_PKG_VERSION")).ok()) .is_some_and(|(version, current_version)| version >= current_version) } diff --git a/crates/semantic_version/Cargo.toml b/crates/semantic_version/Cargo.toml deleted file mode 100644 index a8bd3ab5ccba24700cc8de9607f825d022967b0b..0000000000000000000000000000000000000000 --- a/crates/semantic_version/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "semantic_version" -version = "0.1.0" -edition.workspace = true -publish = false -license = "Apache-2.0" -description = "A library for working with semantic versioning in gpui and Zed" - -[lints] -workspace = true - -[lib] -path = "src/semantic_version.rs" - -[dependencies] -anyhow.workspace = true -serde.workspace = true diff --git a/crates/semantic_version/LICENSE-APACHE b/crates/semantic_version/LICENSE-APACHE deleted file mode 120000 index 1cd601d0a3affae83854be02a0afdec3b7a9ec4d..0000000000000000000000000000000000000000 --- a/crates/semantic_version/LICENSE-APACHE +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-APACHE \ No 
newline at end of file diff --git a/crates/semantic_version/src/semantic_version.rs b/crates/semantic_version/src/semantic_version.rs deleted file mode 100644 index 11688ec4c61aba3b2bf66be121b3e1bd18724540..0000000000000000000000000000000000000000 --- a/crates/semantic_version/src/semantic_version.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! Constructs for working with [semantic versions](https://semver.org/). - -#![deny(missing_docs)] - -use std::{ - fmt::{self, Display}, - str::FromStr, -}; - -use anyhow::{Context as _, Result}; -use serde::{Deserialize, Serialize, de::Error}; - -/// A [semantic version](https://semver.org/) number. -#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)] -pub struct SemanticVersion { - major: usize, - minor: usize, - patch: usize, -} - -impl SemanticVersion { - /// Returns a new [`SemanticVersion`] from the given components. - pub const fn new(major: usize, minor: usize, patch: usize) -> Self { - Self { - major, - minor, - patch, - } - } - - /// Returns the major version number. - #[inline(always)] - pub fn major(&self) -> usize { - self.major - } - - /// Returns the minor version number. - #[inline(always)] - pub fn minor(&self) -> usize { - self.minor - } - - /// Returns the patch version number. - #[inline(always)] - pub fn patch(&self) -> usize { - self.patch - } -} - -impl FromStr for SemanticVersion { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - let mut components = s.trim().split('.'); - let major = components - .next() - .context("missing major version number")? - .parse()?; - let minor = components - .next() - .context("missing minor version number")? - .parse()?; - let patch = components - .next() - .context("missing patch version number")? - .parse()?; - Ok(Self { - major, - minor, - patch, - }) - } -} - -impl Display for SemanticVersion { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}.{}.{}", self.major, self.minor, self.patch) - } -} - -impl Serialize for SemanticVersion { - fn serialize(&self, serializer: S) -> std::prelude::v1::Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} - -impl<'de> Deserialize<'de> for SemanticVersion { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let string = String::deserialize(deserializer)?; - Self::from_str(&string) - .map_err(|_| Error::custom(format!("Invalid version string \"{string}\""))) - } -} diff --git a/crates/system_specs/Cargo.toml b/crates/system_specs/Cargo.toml index 86ac3c09116a00d8061f88fb52c5fe884a1a3fe4..15d6822b387024424550a0707f3403ec6bc9331b 100644 --- a/crates/system_specs/Cargo.toml +++ b/crates/system_specs/Cargo.toml @@ -20,6 +20,7 @@ client.workspace = true gpui.workspace = true human_bytes.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true sysinfo.workspace = true diff --git a/crates/system_specs/src/system_specs.rs b/crates/system_specs/src/system_specs.rs index 5c2b0a7fed82c53bbaab5f0ee9a8cc74462870d8..139f23d19373cfec76710abd398b8d47511d4562 100644 --- a/crates/system_specs/src/system_specs.rs +++ b/crates/system_specs/src/system_specs.rs @@ -1,10 +1,9 @@ -//! 
# system_specs - use client::telemetry; pub use gpui::GpuSpecs; -use gpui::{App, AppContext as _, SemanticVersion, Task, Window, actions}; +use gpui::{App, AppContext as _, Task, Window, actions}; use human_bytes::human_bytes; use release_channel::{AppCommitSha, AppVersion, ReleaseChannel}; +use semver::Version; use serde::Serialize; use std::{env, fmt::Display}; use sysinfo::{MemoryRefreshKind, RefreshKind, System}; @@ -72,7 +71,7 @@ impl SystemSpecs { } pub fn new_stateless( - app_version: SemanticVersion, + app_version: Version, app_commit_sha: Option, release_channel: ReleaseChannel, ) -> Self { diff --git a/crates/telemetry_events/Cargo.toml b/crates/telemetry_events/Cargo.toml index 87a02baf06549748e7ac5ccf6ee6ae396681f87c..6a5149c545895ea4f7ccc1a2a44f051ddc8c9907 100644 --- a/crates/telemetry_events/Cargo.toml +++ b/crates/telemetry_events/Cargo.toml @@ -12,6 +12,6 @@ workspace = true path = "src/telemetry_events.rs" [dependencies] -semantic_version.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/crates/telemetry_events/src/telemetry_events.rs b/crates/telemetry_events/src/telemetry_events.rs index aeb6fe99b3d284f7f9599edf04966a4c9876cf42..83ec2c064499804463ada5eabfcbd48d77369bb6 100644 --- a/crates/telemetry_events/src/telemetry_events.rs +++ b/crates/telemetry_events/src/telemetry_events.rs @@ -1,6 +1,6 @@ //! See [Telemetry in Zed](https://zed.dev/docs/telemetry) for additional information. -use semantic_version::SemanticVersion; +use semver::Version; use serde::{Deserialize, Serialize}; use std::{collections::HashMap, fmt::Display, time::Duration}; @@ -28,7 +28,7 @@ pub struct EventRequestBody { } impl EventRequestBody { - pub fn semver(&self) -> Option { + pub fn semver(&self) -> Option { self.app_version.parse().ok() } } diff --git a/crates/vim/Cargo.toml b/crates/vim/Cargo.toml index c935fd76dba79d24d6637ef5acd70f50870f82e7..1262a615f7b632cb572a13a7a6a42de742884119 100644 --- a/crates/vim/Cargo.toml +++ b/crates/vim/Cargo.toml @@ -66,6 +66,7 @@ lsp = { workspace = true, features = ["test-support"] } parking_lot.workspace = true project_panel.workspace = true release_channel.workspace = true +semver.workspace = true settings_ui.workspace = true settings.workspace = true perf.workspace = true diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 1e92715d2b3c874f110c0fa76b2a7d747fbf3b51..80208fb23ee229c4dc90a7d792ce0348f59ed950 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -1,8 +1,9 @@ use std::ops::{Deref, DerefMut}; use editor::test::editor_lsp_test_context::EditorLspTestContext; -use gpui::{Context, Entity, SemanticVersion, UpdateGlobal}; +use gpui::{Context, Entity, UpdateGlobal}; use search::{BufferSearchBar, project_search::ProjectSearchBar}; +use semver::Version; use crate::{state::Operator, *}; @@ -19,7 +20,7 @@ impl VimTestContext { cx.update(|cx| { let settings = SettingsStore::test(cx); cx.set_global(settings); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(Version::new(0, 0, 0), cx); command_palette::init(cx); project_panel::init(cx); git_ui::init(cx); diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index b0a4f6344c9a710af5cf6a391d7b2c0f03efe7b1..68ba338102202f1803ab97746ec8372adb45a66a 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -186,6 +186,7 @@ itertools.workspace = true language = { workspace = true, features = ["test-support"] } 
pretty_assertions.workspace = true project = { workspace = true, features = ["test-support"] } +semver.workspace = true terminal_view = { workspace = true, features = ["test-support"] } tree-sitter-md.workspace = true tree-sitter-rust.workspace = true diff --git a/crates/zed/build.rs b/crates/zed/build.rs index f37996b644c2966a74998be9e6d40a3fc70557df..e9316dc30c3a349d4f54a9be916a26a12a0214c4 100644 --- a/crates/zed/build.rs +++ b/crates/zed/build.rs @@ -32,6 +32,10 @@ fn main() { println!("cargo:rustc-env=ZED_COMMIT_SHA={git_sha}"); + if let Some(build_identifier) = option_env!("GITHUB_RUN_NUMBER") { + println!("cargo:rustc-env=ZED_BUILD_ID={build_identifier}"); + } + if let Ok(build_profile) = std::env::var("PROFILE") && build_profile == "release" { diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 3f1044c309b75bf2400c85f41cc6abe78e8e3e30..e4c41c9ec5ebb523033995c9ed9d780d65b79f31 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -250,9 +250,10 @@ pub fn main() { }; } - let app_version = AppVersion::load(env!("CARGO_PKG_VERSION")); + let version = option_env!("ZED_BUILD_ID"); let app_commit_sha = option_env!("ZED_COMMIT_SHA").map(|commit_sha| AppCommitSha::new(commit_sha.to_string())); + let app_version = AppVersion::load(env!("CARGO_PKG_VERSION"), version, app_commit_sha.clone()); if args.system_specs { let system_specs = system_specs::SystemSpecs::new_stateless( diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index be38b3f0952d5ccab6d9d729d77f3fce1e407a4d..33a715283b9d63f0238eb55b758d71aac17c9b5c 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -53,7 +53,7 @@ use project_panel::ProjectPanel; use prompt_store::PromptBuilder; use quick_action_bar::QuickActionBar; use recent_projects::open_remote_project; -use release_channel::{AppCommitSha, ReleaseChannel}; +use release_channel::{AppCommitSha, AppVersion, ReleaseChannel}; use rope::Rope; use search::project_search::ProjectSearchBar; use settings::{ @@ -1168,7 +1168,9 @@ fn initialize_pane( } fn about(_: &mut Workspace, window: &mut Window, cx: &mut Context) { + use std::fmt::Write; let release_channel = ReleaseChannel::global(cx).display_name(); + let full_version = AppVersion::global(cx); let version = env!("CARGO_PKG_VERSION"); let debug = if cfg!(debug_assertions) { "(debug)" @@ -1176,7 +1178,16 @@ fn about(_: &mut Workspace, window: &mut Window, cx: &mut Context) { "" }; let message = format!("{release_channel} {version} {debug}"); - let detail = AppCommitSha::try_global(cx).map(|sha| sha.full()); + + let mut detail = AppCommitSha::try_global(cx) + .map(|sha| sha.full()) + .unwrap_or_default(); + if !detail.is_empty() { + detail.push('\n'); + } + _ = write!(&mut detail, "\n{full_version}"); + + let detail = Some(detail); let prompt = window.prompt( PromptLevel::Info, @@ -2235,12 +2246,13 @@ mod tests { DisplayPoint, Editor, MultiBufferOffset, SelectionEffects, display_map::DisplayRow, }; use gpui::{ - Action, AnyWindowHandle, App, AssetSource, BorrowAppContext, SemanticVersion, - TestAppContext, UpdateGlobal, VisualTestContext, WindowHandle, actions, + Action, AnyWindowHandle, App, AssetSource, BorrowAppContext, TestAppContext, UpdateGlobal, + VisualTestContext, WindowHandle, actions, }; use language::{LanguageMatcher, LanguageRegistry}; use pretty_assertions::{assert_eq, assert_ne}; use project::{Project, ProjectPath}; + use semver::Version; use serde_json::json; use settings::{SettingsStore, watch_config_file}; use std::{ @@ -4777,7 +4789,7 @@ mod tests { 
call::init(app_state.client.clone(), app_state.user_store.clone(), cx); notifications::init(app_state.client.clone(), app_state.user_store.clone(), cx); workspace::init(app_state.clone(), cx); - release_channel::init(SemanticVersion::default(), cx); + release_channel::init(Version::new(0, 0, 0), cx); command_palette::init(cx); editor::init(cx); collab_ui::init(&app_state, cx); diff --git a/crates/zeta/Cargo.toml b/crates/zeta/Cargo.toml index 821d3e0b9e7a5ff37302cf613f4e09b047f121f1..df569c7bc39655d99ee01b464a05e0ef3873f8d6 100644 --- a/crates/zeta/Cargo.toml +++ b/crates/zeta/Cargo.toml @@ -44,6 +44,7 @@ project.workspace = true rand.workspace = true regex.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index cb31488d17668531ee11a67d1e4be19a1674d3d2..5b2c3856eda2cd984e6675d671f8c99aa183e883 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -27,8 +27,8 @@ use cloud_llm_client::{ use collections::{HashMap, HashSet, VecDeque}; use futures::AsyncReadExt; use gpui::{ - App, AppContext as _, AsyncApp, Context, Entity, EntityId, Global, SemanticVersion, - SharedString, Subscription, Task, actions, + App, AppContext as _, AsyncApp, Context, Entity, EntityId, Global, SharedString, Subscription, + Task, actions, }; use http_client::{AsyncBody, HttpClient, Method, Request, Response}; use input_excerpt::excerpt_for_cursor_position; @@ -38,6 +38,7 @@ use language::{ use language_model::{LlmApiToken, RefreshLlmTokenListener}; use project::{Project, ProjectPath}; use release_channel::AppVersion; +use semver::Version; use settings::WorktreeId; use std::collections::hash_map; use std::mem; @@ -608,7 +609,7 @@ impl Zeta { if let Some(minimum_required_version) = response .headers() .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| SemanticVersion::from_str(version.to_str().ok()?).ok()) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) { anyhow::ensure!( app_version >= minimum_required_version, @@ -683,7 +684,7 @@ impl Zeta { if let Some(minimum_required_version) = response .headers() .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| SemanticVersion::from_str(version.to_str().ok()?).ok()) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) && app_version < minimum_required_version { return Err(anyhow!(ZedUpdateRequiredError { @@ -752,7 +753,7 @@ impl Zeta { if let Some(minimum_required_version) = response .headers() .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| SemanticVersion::from_str(version.to_str().ok()?).ok()) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) && app_version < minimum_required_version { return Err(anyhow!(ZedUpdateRequiredError { @@ -1115,7 +1116,7 @@ impl Zeta { pub struct PerformPredictEditsParams { pub client: Arc, pub llm_token: LlmApiToken, - pub app_version: SemanticVersion, + pub app_version: Version, pub body: PredictEditsBody, } @@ -1124,7 +1125,7 @@ pub struct PerformPredictEditsParams { "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
)] pub struct ZedUpdateRequiredError { - minimum_version: SemanticVersion, + minimum_version: Version, } fn common_prefix, T2: Iterator>(a: T1, b: T2) -> usize { diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml index 834762447707b88d6b009f0d6700c639306c9bbd..0b20f980feaa6c2e86b0d3a6b88150d27d06fab2 100644 --- a/crates/zeta2/Cargo.toml +++ b/crates/zeta2/Cargo.toml @@ -37,6 +37,7 @@ open_ai.workspace = true pretty_assertions.workspace = true project.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true smol.workspace = true diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index a06d7043cf565dccf0d8a4e8830cbb41c2e9981b..1cee72ce60e2fcc97d2e4f3b50f274d90a080ee9 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -20,8 +20,8 @@ use futures::AsyncReadExt as _; use futures::channel::{mpsc, oneshot}; use gpui::http_client::{AsyncBody, Method}; use gpui::{ - App, AsyncApp, Entity, EntityId, Global, SemanticVersion, SharedString, Subscription, Task, - WeakEntity, http_client, prelude::*, + App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, + http_client, prelude::*, }; use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, Point, ToOffset as _, ToPoint}; use language::{BufferSnapshot, OffsetRangeExt}; @@ -30,13 +30,14 @@ use lsp::DiagnosticSeverity; use open_ai::FunctionDefinition; use project::{Project, ProjectPath}; use release_channel::AppVersion; +use semver::Version; use serde::de::DeserializeOwned; use std::collections::{VecDeque, hash_map}; use std::fmt::Write; use std::ops::Range; use std::path::Path; -use std::str::FromStr as _; +use std::str::FromStr; use std::sync::{Arc, LazyLock}; use std::time::{Duration, Instant}; use std::{env, mem}; @@ -1696,7 +1697,7 @@ impl Zeta { request: open_ai::Request, client: Arc, llm_token: LlmApiToken, - app_version: SemanticVersion, + app_version: Version, #[cfg(feature = "eval-support")] eval_cache: Option>, #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, ) -> Result<(open_ai::Response, Option)> { @@ -1798,7 +1799,7 @@ impl Zeta { build: impl Fn(http_client::http::request::Builder) -> Result>, client: Arc, llm_token: LlmApiToken, - app_version: SemanticVersion, + app_version: Version, ) -> Result<(Res, Option)> where Res: DeserializeOwned, @@ -1822,7 +1823,7 @@ impl Zeta { if let Some(minimum_required_version) = response .headers() .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| SemanticVersion::from_str(version.to_str().ok()?).ok()) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) { anyhow::ensure!( app_version >= minimum_required_version, @@ -2310,7 +2311,7 @@ pub fn text_from_response(mut res: open_ai::Response) -> Option { "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
)] pub struct ZedUpdateRequiredError { - minimum_version: SemanticVersion, + minimum_version: Version, } fn make_syntax_context_cloud_request( diff --git a/crates/zeta_cli/src/headless.rs b/crates/zeta_cli/src/headless.rs index 35fbf1313e21bac33f8dfb5325fae365ae133aee..c4d8667d63dfb3dd39fbced609e0ae0bc44974d2 100644 --- a/crates/zeta_cli/src/headless.rs +++ b/crates/zeta_cli/src/headless.rs @@ -8,7 +8,7 @@ use language::LanguageRegistry; use language_extension::LspAccess; use node_runtime::{NodeBinaryOptions, NodeRuntime}; use project::project_settings::ProjectSettings; -use release_channel::AppVersion; +use release_channel::{AppCommitSha, AppVersion}; use reqwest_client::ReqwestClient; use settings::{Settings, SettingsStore}; use std::path::PathBuf; @@ -26,8 +26,14 @@ pub struct ZetaCliAppState { // TODO: dedupe with crates/eval/src/eval.rs pub fn init(cx: &mut App) -> ZetaCliAppState { - let app_version = AppVersion::load(env!("ZED_PKG_VERSION")); - release_channel::init(app_version, cx); + let app_commit_sha = option_env!("ZED_COMMIT_SHA").map(|s| AppCommitSha::new(s.to_owned())); + + let app_version = AppVersion::load( + env!("ZED_PKG_VERSION"), + option_env!("ZED_BUILD_ID"), + app_commit_sha, + ); + release_channel::init(app_version.clone(), cx); gpui_tokio::init(cx); let settings_store = SettingsStore::new(cx, &settings::default_settings()); diff --git a/script/upload-nightly b/script/upload-nightly index 043085989f248c4845bf4c8cb788c78b9f21b61a..527af96ab91238c879be788b0e344f0aca9632cb 100755 --- a/script/upload-nightly +++ b/script/upload-nightly @@ -4,14 +4,14 @@ bash -euo pipefail source script/lib/blob-store.sh bucket_name="zed-nightly-host" +version=$(./script/get-crate-version zed)-"${GITHUB_RUN_NUMBER}+${GITHUB_SHA}" for file_to_upload in ./release-artifacts/*; do [ -f "$file_to_upload" ] || continue upload_to_blob_store_public $bucket_name "$file_to_upload" "nightly/$(basename "$file_to_upload")" - upload_to_blob_store_public $bucket_name "$file_to_upload" "${GITHUB_SHA}/$(basename "$file_to_upload")" + upload_to_blob_store_public $bucket_name "$file_to_upload" "${version}/$(basename "$file_to_upload")" rm -f "$file_to_upload" done -sha=$(git rev-parse HEAD) -echo -n ${sha} > ./release-artifacts/latest-sha +echo -n ${version} > ./release-artifacts/latest-sha upload_to_blob_store_public $bucket_name "release-artifacts/latest-sha" "nightly/latest-sha" From f6f8fc1229093bfed8f66c2cc7e377dbdd68fb8f Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Mon, 24 Nov 2025 14:35:24 +0100 Subject: [PATCH 0331/1030] gpui: Do not panic when `GetMonitorInfoW` fails (#43397) Fixes ZED-29R Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/gpui/src/platform/windows/display.rs | 37 ++++++++++----------- crates/gpui/src/platform/windows/events.rs | 4 +-- 2 files changed, 20 insertions(+), 21 deletions(-) diff --git a/crates/gpui/src/platform/windows/display.rs b/crates/gpui/src/platform/windows/display.rs index 79716c951da783f48e333a9a5dae85bd7bb34a67..ea8960580dc7f45f0dc878247e8387b6a1032ea2 100644 --- a/crates/gpui/src/platform/windows/display.rs +++ b/crates/gpui/src/platform/windows/display.rs @@ -63,22 +63,21 @@ impl WindowsDisplay { }) } - pub fn new_with_handle(monitor: HMONITOR) -> Self { - let info = get_monitor_info(monitor).expect("unable to get monitor info"); + pub fn new_with_handle(monitor: HMONITOR) -> anyhow::Result { + let info = get_monitor_info(monitor)?; let monitor_size = info.monitorInfo.rcMonitor; let uuid = generate_uuid(&info.szDevice); let display_id = available_monitors() .iter() .position(|handle| handle.0 == monitor.0) .unwrap(); - let scale_factor = - get_scale_factor_for_monitor(monitor).expect("unable to get scale factor for monitor"); + let scale_factor = get_scale_factor_for_monitor(monitor)?; let physical_size = size( (monitor_size.right - monitor_size.left).into(), (monitor_size.bottom - monitor_size.top).into(), ); - WindowsDisplay { + Ok(WindowsDisplay { handle: monitor, display_id: DisplayId(display_id as _), scale_factor, @@ -95,21 +94,20 @@ impl WindowsDisplay { size: physical_size, }, uuid, - } + }) } - fn new_with_handle_and_id(handle: HMONITOR, display_id: DisplayId) -> Self { - let info = get_monitor_info(handle).expect("unable to get monitor info"); + fn new_with_handle_and_id(handle: HMONITOR, display_id: DisplayId) -> anyhow::Result { + let info = get_monitor_info(handle)?; let monitor_size = info.monitorInfo.rcMonitor; let uuid = generate_uuid(&info.szDevice); - let scale_factor = - get_scale_factor_for_monitor(handle).expect("unable to get scale factor for monitor"); + let scale_factor = get_scale_factor_for_monitor(handle)?; let physical_size = size( (monitor_size.right - monitor_size.left).into(), (monitor_size.bottom - monitor_size.top).into(), ); - WindowsDisplay { + Ok(WindowsDisplay { handle, display_id, scale_factor, @@ -126,7 +124,7 @@ impl WindowsDisplay { size: physical_size, }, uuid, - } + }) } pub fn primary_monitor() -> Option { @@ -140,7 +138,7 @@ impl WindowsDisplay { ); return None; } - Some(WindowsDisplay::new_with_handle(monitor)) + WindowsDisplay::new_with_handle(monitor).log_err() } /// Check if the center point of given bounds is inside this monitor @@ -154,7 +152,9 @@ impl WindowsDisplay { if monitor.is_invalid() { false } else { - let display = WindowsDisplay::new_with_handle(monitor); + let Ok(display) = WindowsDisplay::new_with_handle(monitor) else { + return false; + }; display.uuid == self.uuid } } @@ -163,11 +163,10 @@ impl WindowsDisplay { available_monitors() .into_iter() .enumerate() - .map(|(id, handle)| { - Rc::new(WindowsDisplay::new_with_handle_and_id( - handle, - DisplayId(id as _), - )) as Rc + .filter_map(|(id, handle)| { + Some(Rc::new( + WindowsDisplay::new_with_handle_and_id(handle, DisplayId(id as _)).ok()?, + ) as Rc) }) .collect() } diff --git a/crates/gpui/src/platform/windows/events.rs b/crates/gpui/src/platform/windows/events.rs index 7b1d0efd1821c0651cc2f092f51041c0de84cc14..43825cba40c4f2889dcb86316d3e7ea01b90eba3 100644 --- a/crates/gpui/src/platform/windows/events.rs +++ b/crates/gpui/src/platform/windows/events.rs @@ -138,7 +138,7 @@ impl WindowsWindowInner { // monitor is invalid, we do nothing. 
if !monitor.is_invalid() && lock.display.handle != monitor { // we will get the same monitor if we only have one - lock.display = WindowsDisplay::new_with_handle(monitor); + lock.display = WindowsDisplay::new_with_handle(monitor).log_err()?; } } if let Some(mut callback) = lock.callbacks.moved.take() { @@ -829,7 +829,7 @@ impl WindowsWindowInner { log::error!("No monitor detected!"); return None; } - let new_display = WindowsDisplay::new_with_handle(new_monitor); + let new_display = WindowsDisplay::new_with_handle(new_monitor).log_err()?; self.state.borrow_mut().display = new_display; Some(0) } From 138286f3b138cc653456376860dacddda6b18093 Mon Sep 17 00:00:00 2001 From: Vasyl Protsiv Date: Mon, 24 Nov 2025 15:49:00 +0200 Subject: [PATCH 0332/1030] sum_tree: Make SumTree::append run in logarithmic time (#43349) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `SumTree::append` method is slow when appending large trees to small trees. The reason is this code here: https://github.com/zed-industries/zed/blob/f57f4cd3607e8298ef5f1b29929df2db0185d826/crates/sum_tree/src/sum_tree.rs#L628-L630 `append` is called recursively until `self` and `other` have the same height, effectively making this code `O(log^2 n)` in the number of leaves of `other` tree in the worst case. There are no algorithmic reasons why appending large trees must be this much slower. This PR proves it by providing implementation of `append` that works in logarithmic time regardless if `self` is smaller or larger than `other`. The helper method `append_large` has the symmetric logic to `push_tree_recursive` but moves the (unlikely) case of merging underflowing node in a separate helper function to reduce stack usage. I am a bit unsure about some implementation choices made in `push_tree_recursive` and would like to discuss some of these later, but at the moment I didn't change anything there and tried to follow the same logic in `append_large`. We might also consider adding `push_front`/`prepend` methods to `SumTree`. I did not find a good benchmark that covers this case so I added a new one to rope benchmarks.
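For illustration, a minimal sketch of the append pattern that previously hit the `O(log^2 n)` worst case — it mirrors the new `large to small` benchmark added below, using `Rope` as a convenient `SumTree`-backed type (the `use rope::Rope;` import is an assumption for a standalone example):

```rust
use rope::Rope;

fn main() {
    // Repeatedly append an ever-growing rope onto a small one.
    // The accumulated rope ("large") keeps getting taller than the fresh
    // small rope, so the old `append` recursed once per height difference,
    // re-appending the large tree's children at each level; the new
    // `append_large` path handles this in a single logarithmic descent.
    let small = Rope::from("A".repeat(128));
    let mut rope = Rope::new();
    for _ in 0..100_000 {
        let large = rope;     // the accumulated (taller) rope so far
        rope = small.clone(); // start again from the small rope...
        rope.append(large);   // ...and append the large one onto it
    }
    assert_eq!(rope.len(), 128 * 100_000);
}
```

Per the description above, each `append` call in this loop previously cost `O(log^2 n)` in the size of the accumulated rope; with `append_large` it is logarithmic, which is what the `append many/large to small` benchmark below measures.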
cargo bench (compared to current main) ``` Running benches\rope_benchmark.rs (D:\zed\target\release\deps\rope_benchmark-59c669d2895cd2c4.exe) Gnuplot not found, using plotters backend push/4096 time: [195.67 µs 195.75 µs 195.86 µs] thrpt: [19.944 MiB/s 19.955 MiB/s 19.964 MiB/s] change: time: [+0.2162% +0.3040% +0.4057%] (p = 0.00 < 0.05) thrpt: [-0.4040% -0.3030% -0.2157%] Change within noise threshold. Found 14 outliers among 100 measurements (14.00%) 2 (2.00%) low mild 6 (6.00%) high mild 6 (6.00%) high severe Benchmarking push/65536: Warming up for 3.0000 s Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 7.8s, enable flat sampling, or reduce sample count to 50. push/65536 time: [1.4431 ms 1.4485 ms 1.4546 ms] thrpt: [42.966 MiB/s 43.147 MiB/s 43.310 MiB/s] change: time: [-3.2257% -1.2013% +0.6431%] (p = 0.27 > 0.05) thrpt: [-0.6390% +1.2159% +3.3332%] No change in performance detected. Found 11 outliers among 100 measurements (11.00%) 1 (1.00%) low mild 5 (5.00%) high mild 5 (5.00%) high severe append/4096 time: [15.107 µs 15.128 µs 15.149 µs] thrpt: [257.86 MiB/s 258.22 MiB/s 258.58 MiB/s] change: time: [+0.9650% +1.5256% +1.9057%] (p = 0.00 < 0.05) thrpt: [-1.8701% -1.5026% -0.9557%] Change within noise threshold. Found 2 outliers among 100 measurements (2.00%) 1 (1.00%) low mild 1 (1.00%) high severe append/65536 time: [1.2870 µs 1.4496 µs 1.6484 µs] thrpt: [37.028 GiB/s 42.106 GiB/s 47.425 GiB/s] change: time: [-28.699% -16.073% -0.3133%] (p = 0.04 < 0.05) thrpt: [+0.3142% +19.151% +40.250%] Change within noise threshold. Found 17 outliers among 100 measurements (17.00%) 1 (1.00%) high mild 16 (16.00%) high severe slice/4096 time: [30.580 µs 30.611 µs 30.639 µs] thrpt: [127.49 MiB/s 127.61 MiB/s 127.74 MiB/s] change: time: [-2.2958% -0.9674% -0.1835%] (p = 0.08 > 0.05) thrpt: [+0.1838% +0.9769% +2.3498%] No change in performance detected. slice/65536 time: [614.86 µs 795.04 µs 1.0293 ms] thrpt: [60.723 MiB/s 78.613 MiB/s 101.65 MiB/s] change: time: [-12.714% +7.2092% +30.676%] (p = 0.52 > 0.05) thrpt: [-23.475% -6.7244% +14.566%] No change in performance detected. Found 14 outliers among 100 measurements (14.00%) 14 (14.00%) high severe bytes_in_range/4096 time: [3.3298 µs 3.3416 µs 3.3563 µs] thrpt: [1.1366 GiB/s 1.1416 GiB/s 1.1456 GiB/s] change: time: [+2.0652% +3.0667% +4.3765%] (p = 0.00 < 0.05) thrpt: [-4.1930% -2.9754% -2.0234%] Performance has regressed. Found 2 outliers among 100 measurements (2.00%) 2 (2.00%) high severe bytes_in_range/65536 time: [80.640 µs 80.825 µs 81.024 µs] thrpt: [771.38 MiB/s 773.28 MiB/s 775.05 MiB/s] change: time: [-0.6566% +1.0994% +2.9691%] (p = 0.27 > 0.05) thrpt: [-2.8835% -1.0875% +0.6609%] No change in performance detected. Found 10 outliers among 100 measurements (10.00%) 2 (2.00%) high mild 8 (8.00%) high severe chars/4096 time: [763.17 ns 763.68 ns 764.36 ns] thrpt: [4.9907 GiB/s 4.9952 GiB/s 4.9985 GiB/s] change: time: [-2.1138% -0.7973% +0.1096%] (p = 0.18 > 0.05) thrpt: [-0.1095% +0.8037% +2.1595%] No change in performance detected. Found 10 outliers among 100 measurements (10.00%) 1 (1.00%) low severe 6 (6.00%) low mild 3 (3.00%) high severe chars/65536 time: [12.479 µs 12.503 µs 12.529 µs] thrpt: [4.8714 GiB/s 4.8817 GiB/s 4.8910 GiB/s] change: time: [-2.4451% -1.0638% +0.6633%] (p = 0.16 > 0.05) thrpt: [-0.6589% +1.0753% +2.5063%] No change in performance detected. 
Found 11 outliers among 100 measurements (11.00%) 4 (4.00%) high mild 7 (7.00%) high severe clip_point/4096 time: [63.148 µs 63.182 µs 63.229 µs] thrpt: [61.779 MiB/s 61.825 MiB/s 61.859 MiB/s] change: time: [+1.0107% +2.1329% +4.2849%] (p = 0.02 < 0.05) thrpt: [-4.1088% -2.0883% -1.0006%] Performance has regressed. Found 5 outliers among 100 measurements (5.00%) 4 (4.00%) high mild 1 (1.00%) high severe Benchmarking clip_point/65536: Warming up for 3.0000 s Warning: Unable to complete 100 samples in 5.0s. You may wish to increase target time to 7.8s, enable flat sampling, or reduce sample count to 50. clip_point/65536 time: [1.2578 ms 1.2593 ms 1.2608 ms] thrpt: [49.573 MiB/s 49.631 MiB/s 49.690 MiB/s] change: time: [+0.4881% +0.8942% +1.3488%] (p = 0.00 < 0.05) thrpt: [-1.3308% -0.8863% -0.4857%] Change within noise threshold. Found 15 outliers among 100 measurements (15.00%) 1 (1.00%) high mild 14 (14.00%) high severe point_to_offset/4096 time: [16.211 µs 16.235 µs 16.257 µs] thrpt: [240.28 MiB/s 240.61 MiB/s 240.97 MiB/s] change: time: [-1.4913% +0.1685% +2.2662%] (p = 0.89 > 0.05) thrpt: [-2.2159% -0.1682% +1.5139%] No change in performance detected. Found 2 outliers among 100 measurements (2.00%) 1 (1.00%) high mild 1 (1.00%) high severe point_to_offset/65536 time: [360.06 µs 360.58 µs 361.16 µs] thrpt: [173.05 MiB/s 173.33 MiB/s 173.58 MiB/s] change: time: [+0.0939% +0.8792% +1.8751%] (p = 0.06 > 0.05) thrpt: [-1.8406% -0.8715% -0.0938%] No change in performance detected. Found 10 outliers among 100 measurements (10.00%) 3 (3.00%) high mild 7 (7.00%) high severe cursor/4096 time: [19.266 µs 19.282 µs 19.302 µs] thrpt: [202.38 MiB/s 202.58 MiB/s 202.75 MiB/s] change: time: [+1.2457% +2.2477% +2.8702%] (p = 0.00 < 0.05) thrpt: [-2.7901% -2.1983% -1.2304%] Performance has regressed. Found 4 outliers among 100 measurements (4.00%) 2 (2.00%) high mild 2 (2.00%) high severe cursor/65536 time: [467.63 µs 468.36 µs 469.14 µs] thrpt: [133.22 MiB/s 133.44 MiB/s 133.65 MiB/s] change: time: [-0.2019% +1.3419% +2.8915%] (p = 0.10 > 0.05) thrpt: [-2.8103% -1.3241% +0.2023%] No change in performance detected. Found 12 outliers among 100 measurements (12.00%) 3 (3.00%) high mild 9 (9.00%) high severe append many/small to large time: [37.419 ms 37.656 ms 37.929 ms] thrpt: [321.84 MiB/s 324.17 MiB/s 326.22 MiB/s] change: time: [+0.8113% +1.7361% +2.6538%] (p = 0.00 < 0.05) thrpt: [-2.5852% -1.7065% -0.8047%] Change within noise threshold. Found 9 outliers among 100 measurements (9.00%) 9 (9.00%) high severe append many/large to small time: [51.289 ms 51.437 ms 51.614 ms] thrpt: [236.50 MiB/s 237.32 MiB/s 238.00 MiB/s] change: time: [-87.518% -87.479% -87.438%] (p = 0.00 < 0.05) thrpt: [+696.08% +698.66% +701.13%] Performance has improved. Found 13 outliers among 100 measurements (13.00%) 4 (4.00%) high mild 9 (9.00%) high severe ```
Release Notes: - sum_tree: Make SumTree::append run in logarithmic time --- crates/rope/benches/rope_benchmark.rs | 29 ++++ crates/sum_tree/src/sum_tree.rs | 188 +++++++++++++++++++++++++- 2 files changed, 214 insertions(+), 3 deletions(-) diff --git a/crates/rope/benches/rope_benchmark.rs b/crates/rope/benches/rope_benchmark.rs index 030bec01df4d223cd5288842ba0f9c1386dac31b..8599328aacf73a9b846795ee19791f4b0c4c5c2c 100644 --- a/crates/rope/benches/rope_benchmark.rs +++ b/crates/rope/benches/rope_benchmark.rs @@ -238,6 +238,35 @@ fn rope_benchmarks(c: &mut Criterion) { }); } group.finish(); + + let mut group = c.benchmark_group("append many"); + group.throughput(Throughput::Bytes(128 * 100_000)); + + group.bench_function("small to large", |b| { + b.iter(|| { + let mut rope = Rope::new(); + let small = Rope::from("A".repeat(128)); + for _ in 0..100_000 { + rope.append(small.clone()); + } + assert_eq!(rope.len(), 128 * 100_000); + }); + }); + + group.bench_function("large to small", |b| { + b.iter(|| { + let mut rope = Rope::new(); + let small = Rope::from("A".repeat(128)); + for _ in 0..100_000 { + let large = rope; + rope = small.clone(); + rope.append(large); + } + assert_eq!(rope.len(), 128 * 100_000); + }); + }); + + group.finish(); } criterion_group!(benches, rope_benchmarks); diff --git a/crates/sum_tree/src/sum_tree.rs b/crates/sum_tree/src/sum_tree.rs index 95fbd5ed0d5f5700d0c894cda68ed15ce6590ced..da700201f558a0b29ed4dc45bd3d3d3e7474a297 100644 --- a/crates/sum_tree/src/sum_tree.rs +++ b/crates/sum_tree/src/sum_tree.rs @@ -620,13 +620,15 @@ impl SumTree { ); } - pub fn append(&mut self, other: Self, cx: ::Context<'_>) { + pub fn append(&mut self, mut other: Self, cx: ::Context<'_>) { if self.is_empty() { *self = other; } else if !other.0.is_leaf() || !other.0.items().is_empty() { if self.0.height() < other.0.height() { - for tree in other.0.child_trees() { - self.append(tree.clone(), cx); + if let Some(tree) = Self::append_large(self.clone(), &mut other, cx) { + *self = Self::from_child_trees(tree, other, cx); + } else { + *self = other; } } else if let Some(split_tree) = self.push_tree_recursive(other, cx) { *self = Self::from_child_trees(self.clone(), split_tree, cx); @@ -754,6 +756,186 @@ impl SumTree { } } + // appends the `large` tree to a `small` tree, assumes small.height() <= large.height() + fn append_large( + small: Self, + large: &mut Self, + cx: ::Context<'_>, + ) -> Option { + if small.0.height() == large.0.height() { + if !small.0.is_underflowing() { + Some(small) + } else { + Self::merge_into_right(small, large, cx) + } + } else { + debug_assert!(small.0.height() < large.0.height()); + let Node::Internal { + height, + summary, + child_summaries, + child_trees, + } = Arc::make_mut(&mut large.0) + else { + unreachable!(); + }; + let mut full_summary = small.summary().clone(); + Summary::add_summary(&mut full_summary, summary, cx); + *summary = full_summary; + + let first = child_trees.first_mut().unwrap(); + let res = Self::append_large(small, first, cx); + *child_summaries.first_mut().unwrap() = first.summary().clone(); + if let Some(tree) = res { + if child_trees.len() < 2 * TREE_BASE { + child_summaries.insert(0, tree.summary().clone()); + child_trees.insert(0, tree); + None + } else { + let new_child_summaries = { + let mut res = ArrayVec::from_iter([tree.summary().clone()]); + res.extend(child_summaries.drain(..TREE_BASE)); + res + }; + let tree = SumTree(Arc::new(Node::Internal { + height: *height, + summary: sum(new_child_summaries.iter(), cx), + 
child_summaries: new_child_summaries, + child_trees: { + let mut res = ArrayVec::from_iter([tree]); + res.extend(child_trees.drain(..TREE_BASE)); + res + }, + })); + + *summary = sum(child_summaries.iter(), cx); + Some(tree) + } + } else { + None + } + } + } + + // Merge two nodes into `large`. + // + // `large` will contain the contents of `small` followed by its own data. + // If the combined data exceed the node capacity, returns a new node that + // holds the first half of the merged items and `large` is left with the + // second half + // + // The nodes must be on the same height + // It only makes sense to call this when `small` is underflowing + fn merge_into_right( + small: Self, + large: &mut Self, + cx: <::Summary as Summary>::Context<'_>, + ) -> Option> { + debug_assert_eq!(small.0.height(), large.0.height()); + match (small.0.as_ref(), Arc::make_mut(&mut large.0)) { + ( + Node::Internal { + summary: small_summary, + child_summaries: small_child_summaries, + child_trees: small_child_trees, + .. + }, + Node::Internal { + summary, + child_summaries, + child_trees, + height, + }, + ) => { + let total_child_count = child_trees.len() + small_child_trees.len(); + if total_child_count <= 2 * TREE_BASE { + let mut all_trees = small_child_trees.clone(); + all_trees.extend(child_trees.drain(..)); + *child_trees = all_trees; + + let mut all_summaries = small_child_summaries.clone(); + all_summaries.extend(child_summaries.drain(..)); + *child_summaries = all_summaries; + + let mut full_summary = small_summary.clone(); + Summary::add_summary(&mut full_summary, summary, cx); + *summary = full_summary; + None + } else { + let midpoint = total_child_count.div_ceil(2); + let mut all_trees = small_child_trees.iter().chain(child_trees.iter()).cloned(); + let left_trees = all_trees.by_ref().take(midpoint).collect(); + *child_trees = all_trees.collect(); + + let mut all_summaries = small_child_summaries + .iter() + .chain(child_summaries.iter()) + .cloned(); + let left_summaries: ArrayVec<_, { 2 * TREE_BASE }> = + all_summaries.by_ref().take(midpoint).collect(); + *child_summaries = all_summaries.collect(); + + *summary = sum(child_summaries.iter(), cx); + Some(SumTree(Arc::new(Node::Internal { + height: *height, + summary: sum(left_summaries.iter(), cx), + child_summaries: left_summaries, + child_trees: left_trees, + }))) + } + } + ( + Node::Leaf { + summary: small_summary, + items: small_items, + item_summaries: small_item_summaries, + }, + Node::Leaf { + summary, + items, + item_summaries, + }, + ) => { + let total_child_count = small_items.len() + items.len(); + if total_child_count <= 2 * TREE_BASE { + let mut all_items = small_items.clone(); + all_items.extend(items.drain(..)); + *items = all_items; + + let mut all_summaries = small_item_summaries.clone(); + all_summaries.extend(item_summaries.drain(..)); + *item_summaries = all_summaries; + + let mut full_summary = small_summary.clone(); + Summary::add_summary(&mut full_summary, summary, cx); + *summary = full_summary; + None + } else { + let midpoint = total_child_count.div_ceil(2); + let mut all_items = small_items.iter().chain(items.iter()).cloned(); + let left_items = all_items.by_ref().take(midpoint).collect(); + *items = all_items.collect(); + + let mut all_summaries = small_item_summaries + .iter() + .chain(item_summaries.iter()) + .cloned(); + let left_summaries: ArrayVec<_, { 2 * TREE_BASE }> = + all_summaries.by_ref().take(midpoint).collect(); + *item_summaries = all_summaries.collect(); + + *summary = sum(item_summaries.iter(), 
cx); + Some(SumTree(Arc::new(Node::Leaf { + items: left_items, + summary: sum(left_summaries.iter(), cx), + item_summaries: left_summaries, + }))) + } + } + _ => unreachable!(), + } + } + fn from_child_trees( left: SumTree, right: SumTree, From eff592c447ba123a5aad8362cb66b37d6a543ece Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 24 Nov 2025 11:13:38 -0300 Subject: [PATCH 0333/1030] agent_ui: Refine "reject"/"keep" behavior when regenerating previous prompts (#43347) Closes https://github.com/zed-industries/zed/issues/42753 Consider the following flow: you submit prompt A. Prompt A generates some edits. You don't click on either "reject" or "keep"; they stay in a pending state. You then submit prompt B, but before the agent outputs any response, you click to edit prompt B, thus submitting a regeneration. Before this PR, the above flow would make the edits originated from prompt A to be auto-rejected. This feels very incorrect and can surprise users when they see that the edits that were pending got rejected. It feels more correct to only auto-reject changes if you're regenerating the prompt that directly generated those edits in the first place. Then, it also feels more correct to assume that if there was a follow-up prompt after some edits were made, those edits were passively "accepted". So, this is what this PR is doing. Consider the following flow to get a picture of the behavior change: - You submit prompt A. - Prompt A generates some edits. - You don't click on either "reject" or "keep"; they're pending. - You then submit prompt B, but before the agents outputs anything, you click to edit prompt B, submitting a regeneration. - Now, edits from prompt A will be auto-kept. Release Notes: - agent: Improved the "reject"/"keep" behavior when regenerating older prompts by auto-keeping pending edits that don't originate from the prompt to-be-regenerated. --- crates/agent_ui/src/acp/thread_view.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 3d387fc87c3377aed0278756b1c12644757e687d..92765140f5101034a30fc95db675ff335f2cb324 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -1273,6 +1273,28 @@ impl AcpThreadView { }; cx.spawn_in(window, async move |this, cx| { + // Check if there are any edits from prompts before the one being regenerated. + // + // If there are, we keep/accept them since we're not regenerating the prompt that created them. + // + // If editing the prompt that generated the edits, they are auto-rejected + // through the `rewind` function in the `acp_thread`. + let has_earlier_edits = thread.read_with(cx, |thread, _| { + thread + .entries() + .iter() + .take(entry_ix) + .any(|entry| entry.diffs().next().is_some()) + })?; + + if has_earlier_edits { + thread.update(cx, |thread, cx| { + thread.action_log().update(cx, |action_log, cx| { + action_log.keep_all_edits(None, cx); + }); + })?; + } + thread .update(cx, |thread, cx| thread.rewind(user_message_id, cx))? 
.await?; From d6c550c8382462b5cec334974e05fc7787e66a87 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 24 Nov 2025 12:28:23 -0300 Subject: [PATCH 0334/1030] debugger_ui: Add button to close the panel when docked to bottom (#43409) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds a button to close the panel when it is docked to the bottom. Effectively, the button triggers the same `ToggleBottomDock` action that clicking on the button that opened the panel triggers, but I think having it there just makes it extra obvious how to close it, which is beneficial. As a bonus, also fixed the panel controls container height when it is docked to the sides, so it perfectly aligns with the panel tabbar height. | Perfectly Aligned Header | Close Button | |--------|--------| | Screenshot 2025-11-24 at 12  01
2@2x | Screenshot 2025-11-24 at 12 
01@2x | Release Notes: - N/A --- crates/debugger_ui/src/debugger_panel.rs | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/crates/debugger_ui/src/debugger_panel.rs b/crates/debugger_ui/src/debugger_panel.rs index 0a5dc744cde6ba053a2c5a5100538100a7d6a49b..3890fa6326329d0d72aa6f81c6b94e7c2f364d34 100644 --- a/crates/debugger_ui/src/debugger_panel.rs +++ b/crates/debugger_ui/src/debugger_panel.rs @@ -651,6 +651,17 @@ impl DebugPanel { .tooltip(Tooltip::text("Open Debug Adapter Logs")) }; + let close_bottom_panel_button = { + h_flex().pl_0p5().gap_1().child(Divider::vertical()).child( + IconButton::new("debug-close-panel", IconName::Close) + .icon_size(IconSize::Small) + .on_click(move |_, window, cx| { + window.dispatch_action(workspace::ToggleBottomDock.boxed_clone(), cx) + }) + .tooltip(Tooltip::text("Close Panel")), + ) + }; + Some( div.w_full() .py_1() @@ -658,7 +669,7 @@ impl DebugPanel { .justify_between() .border_b_1() .border_color(cx.theme().colors().border) - .when(is_side, |this| this.gap_1()) + .when(is_side, |this| this.gap_1().h(Tab::container_height(cx))) .child( h_flex() .justify_between() @@ -957,6 +968,7 @@ impl DebugPanel { .child(edit_debug_json_button()) .child(documentation_button()) .child(logs_button()) + .child(close_bottom_panel_button) }), ), ), From 7bbc65ea717e9ae31f614ea9e2d8707b88ca9a7e Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Mon, 24 Nov 2025 16:39:17 +0100 Subject: [PATCH 0335/1030] auto_updater: Fix `upload-nightly.ps1` and auto-update check (#43404) Release Notes: - N/A --- crates/auto_update/src/auto_update.rs | 31 ++++++++++++++++++--------- script/upload-nightly.ps1 | 11 +++++----- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index f5132ffe30e0f360e642d7796c3865bcd48cd71c..599afcf62d610cfc57a1216f46b1910a88e99bea 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -637,7 +637,10 @@ impl AutoUpdater { if let AutoUpdateStatus::Updated { version, .. 
} = status { match version { VersionCheckType::Sha(cached_version) => { - let should_download = fetched_version != cached_version.full(); + let should_download = parsed_fetched_version + .as_ref() + .ok() + .is_none_or(|version| version.build.as_str() != cached_version.full()); let newer_version = should_download .then(|| VersionCheckType::Sha(AppCommitSha::new(fetched_version))); return Ok(newer_version); @@ -656,7 +659,12 @@ impl AutoUpdater { let should_download = app_commit_sha .ok() .flatten() - .map(|sha| fetched_version != sha) + .map(|sha| { + parsed_fetched_version + .as_ref() + .ok() + .is_none_or(|version| version.build.as_str() != sha) + }) .unwrap_or(true); let newer_version = should_download .then(|| VersionCheckType::Sha(AppCommitSha::new(fetched_version))); @@ -1209,9 +1217,10 @@ mod tests { fn test_nightly_does_not_update_when_fetched_sha_is_same() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = semver::Version::new(1, 0, 0); + let mut installed_version = semver::Version::new(1, 0, 0); + installed_version.build = semver::BuildMetadata::new("a").unwrap(); let status = AutoUpdateStatus::Idle; - let fetched_sha = "a".to_string(); + let fetched_sha = "1.0.0+a".to_string(); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1247,14 +1256,15 @@ mod tests { } #[test] - fn test_nightly_does_not_update_when_fetched_sha_is_same_as_cached() { + fn test_nightly_does_not_update_when_fetched_version_is_same_as_cached() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = semver::Version::new(1, 0, 0); + let mut installed_version = semver::Version::new(1, 0, 0); + installed_version.build = semver::BuildMetadata::new("a").unwrap(); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; - let fetched_sha = "b".to_string(); + let fetched_sha = "1.0.0+b".to_string(); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1271,11 +1281,12 @@ mod tests { fn test_nightly_does_update_when_fetched_sha_is_not_same_as_cached() { let release_channel = ReleaseChannel::Nightly; let app_commit_sha = Ok(Some("a".to_string())); - let installed_version = semver::Version::new(1, 0, 0); + let mut installed_version = semver::Version::new(1, 0, 0); + installed_version.build = semver::BuildMetadata::new("a").unwrap(); let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; - let fetched_sha = "c".to_string(); + let fetched_sha = "1.0.0+c".to_string(); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, @@ -1322,7 +1333,7 @@ mod tests { let status = AutoUpdateStatus::Updated { version: VersionCheckType::Sha(AppCommitSha::new("b".to_string())), }; - let fetched_sha = "b".to_string(); + let fetched_sha = "1.0.0+b".to_string(); let newer_version = AutoUpdater::check_if_fetched_version_is_newer( release_channel, diff --git a/script/upload-nightly.ps1 b/script/upload-nightly.ps1 index deec4baecc9274381b4d3f99e611190ab0865636..7eb3665063370d44ec125372b2f22976d40a7efa 100644 --- a/script/upload-nightly.ps1 +++ b/script/upload-nightly.ps1 @@ -12,10 +12,8 @@ ParseZedWorkspace Write-Host "Uploading nightly for target: $target" $bucketName = "zed-nightly-host" - -# Get current git SHA -$sha = git rev-parse HEAD -$sha | Out-File -FilePath "target/latest-sha" 
-NoNewline +$releaseVersion = & "$PSScriptRoot\get-crate-version.ps1" zed +$version = "$releaseVersion-$env:GITHUB_RUN_NUMBER+$env:GITHUB_SHA" # TODO: # Upload remote server files @@ -26,7 +24,10 @@ $sha | Out-File -FilePath "target/latest-sha" -NoNewline # } UploadToBlobStore -BucketName $bucketName -FileToUpload "target/Zed-$Architecture.exe" -BlobStoreKey "nightly/Zed-$Architecture.exe" -UploadToBlobStore -BucketName $bucketName -FileToUpload "target/latest-sha" -BlobStoreKey "nightly/latest-sha-windows" +UploadToBlobStore -BucketName $bucketName -FileToUpload "target/Zed-$Architecture.exe" -BlobStoreKey "$version/Zed-$Architecture.exe" Remove-Item -Path "target/Zed-$Architecture.exe" -ErrorAction SilentlyContinue + +$version | Out-File -FilePath "target/latest-sha" -NoNewline +UploadToBlobStore -BucketName $bucketName -FileToUpload "target/latest-sha" -BlobStoreKey "nightly/latest-sha-windows" Remove-Item -Path "target/latest-sha" -ErrorAction SilentlyContinue From e6b42a2be26ac33ba1af50242a50725d38bd0cdc Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Mon, 24 Nov 2025 18:15:38 +0200 Subject: [PATCH 0336/1030] Use a proper name for highlights.scm (#43412) Release Notes: - N/A --- script/analyze_highlights.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/script/analyze_highlights.py b/script/analyze_highlights.py index 09a6419653da018e00c530e0591ea8576290c6d3..aaf7386be6d11a4209eede118cc39740092ad1b0 100644 --- a/script/analyze_highlights.py +++ b/script/analyze_highlights.py @@ -1,24 +1,24 @@ #!/usr/bin/env python3 """ -This script analyzes all the highlight.scm files in our embedded languages and extensions. +This script analyzes all the highlights.scm files in our embedded languages and extensions. It counts the number of unique instances of @{name} and the languages in which they are used. This is useful to help avoid accidentally introducing new tags when appropriate ones already exist when adding new languages. Flags: --v, --verbose: Include a detailed list of languages for each tag found in the highlight.scm files. +-v, --verbose: Include a detailed list of languages for each tag found in the highlights.scm files. """ +import argparse +import re from collections import defaultdict from pathlib import Path from typing import Any -import argparse -import re pattern = re.compile(r'@(?!_)[a-zA-Z_.]+') def parse_arguments(): - parser = argparse.ArgumentParser(description='Analyze highlight.scm files for unique instances and their languages.') + parser = argparse.ArgumentParser(description='Analyze highlights.scm files for unique instances and their languages.') parser.add_argument('-v', '--verbose', action='store_true', help='Include a list of languages for each tag.') return parser.parse_args() From ea7568ceb315f8ca9a03cbd1efc7b5bcb95d57be Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Mon, 24 Nov 2025 18:27:11 +0200 Subject: [PATCH 0337/1030] zeta2: Support experimental 1120-seedcoder model (#43411) 1. Introduce a common `PromptFormatter` trait 2. Let models define their generation params. 3. 
Add support for the experimental 1120-seedcoder prompt format Release Notes: - N/A --- .../cloud_llm_client/src/predict_edits_v3.rs | 3 + .../src/cloud_zeta2_prompt.rs | 152 +++++++++++++++--- crates/zeta2/src/zeta2.rs | 10 +- crates/zeta_cli/src/main.rs | 2 + 4 files changed, 144 insertions(+), 23 deletions(-) diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 2d7a1aec52ae9cb007238dbd61e58597a9e81666..32a5a34d9d3b63332008a9f7df84a1990f87f17c 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -80,6 +80,8 @@ pub enum PromptFormat { Minimal, /// One-sentence instructions + FIM-like template MinimalQwen, + /// No instructions, Qwen chat + Seed-Coder 1120 FIM-like template + SeedCoder1120, } impl PromptFormat { @@ -108,6 +110,7 @@ impl std::fmt::Display for PromptFormat { PromptFormat::OldTextNewText => write!(f, "Old Text / New Text"), PromptFormat::Minimal => write!(f, "Minimal"), PromptFormat::MinimalQwen => write!(f, "Minimal + Qwen FIM"), + PromptFormat::SeedCoder1120 => write!(f, "Seed-Coder 1120"), } } } diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index 48ab2097d4ca960c28f7edb498e57ded95e208f7..2ddabf750be763542bfc10b794afcb034ff08443 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -169,15 +169,18 @@ pub fn build_prompt( ) -> Result<(String, SectionLabels)> { let mut section_labels = Default::default(); + let prompt_data = PromptData { + events: request.events.clone(), + cursor_point: request.cursor_point, + cursor_path: request.excerpt_path.clone(), + included_files: request.included_files.clone(), + }; match request.prompt_format { PromptFormat::MinimalQwen => { - let prompt = MinimalQwenPrompt { - events: request.events.clone(), - cursor_point: request.cursor_point, - cursor_path: request.excerpt_path.clone(), - included_files: request.included_files.clone(), - }; - return Ok((prompt.render(), section_labels)); + return Ok((MinimalQwenPrompt.render(&prompt_data), section_labels)); + } + PromptFormat::SeedCoder1120 => { + return Ok((SeedCoder1120Prompt.render(&prompt_data), section_labels)); } _ => (), }; @@ -208,6 +211,7 @@ pub fn build_prompt( } PromptFormat::OnlySnippets => vec![], PromptFormat::MinimalQwen => unreachable!(), + PromptFormat::SeedCoder1120 => unreachable!(), }; let mut prompt = match request.prompt_format { @@ -218,6 +222,7 @@ pub fn build_prompt( PromptFormat::OnlySnippets => String::new(), PromptFormat::Minimal => STUDENT_MODEL_INSTRUCTIONS.to_string(), PromptFormat::MinimalQwen => unreachable!(), + PromptFormat::SeedCoder1120 => unreachable!(), }; if request.events.is_empty() { @@ -328,6 +333,13 @@ pub fn build_prompt( Ok((prompt, section_labels)) } +pub fn generation_params(prompt_format: PromptFormat) -> GenerationParams { + match prompt_format { + PromptFormat::SeedCoder1120 => SeedCoder1120Prompt::generation_params(), + _ => GenerationParams::default(), + } +} + pub fn write_codeblock<'a>( path: &Path, excerpts: impl IntoIterator, @@ -786,6 +798,7 @@ impl<'a> SyntaxBasedPrompt<'a> { } } PromptFormat::MinimalQwen => unreachable!(), + PromptFormat::SeedCoder1120 => unreachable!(), } let push_full_snippet = |output: &mut String| { @@ -896,19 +909,34 @@ fn declaration_size(declaration: &ReferencedDeclaration, style: DeclarationStyle } } -struct MinimalQwenPrompt { +struct PromptData 
{ events: Vec, cursor_point: Point, cursor_path: Arc, // TODO: make a common struct with cursor_point included_files: Vec, } -impl MinimalQwenPrompt { - const INSTRUCTIONS: &str = "You are a code completion assistant that analyzes edit history to identify and systematically complete incomplete refactorings or patterns across the entire codebase.\n"; +#[derive(Default)] +pub struct GenerationParams { + pub temperature: Option, + pub top_p: Option, + pub stop: Option>, +} + +trait PromptFormatter { + fn render(&self, data: &PromptData) -> String; - fn render(&self) -> String { - let edit_history = self.fmt_edit_history(); - let context = self.fmt_context(); + fn generation_params() -> GenerationParams { + return GenerationParams::default(); + } +} + +struct MinimalQwenPrompt; + +impl PromptFormatter for MinimalQwenPrompt { + fn render(&self, data: &PromptData) -> String { + let edit_history = self.fmt_edit_history(data); + let context = self.fmt_context(data); format!( "{instructions}\n\n{edit_history}\n\n{context}", @@ -917,13 +945,17 @@ impl MinimalQwenPrompt { context = context ) } +} - fn fmt_edit_history(&self) -> String { - if self.events.is_empty() { +impl MinimalQwenPrompt { + const INSTRUCTIONS: &str = "You are a code completion assistant that analyzes edit history to identify and systematically complete incomplete refactorings or patterns across the entire codebase.\n"; + + fn fmt_edit_history(&self, data: &PromptData) -> String { + if data.events.is_empty() { "(No edit history)\n\n".to_string() } else { let mut events_str = String::new(); - push_events(&mut events_str, &self.events); + push_events(&mut events_str, &data.events); format!( "The following are the latest edits made by the user, from earlier to later.\n\n{}", events_str @@ -931,18 +963,18 @@ impl MinimalQwenPrompt { } } - fn fmt_context(&self) -> String { + fn fmt_context(&self, data: &PromptData) -> String { let mut context = String::new(); let include_line_numbers = true; - for related_file in &self.included_files { + for related_file in &data.included_files { writeln!(context, "<|file_sep|>{}", DiffPathFmt(&related_file.path)).unwrap(); - if related_file.path == self.cursor_path { + if related_file.path == data.cursor_path { write!(context, "<|fim_prefix|>").unwrap(); write_excerpts( &related_file.excerpts, - &[(self.cursor_point, "<|fim_suffix|>")], + &[(data.cursor_point, "<|fim_suffix|>")], related_file.max_row, include_line_numbers, &mut context, @@ -961,3 +993,83 @@ impl MinimalQwenPrompt { context } } + +struct SeedCoder1120Prompt; + +impl PromptFormatter for SeedCoder1120Prompt { + fn render(&self, data: &PromptData) -> String { + let edit_history = self.fmt_edit_history(data); + let context = self.fmt_context(data); + + format!( + "# Edit History:\n{edit_history}\n\n{context}", + edit_history = edit_history, + context = context + ) + } + + fn generation_params() -> GenerationParams { + GenerationParams { + temperature: Some(0.2), + top_p: Some(0.9), + stop: Some(vec!["<[end_of_sentence]>".into()]), + } + } +} + +impl SeedCoder1120Prompt { + fn fmt_edit_history(&self, data: &PromptData) -> String { + if data.events.is_empty() { + "(No edit history)\n\n".to_string() + } else { + let mut events_str = String::new(); + push_events(&mut events_str, &data.events); + events_str + } + } + + fn fmt_context(&self, data: &PromptData) -> String { + let mut context = String::new(); + let include_line_numbers = true; + + for related_file in &data.included_files { + writeln!(context, "# Path: {}\n", 
DiffPathFmt(&related_file.path)).unwrap(); + + if related_file.path == data.cursor_path { + let fim_prompt = self.fmt_fim(&related_file, data.cursor_point); + context.push_str(&fim_prompt); + } else { + write_excerpts( + &related_file.excerpts, + &[], + related_file.max_row, + include_line_numbers, + &mut context, + ); + } + } + context + } + + fn fmt_fim(&self, file: &IncludedFile, cursor_point: Point) -> String { + let mut buf = String::new(); + const FIM_SUFFIX: &str = "<[fim-suffix]>"; + const FIM_PREFIX: &str = "<[fim-prefix]>"; + const FIM_MIDDLE: &str = "<[fim-middle]>"; + write!(buf, "{}", FIM_PREFIX).unwrap(); + write_excerpts( + &file.excerpts, + &[(cursor_point, FIM_SUFFIX)], + file.max_row, + true, + &mut buf, + ); + + // Swap prefix and suffix parts + let index = buf.find(FIM_SUFFIX).unwrap(); + let prefix = &buf[..index]; + let suffix = &buf[index..]; + + format!("{}{}{}", suffix, prefix, FIM_MIDDLE) + } +} diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs index 1cee72ce60e2fcc97d2e4f3b50f274d90a080ee9..255b294d7cc25fade197c3a50d39130bc6bb99c5 100644 --- a/crates/zeta2/src/zeta2.rs +++ b/crates/zeta2/src/zeta2.rs @@ -1562,6 +1562,8 @@ impl Zeta { } let (prompt, _) = prompt_result?; + let generation_params = + cloud_zeta2_prompt::generation_params(cloud_request.prompt_format); let request = open_ai::Request { model: EDIT_PREDICTIONS_MODEL_ID.clone(), messages: vec![open_ai::RequestMessage::User { @@ -1569,8 +1571,8 @@ impl Zeta { }], stream: false, max_completion_tokens: None, - stop: Default::default(), - temperature: 0.7, + stop: generation_params.stop.unwrap_or_default(), + temperature: generation_params.temperature.unwrap_or(0.7), tool_choice: None, parallel_tool_calls: None, tools: vec![], @@ -1636,7 +1638,9 @@ impl Zeta { // TODO: Implement parsing of multi-file diffs crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? } - PromptFormat::Minimal | PromptFormat::MinimalQwen => { + PromptFormat::Minimal + | PromptFormat::MinimalQwen + | PromptFormat::SeedCoder1120 => { if output_text.contains("--- a/\n+++ b/\nNo edits") { let edits = vec![]; (&active_snapshot, edits) diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 53f231599b7d0449b1f2a9cdef8227a7c3e6bbd5..914b141915cd3a89cd35a02bc6c9463094f0de96 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -230,6 +230,7 @@ enum PromptFormat { OldTextNewText, Minimal, MinimalQwen, + SeedCoder1120, } impl Into for PromptFormat { @@ -242,6 +243,7 @@ impl Into for PromptFormat { Self::OldTextNewText => predict_edits_v3::PromptFormat::OldTextNewText, Self::Minimal => predict_edits_v3::PromptFormat::Minimal, Self::MinimalQwen => predict_edits_v3::PromptFormat::MinimalQwen, + Self::SeedCoder1120 => predict_edits_v3::PromptFormat::SeedCoder1120, } } } From 47e8946581938844259d5aafcf6c9302c400fda1 Mon Sep 17 00:00:00 2001 From: HuaGu-Dragon <1801943622@qq.com> Date: Tue, 25 Nov 2025 00:32:30 +0800 Subject: [PATCH 0338/1030] Attempt to fix `go to the end of the line` when using helix mode (#41575) Closes #41550 Release Notes: - Fixed `` behavior in helix mode which will now correctly go to the last charactor of the line. - Fixed not switching to helix normal mode when in default vim context and pressing escape. 
--------- Co-authored-by: Jakub Konka --- assets/keymaps/vim.json | 3 +- crates/vim/src/helix.rs | 80 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json index 233c9fa7e4468142c3e5a31b730bb4d80b83a907..34228530d2d398348d57d71ae41654dacb479712 100644 --- a/assets/keymaps/vim.json +++ b/assets/keymaps/vim.json @@ -414,8 +414,9 @@ } }, { - "context": "vim_mode == helix_normal && !menu", + "context": "VimControl && vim_mode == helix_normal && !menu", "bindings": { + "escape": "vim::SwitchToHelixNormalMode", "i": "vim::HelixInsert", "a": "vim::HelixAppend", "ctrl-[": "editor::Cancel" diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index eb0749794adb321d8ce19f8ad5adcf67b9a41bba..67c99ff6aea249692bddc38d3681be5c491a7437 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -263,6 +263,31 @@ impl Vim { cx: &mut Context, ) { match motion { + Motion::EndOfLine { .. } => { + // In Helix mode, EndOfLine should position cursor ON the last character, + // not after it. We therefore need special handling for it. + self.update_editor(cx, |_, editor, cx| { + let text_layout_details = editor.text_layout_details(window); + editor.change_selections(Default::default(), window, cx, |s| { + s.move_with(|map, selection| { + let goal = selection.goal; + let cursor = if selection.is_empty() || selection.reversed { + selection.head() + } else { + movement::left(map, selection.head()) + }; + + let (point, _goal) = motion + .move_point(map, cursor, goal, times, &text_layout_details) + .unwrap_or((cursor, goal)); + + // Move left by one character to position on the last character + let adjusted_point = movement::saturating_left(map, point); + selection.collapse_to(adjusted_point, SelectionGoal::None) + }) + }); + }); + } Motion::NextWordStart { ignore_punctuation } => { self.helix_find_range_forward(times, window, cx, |left, right, classifier| { let left_kind = classifier.kind_with(left, ignore_punctuation); @@ -1493,4 +1518,59 @@ mod test { Mode::Insert, ); } + + #[gpui::test] + async fn test_g_l_end_of_line(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + cx.enable_helix(); + + // Test g l moves to last character, not after it + cx.set_state("hello ˇworld!", Mode::HelixNormal); + cx.simulate_keystrokes("g l"); + cx.assert_state("hello worldˇ!", Mode::HelixNormal); + + // Test with Chinese characters, test if work with UTF-8? + cx.set_state("ˇ你好世界", Mode::HelixNormal); + cx.simulate_keystrokes("g l"); + cx.assert_state("你好世ˇ界", Mode::HelixNormal); + + // Test with end of line + cx.set_state("endˇ", Mode::HelixNormal); + cx.simulate_keystrokes("g l"); + cx.assert_state("enˇd", Mode::HelixNormal); + + // Test with empty line + cx.set_state( + indoc! {" + hello + ˇ + world"}, + Mode::HelixNormal, + ); + cx.simulate_keystrokes("g l"); + cx.assert_state( + indoc! {" + hello + ˇ + world"}, + Mode::HelixNormal, + ); + + // Test with multiple lines + cx.set_state( + indoc! {" + ˇfirst line + second line + third line"}, + Mode::HelixNormal, + ); + cx.simulate_keystrokes("g l"); + cx.assert_state( + indoc! {" + first linˇe + second line + third line"}, + Mode::HelixNormal, + ); + } } From 4a36f67f94970958cb991872f6ad5f27185c8f3d Mon Sep 17 00:00:00 2001 From: AidanV <84053180+AidanV@users.noreply.github.com> Date: Mon, 24 Nov 2025 08:55:19 -0800 Subject: [PATCH 0339/1030] vim: Fix bug where `d . 
.` freezes the editor (#42145) This bug seems to be caused by pushing an operator (i.e. `d`) followed by a repeat (i.e. `.`) so the recording includes the push operator and the repeat. When this is repeated (i.e. `.`) it causes an infinite loop. This change fixes this bug by pushing a ClearOperator action if there is an ongoing recording when repeat is called. Release Notes: - Fixed bug where pressing `d . .` in Vim mode would freeze the editor. --------- Co-authored-by: dino --- crates/vim/src/normal/repeat.rs | 100 +++++++++++++++++- crates/vim/src/state.rs | 3 + crates/vim/src/vim.rs | 5 +- .../test_data/test_repeat_clear_count.json | 21 ++++ .../test_data/test_repeat_clear_repeat.json | 8 ++ 5 files changed, 134 insertions(+), 3 deletions(-) create mode 100644 crates/vim/test_data/test_repeat_clear_count.json create mode 100644 crates/vim/test_data/test_repeat_clear_repeat.json diff --git a/crates/vim/src/normal/repeat.rs b/crates/vim/src/normal/repeat.rs index e0b515595db013b23730c535df64123aa4dd6707..e47b2b350f9644f99fe7d8ec924ff0f0b9ab23f7 100644 --- a/crates/vim/src/normal/repeat.rs +++ b/crates/vim/src/normal/repeat.rs @@ -230,8 +230,19 @@ impl Vim { window: &mut Window, cx: &mut Context, ) { - let count = Vim::take_count(cx); + if self.active_operator().is_some() { + Vim::update_globals(cx, |globals, _| { + globals.recording_actions.clear(); + globals.recording_count = None; + globals.dot_recording = false; + globals.stop_recording_after_next_action = false; + }); + self.clear_operator(window, cx); + return; + } + Vim::take_forced_motion(cx); + let count = Vim::take_count(cx); let Some((mut actions, selection, mode)) = Vim::update_globals(cx, |globals, _| { let actions = globals.recorded_actions.clone(); @@ -810,4 +821,91 @@ mod test { cx.simulate_shared_keystrokes("@ b").await; cx.shared_state().await.assert_eq("aaaaaaabbbˇd"); } + + #[gpui::test] + async fn test_repeat_clear(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + // Check that, when repeat is preceded by something other than a number, + // the current operator is cleared, in order to prevent infinite loops. + cx.set_state("ˇhello world", Mode::Normal); + cx.simulate_keystrokes("d ."); + assert_eq!(cx.active_operator(), None); + } + + #[gpui::test] + async fn test_repeat_clear_repeat(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new(cx).await; + + cx.set_shared_state(indoc! { + "ˇthe quick brown + fox jumps over + the lazy dog" + }) + .await; + cx.simulate_shared_keystrokes("d d").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇfox jumps over + the lazy dog" + }); + cx.simulate_shared_keystrokes("d . .").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇthe lazy dog" + }); + } + + #[gpui::test] + async fn test_repeat_clear_count(cx: &mut gpui::TestAppContext) { + let mut cx = NeovimBackedTestContext::new(cx).await; + + cx.set_shared_state(indoc! { + "ˇthe quick brown + fox jumps over + the lazy dog" + }) + .await; + cx.simulate_shared_keystrokes("d d").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇfox jumps over + the lazy dog" + }); + cx.simulate_shared_keystrokes("2 d .").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇfox jumps over + the lazy dog" + }); + cx.simulate_shared_keystrokes(".").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇthe lazy dog" + }); + + cx.set_shared_state(indoc! 
{ + "ˇthe quick brown + fox jumps over + the lazy dog + the quick brown + fox jumps over + the lazy dog" + }) + .await; + cx.simulate_shared_keystrokes("2 d d").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇthe lazy dog + the quick brown + fox jumps over + the lazy dog" + }); + cx.simulate_shared_keystrokes("5 d .").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇthe lazy dog + the quick brown + fox jumps over + the lazy dog" + }); + cx.simulate_shared_keystrokes(".").await; + cx.shared_state().await.assert_eq(indoc! { + "ˇfox jumps over + the lazy dog" + }); + } } diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index d1c52e8f53a2214c3e46473c59b15ea1f6f4f407..eba4476ea878932518dc8a3951e04f4c6ea96d29 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -217,6 +217,7 @@ pub struct VimGlobals { pub forced_motion: bool, pub stop_recording_after_next_action: bool, pub ignore_current_insertion: bool, + pub recording_count: Option, pub recorded_count: Option, pub recording_actions: Vec, pub recorded_actions: Vec, @@ -898,6 +899,7 @@ impl VimGlobals { if self.stop_recording_after_next_action { self.dot_recording = false; self.recorded_actions = std::mem::take(&mut self.recording_actions); + self.recorded_count = self.recording_count.take(); self.stop_recording_after_next_action = false; } } @@ -924,6 +926,7 @@ impl VimGlobals { if self.stop_recording_after_next_action { self.dot_recording = false; self.recorded_actions = std::mem::take(&mut self.recording_actions); + self.recorded_count = self.recording_count.take(); self.stop_recording_after_next_action = false; } } diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index 9f31b3d3ac0c23457d585990de3a0b201f08b795..f87c562c8a0821f5dfea66dd33b1c44ca6021f42 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -1258,7 +1258,7 @@ impl Vim { }; if global_state.dot_recording { - global_state.recorded_count = count; + global_state.recording_count = count; } count } @@ -1516,7 +1516,7 @@ impl Vim { if !globals.dot_replaying { globals.dot_recording = true; globals.recording_actions = Default::default(); - globals.recorded_count = None; + globals.recording_count = None; let selections = self.editor().map(|editor| { editor.update(cx, |editor, cx| { @@ -1586,6 +1586,7 @@ impl Vim { .recording_actions .push(ReplayableAction::Action(action.boxed_clone())); globals.recorded_actions = mem::take(&mut globals.recording_actions); + globals.recorded_count = globals.recording_count.take(); globals.dot_recording = false; globals.stop_recording_after_next_action = false; } diff --git a/crates/vim/test_data/test_repeat_clear_count.json b/crates/vim/test_data/test_repeat_clear_count.json new file mode 100644 index 0000000000000000000000000000000000000000..352c6ca4a8d2ee0534d3b695e2eb36ad26bc62d8 --- /dev/null +++ b/crates/vim/test_data/test_repeat_clear_count.json @@ -0,0 +1,21 @@ +{"Put":{"state":"ˇthe quick brown\nfox jumps over\nthe lazy dog"}} +{"Key":"d"} +{"Key":"d"} +{"Get":{"state":"ˇfox jumps over\nthe lazy dog","mode":"Normal"}} +{"Key":"2"} +{"Key":"d"} +{"Key":"."} +{"Get":{"state":"ˇfox jumps over\nthe lazy dog","mode":"Normal"}} +{"Key":"."} +{"Get":{"state":"ˇthe lazy dog","mode":"Normal"}} +{"Put":{"state":"ˇthe quick brown\nfox jumps over\nthe lazy dog\nthe quick brown\nfox jumps over\nthe lazy dog"}} +{"Key":"2"} +{"Key":"d"} +{"Key":"d"} +{"Get":{"state":"ˇthe lazy dog\nthe quick brown\nfox jumps over\nthe lazy dog","mode":"Normal"}} +{"Key":"5"} +{"Key":"d"} +{"Key":"."} 
+{"Get":{"state":"ˇthe lazy dog\nthe quick brown\nfox jumps over\nthe lazy dog","mode":"Normal"}} +{"Key":"."} +{"Get":{"state":"ˇfox jumps over\nthe lazy dog","mode":"Normal"}} diff --git a/crates/vim/test_data/test_repeat_clear_repeat.json b/crates/vim/test_data/test_repeat_clear_repeat.json new file mode 100644 index 0000000000000000000000000000000000000000..39d96e2a3759d75994e24e6ad80a3ef00b64259b --- /dev/null +++ b/crates/vim/test_data/test_repeat_clear_repeat.json @@ -0,0 +1,8 @@ +{"Put":{"state":"ˇthe quick brown\nfox jumps over\nthe lazy dog"}} +{"Key":"d"} +{"Key":"d"} +{"Get":{"state":"ˇfox jumps over\nthe lazy dog","mode":"Normal"}} +{"Key":"d"} +{"Key":"."} +{"Key":"."} +{"Get":{"state":"ˇthe lazy dog","mode":"Normal"}} From a7fff59136c2a53a92ad4dacf37b5c69cdfe7b4b Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Mon, 24 Nov 2025 14:41:40 -0300 Subject: [PATCH 0340/1030] Add each panel to the workspace as soon as it's ready (#43414) We'll now add panels to the workspace as soon as they're ready rather than waiting for all the rest to complete. We should strive to make all panels fast, but given that their load tasks are fallible and do IO, this approach seems more resilient. Additionally, we'll now start loading the agent panel at the same time as the rest. Release Notes: - workspace: Add panels as soon as they are ready --- crates/collab/src/tests/following_tests.rs | 2 +- crates/workspace/src/dock.rs | 17 +- crates/workspace/src/workspace.rs | 12 +- crates/zed/src/zed.rs | 195 +++++++++++---------- 4 files changed, 122 insertions(+), 104 deletions(-) diff --git a/crates/collab/src/tests/following_tests.rs b/crates/collab/src/tests/following_tests.rs index f3827b6f1195392ddedcab4f45854a8e9790dc28..ec654e06341b6fdcbe88e4031f425d18dd6461e7 100644 --- a/crates/collab/src/tests/following_tests.rs +++ b/crates/collab/src/tests/following_tests.rs @@ -529,7 +529,7 @@ async fn test_basic_following( }); // Client B activates a panel, and the previously-opened screen-sharing item gets activated. - let panel = cx_b.new(|cx| TestPanel::new(DockPosition::Left, cx)); + let panel = cx_b.new(|cx| TestPanel::new(DockPosition::Left, 100, cx)); workspace_b.update_in(cx_b, |workspace, window, cx| { workspace.add_panel(panel, window, cx); workspace.toggle_panel_focus::(window, cx); diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 05af5d080c4c965f3d53f61b5af144a456ce0074..dfc341db9c71fd1059853b9480a7e679109ead40 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -560,7 +560,16 @@ impl Dock { .binary_search_by_key(&panel.read(cx).activation_priority(), |entry| { entry.panel.activation_priority(cx) }) { - Ok(ix) => ix, + Ok(ix) => { + if cfg!(debug_assertions) { + panic!( + "Panels `{}` and `{}` have the same activation priority. 
Each panel must have a unique priority so the status bar order is deterministic.", + T::panel_key(), + self.panel_entries[ix].panel.panel_key() + ); + } + ix + } Err(ix) => ix, }; if let Some(active_index) = self.active_panel_index.as_mut() @@ -994,19 +1003,21 @@ pub mod test { pub active: bool, pub focus_handle: FocusHandle, pub size: Pixels, + pub activation_priority: u32, } actions!(test_only, [ToggleTestPanel]); impl EventEmitter for TestPanel {} impl TestPanel { - pub fn new(position: DockPosition, cx: &mut App) -> Self { + pub fn new(position: DockPosition, activation_priority: u32, cx: &mut App) -> Self { Self { position, zoomed: false, active: false, focus_handle: cx.focus_handle(), size: px(300.), + activation_priority, } } } @@ -1072,7 +1083,7 @@ pub mod test { } fn activation_priority(&self) -> u32 { - 100 + self.activation_priority } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 6e553ac93588ab4a127437adc03bf9323d47014f..96fed9f65517bd0005ff27907e6f888edd7a48f9 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -9176,7 +9176,7 @@ mod tests { cx.add_window_view(|window, cx| Workspace::test_new(project, window, cx)); let panel = workspace.update_in(cx, |workspace, window, cx| { - let panel = cx.new(|cx| TestPanel::new(DockPosition::Right, cx)); + let panel = cx.new(|cx| TestPanel::new(DockPosition::Right, 100, cx)); workspace.add_panel(panel.clone(), window, cx); workspace @@ -9409,10 +9409,10 @@ mod tests { // Open two docks (left and right) with one panel each let (left_panel, right_panel) = workspace.update_in(cx, |workspace, window, cx| { - let left_panel = cx.new(|cx| TestPanel::new(DockPosition::Left, cx)); + let left_panel = cx.new(|cx| TestPanel::new(DockPosition::Left, 100, cx)); workspace.add_panel(left_panel.clone(), window, cx); - let right_panel = cx.new(|cx| TestPanel::new(DockPosition::Right, cx)); + let right_panel = cx.new(|cx| TestPanel::new(DockPosition::Right, 101, cx)); workspace.add_panel(right_panel.clone(), window, cx); workspace.toggle_dock(DockPosition::Left, window, cx); @@ -9840,10 +9840,10 @@ mod tests { cx.add_window_view(|window, cx| Workspace::test_new(project, window, cx)); let (panel_1, panel_2) = workspace.update_in(cx, |workspace, window, cx| { - let panel_1 = cx.new(|cx| TestPanel::new(DockPosition::Left, cx)); + let panel_1 = cx.new(|cx| TestPanel::new(DockPosition::Left, 100, cx)); workspace.add_panel(panel_1.clone(), window, cx); workspace.toggle_dock(DockPosition::Left, window, cx); - let panel_2 = cx.new(|cx| TestPanel::new(DockPosition::Right, cx)); + let panel_2 = cx.new(|cx| TestPanel::new(DockPosition::Right, 101, cx)); workspace.add_panel(panel_2.clone(), window, cx); workspace.toggle_dock(DockPosition::Right, window, cx); @@ -10750,7 +10750,7 @@ mod tests { // Add a new panel to the right dock, opening the dock and setting the // focus to the new panel. 
let panel = workspace.update_in(cx, |workspace, window, cx| { - let panel = cx.new(|cx| TestPanel::new(DockPosition::Right, cx)); + let panel = cx.new(|cx| TestPanel::new(DockPosition::Right, 100, cx)); workspace.add_panel(panel.clone(), window, cx); workspace diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 33a715283b9d63f0238eb55b758d71aac17c9b5c..f6348a8cf22bda6441bca6d31abe8823c1d2215a 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -22,16 +22,17 @@ use editor::{Editor, MultiBuffer}; use extension_host::ExtensionStore; use feature_flags::{FeatureFlagAppExt, PanicFeatureFlag}; use fs::Fs; +use futures::FutureExt as _; use futures::future::Either; use futures::{StreamExt, channel::mpsc, select_biased}; use git_ui::commit_view::CommitViewToolbar; use git_ui::git_panel::GitPanel; use git_ui::project_diff::ProjectDiffToolbar; use gpui::{ - Action, App, AppContext as _, Context, DismissEvent, Element, Entity, Focusable, KeyBinding, - ParentElement, PathPromptOptions, PromptLevel, ReadGlobal, SharedString, Styled, Task, - TitlebarOptions, UpdateGlobal, Window, WindowKind, WindowOptions, actions, image_cache, point, - px, retain_all, + Action, App, AppContext as _, AsyncWindowContext, Context, DismissEvent, Element, Entity, + Focusable, KeyBinding, ParentElement, PathPromptOptions, PromptLevel, ReadGlobal, SharedString, + Styled, Task, TitlebarOptions, UpdateGlobal, WeakEntity, Window, WindowKind, WindowOptions, + actions, image_cache, point, px, retain_all, }; use image_viewer::ImageInfo; use language::Capability; @@ -655,105 +656,111 @@ fn initialize_panels( ); let debug_panel = DebugPanel::load(workspace_handle.clone(), cx); - let ( - project_panel, - outline_panel, - terminal_panel, - git_panel, - channels_panel, - notification_panel, - debug_panel, - ) = futures::try_join!( - project_panel, - outline_panel, - git_panel, - terminal_panel, - channels_panel, - notification_panel, - debug_panel, - )?; - - workspace_handle.update_in(cx, |workspace, window, cx| { - workspace.add_panel(project_panel, window, cx); - workspace.add_panel(outline_panel, window, cx); - workspace.add_panel(terminal_panel, window, cx); - workspace.add_panel(git_panel, window, cx); - workspace.add_panel(channels_panel, window, cx); - workspace.add_panel(notification_panel, window, cx); - workspace.add_panel(debug_panel, window, cx); - })?; - - fn setup_or_teardown_agent_panel( - workspace: &mut Workspace, - prompt_builder: Arc, - window: &mut Window, - cx: &mut Context, - ) -> Task> { - let disable_ai = SettingsStore::global(cx) - .get::(None) - .disable_ai - || cfg!(test); - let existing_panel = workspace.panel::(cx); - match (disable_ai, existing_panel) { - (false, None) => cx.spawn_in(window, async move |workspace, cx| { - let panel = - agent_ui::AgentPanel::load(workspace.clone(), prompt_builder, cx.clone()) - .await?; - workspace.update_in(cx, |workspace, window, cx| { - let disable_ai = SettingsStore::global(cx) - .get::(None) - .disable_ai; - let have_panel = workspace.panel::(cx).is_some(); - if !disable_ai && !have_panel { - workspace.add_panel(panel, window, cx); - } + async fn add_panel_when_ready( + panel_task: impl Future>> + 'static, + workspace_handle: WeakEntity, + mut cx: gpui::AsyncWindowContext, + ) { + if let Some(panel) = panel_task.await.context("failed to load panel").log_err() + { + workspace_handle + .update_in(&mut cx, |workspace, window, cx| { + workspace.add_panel(panel, window, cx); }) - }), - (true, Some(existing_panel)) => { - 
workspace.remove_panel::(&existing_panel, window, cx); - Task::ready(Ok(())) - } - _ => Task::ready(Ok(())), + .log_err(); } } - workspace_handle - .update_in(cx, |workspace, window, cx| { - setup_or_teardown_agent_panel(workspace, prompt_builder.clone(), window, cx) - })? - .await?; + futures::join!( + add_panel_when_ready(project_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(outline_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(terminal_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(git_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(channels_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(notification_panel, workspace_handle.clone(), cx.clone()), + add_panel_when_ready(debug_panel, workspace_handle.clone(), cx.clone()), + initialize_agent_panel(workspace_handle, prompt_builder, cx.clone()).map(|r| r.log_err()) + ); - workspace_handle.update_in(cx, |workspace, window, cx| { - cx.observe_global_in::(window, { - let prompt_builder = prompt_builder.clone(); - move |workspace, window, cx| { - setup_or_teardown_agent_panel(workspace, prompt_builder.clone(), window, cx) - .detach_and_log_err(cx); - } - }) - .detach(); + anyhow::Ok(()) + }) + .detach(); +} - // Register the actions that are shared between `assistant` and `assistant2`. - // - // We need to do this here instead of within the individual `init` - // functions so that we only register the actions once. - // - // Once we ship `assistant2` we can push this back down into `agent::agent_panel::init`. - if !cfg!(test) { - ::set_global( - Arc::new(agent_ui::ConcreteAssistantPanelDelegate), - cx, - ); +async fn initialize_agent_panel( + workspace_handle: WeakEntity, + prompt_builder: Arc, + mut cx: AsyncWindowContext, +) -> anyhow::Result<()> { + fn setup_or_teardown_agent_panel( + workspace: &mut Workspace, + prompt_builder: Arc, + window: &mut Window, + cx: &mut Context, + ) -> Task> { + let disable_ai = SettingsStore::global(cx) + .get::(None) + .disable_ai + || cfg!(test); + let existing_panel = workspace.panel::(cx); + match (disable_ai, existing_panel) { + (false, None) => cx.spawn_in(window, async move |workspace, cx| { + let panel = + agent_ui::AgentPanel::load(workspace.clone(), prompt_builder, cx.clone()) + .await?; + workspace.update_in(cx, |workspace, window, cx| { + let disable_ai = SettingsStore::global(cx) + .get::(None) + .disable_ai; + let have_panel = workspace.panel::(cx).is_some(); + if !disable_ai && !have_panel { + workspace.add_panel(panel, window, cx); + } + }) + }), + (true, Some(existing_panel)) => { + workspace.remove_panel::(&existing_panel, window, cx); + Task::ready(Ok(())) + } + _ => Task::ready(Ok(())), + } + } - workspace - .register_action(agent_ui::AgentPanel::toggle_focus) - .register_action(agent_ui::InlineAssistant::inline_assist); + workspace_handle + .update_in(&mut cx, |workspace, window, cx| { + setup_or_teardown_agent_panel(workspace, prompt_builder.clone(), window, cx) + })? + .await?; + + workspace_handle.update_in(&mut cx, |workspace, window, cx| { + cx.observe_global_in::(window, { + let prompt_builder = prompt_builder.clone(); + move |workspace, window, cx| { + setup_or_teardown_agent_panel(workspace, prompt_builder.clone(), window, cx) + .detach_and_log_err(cx); } - })?; + }) + .detach(); - anyhow::Ok(()) - }) - .detach(); + // Register the actions that are shared between `assistant` and `assistant2`. 
+ // + // We need to do this here instead of within the individual `init` + // functions so that we only register the actions once. + // + // Once we ship `assistant2` we can push this back down into `agent::agent_panel::init`. + if !cfg!(test) { + ::set_global( + Arc::new(agent_ui::ConcreteAssistantPanelDelegate), + cx, + ); + + workspace + .register_action(agent_ui::AgentPanel::toggle_focus) + .register_action(agent_ui::InlineAssistant::inline_assist); + } + })?; + + anyhow::Ok(()) } fn register_actions( From 6631d8be4e658f9e3e4fa1fcd770295408221a40 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Mon, 24 Nov 2025 13:24:26 -0500 Subject: [PATCH 0341/1030] Fix Gemini 3 on OpenRouter (#43416) Release Notes: - Gemini 3 now works on OpenRouter in the Agent Panel --- crates/agent/src/db.rs | 1 + crates/agent/src/edit_agent.rs | 1 + crates/agent/src/edit_agent/evals.rs | 3 + crates/agent/src/tests/mod.rs | 87 ++++-- crates/agent/src/thread.rs | 20 ++ crates/agent_ui/src/buffer_codegen.rs | 1 + .../agent_ui/src/terminal_inline_assistant.rs | 1 + .../assistant_text_thread/src/text_thread.rs | 8 + crates/copilot/src/copilot_chat.rs | 3 + crates/copilot/src/copilot_responses.rs | 4 + crates/eval/src/instance.rs | 6 +- crates/git_ui/src/git_panel.rs | 1 + crates/language_model/src/language_model.rs | 6 +- crates/language_model/src/request.rs | 2 + .../language_models/src/provider/anthropic.rs | 1 + .../src/provider/copilot_chat.rs | 16 +- crates/language_models/src/provider/google.rs | 4 + .../language_models/src/provider/mistral.rs | 3 + .../language_models/src/provider/open_ai.rs | 1 + .../src/provider/open_router.rs | 267 +++++++++++++++++- crates/open_router/src/open_router.rs | 8 + crates/rules_library/src/rules_library.rs | 1 + 22 files changed, 408 insertions(+), 37 deletions(-) diff --git a/crates/agent/src/db.rs b/crates/agent/src/db.rs index 84d080ff48107e7173226df81a419b90603d82fd..6b6312e48176c93fbfb12f97e26c7943c6cbf89a 100644 --- a/crates/agent/src/db.rs +++ b/crates/agent/src/db.rs @@ -182,6 +182,7 @@ impl DbThread { crate::Message::Agent(AgentMessage { content, tool_results, + reasoning_details: None, }) } language_model::Role::System => { diff --git a/crates/agent/src/edit_agent.rs b/crates/agent/src/edit_agent.rs index 2ecf3429d46540ea309052e833c3e40ea2a53cb5..e5b1d1e3871ecb0070f60f5f382196482e24963a 100644 --- a/crates/agent/src/edit_agent.rs +++ b/crates/agent/src/edit_agent.rs @@ -703,6 +703,7 @@ impl EditAgent { role: Role::User, content: vec![MessageContent::Text(prompt)], cache: false, + reasoning_details: None, }); // Include tools in the request so that we can take advantage of diff --git a/crates/agent/src/edit_agent/evals.rs b/crates/agent/src/edit_agent/evals.rs index ddb9052b84b986229720efa89b9e912452411d86..81dce33d0394b5757be4934031f31b6f17233e9c 100644 --- a/crates/agent/src/edit_agent/evals.rs +++ b/crates/agent/src/edit_agent/evals.rs @@ -1081,6 +1081,7 @@ fn message( role, content: contents.into_iter().collect(), cache: false, + reasoning_details: None, } } @@ -1268,6 +1269,7 @@ impl EvalAssertion { role: Role::User, content: vec![prompt.into()], cache: false, + reasoning_details: None, }], thinking_allowed: true, ..Default::default() @@ -1594,6 +1596,7 @@ impl EditAgentTest { role: Role::System, content: vec![MessageContent::Text(system_prompt)], cache: true, + reasoning_details: None, }] .into_iter() .chain(eval.conversation) diff --git a/crates/agent/src/tests/mod.rs b/crates/agent/src/tests/mod.rs index 
f43cbed952afd434c4262da486ce11dffa40a5c8..efba471f1a927446aa96b1c1426c60b42b725b89 100644 --- a/crates/agent/src/tests/mod.rs +++ b/crates/agent/src/tests/mod.rs @@ -215,7 +215,8 @@ async fn test_prompt_caching(cx: &mut TestAppContext) { vec![LanguageModelRequestMessage { role: Role::User, content: vec!["Message 1".into()], - cache: true + cache: true, + reasoning_details: None, }] ); fake_model.send_last_completion_stream_event(LanguageModelCompletionEvent::Text( @@ -239,17 +240,20 @@ async fn test_prompt_caching(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["Message 1".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec!["Response to Message 1".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec!["Message 2".into()], - cache: true + cache: true, + reasoning_details: None, } ] ); @@ -295,37 +299,44 @@ async fn test_prompt_caching(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["Message 1".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec!["Response to Message 1".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec!["Message 2".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec!["Response to Message 2".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec!["Use the echo tool".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec![MessageContent::ToolResult(tool_result)], - cache: true + cache: true, + reasoning_details: None, } ] ); @@ -648,17 +659,20 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["abc".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use.clone())], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec![MessageContent::ToolResult(tool_result.clone())], - cache: true + cache: true, + reasoning_details: None, }, ] ); @@ -682,22 +696,26 @@ async fn test_resume_after_tool_use_limit(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["abc".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec![MessageContent::ToolResult(tool_result)], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec!["Continue where you left off".into()], - cache: true + cache: true, + reasoning_details: None, } ] ); @@ -769,22 +787,26 @@ async fn test_send_after_tool_use_limit(cx: &mut TestAppContext) 
{ LanguageModelRequestMessage { role: Role::User, content: vec!["abc".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec![MessageContent::ToolResult(tool_result)], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec!["ghi".into()], - cache: true + cache: true, + reasoning_details: None, } ] ); @@ -1827,7 +1849,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["Hey!".into()], - cache: true + cache: true, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, @@ -1835,7 +1858,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) { MessageContent::Text("Hi!".into()), MessageContent::ToolUse(echo_tool_use.clone()) ], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, @@ -1846,7 +1870,8 @@ async fn test_building_request_with_pending_tools(cx: &mut TestAppContext) { content: "test".into(), output: Some("test".into()) })], - cache: false + cache: false, + reasoning_details: None, }, ], ); @@ -2244,12 +2269,14 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) { LanguageModelRequestMessage { role: Role::User, content: vec!["Call the echo tool!".into()], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::Assistant, content: vec![language_model::MessageContent::ToolUse(tool_use_1.clone())], - cache: false + cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, @@ -2262,7 +2289,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) { output: Some("test".into()) } )], - cache: true + cache: true, + reasoning_details: None, }, ] ); @@ -2276,7 +2304,8 @@ async fn test_send_retry_finishes_tool_calls_on_error(cx: &mut TestAppContext) { thread.last_message(), Some(Message::Agent(AgentMessage { content: vec![AgentMessageContent::Text("Done".into())], - tool_results: IndexMap::default() + tool_results: IndexMap::default(), + reasoning_details: None, })) ); }) diff --git a/crates/agent/src/thread.rs b/crates/agent/src/thread.rs index 928b60eee4bc3ccdf296e8ba7f4f0bdc49cb9fa3..294c96b3ecb7800ab5b5f62749d335682efebd60 100644 --- a/crates/agent/src/thread.rs +++ b/crates/agent/src/thread.rs @@ -113,6 +113,7 @@ impl Message { role: Role::User, content: vec!["Continue where you left off".into()], cache: false, + reasoning_details: None, }], } } @@ -177,6 +178,7 @@ impl UserMessage { role: Role::User, content: Vec::with_capacity(self.content.len()), cache: false, + reasoning_details: None, }; const OPEN_CONTEXT: &str = "\n\ @@ -444,6 +446,7 @@ impl AgentMessage { role: Role::Assistant, content: Vec::with_capacity(self.content.len()), cache: false, + reasoning_details: self.reasoning_details.clone(), }; for chunk in &self.content { match chunk { @@ -479,6 +482,7 @@ impl AgentMessage { role: Role::User, content: Vec::new(), cache: false, + reasoning_details: None, }; for tool_result in self.tool_results.values() { @@ -508,6 +512,7 @@ impl AgentMessage { pub struct AgentMessage { pub content: Vec, pub tool_results: IndexMap, + pub reasoning_details: Option, } 
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] @@ -1398,6 +1403,18 @@ impl Thread { self.handle_thinking_event(text, signature, event_stream, cx) } RedactedThinking { data } => self.handle_redacted_thinking_event(data, cx), + ReasoningDetails(details) => { + let last_message = self.pending_message(); + // Store the last non-empty reasoning_details (overwrites earlier ones) + // This ensures we keep the encrypted reasoning with signatures, not the early text reasoning + if let serde_json::Value::Array(ref arr) = details { + if !arr.is_empty() { + last_message.reasoning_details = Some(details); + } + } else { + last_message.reasoning_details = Some(details); + } + } ToolUse(tool_use) => { return Ok(self.handle_tool_use_event(tool_use, event_stream, cx)); } @@ -1673,6 +1690,7 @@ impl Thread { role: Role::User, content: vec![SUMMARIZE_THREAD_DETAILED_PROMPT.into()], cache: false, + reasoning_details: None, }); let task = cx @@ -1737,6 +1755,7 @@ impl Thread { role: Role::User, content: vec![SUMMARIZE_THREAD_PROMPT.into()], cache: false, + reasoning_details: None, }); self.pending_title_generation = Some(cx.spawn(async move |this, cx| { let mut title = String::new(); @@ -1984,6 +2003,7 @@ impl Thread { role: Role::System, content: vec![system_prompt.into()], cache: false, + reasoning_details: None, }]; for message in &self.messages { messages.extend(message.to_request()); diff --git a/crates/agent_ui/src/buffer_codegen.rs b/crates/agent_ui/src/buffer_codegen.rs index ba52b0298d37211626b6baf6aae1fb3da0be6372..647437770604b766ab054cb66fc6c8e154402ab7 100644 --- a/crates/agent_ui/src/buffer_codegen.rs +++ b/crates/agent_ui/src/buffer_codegen.rs @@ -423,6 +423,7 @@ impl CodegenAlternative { role: Role::User, content: Vec::new(), cache: false, + reasoning_details: None, }; if let Some(context) = context_task.await { diff --git a/crates/agent_ui/src/terminal_inline_assistant.rs b/crates/agent_ui/src/terminal_inline_assistant.rs index c6da11a35af22c4052cd580e58c896e19a1faf78..43ea697bece318699f350259a0e2e38d1a4f4d8d 100644 --- a/crates/agent_ui/src/terminal_inline_assistant.rs +++ b/crates/agent_ui/src/terminal_inline_assistant.rs @@ -262,6 +262,7 @@ impl TerminalInlineAssistant { role: Role::User, content: vec![], cache: false, + reasoning_details: None, }; if let Some(context) = load_context_task.await { diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 9f065e9ca7a1daf933c1313dd1d5f092cbed2771..a50e410ab7d1bd1eb34ba367dfbfd36a7b2ec826 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -1417,6 +1417,7 @@ impl TextThread { role: Role::User, content: vec!["Respond only with OK, nothing else.".into()], cache: false, + reasoning_details: None, }); req }; @@ -2085,6 +2086,11 @@ impl TextThread { ); } LanguageModelCompletionEvent::StartMessage { .. } => {} + LanguageModelCompletionEvent::ReasoningDetails(_) => { + // ReasoningDetails are metadata (signatures, encrypted data, format info) + // used for request/response validation, not UI content. + // The displayable thinking text is already handled by the Thinking event. 
+ } LanguageModelCompletionEvent::Stop(reason) => { stop_reason = reason; } @@ -2308,6 +2314,7 @@ impl TextThread { role: message.role, content: Vec::new(), cache: message.cache.as_ref().is_some_and(|cache| cache.is_anchor), + reasoning_details: None, }; while let Some(content) = contents.peek() { @@ -2679,6 +2686,7 @@ impl TextThread { role: Role::User, content: vec![SUMMARIZE_THREAD_PROMPT.into()], cache: false, + reasoning_details: None, }); // If there is no summary, it is set with `done: false` so that "Loading Summary…" can diff --git a/crates/copilot/src/copilot_chat.rs b/crates/copilot/src/copilot_chat.rs index 5d22760942dbbcfd72f1dacb83c249a08f2fe72a..d4051701f72331bf5fc25fcd634002f0206ba529 100644 --- a/crates/copilot/src/copilot_chat.rs +++ b/crates/copilot/src/copilot_chat.rs @@ -353,6 +353,8 @@ pub enum ToolCallContent { pub struct FunctionContent { pub name: String, pub arguments: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thought_signature: Option, } #[derive(Deserialize, Debug)] @@ -396,6 +398,7 @@ pub struct ToolCallChunk { pub struct FunctionChunk { pub name: Option, pub arguments: Option, + pub thought_signature: Option, } #[derive(Deserialize)] diff --git a/crates/copilot/src/copilot_responses.rs b/crates/copilot/src/copilot_responses.rs index c1e066208823dcab34a32096cfa447dd0ec9592f..938577e224bcf4af440c3bd646cd1910ec1fbd13 100644 --- a/crates/copilot/src/copilot_responses.rs +++ b/crates/copilot/src/copilot_responses.rs @@ -127,6 +127,8 @@ pub enum ResponseInputItem { arguments: String, #[serde(skip_serializing_if = "Option::is_none")] status: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + thought_signature: Option, }, FunctionCallOutput { call_id: String, @@ -251,6 +253,8 @@ pub enum ResponseOutputItem { arguments: String, #[serde(skip_serializing_if = "Option::is_none")] status: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + thought_signature: Option, }, Reasoning { id: String, diff --git a/crates/eval/src/instance.rs b/crates/eval/src/instance.rs index 075a1a5cea1da782d40778befeb04bf2e6bac316..99a8af053609b98efe29a179964a38137c4ba021 100644 --- a/crates/eval/src/instance.rs +++ b/crates/eval/src/instance.rs @@ -553,6 +553,7 @@ impl ExampleInstance { role: Role::User, content: vec![MessageContent::Text(to_prompt(assertion.description))], cache: false, + reasoning_details: None, }], temperature: None, tools: Vec::new(), @@ -1255,7 +1256,8 @@ pub fn response_events_to_markdown( | LanguageModelCompletionEvent::StartMessage { .. } | LanguageModelCompletionEvent::UsageUpdated { .. } | LanguageModelCompletionEvent::Queued { .. } - | LanguageModelCompletionEvent::Started, + | LanguageModelCompletionEvent::Started + | LanguageModelCompletionEvent::ReasoningDetails(_), ) => {} Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { json_parse_error, .. @@ -1341,6 +1343,7 @@ impl ThreadDialog { Ok(LanguageModelCompletionEvent::UsageUpdate(_)) | Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) | Ok(LanguageModelCompletionEvent::StartMessage { .. }) + | Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) | Ok(LanguageModelCompletionEvent::Stop(_)) | Ok(LanguageModelCompletionEvent::Queued { .. 
}) | Ok(LanguageModelCompletionEvent::Started) @@ -1372,6 +1375,7 @@ impl ThreadDialog { role: Role::Assistant, content, cache: false, + reasoning_details: None, }) } else { None diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index e810672c69b9ed602ddf76c2ca1f1035b958cd26..a6c6113a33b61cd16f007b6d2d818e42ad2a191e 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2051,6 +2051,7 @@ impl GitPanel { role: Role::User, content: vec![content.into()], cache: false, + reasoning_details: None, }], tools: Vec::new(), tool_choice: None, diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs index 3322409c09399b3ec957d8288b45e1833b77c106..c9b6391136da1a2b2e9a2ae470229179615a865a 100644 --- a/crates/language_model/src/language_model.rs +++ b/crates/language_model/src/language_model.rs @@ -98,6 +98,7 @@ pub enum LanguageModelCompletionEvent { StartMessage { message_id: String, }, + ReasoningDetails(serde_json::Value), UsageUpdate(TokenUsage), } @@ -680,6 +681,7 @@ pub trait LanguageModel: Send + Sync { Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)), Ok(LanguageModelCompletionEvent::Thinking { .. }) => None, Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None, + Ok(LanguageModelCompletionEvent::ReasoningDetails(_)) => None, Ok(LanguageModelCompletionEvent::Stop(_)) => None, Ok(LanguageModelCompletionEvent::ToolUse(_)) => None, Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { @@ -1034,8 +1036,8 @@ mod tests { let original = LanguageModelToolUse { id: LanguageModelToolUseId::from("no_sig_id"), name: "no_sig_tool".into(), - raw_input: json!({"key": "value"}).to_string(), - input: json!({"key": "value"}), + raw_input: json!({"arg": "value"}).to_string(), + input: json!({"arg": "value"}), is_input_complete: true, thought_signature: None, }; diff --git a/crates/language_model/src/request.rs b/crates/language_model/src/request.rs index d0f7789e40dd71ada8dcae2712cefcef966ad52f..d97d87bdc95c443aeaf3f2b5578bf7f0c1ef322a 100644 --- a/crates/language_model/src/request.rs +++ b/crates/language_model/src/request.rs @@ -357,6 +357,8 @@ pub struct LanguageModelRequestMessage { pub role: Role, pub content: Vec, pub cache: bool, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reasoning_details: Option, } impl LanguageModelRequestMessage { diff --git a/crates/language_models/src/provider/anthropic.rs b/crates/language_models/src/provider/anthropic.rs index 2491e8277a8b2632f6835af13736c23e94966c4c..1affe38a08d22e2aaed8c1207513ce41a13b8e59 100644 --- a/crates/language_models/src/provider/anthropic.rs +++ b/crates/language_models/src/provider/anthropic.rs @@ -989,6 +989,7 @@ mod tests { MessageContent::Image(language_model::LanguageModelImage::empty()), ], cache: true, + reasoning_details: None, }], thread_id: None, prompt_id: None, diff --git a/crates/language_models/src/provider/copilot_chat.rs b/crates/language_models/src/provider/copilot_chat.rs index f62b899318ae56452509f8d9e7cca05f8859cf27..1d0410c0cfff5f0f757120c9b91432593c8c1053 100644 --- a/crates/language_models/src/provider/copilot_chat.rs +++ b/crates/language_models/src/provider/copilot_chat.rs @@ -361,6 +361,7 @@ pub fn map_to_language_model_completion_events( id: String, name: String, arguments: String, + thought_signature: Option, } struct State { @@ -418,6 +419,11 @@ pub fn map_to_language_model_completion_events( if let Some(arguments) = function.arguments.clone() { 
entry.arguments.push_str(&arguments); } + + if let Some(thought_signature) = function.thought_signature.clone() + { + entry.thought_signature = Some(thought_signature); + } } } @@ -458,7 +464,7 @@ pub fn map_to_language_model_completion_events( is_input_complete: true, input, raw_input: tool_call.arguments, - thought_signature: None, + thought_signature: tool_call.thought_signature, }, )), Err(error) => Ok( @@ -550,6 +556,7 @@ impl CopilotResponsesEventMapper { call_id, name, arguments, + thought_signature, .. } => { let mut events = Vec::new(); @@ -561,7 +568,7 @@ impl CopilotResponsesEventMapper { is_input_complete: true, input, raw_input: arguments.clone(), - thought_signature: None, + thought_signature, }, ))), Err(error) => { @@ -776,6 +783,7 @@ fn into_copilot_chat( function: copilot::copilot_chat::FunctionContent { name: tool_use.name.to_string(), arguments: serde_json::to_string(&tool_use.input)?, + thought_signature: tool_use.thought_signature.clone(), }, }, }); @@ -950,6 +958,7 @@ fn into_copilot_responses( name: tool_use.name.to_string(), arguments: tool_use.raw_input.clone(), status: None, + thought_signature: tool_use.thought_signature.clone(), }); } } @@ -1122,6 +1131,7 @@ mod tests { name: "do_it".into(), arguments: "{\"x\":1}".into(), status: None, + thought_signature: None, }, }]; @@ -1147,6 +1157,7 @@ mod tests { name: "do_it".into(), arguments: "{not json}".into(), status: None, + thought_signature: None, }, }]; @@ -1250,6 +1261,7 @@ mod tests { name: "do_it".into(), arguments: "{}".into(), status: None, + thought_signature: None, }, }, responses::StreamEvent::Completed { diff --git a/crates/language_models/src/provider/google.rs b/crates/language_models/src/provider/google.rs index 68b6f976418b2125027e5800527f73cc49e5a1bb..c5a5affcd3d9e8c34f6306f86cb5348f86397892 100644 --- a/crates/language_models/src/provider/google.rs +++ b/crates/language_models/src/provider/google.rs @@ -1094,6 +1094,7 @@ mod tests { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], cache: false, + reasoning_details: None, }], ..Default::default() }, @@ -1130,6 +1131,7 @@ mod tests { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], cache: false, + reasoning_details: None, }], ..Default::default() }, @@ -1162,6 +1164,7 @@ mod tests { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], cache: false, + reasoning_details: None, }], ..Default::default() }, @@ -1218,6 +1221,7 @@ mod tests { role: Role::Assistant, content: vec![MessageContent::ToolUse(tool_use)], cache: false, + reasoning_details: None, }], ..Default::default() }, diff --git a/crates/language_models/src/provider/mistral.rs b/crates/language_models/src/provider/mistral.rs index 0c45913bea83e32c508daa6c6579ecd0382b3dc0..8372a8c95e579f1d860fd9bb25656731ee2c7e50 100644 --- a/crates/language_models/src/provider/mistral.rs +++ b/crates/language_models/src/provider/mistral.rs @@ -1025,11 +1025,13 @@ mod tests { role: Role::System, content: vec![MessageContent::Text("System prompt".into())], cache: false, + reasoning_details: None, }, LanguageModelRequestMessage { role: Role::User, content: vec![MessageContent::Text("Hello".into())], cache: false, + reasoning_details: None, }, ], temperature: Some(0.5), @@ -1064,6 +1066,7 @@ mod tests { }), ], cache: false, + reasoning_details: None, }], tools: vec![], tool_choice: None, diff --git a/crates/language_models/src/provider/open_ai.rs b/crates/language_models/src/provider/open_ai.rs index 
ee62522882c214dfa1384f75ced6eba46c9ec35f..9d828d188586b92e3f47a1345e070f33af380d48 100644 --- a/crates/language_models/src/provider/open_ai.rs +++ b/crates/language_models/src/provider/open_ai.rs @@ -882,6 +882,7 @@ mod tests { role: Role::User, content: vec![MessageContent::Text("message".into())], cache: false, + reasoning_details: None, }], tools: vec![], tool_choice: None, diff --git a/crates/language_models/src/provider/open_router.rs b/crates/language_models/src/provider/open_router.rs index c98ee02efd7b7af32ea6c649f29eef685753ba7d..7b10ebf963033603ede691fa72d2fa523bcdbab9 100644 --- a/crates/language_models/src/provider/open_router.rs +++ b/crates/language_models/src/provider/open_router.rs @@ -393,6 +393,7 @@ pub fn into_open_router( ) -> open_router::Request { let mut messages = Vec::new(); for message in request.messages { + let reasoning_details = message.reasoning_details.clone(); for content in message.content { match content { MessageContent::Text(text) => add_message_content_part( @@ -419,18 +420,26 @@ pub fn into_open_router( name: tool_use.name.to_string(), arguments: serde_json::to_string(&tool_use.input) .unwrap_or_default(), + thought_signature: tool_use.thought_signature.clone(), }, }, }; - if let Some(open_router::RequestMessage::Assistant { tool_calls, .. }) = - messages.last_mut() + if let Some(open_router::RequestMessage::Assistant { + tool_calls, + reasoning_details: existing_reasoning, + .. + }) = messages.last_mut() { tool_calls.push(tool_call); + if existing_reasoning.is_none() && reasoning_details.is_some() { + *existing_reasoning = reasoning_details.clone(); + } } else { messages.push(open_router::RequestMessage::Assistant { content: None, tool_calls: vec![tool_call], + reasoning_details: reasoning_details.clone(), }); } } @@ -529,6 +538,7 @@ fn add_message_content_part( Role::Assistant => open_router::RequestMessage::Assistant { content: Some(open_router::MessageContent::from(vec![new_part])), tool_calls: Vec::new(), + reasoning_details: None, }, Role::System => open_router::RequestMessage::System { content: open_router::MessageContent::from(vec![new_part]), @@ -540,12 +550,14 @@ fn add_message_content_part( pub struct OpenRouterEventMapper { tool_calls_by_index: HashMap, + reasoning_details: Option, } impl OpenRouterEventMapper { pub fn new() -> Self { Self { tool_calls_by_index: HashMap::default(), + reasoning_details: None, } } @@ -577,6 +589,15 @@ impl OpenRouterEventMapper { }; let mut events = Vec::new(); + + if let Some(details) = choice.delta.reasoning_details.clone() { + // Emit reasoning_details immediately + events.push(Ok(LanguageModelCompletionEvent::ReasoningDetails( + details.clone(), + ))); + self.reasoning_details = Some(details); + } + if let Some(reasoning) = choice.delta.reasoning.clone() { events.push(Ok(LanguageModelCompletionEvent::Thinking { text: reasoning, @@ -608,6 +629,10 @@ impl OpenRouterEventMapper { if let Some(arguments) = function.arguments.clone() { entry.arguments.push_str(&arguments); } + + if let Some(signature) = function.thought_signature.clone() { + entry.thought_signature = Some(signature); + } } } } @@ -623,6 +648,7 @@ impl OpenRouterEventMapper { match choice.finish_reason.as_deref() { Some("stop") => { + // Don't emit reasoning_details here - already emitted immediately when captured events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn))); } Some("tool_calls") => { @@ -635,7 +661,7 @@ impl OpenRouterEventMapper { is_input_complete: true, input, raw_input: tool_call.arguments.clone(), - 
thought_signature: None, + thought_signature: tool_call.thought_signature.clone(), }, )), Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError { @@ -647,10 +673,12 @@ impl OpenRouterEventMapper { } })); + // Don't emit reasoning_details here - already emitted immediately when captured events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse))); } Some(stop_reason) => { log::error!("Unexpected OpenRouter stop_reason: {stop_reason:?}",); + // Don't emit reasoning_details here - already emitted immediately when captured events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn))); } None => {} @@ -665,6 +693,7 @@ struct RawToolCall { id: String, name: String, arguments: String, + thought_signature: Option, } pub fn count_open_router_tokens( @@ -832,3 +861,235 @@ impl Render for ConfigurationView { } } } + +#[cfg(test)] +mod tests { + use super::*; + + use open_router::{ChoiceDelta, FunctionChunk, ResponseMessageDelta, ToolCallChunk}; + + #[gpui::test] + async fn test_reasoning_details_preservation_with_tool_calls() { + // This test verifies that reasoning_details are properly captured and preserved + // when a model uses tool calling with reasoning/thinking tokens. + // + // The key regression this prevents: + // - OpenRouter sends multiple reasoning_details updates during streaming + // - First with actual content (encrypted reasoning data) + // - Then with empty array on completion + // - We must NOT overwrite the real data with the empty array + + let mut mapper = OpenRouterEventMapper::new(); + + // Simulate the streaming events as they come from OpenRouter/Gemini + let events = vec![ + // Event 1: Initial reasoning details with text + ResponseStreamEvent { + id: Some("response_123".into()), + created: 1234567890, + model: "google/gemini-3-pro-preview".into(), + choices: vec![ChoiceDelta { + index: 0, + delta: ResponseMessageDelta { + role: None, + content: None, + reasoning: None, + tool_calls: None, + reasoning_details: Some(serde_json::json!([ + { + "type": "reasoning.text", + "text": "Let me analyze this request...", + "format": "google-gemini-v1", + "index": 0 + } + ])), + }, + finish_reason: None, + }], + usage: None, + }, + // Event 2: More reasoning details + ResponseStreamEvent { + id: Some("response_123".into()), + created: 1234567890, + model: "google/gemini-3-pro-preview".into(), + choices: vec![ChoiceDelta { + index: 0, + delta: ResponseMessageDelta { + role: None, + content: None, + reasoning: None, + tool_calls: None, + reasoning_details: Some(serde_json::json!([ + { + "type": "reasoning.encrypted", + "data": "EtgDCtUDAdHtim9OF5jm4aeZSBAtl/randomized123", + "format": "google-gemini-v1", + "index": 0, + "id": "tool_call_abc123" + } + ])), + }, + finish_reason: None, + }], + usage: None, + }, + // Event 3: Tool call starts + ResponseStreamEvent { + id: Some("response_123".into()), + created: 1234567890, + model: "google/gemini-3-pro-preview".into(), + choices: vec![ChoiceDelta { + index: 0, + delta: ResponseMessageDelta { + role: None, + content: None, + reasoning: None, + tool_calls: Some(vec![ToolCallChunk { + index: 0, + id: Some("tool_call_abc123".into()), + function: Some(FunctionChunk { + name: Some("list_directory".into()), + arguments: Some("{\"path\":\"test\"}".into()), + thought_signature: Some("sha256:test_signature_xyz789".into()), + }), + }]), + reasoning_details: None, + }, + finish_reason: None, + }], + usage: None, + }, + // Event 4: Empty reasoning_details on tool_calls finish + // This is the critical event - we must 
not overwrite with this empty array! + ResponseStreamEvent { + id: Some("response_123".into()), + created: 1234567890, + model: "google/gemini-3-pro-preview".into(), + choices: vec![ChoiceDelta { + index: 0, + delta: ResponseMessageDelta { + role: None, + content: None, + reasoning: None, + tool_calls: None, + reasoning_details: Some(serde_json::json!([])), + }, + finish_reason: Some("tool_calls".into()), + }], + usage: None, + }, + ]; + + // Process all events + let mut collected_events = Vec::new(); + for event in events { + let mapped = mapper.map_event(event); + collected_events.extend(mapped); + } + + // Verify we got the expected events + let mut has_tool_use = false; + let mut reasoning_details_events = Vec::new(); + let mut thought_signature_value = None; + + for event_result in collected_events { + match event_result { + Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => { + has_tool_use = true; + assert_eq!(tool_use.id.to_string(), "tool_call_abc123"); + assert_eq!(tool_use.name.as_ref(), "list_directory"); + thought_signature_value = tool_use.thought_signature.clone(); + } + Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => { + reasoning_details_events.push(details); + } + _ => {} + } + } + + // Assertions + assert!(has_tool_use, "Should have emitted ToolUse event"); + assert!( + !reasoning_details_events.is_empty(), + "Should have emitted ReasoningDetails events" + ); + + // We should have received multiple reasoning_details events (text, encrypted, empty) + // The agent layer is responsible for keeping only the first non-empty one + assert!( + reasoning_details_events.len() >= 2, + "Should have multiple reasoning_details events from streaming" + ); + + // Verify at least one contains the encrypted data + let has_encrypted = reasoning_details_events.iter().any(|details| { + if let serde_json::Value::Array(arr) = details { + arr.iter().any(|item| { + item["type"] == "reasoning.encrypted" + && item["data"] + .as_str() + .map_or(false, |s| s.contains("EtgDCtUDAdHtim9OF5jm4aeZSBAtl")) + }) + } else { + false + } + }); + assert!( + has_encrypted, + "Should have at least one reasoning_details with encrypted data" + ); + + // Verify thought_signature was captured + assert!( + thought_signature_value.is_some(), + "Tool use should have thought_signature" + ); + assert_eq!( + thought_signature_value.unwrap(), + "sha256:test_signature_xyz789" + ); + } + + #[gpui::test] + async fn test_agent_prevents_empty_reasoning_details_overwrite() { + // This test verifies that the agent layer prevents empty reasoning_details + // from overwriting non-empty ones, even though the mapper emits all events. 
+ + // Simulate what the agent does when it receives multiple ReasoningDetails events + let mut agent_reasoning_details: Option = None; + + let events = vec![ + // First event: non-empty reasoning_details + serde_json::json!([ + { + "type": "reasoning.encrypted", + "data": "real_data_here", + "format": "google-gemini-v1" + } + ]), + // Second event: empty array (should not overwrite) + serde_json::json!([]), + ]; + + for details in events { + // This mimics the agent's logic: only store if we don't already have it + if agent_reasoning_details.is_none() { + agent_reasoning_details = Some(details); + } + } + + // Verify the agent kept the first non-empty reasoning_details + assert!(agent_reasoning_details.is_some()); + let final_details = agent_reasoning_details.unwrap(); + if let serde_json::Value::Array(arr) = &final_details { + assert!( + !arr.is_empty(), + "Agent should have kept the non-empty reasoning_details" + ); + assert_eq!(arr[0]["data"], "real_data_here"); + } else { + panic!("Expected array"); + } + } +} diff --git a/crates/open_router/src/open_router.rs b/crates/open_router/src/open_router.rs index 0081c877756dab46433481ac58f2180877e7667f..57ff9558c261194136b84f0e96a4936a183a15b5 100644 --- a/crates/open_router/src/open_router.rs +++ b/crates/open_router/src/open_router.rs @@ -215,6 +215,8 @@ pub enum RequestMessage { content: Option, #[serde(default, skip_serializing_if = "Vec::is_empty")] tool_calls: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + reasoning_details: Option, }, User { content: MessageContent, @@ -341,6 +343,8 @@ pub enum ToolCallContent { pub struct FunctionContent { pub name: String, pub arguments: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub thought_signature: Option, } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] @@ -350,6 +354,8 @@ pub struct ResponseMessageDelta { pub reasoning: Option, #[serde(default, skip_serializing_if = "is_none_or_empty")] pub tool_calls: Option>, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub reasoning_details: Option, } #[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] @@ -363,6 +369,8 @@ pub struct ToolCallChunk { pub struct FunctionChunk { pub name: Option, pub arguments: Option, + #[serde(default)] + pub thought_signature: Option, } #[derive(Serialize, Deserialize, Debug)] diff --git a/crates/rules_library/src/rules_library.rs b/crates/rules_library/src/rules_library.rs index b5b664f6e5c91e2a4f3760b3ad34c3b055bb2df7..09b7e0b539cde7371b97ef092fbd8f904b241c13 100644 --- a/crates/rules_library/src/rules_library.rs +++ b/crates/rules_library/src/rules_library.rs @@ -1072,6 +1072,7 @@ impl RulesLibrary { role: Role::System, content: vec![body.to_string().into()], cache: false, + reasoning_details: None, }], tools: Vec::new(), tool_choice: None, From 4329a817aa4426874dc6a52e4d1f5dd4111a8a8e Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Mon, 24 Nov 2025 15:31:13 -0300 Subject: [PATCH 0342/1030] ui: Update `ThreadItem` component design (#43421) Release Notes: - N/A --- .../ui/src/components/label/spinner_label.rs | 4 +- crates/ui/src/components/thread_item.rs | 38 ++++++++++++++----- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/crates/ui/src/components/label/spinner_label.rs b/crates/ui/src/components/label/spinner_label.rs index de88e9bb7ab04a3d595183513c2b00da70e172aa..33eeeae125106cd8c8d2db64605b7017121c0394 100644 --- a/crates/ui/src/components/label/spinner_label.rs +++ 
b/crates/ui/src/components/label/spinner_label.rs @@ -77,7 +77,7 @@ impl SpinnerLabel { let duration = variant.duration(); SpinnerLabel { - base: Label::new(frames[0]), + base: Label::new(frames[0]).color(Color::Muted), variant, frames, duration, @@ -164,7 +164,7 @@ impl RenderOnce for SpinnerLabel { let frames = self.frames.clone(); let duration = self.duration; - self.base.color(Color::Muted).with_animation( + self.base.with_animation( self.variant.animation_id(), Animation::new(duration).repeat(), move |mut label, delta| { diff --git a/crates/ui/src/components/thread_item.rs b/crates/ui/src/components/thread_item.rs index dcf159f502e2d3c67576f9c6eefaff10585992eb..a4f6a8a53348d78563900c2a53b30e95588c2aac 100644 --- a/crates/ui/src/components/thread_item.rs +++ b/crates/ui/src/components/thread_item.rs @@ -1,4 +1,6 @@ -use crate::{Chip, DiffStat, Indicator, SpinnerLabel, prelude::*}; +use crate::{ + Chip, DecoratedIcon, DiffStat, IconDecoration, IconDecorationKind, SpinnerLabel, prelude::*, +}; use gpui::{ClickEvent, SharedString}; #[derive(IntoElement, RegisterComponent)] @@ -85,16 +87,29 @@ impl ThreadItem { impl RenderOnce for ThreadItem { fn render(self, _: &mut Window, cx: &mut App) -> impl IntoElement { let icon_container = || h_flex().size_4().justify_center(); + let agent_icon = Icon::new(self.icon) + .color(Color::Muted) + .size(IconSize::Small); + let icon = if self.generation_done { - icon_container().child(Indicator::dot().color(Color::Accent)) - } else if self.running { - icon_container().child(SpinnerLabel::new().color(Color::Accent)) - } else { - icon_container().child( - Icon::new(self.icon) - .color(Color::Muted) - .size(IconSize::Small), + DecoratedIcon::new( + agent_icon, + Some( + IconDecoration::new( + IconDecorationKind::Dot, + cx.theme().colors().surface_background, + cx, + ) + .color(cx.theme().colors().text_accent) + .position(gpui::Point { + x: px(-2.), + y: px(-2.), + }), + ), ) + .into_any_element() + } else { + agent_icon.into_any_element() }; let has_no_changes = self.added.is_none() && self.removed.is_none(); @@ -112,7 +127,10 @@ impl RenderOnce for ThreadItem { .w_full() .gap_1p5() .child(icon) - .child(Label::new(self.title).truncate()), + .child(Label::new(self.title).truncate()) + .when(self.running, |this| { + this.child(icon_container().child(SpinnerLabel::new().color(Color::Accent))) + }), ) .child( h_flex() From b577f8a5ea3f4fc439a57c03126b20479d58ff2e Mon Sep 17 00:00:00 2001 From: Yeoh Joer Date: Tue, 25 Nov 2025 03:08:45 +0800 Subject: [PATCH 0343/1030] Passthrough env to npm subcommands when using the system node runtime (#43102) Closes #39448 Closes #37866 This PR expands the env-clearing fix from #42587 to include the SystemNodeRuntime, which covers Node.js installations managed by Mise. When running under the system runtime, npm subcommands were still launched with a cleared environment, preventing variables such as MISE_DATA_DIR from reaching the shim or the mise binary itself. As a result, Mise finds the npm binary in the default MISE_DATA_DIR, consistent with the behavior described in https://github.com/zed-industries/zed/issues/39448#issuecomment-3433644569. This change ensures that environment variables are passed through for npm subcommands when using the system Node runtime, restoring expected behavior for Mise-managed Node installations. This also fixes cases where envs are used by npm itself. 
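As a rough sketch of the behavioral difference (using `std::process::Command` and the Unix `env` utility as stand-ins for Zed's smol command wrapper and the npm subcommand; not the actual implementation), clearing the environment drops everything the parent process exported, while the default behavior inherits it:

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // With `env_clear()`, only variables that are explicitly re-added reach the
    // child, so anything exported by the parent (e.g. MISE_DATA_DIR) is lost.
    let cleared = Command::new("env")
        .env_clear()
        .env("PATH", std::env::var_os("PATH").unwrap_or_default())
        .output()?;

    // Without `env_clear()`, the child inherits the parent's full environment,
    // which is what Mise's shims and npm itself rely on.
    let inherited = Command::new("env").output()?;

    println!("cleared env:   {} bytes", cleared.stdout.len());
    println!("inherited env: {} bytes", inherited.stdout.len());
    Ok(())
}
```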
Release Notes: - Enable environment passthrough for npm subcommands --- crates/node_runtime/src/node_runtime.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/crates/node_runtime/src/node_runtime.rs b/crates/node_runtime/src/node_runtime.rs index 6e705289a3fd9e814574198004b8213e5147b7ca..1faf22dc9844f648fec53654ef3bde500cec32e2 100644 --- a/crates/node_runtime/src/node_runtime.rs +++ b/crates/node_runtime/src/node_runtime.rs @@ -414,7 +414,6 @@ impl ManagedNodeRuntime { let valid = if fs::metadata(&node_binary).await.is_ok() { let result = util::command::new_smol_command(&node_binary) - .env_clear() .env(NODE_CA_CERTS_ENV_VAR, node_ca_certs) .arg(npm_file) .arg("--version") @@ -701,7 +700,6 @@ impl NodeRuntimeTrait for SystemNodeRuntime { let mut command = util::command::new_smol_command(self.npm.clone()); let path = path_with_node_binary_prepended(&self.node).unwrap_or_default(); command - .env_clear() .env("PATH", path) .env(NODE_CA_CERTS_ENV_VAR, node_ca_certs) .arg(subcommand) From 092250b4fa99a0cfeb107df3f7dca8c8fd7f5d24 Mon Sep 17 00:00:00 2001 From: Kunall Banerjee <14703164+yeskunall@users.noreply.github.com> Date: Mon, 24 Nov 2025 14:12:54 -0500 Subject: [PATCH 0344/1030] Rework and consolidate issue templates (#43403) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We’re reworking our triage process and in doing so, reworking our issue templates is worth looking into. We have multiple issue templates, for arbitrary categories, and not enough enforcement. The plan is to consolidate the issue templates (maybe all into one) and drop the others. Release Notes: - N/A --- .github/ISSUE_TEMPLATE/01_bug_ai.yml | 59 ----------------- .github/ISSUE_TEMPLATE/04_bug_debugger.yml | 53 --------------- .github/ISSUE_TEMPLATE/06_bug_git.yml | 53 --------------- .github/ISSUE_TEMPLATE/07_bug_windows.yml | 53 --------------- .github/ISSUE_TEMPLATE/1.bug-report.yml | 70 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/10_bug_report.yml | 75 ---------------------- .github/ISSUE_TEMPLATE/11_crash_report.yml | 50 --------------- .github/ISSUE_TEMPLATE/2.crash-report.yml | 52 +++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 10 +-- 9 files changed, 127 insertions(+), 348 deletions(-) delete mode 100644 .github/ISSUE_TEMPLATE/01_bug_ai.yml delete mode 100644 .github/ISSUE_TEMPLATE/04_bug_debugger.yml delete mode 100644 .github/ISSUE_TEMPLATE/06_bug_git.yml delete mode 100644 .github/ISSUE_TEMPLATE/07_bug_windows.yml create mode 100644 .github/ISSUE_TEMPLATE/1.bug-report.yml delete mode 100644 .github/ISSUE_TEMPLATE/10_bug_report.yml delete mode 100644 .github/ISSUE_TEMPLATE/11_crash_report.yml create mode 100644 .github/ISSUE_TEMPLATE/2.crash-report.yml diff --git a/.github/ISSUE_TEMPLATE/01_bug_ai.yml b/.github/ISSUE_TEMPLATE/01_bug_ai.yml deleted file mode 100644 index 8d977f4f7ae603f0b2ed991ce6dabee25e045f06..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/01_bug_ai.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Bug Report (AI) -description: Zed Agent Panel Bugs -type: "Bug" -labels: ["area:ai"] -title: "AI:
" -body: - - type: textarea - attributes: - label: Summary - description: Describe the bug with a one line summary, and provide detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - Steps to trigger the problem: - 1. - 2. - 3. - - **Expected Behavior**: - **Actual Behavior**: - - ### Model Provider Details - - Provider: (Anthropic via ZedPro, Anthropic via API key, Copilot Chat, Mistral, OpenAI, etc) - - Model Name: - - Mode: (Agent Panel, Inline Assistant, Terminal Assistant or Text Threads) - - Other Details (MCPs, other settings, etc): - validations: - required: true - - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"' - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/04_bug_debugger.yml b/.github/ISSUE_TEMPLATE/04_bug_debugger.yml deleted file mode 100644 index 3191227371a544b3ad6232c204b85afd5c6a238a..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/04_bug_debugger.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Bug Report (Debugger) -description: Zed Debugger-Related Bugs -type: "Bug" -labels: ["area:debugger"] -title: "Debugger:
" -body: - - type: textarea - attributes: - label: Summary - description: Describe the bug with a one line summary, and provide detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - Steps to trigger the problem: - 1. - 2. - 3. - - **Expected Behavior**: - **Actual Behavior**: - - validations: - required: true - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"' - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/06_bug_git.yml b/.github/ISSUE_TEMPLATE/06_bug_git.yml deleted file mode 100644 index e0b6a6750879bf8de41a942e1057b9e4d5cdcc91..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/06_bug_git.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Bug Report (Git) -description: Zed Git Related Bugs -type: "Bug" -labels: ["area:integrations/git"] -title: "Git:
" -body: - - type: textarea - attributes: - label: Summary - description: Describe the bug with a one-line summary, and provide detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - Steps to trigger the problem: - 1. - 2. - 3. - - **Expected Behavior**: - **Actual Behavior**: - - validations: - required: true - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"' - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/07_bug_windows.yml b/.github/ISSUE_TEMPLATE/07_bug_windows.yml deleted file mode 100644 index 7f48890eb961aa35dbae70cc22829ba6cb0e2a77..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/07_bug_windows.yml +++ /dev/null @@ -1,53 +0,0 @@ -name: Bug Report (Windows) -description: Zed Windows Related Bugs -type: "Bug" -labels: ["platform:windows"] -title: "Windows:
" -body: - - type: textarea - attributes: - label: Summary - description: Describe the bug with a one-line summary, and provide detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - Steps to trigger the problem: - 1. - 2. - 3. - - **Expected Behavior**: - **Actual Behavior**: - - validations: - required: true - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"' - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/1.bug-report.yml b/.github/ISSUE_TEMPLATE/1.bug-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..543c22117c2aa889b91fddd9eddd905c09dd0644 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/1.bug-report.yml @@ -0,0 +1,70 @@ +name: Report an issue +description: Report an issue with Zed. +type: Bug +body: + - type: markdown + attributes: + value: | + Feature requests should be opened in [discussions](https://github.com/zed-industries/zed/discussions/new/choose). + + Before opening a new issue, please do a [search](https://github.com/zed-industries/zed/issues) of existing issues and :+1: upvote the existing issue instead. This will help us maintain a proper signal-to-noise ratio. + + If you need help with your own project, you can ask a question in our [Discord Support Forums](https://discord.com/invite/zedindustries). + - type: textarea + attributes: + label: Reproduction steps + description: A step-by-step description of how to reproduce the issue from a **clean Zed install**. Any code must be sufficient to reproduce (make sure to include context!). Include code as text, not just as a screenshot. **Issues with insufficient detail may be summarily closed**. + placeholder: | + 1. Start Zed + 2. Click X + 3. Y will happen + validations: + required: true + - type: textarea + attributes: + label: Current vs. Expected behavior + description: | + A clear and concise description of what is the current behavior (screenshots, videos), vs. what you expected the behavior to be. + + **Skipping this/failure to provide complete information will result in the issue being closed.** + placeholder: "Based on my reproduction steps above, when I click X, I expect this to happen, but instead Y happens." + validations: + required: true + - type: textarea + attributes: + label: If applicable, attach your Zed log file to this issue. + description: | + Open the command palette in Zed, then type `zed: open log` to see the last 1000 lines. Or type `zed: reveal log in file manager` in the command palette to reveal the log file itself. + value: | +
Zed.log + + + ```log + + ``` + +
+ validations: + required: false + - type: textarea + attributes: + label: If applicable, provide details about your model provider + placeholder: | + - Provider: (Anthropic via ZedPro, Anthropic via API key, Copilot Chat, Mistral, OpenAI, etc.) + - Model Name: (Claude Sonnet 4.5, Gemini 3 Pro, GPT-5) + - Mode: (Agent Panel, Inline Assistant, Terminal Assistant or Text Threads) + - Other details (ACPs, MCPs, other settings, etc.): + validations: + required: false + - type: textarea + attributes: + label: Zed version and system specs + description: | + Open the command palette in Zed, then type “zed: copy system specs into clipboard”. **Skipping this/failure to provide complete information will result in the issue being closed**. + placeholder: | + Zed: v0.215.0 (Zed Nightly bfe141ea79aa4984028934067ba75c48d99136ae) + OS: macOS 15.1 + Memory: 36 GiB + Architecture: aarch64 + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/10_bug_report.yml b/.github/ISSUE_TEMPLATE/10_bug_report.yml deleted file mode 100644 index 9f069a7a355188f91d18fc528a69433f214f5167..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/10_bug_report.yml +++ /dev/null @@ -1,75 +0,0 @@ -name: Bug Report (Other) -description: | - Something else is broken in Zed (exclude crashing). -type: "Bug" -body: - - type: textarea - attributes: - label: Summary - description: Provide a one sentence summary and detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - - DESCRIPTION_HERE - - Steps to reproduce: - 1. - 2. - 3. - 4. - - **Expected Behavior**: - **Actual Behavior**: - - - - validations: - required: true - - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: | - Open Zed, from the command palette select "zed: copy system specs into clipboard" - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/11_crash_report.yml b/.github/ISSUE_TEMPLATE/11_crash_report.yml deleted file mode 100644 index 97979308ae5ab4037c32db2660544c1299f2c750..0000000000000000000000000000000000000000 --- a/.github/ISSUE_TEMPLATE/11_crash_report.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: Crash Report -description: Zed is Crashing or Hanging -type: "Crash" -body: - - type: textarea - attributes: - label: Summary - description: Summarize the issue with detailed reproduction steps - value: | - - SUMMARY_SENTENCE_HERE - - ### Description - - Steps to trigger the problem: - 1. - 2. - 3. - - Actual Behavior: - Expected Behavior: - - validations: - required: true - - type: textarea - id: environment - attributes: - label: Zed Version and System Specs - description: 'Open Zed, and in the command palette select "zed: copy system specs into clipboard"' - placeholder: | - Output of "zed: copy system specs into clipboard" - validations: - required: true - - type: textarea - attributes: - label: If applicable, attach your `Zed.log` file to this issue. - description: | - From the command palette, run `zed: open log` to see the last 1000 lines. - Or run `zed: reveal log in file manager` to reveal the log file itself. - value: | -
Zed.log - - - ```log - - ``` - -
- validations: - required: false diff --git a/.github/ISSUE_TEMPLATE/2.crash-report.yml b/.github/ISSUE_TEMPLATE/2.crash-report.yml new file mode 100644 index 0000000000000000000000000000000000000000..47cedcc3100854060c2cbde2147b754d89afef51 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/2.crash-report.yml @@ -0,0 +1,52 @@ +name: Report a crash +description: Zed is crashing or freezing or hanging. +type: Crash +body: + - type: textarea + attributes: + label: Reproduction steps + description: A step-by-step description of how to reproduce the crash from a **clean Zed install**. **Be verbose**. **Issues with insufficient detail may be summarily closed**. + placeholder: | + 1. Start Zed + 2. Perform an action + 3. Zed crashes + validations: + required: true + - type: textarea + attributes: + label: Current vs. Expected behavior + description: | + Go into depth about what actions you’re performing in Zed to trigger the crash. If Zed crashes before it loads any windows, make sure to mention that. Again, **be verbose**. + + **Skipping this/failure to provide complete information will result in the issue being closed.** + placeholder: "Based on my reproduction steps above, when I perform said action, I expect this to happen, but instead Zed crashes." + validations: + required: true + - type: textarea + attributes: + label: Zed version and system specs + description: | + Open the command palette in Zed, then type “zed: copy system specs into clipboard”. **Skipping this/failure to provide complete information will result in the issue being closed**. + placeholder: | + Zed: v0.215.0 (Zed Nightly bfe141ea79aa4984028934067ba75c48d99136ae) + OS: macOS 15.1 + Memory: 36 GiB + Architecture: aarch64 + validations: + required: true + - type: textarea + attributes: + label: If applicable, attach your Zed log file to this issue + description: | + Open the command palette in Zed, then type `zed: open log` to see the last 1000 lines. Or type `zed: reveal log in file manager` in the command palette to reveal the log file itself. + value: | +
Zed.log + + + ```log + + ``` + +
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 8602daf90ddd0fe49d9db27c39f3d52d6e7da032..9bf14ce72d5feb5da9f04bb1064e7351407e6f55 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,9 +1,9 @@ # yaml-language-server: $schema=https://www.schemastore.org/github-issue-config.json blank_issues_enabled: false contact_links: - - name: Feature Request + - name: Feature request url: https://github.com/zed-industries/zed/discussions/new/choose - about: To request a feature, open a new Discussion in one of the appropriate Discussion categories - - name: "Zed Discord" - url: https://zed.dev/community-links - about: Real-time discussion and user support + about: To request a feature, open a new discussion under one of the appropriate categories. + - name: Our Discord community + url: https://discord.com/invite/zedindustries + about: Join our Discord server for real-time discussion and user support. From 7ce4f2ae622ab25ff1e83c1aed19ae9d45b7bee5 Mon Sep 17 00:00:00 2001 From: morgankrey Date: Mon, 24 Nov 2025 13:38:14 -0600 Subject: [PATCH 0345/1030] Opus 4.5 and Gemini 3 to docs (#43424) Add Opus 4.5 and Gemini 3 to docs Release Notes: - N/A --- docs/src/ai/models.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/src/ai/models.md b/docs/src/ai/models.md index 5b379fc75435c14ac46587f7449c7a5c54becfcf..6033bf23fad372b15909ff09a43f2747f3e619c0 100644 --- a/docs/src/ai/models.md +++ b/docs/src/ai/models.md @@ -5,6 +5,10 @@ We’re working hard to expand the models supported by Zed’s subscription offe | Model | Provider | Token Type | Provider Price per 1M tokens | Zed Price per 1M tokens | | ---------------------- | --------- | ------------------- | ---------------------------- | ----------------------- | +| Claude Opus 4.5 | Anthropic | Input | $5.00 | $5.50 | +| | Anthropic | Output | $25.00 | $27.50 | +| | Anthropic | Input - Cache Write | $6.25 | $6.875 | +| | Anthropic | Input - Cache Read | $0.50 | $0.55 | | Claude Opus 4.1 | Anthropic | Input | $15.00 | $16.50 | | | Anthropic | Output | $75.00 | $82.50 | | | Anthropic | Input - Cache Write | $18.75 | $20.625 | @@ -34,6 +38,8 @@ We’re working hard to expand the models supported by Zed’s subscription offe | GPT-5 nano | OpenAI | Input | $0.05 | $0.055 | | | OpenAI | Output | $0.40 | $0.44 | | | OpenAI | Cached Input | $0.005 | $0.0055 | +| Gemini 3.0 Pro | Google | Input | $2.00 | $2.20 | +| | Google | Output | $12.00 | $13.20 | | Gemini 2.5 Pro | Google | Input | $1.25 | $1.375 | | | Google | Output | $10.00 | $11.00 | | Gemini 2.5 Flash | Google | Input | $0.30 | $0.33 | @@ -63,6 +69,7 @@ A context window is the maximum span of text and code an LLM can consider at onc | Model | Provider | Zed-Hosted Context Window | | ----------------- | --------- | ------------------------- | +| Claude Opus 4.5 | Anthropic | 200k | | Claude Opus 4.1 | Anthropic | 200k | | Claude Sonnet 4 | Anthropic | 200k | | Claude Sonnet 3.7 | Anthropic | 200k | @@ -72,6 +79,7 @@ A context window is the maximum span of text and code an LLM can consider at onc | GPT-5 nano | OpenAI | 400k | | Gemini 2.5 Pro | Google | 200k | | Gemini 2.5 Flash | Google | 200k | +| Gemini 3.0 Pro | Google | 200k | > We're planning on expanding supported context windows for hosted Sonnet 4 and Gemini 2.5 Pro/Flash in the near future. Stay tuned! 
From d295ff4f04f0b38eca419c04bc3ac128474d92fc Mon Sep 17 00:00:00 2001 From: localcc Date: Mon, 24 Nov 2025 20:48:16 +0100 Subject: [PATCH 0346/1030] Improve Windows path canonicalization (#43423) Path canonicalization on windows will now favor keeping the drive letter intact when canonicalizing paths. This helps some lsps with mapped network drive compatibility. Closes #41336 Release Notes: - N/A --- crates/fs/src/fs.rs | 77 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 33cc83a7886349a537a87d4b6c8bb3f5211608fc..93192ecd2bd2449dafa622a69045be6811a43cf7 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -421,6 +421,75 @@ impl RealFs { job_event_subscribers: Arc::new(Mutex::new(Vec::new())), } } + + #[cfg(target_os = "windows")] + fn canonicalize(path: &Path) -> Result { + let mut strip_prefix = None; + + let mut new_path = PathBuf::new(); + for component in path.components() { + match component { + std::path::Component::Prefix(_) => { + let canonicalized = std::fs::canonicalize(component)?; + + let mut strip = PathBuf::new(); + for component in canonicalized.components() { + match component { + Component::Prefix(prefix_component) => { + match prefix_component.kind() { + std::path::Prefix::Verbatim(os_str) => { + strip.push(os_str); + } + std::path::Prefix::VerbatimUNC(host, share) => { + strip.push("\\\\"); + strip.push(host); + strip.push(share); + } + std::path::Prefix::VerbatimDisk(disk) => { + strip.push(format!("{}:", disk as char)); + } + _ => strip.push(component), + }; + } + _ => strip.push(component), + } + } + strip_prefix = Some(strip); + new_path.push(component); + } + std::path::Component::RootDir => { + new_path.push(component); + } + std::path::Component::CurDir => { + if strip_prefix.is_none() { + // unrooted path + new_path.push(component); + } + } + std::path::Component::ParentDir => { + if strip_prefix.is_some() { + // rooted path + new_path.pop(); + } else { + new_path.push(component); + } + } + std::path::Component::Normal(_) => { + if let Ok(link) = std::fs::read_link(new_path.join(component)) { + let link = match &strip_prefix { + Some(e) => link.strip_prefix(e).unwrap_or(&link), + None => &link, + }; + new_path.extend(link); + } else { + new_path.push(component); + } + } + } + } + + Ok(new_path) + } } #[async_trait::async_trait] @@ -749,7 +818,13 @@ impl Fs for RealFs { let path = path.to_owned(); self.executor .spawn(async move { - std::fs::canonicalize(&path).with_context(|| format!("canonicalizing {path:?}")) + #[cfg(target_os = "windows")] + let result = Self::canonicalize(&path); + + #[cfg(not(target_os = "windows"))] + let result = std::fs::canonicalize(&path); + + result.with_context(|| format!("canonicalizing {path:?}")) }) .await } From bd2c1027fac828b3b6cecb37a51bacc553f431b2 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 24 Nov 2025 12:01:43 -0800 Subject: [PATCH 0347/1030] Add support for Opus 4.5 (#43425) Adds support for Opus 4.5 - [x] BYOK - [x] Amazon Bedrock Release Notes: - Added support for Opus 4.5 Co-authored-by: Richard Feldman --- crates/anthropic/src/anthropic.rs | 30 +++++++++++++++ crates/bedrock/src/models.rs | 62 ++++++++++++++++++++++++++----- 2 files changed, 82 insertions(+), 10 deletions(-) diff --git a/crates/anthropic/src/anthropic.rs b/crates/anthropic/src/anthropic.rs index d4f89808379b0bf10c8f3eaa22484b61fd8c26f1..041401418c427251a944fc39bb8ac83a0e22bc13 100644 --- a/crates/anthropic/src/anthropic.rs +++ 
b/crates/anthropic/src/anthropic.rs @@ -67,6 +67,13 @@ pub enum Model { alias = "claude-opus-4-1-thinking-latest" )] ClaudeOpus4_1Thinking, + #[serde(rename = "claude-opus-4-5", alias = "claude-opus-4-5-latest")] + ClaudeOpus4_5, + #[serde( + rename = "claude-opus-4-5-thinking", + alias = "claude-opus-4-5-thinking-latest" + )] + ClaudeOpus4_5Thinking, #[serde(rename = "claude-sonnet-4", alias = "claude-sonnet-4-latest")] ClaudeSonnet4, #[serde( @@ -131,6 +138,14 @@ impl Model { } pub fn from_id(id: &str) -> Result { + if id.starts_with("claude-opus-4-5-thinking") { + return Ok(Self::ClaudeOpus4_5Thinking); + } + + if id.starts_with("claude-opus-4-5") { + return Ok(Self::ClaudeOpus4_5); + } + if id.starts_with("claude-opus-4-1-thinking") { return Ok(Self::ClaudeOpus4_1Thinking); } @@ -208,6 +223,8 @@ impl Model { Self::ClaudeOpus4_1 => "claude-opus-4-1-latest", Self::ClaudeOpus4Thinking => "claude-opus-4-thinking-latest", Self::ClaudeOpus4_1Thinking => "claude-opus-4-1-thinking-latest", + Self::ClaudeOpus4_5 => "claude-opus-4-5-latest", + Self::ClaudeOpus4_5Thinking => "claude-opus-4-5-thinking-latest", Self::ClaudeSonnet4 => "claude-sonnet-4-latest", Self::ClaudeSonnet4Thinking => "claude-sonnet-4-thinking-latest", Self::ClaudeSonnet4_5 => "claude-sonnet-4-5-latest", @@ -230,6 +247,7 @@ impl Model { match self { Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking => "claude-opus-4-20250514", Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking => "claude-opus-4-1-20250805", + Self::ClaudeOpus4_5 | Self::ClaudeOpus4_5Thinking => "claude-opus-4-5-20251101", Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking => "claude-sonnet-4-20250514", Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4_5Thinking => "claude-sonnet-4-5-20250929", Self::Claude3_5Sonnet => "claude-3-5-sonnet-latest", @@ -249,6 +267,8 @@ impl Model { Self::ClaudeOpus4_1 => "Claude Opus 4.1", Self::ClaudeOpus4Thinking => "Claude Opus 4 Thinking", Self::ClaudeOpus4_1Thinking => "Claude Opus 4.1 Thinking", + Self::ClaudeOpus4_5 => "Claude Opus 4.5", + Self::ClaudeOpus4_5Thinking => "Claude Opus 4.5 Thinking", Self::ClaudeSonnet4 => "Claude Sonnet 4", Self::ClaudeSonnet4Thinking => "Claude Sonnet 4 Thinking", Self::ClaudeSonnet4_5 => "Claude Sonnet 4.5", @@ -274,6 +294,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -303,6 +325,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -326,6 +350,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -348,6 +374,8 @@ impl Model { | Self::ClaudeOpus4_1 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -372,6 +400,7 @@ impl Model { match self { Self::ClaudeOpus4 | Self::ClaudeOpus4_1 + | Self::ClaudeOpus4_5 | Self::ClaudeSonnet4 | Self::ClaudeSonnet4_5 | Self::Claude3_5Sonnet @@ -383,6 +412,7 @@ impl Model { | Self::Claude3Haiku => AnthropicModelMode::Default, Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1Thinking + | 
Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5Thinking | Self::ClaudeHaiku4_5Thinking diff --git a/crates/bedrock/src/models.rs b/crates/bedrock/src/models.rs index 1691ffe199975983fbb40b781aac00a2703871ea..f3b276a8d2f30e8062931e76608bbc3a302ad734 100644 --- a/crates/bedrock/src/models.rs +++ b/crates/bedrock/src/models.rs @@ -51,6 +51,13 @@ pub enum Model { alias = "claude-opus-4-1-thinking-latest" )] ClaudeOpus4_1Thinking, + #[serde(rename = "claude-opus-4-5", alias = "claude-opus-4-5-latest")] + ClaudeOpus4_5, + #[serde( + rename = "claude-opus-4-5-thinking", + alias = "claude-opus-4-5-thinking-latest" + )] + ClaudeOpus4_5Thinking, #[serde(rename = "claude-3-5-sonnet-v2", alias = "claude-3-5-sonnet-latest")] Claude3_5SonnetV2, #[serde(rename = "claude-3-7-sonnet", alias = "claude-3-7-sonnet-latest")] @@ -141,7 +148,19 @@ impl Model { } pub fn from_id(id: &str) -> anyhow::Result { - if id.starts_with("claude-3-5-sonnet-v2") { + if id.starts_with("claude-opus-4-5-thinking") { + Ok(Self::ClaudeOpus4_5Thinking) + } else if id.starts_with("claude-opus-4-5") { + Ok(Self::ClaudeOpus4_5) + } else if id.starts_with("claude-opus-4-1-thinking") { + Ok(Self::ClaudeOpus4_1Thinking) + } else if id.starts_with("claude-opus-4-1") { + Ok(Self::ClaudeOpus4_1) + } else if id.starts_with("claude-opus-4-thinking") { + Ok(Self::ClaudeOpus4Thinking) + } else if id.starts_with("claude-opus-4") { + Ok(Self::ClaudeOpus4) + } else if id.starts_with("claude-3-5-sonnet-v2") { Ok(Self::Claude3_5SonnetV2) } else if id.starts_with("claude-3-opus") { Ok(Self::Claude3Opus) @@ -178,6 +197,8 @@ impl Model { Model::ClaudeOpus4_1 => "claude-opus-4-1", Model::ClaudeOpus4Thinking => "claude-opus-4-thinking", Model::ClaudeOpus4_1Thinking => "claude-opus-4-1-thinking", + Model::ClaudeOpus4_5 => "claude-opus-4-5", + Model::ClaudeOpus4_5Thinking => "claude-opus-4-5-thinking", Model::Claude3_5SonnetV2 => "claude-3-5-sonnet-v2", Model::Claude3_5Sonnet => "claude-3-5-sonnet", Model::Claude3Opus => "claude-3-opus", @@ -245,6 +266,9 @@ impl Model { Model::ClaudeOpus4_1 | Model::ClaudeOpus4_1Thinking => { "anthropic.claude-opus-4-1-20250805-v1:0" } + Model::ClaudeOpus4_5 | Model::ClaudeOpus4_5Thinking => { + "anthropic.claude-opus-4-5-20251101-v1:0" + } Model::Claude3_5SonnetV2 => "anthropic.claude-3-5-sonnet-20241022-v2:0", Model::Claude3_5Sonnet => "anthropic.claude-3-5-sonnet-20240620-v1:0", Model::Claude3Opus => "anthropic.claude-3-opus-20240229-v1:0", @@ -309,6 +333,8 @@ impl Model { Self::ClaudeOpus4_1 => "Claude Opus 4.1", Self::ClaudeOpus4Thinking => "Claude Opus 4 Thinking", Self::ClaudeOpus4_1Thinking => "Claude Opus 4.1 Thinking", + Self::ClaudeOpus4_5 => "Claude Opus 4.5", + Self::ClaudeOpus4_5Thinking => "Claude Opus 4.5 Thinking", Self::Claude3_5SonnetV2 => "Claude 3.5 Sonnet v2", Self::Claude3_5Sonnet => "Claude 3.5 Sonnet", Self::Claude3Opus => "Claude 3 Opus", @@ -379,7 +405,9 @@ impl Model { | Self::ClaudeSonnet4_5 | Self::ClaudeSonnet4_5Thinking | Self::ClaudeOpus4Thinking - | Self::ClaudeOpus4_1Thinking => 200_000, + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => 200_000, Self::AmazonNovaPremier => 1_000_000, Self::PalmyraWriterX5 => 1_000_000, Self::PalmyraWriterX4 => 128_000, @@ -393,7 +421,11 @@ impl Model { Self::Claude3Opus | Self::Claude3Sonnet | Self::Claude3_5Haiku => 4_096, Self::Claude3_7Sonnet | Self::Claude3_7SonnetThinking => 128_000, Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking => 64_000, - Self::ClaudeSonnet4_5 | 
Self::ClaudeSonnet4_5Thinking | Self::ClaudeHaiku4_5 => 64_000, + Self::ClaudeSonnet4_5 + | Self::ClaudeSonnet4_5Thinking + | Self::ClaudeHaiku4_5 + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => 64_000, Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 @@ -418,6 +450,8 @@ impl Model { | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -443,6 +477,8 @@ impl Model { | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking | Self::ClaudeSonnet4 | Self::ClaudeSonnet4Thinking | Self::ClaudeSonnet4_5 @@ -484,7 +520,9 @@ impl Model { | Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 - | Self::ClaudeOpus4_1Thinking => true, + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => true, // Custom models - check if they have cache configuration Self::Custom { @@ -506,7 +544,9 @@ impl Model { | Self::ClaudeOpus4 | Self::ClaudeOpus4Thinking | Self::ClaudeOpus4_1 - | Self::ClaudeOpus4_1Thinking => Some(BedrockModelCacheConfiguration { + | Self::ClaudeOpus4_1Thinking + | Self::ClaudeOpus4_5 + | Self::ClaudeOpus4_5Thinking => Some(BedrockModelCacheConfiguration { max_cache_anchors: 4, min_total_token: 1024, }), @@ -535,11 +575,11 @@ impl Model { budget_tokens: Some(4096), } } - Model::ClaudeOpus4Thinking | Model::ClaudeOpus4_1Thinking => { - BedrockModelMode::Thinking { - budget_tokens: Some(4096), - } - } + Model::ClaudeOpus4Thinking + | Model::ClaudeOpus4_1Thinking + | Model::ClaudeOpus4_5Thinking => BedrockModelMode::Thinking { + budget_tokens: Some(4096), + }, _ => BedrockModelMode::Default, } } @@ -593,6 +633,8 @@ impl Model { | Model::ClaudeOpus4Thinking | Model::ClaudeOpus4_1 | Model::ClaudeOpus4_1Thinking + | Model::ClaudeOpus4_5 + | Model::ClaudeOpus4_5Thinking | Model::Claude3Haiku | Model::Claude3Opus | Model::Claude3Sonnet From 342eba6f220625c015d00334c6bc354f0e2c52e1 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Tue, 25 Nov 2025 02:46:35 +0530 Subject: [PATCH 0348/1030] project: Send LSP metadata to remote ServerInfo (#42831) Closes #39582 Release Notes: - Added LSP metadata to remote ServerInfo Here's the before/after: https://github.com/user-attachments/assets/1057faa5-82af-4975-abad-5e10e139fac1 --------- Co-authored-by: Kirill Bulatov --- crates/language_tools/src/lsp_log_view.rs | 82 +++++++++++++---------- crates/project/src/lsp_store.rs | 46 ++++++++++++- crates/project/src/project.rs | 45 ++++++++++--- crates/proto/proto/lsp.proto | 8 +++ 4 files changed, 136 insertions(+), 45 deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index 3f99a3e83413691c3893b184406f6e2569062623..c7aa78067294cbb63266e55a0d05d7abbeefddc2 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -7,12 +7,15 @@ use gpui::{ }; use language::{LanguageServerId, language_settings::SoftWrap}; use lsp::{ - LanguageServer, LanguageServerBinary, LanguageServerName, LanguageServerSelector, MessageType, - SetTraceParams, TraceValue, notification::SetTrace, + LanguageServer, LanguageServerName, LanguageServerSelector, MessageType, SetTraceParams, + TraceValue, notification::SetTrace, }; use project::{ - Project, - lsp_store::log_store::{self, Event, LanguageServerKind, LogKind, LogStore, 
Message}, + LanguageServerStatus, Project, + lsp_store::{ + LanguageServerBinaryInfo, + log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, + }, search::SearchQuery, }; use proto::toggle_lsp_logs::LogType; @@ -337,16 +340,28 @@ impl LspLogView { * Capabilities: {CAPABILITIES} * Configuration: {CONFIGURATION}", - NAME = info.name, + NAME = info.status.name, ID = info.id, - BINARY = info - .binary - .as_ref() - .map_or_else(|| "Unknown".to_string(), |binary| format!("{binary:#?}")), - WORKSPACE_FOLDERS = info.workspace_folders.join(", "), + BINARY = info.status.binary.as_ref().map_or_else( + || "Unknown".to_string(), + |binary| serde_json::to_string_pretty(binary) + .unwrap_or_else(|e| format!("Failed to serialize binary info: {e:#}")) + ), + WORKSPACE_FOLDERS = info + .status + .workspace_folders + .iter() + .filter_map(|uri| { + uri.to_file_path() + .ok() + .map(|path| path.to_string_lossy().into_owned()) + }) + .collect::>() + .join(", "), CAPABILITIES = serde_json::to_string_pretty(&info.capabilities) .unwrap_or_else(|e| format!("Failed to serialize capabilities: {e}")), CONFIGURATION = info + .status .configuration .map(|configuration| serde_json::to_string_pretty(&configuration)) .transpose() @@ -633,17 +648,12 @@ impl LspLogView { .or_else(move || { let capabilities = lsp_store.lsp_server_capabilities.get(&server_id)?.clone(); - let name = lsp_store - .language_server_statuses - .get(&server_id) - .map(|status| status.name.clone())?; + let status = lsp_store.language_server_statuses.get(&server_id)?.clone(); + Some(ServerInfo { id: server_id, capabilities, - binary: None, - name, - workspace_folders: Vec::new(), - configuration: None, + status, }) }) }) @@ -1314,10 +1324,7 @@ impl LspLogToolbarItemView { struct ServerInfo { id: LanguageServerId, capabilities: lsp::ServerCapabilities, - binary: Option, - name: LanguageServerName, - workspace_folders: Vec, - configuration: Option, + status: LanguageServerStatus, } impl ServerInfo { @@ -1325,18 +1332,25 @@ impl ServerInfo { Self { id: server.server_id(), capabilities: server.capabilities(), - binary: Some(server.binary().clone()), - name: server.name(), - workspace_folders: server - .workspace_folders() - .into_iter() - .filter_map(|path| { - path.to_file_path() - .ok() - .map(|path| path.to_string_lossy().into_owned()) - }) - .collect::>(), - configuration: Some(server.configuration().clone()), + status: LanguageServerStatus { + name: server.name(), + pending_work: Default::default(), + has_pending_diagnostic_updates: false, + progress_tokens: Default::default(), + worktree: None, + binary: Some(LanguageServerBinaryInfo { + path: server.binary().path.to_string_lossy().into_owned(), + arguments: server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + env: server.binary().env.clone(), + }), + configuration: Some(server.configuration().clone()), + workspace_folders: server.workspace_folders(), + }, } } } diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index e233ff1c5c121e301a85d829093cbdd37020fe07..d18262215198b8a1d7da38a4a325b6f1dcb82084 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -93,6 +93,7 @@ use rpc::{ proto::{LspRequestId, LspRequestMessage as _}, }; use serde::Serialize; +use serde_json::Value; use settings::{Settings, SettingsLocation, SettingsStore}; use sha2::{Digest, Sha256}; use smol::channel::Sender; @@ -3557,6 +3558,21 @@ fn notify_server_capabilities_updated(server: 
&LanguageServer, cx: &mut Context< message: proto::update_language_server::Variant::MetadataUpdated( proto::ServerMetadataUpdated { capabilities: Some(capabilities), + binary: Some(proto::LanguageServerBinaryInfo { + path: server.binary().path.to_string_lossy().into_owned(), + arguments: server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + }), + configuration: serde_json::to_string(server.configuration()).ok(), + workspace_folders: server + .workspace_folders() + .iter() + .map(|uri| uri.to_string()) + .collect(), }, ), }); @@ -3713,13 +3729,23 @@ pub enum LspStoreEvent { }, } +#[derive(Clone, Debug, Serialize)] +pub struct LanguageServerBinaryInfo { + pub path: String, + pub arguments: Vec, + pub env: Option>, +} + #[derive(Clone, Debug, Serialize)] pub struct LanguageServerStatus { pub name: LanguageServerName, pub pending_work: BTreeMap, pub has_pending_diagnostic_updates: bool, - progress_tokens: HashSet, + pub progress_tokens: HashSet, pub worktree: Option, + pub binary: Option, + pub configuration: Option, + pub workspace_folders: BTreeSet, } #[derive(Clone, Debug)] @@ -8130,6 +8156,9 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree, + binary: None, + configuration: None, + workspace_folders: BTreeSet::new(), }, ) }) @@ -9139,6 +9168,9 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: server.worktree_id.map(WorktreeId::from_proto), + binary: None, + configuration: None, + workspace_folders: BTreeSet::new(), }, ); cx.emit(LspStoreEvent::LanguageServerAdded( @@ -11155,6 +11187,18 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: Some(key.worktree_id), + binary: Some(LanguageServerBinaryInfo { + path: language_server.binary().path.to_string_lossy().into_owned(), + arguments: language_server + .binary() + .arguments + .iter() + .map(|arg| arg.to_string_lossy().into_owned()) + .collect(), + env: language_server.binary().env.clone(), + }), + configuration: Some(language_server.configuration().clone()), + workspace_folders: language_server.workspace_folders(), }, ); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 3f325aba2b18efb4f36faef4e0a655f716a860bd..63041f8dfff3a432ed8e447ab0fcdb47f519e9e7 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -37,7 +37,7 @@ use dap::inline_value::{InlineValueLocation, VariableLookupKind, VariableScope}; use crate::{ git_store::GitStore, - lsp_store::{SymbolLocation, log_store::LogKind}, + lsp_store::{LanguageServerBinaryInfo, SymbolLocation, log_store::LogKind}, project_search::SearchResultsHandle, }; pub use agent_server_store::{AgentServerStore, AgentServersUpdated, ExternalAgentServerName}; @@ -114,7 +114,7 @@ use std::{ ops::{Not as _, Range}, path::{Path, PathBuf}, pin::pin, - str, + str::{self, FromStr}, sync::Arc, time::Duration, }; @@ -3111,17 +3111,42 @@ impl Project { match message { proto::update_language_server::Variant::MetadataUpdated(update) => { - if let Some(capabilities) = update - .capabilities - .as_ref() - .and_then(|capabilities| serde_json::from_str(capabilities).ok()) - { - self.lsp_store.update(cx, |lsp_store, _| { + self.lsp_store.update(cx, |lsp_store, _| { + if let Some(capabilities) = update + .capabilities + .as_ref() + .and_then(|capabilities| serde_json::from_str(capabilities).ok()) + { lsp_store .lsp_server_capabilities 
.insert(*language_server_id, capabilities); - }); - } + } + + if let Some(language_server_status) = lsp_store + .language_server_statuses + .get_mut(language_server_id) + { + if let Some(binary) = &update.binary { + language_server_status.binary = + Some(LanguageServerBinaryInfo { + path: binary.path.clone(), + arguments: binary.arguments.clone(), + env: None, + }); + } + + language_server_status.configuration = update + .configuration + .as_ref() + .and_then(|config_str| serde_json::from_str(config_str).ok()); + + language_server_status.workspace_folders = update + .workspace_folders + .iter() + .filter_map(|uri_str| lsp::Uri::from_str(uri_str).ok()) + .collect(); + } + }); } proto::update_language_server::Variant::RegisteredForBuffer(update) => { if let Some(buffer_id) = BufferId::new(update.buffer_id).ok() { diff --git a/crates/proto/proto/lsp.proto b/crates/proto/proto/lsp.proto index 3bdd46c4572acbc570c198288ba5c79b93aa4286..fa44528e2ed6009e6f18b6b5b9702b5228f10f05 100644 --- a/crates/proto/proto/lsp.proto +++ b/crates/proto/proto/lsp.proto @@ -615,8 +615,16 @@ message RegisteredForBuffer { uint64 buffer_id = 2; } +message LanguageServerBinaryInfo { + string path = 1; + repeated string arguments = 2; +} + message ServerMetadataUpdated { optional string capabilities = 1; + optional LanguageServerBinaryInfo binary = 2; + optional string configuration = 3; + repeated string workspace_folders = 4; } message LanguageServerLog { From 769464762a0233f0a60aa27ec899021588085e43 Mon Sep 17 00:00:00 2001 From: Lennart Date: Mon, 24 Nov 2025 22:31:20 +0100 Subject: [PATCH 0349/1030] vim: Fix cursor shape after deactivation (#42834) Update the `Vim.deactivate` method to ensure that the cursor shape is reset to the one available in the user's settings, in the `cursor_shape` setting, instead of simply defaulting to `CursorShape::Bar`. In order to test this behavior, the `Editor.cursor_shape` method was also introduced. 
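A minimal standalone model of the fallback, assuming a simplified stand-in for Zed's `CursorShape` enum (the real fix reads `EditorSettings::get_global(cx).cursor_shape`, as shown in the diff below):

```rust
// Simplified stand-in for Zed's CursorShape; `Bar` is the editor default.
#[derive(Debug, PartialEq, Clone, Copy, Default)]
enum CursorShape {
    #[default]
    Bar,
    Block,
    Underline,
    Hollow,
}

// Previously deactivation hard-coded `CursorShape::Bar`; now it only falls
// back to the default when the user has no `cursor_shape` setting.
fn shape_after_deactivate(user_setting: Option<CursorShape>) -> CursorShape {
    user_setting.unwrap_or_default()
}

fn main() {
    assert_eq!(
        shape_after_deactivate(Some(CursorShape::Underline)),
        CursorShape::Underline
    );
    assert_eq!(shape_after_deactivate(None), CursorShape::Bar);
}
```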
Release Notes: - Fixed the cursor shape reset in vim mode deactivation, ensuring that the user's `cursor_shape` setting is used --------- Co-authored-by: dino --- crates/editor/src/editor.rs | 4 ++++ crates/vim/src/test.rs | 26 +++++++++++++++++++++++++- crates/vim/src/vim.rs | 7 ++++++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 7a3c9b8594596152800442193e9364dc1a2c8aba..adb24900bf144b9cdedfb432e296a9a9e27a51c7 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -3025,6 +3025,10 @@ impl Editor { cx.notify(); } + pub fn cursor_shape(&self) -> CursorShape { + self.cursor_shape + } + pub fn set_current_line_highlight( &mut self, current_line_highlight: Option, diff --git a/crates/vim/src/test.rs b/crates/vim/src/test.rs index 5a98ec47b122e0d1ed7fd1edfc7c5e2265c40d90..5932a740945becae9d15025d358a52d5a4e279dd 100644 --- a/crates/vim/src/test.rs +++ b/crates/vim/src/test.rs @@ -16,7 +16,7 @@ use editor::{ use futures::StreamExt; use gpui::{KeyBinding, Modifiers, MouseButton, TestAppContext, px}; use itertools::Itertools; -use language::{Language, LanguageConfig, Point}; +use language::{CursorShape, Language, LanguageConfig, Point}; pub use neovim_backed_test_context::*; use settings::SettingsStore; use ui::Pixels; @@ -2404,3 +2404,27 @@ async fn test_repeat_grouping_41735(cx: &mut gpui::TestAppContext) { cx.simulate_shared_keystrokes("u").await; cx.shared_state().await.assert_eq("ˇaaa"); } + +#[gpui::test] +async fn test_deactivate(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + + cx.update_global(|store: &mut SettingsStore, cx| { + store.update_user_settings(cx, |settings| { + settings.editor.cursor_shape = Some(settings::CursorShape::Underline); + }); + }); + + // Assert that, while in `Normal` mode, the cursor shape is `Block` but, + // after deactivating vim mode, it should revert to the one specified in the + // user's settings, if set. 
+ cx.update_editor(|editor, _window, _cx| { + assert_eq!(editor.cursor_shape(), CursorShape::Block); + }); + + cx.disable_vim(); + + cx.update_editor(|editor, _window, _cx| { + assert_eq!(editor.cursor_shape(), CursorShape::Underline); + }); +} diff --git a/crates/vim/src/vim.rs b/crates/vim/src/vim.rs index f87c562c8a0821f5dfea66dd33b1c44ca6021f42..1ffcf7e2224341affc7498032fd5a181e256943d 100644 --- a/crates/vim/src/vim.rs +++ b/crates/vim/src/vim.rs @@ -954,7 +954,12 @@ impl Vim { } fn deactivate(editor: &mut Editor, cx: &mut Context) { - editor.set_cursor_shape(CursorShape::Bar, cx); + editor.set_cursor_shape( + EditorSettings::get_global(cx) + .cursor_shape + .unwrap_or_default(), + cx, + ); editor.set_clip_at_line_ends(false, cx); editor.set_collapse_matches(false); editor.set_input_enabled(true); From 9e69ac889c7fac29a88f5e213826ce3be4a2895b Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Tue, 25 Nov 2025 03:15:12 +0530 Subject: [PATCH 0350/1030] editor: Fix copy file actions not working in remote environments (#43362) Closes #42500 Release Notes: - Fixed all three editor actions not working in remote environments - `editor: copy file name` - `editor: copy file location` - `editor: copy file name without extension` Here's the before/after: https://github.com/user-attachments/assets/bfb03e99-2e1a-47a2-bd26-280180154fe3 --- crates/collab/src/tests/editor_tests.rs | 287 +++++++++++++++++++++++- crates/editor/src/editor.rs | 27 ++- 2 files changed, 302 insertions(+), 12 deletions(-) diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index 33f07bfb388763875565bc9e37bda363f02600f0..fe20ab935c9fb2ffd2c18962953f9d62ca06fb16 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -7,8 +7,9 @@ use editor::{ DocumentColorsRenderMode, Editor, FETCH_COLORS_DEBOUNCE_TIMEOUT, MultiBufferOffset, RowInfo, SelectionEffects, actions::{ - ConfirmCodeAction, ConfirmCompletion, ConfirmRename, ContextMenuFirst, - ExpandMacroRecursively, MoveToEnd, Redo, Rename, SelectAll, ToggleCodeActions, Undo, + ConfirmCodeAction, ConfirmCompletion, ConfirmRename, ContextMenuFirst, CopyFileLocation, + CopyFileName, CopyFileNameWithoutExtension, ExpandMacroRecursively, MoveToEnd, Redo, + Rename, SelectAll, ToggleCodeActions, Undo, }, test::{ editor_test_context::{AssertionContextManager, EditorTestContext}, @@ -4269,6 +4270,288 @@ async fn test_client_can_query_lsp_ext(cx_a: &mut TestAppContext, cx_b: &mut Tes }); } +#[gpui::test] +async fn test_copy_file_name_without_extension( + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, +) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + cx_b.update(editor::init); + + client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! 
{" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.copy_file_name_without_extension(&CopyFileNameWithoutExtension, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some("main".to_string()) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.copy_file_name_without_extension(&CopyFileNameWithoutExtension, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some("main".to_string()) + ); +} + +#[gpui::test] +async fn test_copy_file_name(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + cx_b.update(editor::init); + + client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! 
{" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.copy_file_name(&CopyFileName, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some("main.rs".to_string()) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.copy_file_name(&CopyFileName, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some("main.rs".to_string()) + ); +} + +#[gpui::test] +async fn test_copy_file_location(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { + let mut server = TestServer::start(cx_a.executor()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + + cx_b.update(editor::init); + + client_a + .fs() + .insert_tree( + path!("/root"), + json!({ + "src": { + "main.rs": indoc! 
{" + fn main() { + println!(\"Hello, world!\"); + } + "}, + } + }), + ) + .await; + + let (project_a, worktree_id) = client_a.build_local_project(path!("/root"), cx_a).await; + let active_call_a = cx_a.read(ActiveCall::global); + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + + let project_b = client_b.join_remote_project(project_id, cx_b).await; + + let (workspace_a, cx_a) = client_a.build_workspace(&project_a, cx_a); + let (workspace_b, cx_b) = client_b.build_workspace(&project_b, cx_b); + + let editor_a = workspace_a + .update_in(cx_a, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + let editor_b = workspace_b + .update_in(cx_b, |workspace, window, cx| { + workspace.open_path( + (worktree_id, rel_path("src/main.rs")), + None, + true, + window, + cx, + ) + }) + .await + .unwrap() + .downcast::() + .unwrap(); + + cx_a.run_until_parked(); + cx_b.run_until_parked(); + + editor_a.update_in(cx_a, |editor, window, cx| { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(16)..MultiBufferOffset(16)]); + }); + editor.copy_file_location(&CopyFileLocation, window, cx); + }); + + assert_eq!( + cx_a.read_from_clipboard().and_then(|item| item.text()), + Some(format!("{}:2", path!("src/main.rs"))) + ); + + editor_b.update_in(cx_b, |editor, window, cx| { + editor.change_selections(Default::default(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(16)..MultiBufferOffset(16)]); + }); + editor.copy_file_location(&CopyFileLocation, window, cx); + }); + + assert_eq!( + cx_b.read_from_clipboard().and_then(|item| item.text()), + Some(format!("{}:2", path!("src/main.rs"))) + ); +} + #[track_caller] fn tab_undo_assert( cx_a: &mut EditorTestContext, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index adb24900bf144b9cdedfb432e296a9a9e27a51c7..08627f1bd64be6e62581014628c57306df43623e 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -20234,18 +20234,20 @@ impl Editor { _: &mut Window, cx: &mut Context, ) { - if let Some(file) = self.target_file(cx) - && let Some(file_stem) = file.path().file_stem() - { + if let Some(file_stem) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let file = buffer.read(cx).file()?; + file.path().file_stem() + }) { cx.write_to_clipboard(ClipboardItem::new_string(file_stem.to_string())); } } pub fn copy_file_name(&mut self, _: &CopyFileName, _: &mut Window, cx: &mut Context) { - if let Some(file) = self.target_file(cx) - && let Some(name) = file.path().file_name() - { - cx.write_to_clipboard(ClipboardItem::new_string(name.to_string())); + if let Some(file_name) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let file = buffer.read(cx).file()?; + Some(file.file_name(cx)) + }) { + cx.write_to_clipboard(ClipboardItem::new_string(file_name.to_string())); } } @@ -20519,9 +20521,14 @@ impl Editor { .start .row + 1; - if let Some(file) = self.target_file(cx) { - let path = file.path().display(file.path_style(cx)); - cx.write_to_clipboard(ClipboardItem::new_string(format!("{path}:{selection}"))); + if let Some(file_location) = self.active_excerpt(cx).and_then(|(_, buffer, _)| { + let project = self.project()?.read(cx); + let file = buffer.read(cx).file()?; + let path = file.path().display(project.path_style(cx)); + + Some(format!("{path}:{selection}")) + }) { + 
cx.write_to_clipboard(ClipboardItem::new_string(file_location)); } } From f75e7582e68236629b44999b3031fee3b0d991f4 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Mon, 24 Nov 2025 13:46:13 -0800 Subject: [PATCH 0351/1030] Fix zed cli in NixOS WSL instances (#43433) This fixes running `zed ` inside nixos wsl instances. We're copying the approach used elsewhere which is to try using `--exec` first, and if that fails use an actual shell which should cover the nixos case because it only puts binaries on your PATH inside the `/etc/profile` script which is sourced on shell startup. Release Notes: - N/A --------- Co-authored-by: John Tur --- crates/cli/src/main.rs | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/crates/cli/src/main.rs b/crates/cli/src/main.rs index 7dd8a3253c9a0c8440d9342e5c0b3fd19e7f9828..7988f001dab37858d36f791fa8a184fe329c4be5 100644 --- a/crates/cli/src/main.rs +++ b/crates/cli/src/main.rs @@ -12,7 +12,9 @@ use clap::Parser; use cli::{CliRequest, CliResponse, IpcHandshake, ipc::IpcOneShotServer}; use parking_lot::Mutex; use std::{ - env, fs, io, + env, + ffi::OsStr, + fs, io, path::{Path, PathBuf}, process::ExitStatus, sync::Arc, @@ -300,7 +302,6 @@ mod tests { fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { let mut source = PathWithPosition::parse_str(source); - let mut command = util::command::new_std_command("wsl.exe"); let (user, distro_name) = if let Some((user, distro)) = wsl.split_once('@') { if user.is_empty() { @@ -311,20 +312,34 @@ fn parse_path_in_wsl(source: &str, wsl: &str) -> Result { (None, wsl) }; + let mut args = vec!["--distribution", distro_name]; if let Some(user) = user { - command.arg("--user").arg(user); + args.push("--user"); + args.push(user); } - let output = command - .arg("--distribution") - .arg(distro_name) + let command = [ + OsStr::new("realpath"), + OsStr::new("-s"), + source.path.as_ref(), + ]; + + let output = util::command::new_std_command("wsl.exe") + .args(&args) .arg("--exec") - .arg("realpath") - .arg("-s") - .arg(&source.path) + .args(&command) .output()?; + let result = if output.status.success() { + String::from_utf8_lossy(&output.stdout).to_string() + } else { + let fallback = util::command::new_std_command("wsl.exe") + .args(&args) + .arg("--") + .args(&command) + .output()?; + String::from_utf8_lossy(&fallback.stdout).to_string() + }; - let result = String::from_utf8_lossy(&output.stdout); source.path = Path::new(result.trim()).to_owned(); Ok(source.to_string(|path| path.to_string_lossy().into_owned())) From e499f157dda2719fe0d19f921e0eafadaca7d4a7 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Mon, 24 Nov 2025 23:46:55 +0200 Subject: [PATCH 0352/1030] Keep single default PHP language server (#43432) https://github.com/zed-extensions/php/blob/9a119b18eeb247072964a19ce46fab54bbd1bb30/extension.toml provides 3 language servers for `php`, so `...` will always include all 3 if those are not excluded or included explicitly. Change the configs and docs so, that only one php language server is used. 
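For reference, a minimal sketch of the resulting user-facing configuration (illustrative only; the server names and the `!`/`...` behavior are taken from the settings changes in this patch, where `"!name"` excludes a server and `"..."` appends the remaining registered ones):

```json
{
  "languages": {
    "PHP": {
      // "phpactor" stays the single default; "!intelephense" and "!phptools"
      // keep the extension's other servers from being pulled in by "...".
      "language_servers": ["phpactor", "!intelephense", "!phptools", "..."]
    }
  }
}
```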
Release Notes: - N/A --- assets/settings/default.json | 4 ++-- docs/src/configuring-languages.md | 2 +- docs/src/languages/php.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/settings/default.json b/assets/settings/default.json index c8ffd31617df7d057e89329c2db70c6b6aa21e95..a222a16cb290eae905a69b492e9de8d3a1493592 100644 --- a/assets/settings/default.json +++ b/assets/settings/default.json @@ -1889,7 +1889,7 @@ } }, "PHP": { - "language_servers": ["phpactor", "!intelephense", "..."], + "language_servers": ["phpactor", "!intelephense", "!phptools", "..."], "prettier": { "allowed": true, "plugins": ["@prettier/plugin-php"], @@ -2138,7 +2138,7 @@ "windows": { "languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } }, diff --git a/docs/src/configuring-languages.md b/docs/src/configuring-languages.md index 7b3456986e2766d134f3c1f15f94632feb067fb0..e478fab075acec67967a6c44cc5966e632aa1110 100644 --- a/docs/src/configuring-languages.md +++ b/docs/src/configuring-languages.md @@ -123,7 +123,7 @@ You can specify your preference using the `language_servers` setting: ```json [settings] "languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } ``` diff --git a/docs/src/languages/php.md b/docs/src/languages/php.md index 1d7de27c5480421e2bc4d1f150a0b6d04a5ee49c..73d5ecbf37eae6ab9b7e710c132025d217fe57bd 100644 --- a/docs/src/languages/php.md +++ b/docs/src/languages/php.md @@ -35,7 +35,7 @@ To switch to `intelephense`, add the following to your `settings.json`: { "languages": { "PHP": { - "language_servers": ["intelephense", "!phpactor", "..."] + "language_servers": ["intelephense", "!phpactor", "!phptools", "..."] } } } From 8fd2e2164c9a181fff090792ecd088a486aacf26 Mon Sep 17 00:00:00 2001 From: Julia Ryan Date: Mon, 24 Nov 2025 13:54:18 -0800 Subject: [PATCH 0353/1030] Fix remote project snippet duplication (#43429) Closes #43311 Release Notes: - N/A --------- Co-authored-by: John Tur --- crates/project/src/project.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 63041f8dfff3a432ed8e447ab0fcdb47f519e9e7..8875b3bb6facfb6ce268a38a54585497c8b198cd 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -1244,9 +1244,7 @@ impl Project { let (tx, rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| Self::send_buffer_ordered_messages(this, rx, cx).await) .detach(); - let global_snippets_dir = paths::snippets_dir().to_owned(); - let snippets = - SnippetProvider::new(fs.clone(), BTreeSet::from_iter([global_snippets_dir]), cx); + let snippets = SnippetProvider::new(fs.clone(), BTreeSet::from_iter([]), cx); let (remote_proto, path_style) = remote.read_with(cx, |remote, _| (remote.proto_client(), remote.path_style())); From 17d7988ad482b8aec7ea9ae2560f055e3a8ee26f Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 00:05:16 +0200 Subject: [PATCH 0354/1030] Redact environment variables in server info view (#43436) Follow-up of https://github.com/zed-industries/zed/pull/42831 Release Notes: - N/A --- crates/language_tools/src/lsp_log_view.rs | 27 +++++------------------ crates/lsp/src/lsp.rs | 2 +- crates/project/src/lsp_store.rs | 20 ++--------------- crates/project/src/project.rs | 21 +++++++++++------- 4 files changed, 22 insertions(+), 48 
deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index c7aa78067294cbb63266e55a0d05d7abbeefddc2..e7586583704750b0c84832ecb8cb9ba8d5a9b5a1 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -5,6 +5,7 @@ use gpui::{ App, Context, Corner, Entity, EventEmitter, FocusHandle, Focusable, IntoElement, ParentElement, Render, Styled, Subscription, Task, WeakEntity, Window, actions, div, }; +use itertools::Itertools as _; use language::{LanguageServerId, language_settings::SoftWrap}; use lsp::{ LanguageServer, LanguageServerName, LanguageServerSelector, MessageType, SetTraceParams, @@ -12,10 +13,7 @@ use lsp::{ }; use project::{ LanguageServerStatus, Project, - lsp_store::{ - LanguageServerBinaryInfo, - log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, - }, + lsp_store::log_store::{self, Event, LanguageServerKind, LogKind, LogStore, Message}, search::SearchQuery, }; use proto::toggle_lsp_logs::LogType; @@ -351,12 +349,8 @@ impl LspLogView { .status .workspace_folders .iter() - .filter_map(|uri| { - uri.to_file_path() - .ok() - .map(|path| path.to_string_lossy().into_owned()) - }) - .collect::>() + .filter_map(|uri| uri.to_file_path().ok()) + .map(|path| path.to_string_lossy().into_owned()) .join(", "), CAPABILITIES = serde_json::to_string_pretty(&info.capabilities) .unwrap_or_else(|e| format!("Failed to serialize capabilities: {e}")), @@ -968,7 +962,7 @@ impl Render for LspLogToolbarItemView { for (server_id, name, worktree_root, active_entry_kind) in available_language_servers.iter() { - let label = format!("{} ({})", name, worktree_root); + let label = format!("{name} ({worktree_root})"); let server_id = *server_id; let active_entry_kind = *active_entry_kind; menu = menu.entry( @@ -1338,16 +1332,7 @@ impl ServerInfo { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: None, - binary: Some(LanguageServerBinaryInfo { - path: server.binary().path.to_string_lossy().into_owned(), - arguments: server - .binary() - .arguments - .iter() - .map(|arg| arg.to_string_lossy().into_owned()) - .collect(), - env: server.binary().env.clone(), - }), + binary: Some(server.binary().clone()), configuration: Some(server.configuration().clone()), workspace_folders: server.workspace_folders(), }, diff --git a/crates/lsp/src/lsp.rs b/crates/lsp/src/lsp.rs index 05771b8ce5db870a41228f81e4aac8222b11ad53..1bc635dcbeca2d38506640b86e547ce90ec76d3d 100644 --- a/crates/lsp/src/lsp.rs +++ b/crates/lsp/src/lsp.rs @@ -62,7 +62,7 @@ pub enum IoKind { /// Represents a launchable language server. This can either be a standalone binary or the path /// to a runtime with arguments to instruct it to launch the actual language server file. 
-#[derive(Clone)] +#[derive(Clone, Serialize)] pub struct LanguageServerBinary { pub path: PathBuf, pub arguments: Vec, diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index d18262215198b8a1d7da38a4a325b6f1dcb82084..349bfa9ed00223ea71d4d77dd32bdf433c39c784 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -3729,13 +3729,6 @@ pub enum LspStoreEvent { }, } -#[derive(Clone, Debug, Serialize)] -pub struct LanguageServerBinaryInfo { - pub path: String, - pub arguments: Vec, - pub env: Option>, -} - #[derive(Clone, Debug, Serialize)] pub struct LanguageServerStatus { pub name: LanguageServerName, @@ -3743,7 +3736,7 @@ pub struct LanguageServerStatus { pub has_pending_diagnostic_updates: bool, pub progress_tokens: HashSet, pub worktree: Option, - pub binary: Option, + pub binary: Option, pub configuration: Option, pub workspace_folders: BTreeSet, } @@ -11187,16 +11180,7 @@ impl LspStore { has_pending_diagnostic_updates: false, progress_tokens: Default::default(), worktree: Some(key.worktree_id), - binary: Some(LanguageServerBinaryInfo { - path: language_server.binary().path.to_string_lossy().into_owned(), - arguments: language_server - .binary() - .arguments - .iter() - .map(|arg| arg.to_string_lossy().into_owned()) - .collect(), - env: language_server.binary().env.clone(), - }), + binary: Some(language_server.binary().clone()), configuration: Some(language_server.configuration().clone()), workspace_folders: language_server.workspace_folders(), }, diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 8875b3bb6facfb6ce268a38a54585497c8b198cd..149d30a5283c13f71477fc6776d5ca7f61f6205d 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -37,7 +37,7 @@ use dap::inline_value::{InlineValueLocation, VariableLookupKind, VariableScope}; use crate::{ git_store::GitStore, - lsp_store::{LanguageServerBinaryInfo, SymbolLocation, log_store::LogKind}, + lsp_store::{SymbolLocation, log_store::LogKind}, project_search::SearchResultsHandle, }; pub use agent_server_store::{AgentServerStore, AgentServersUpdated, ExternalAgentServerName}; @@ -87,7 +87,8 @@ use language::{ }; use lsp::{ CodeActionKind, CompletionContext, CompletionItemKind, DocumentHighlightKind, InsertTextMode, - LanguageServerId, LanguageServerName, LanguageServerSelector, MessageActionItem, + LanguageServerBinary, LanguageServerId, LanguageServerName, LanguageServerSelector, + MessageActionItem, }; use lsp_command::*; use lsp_store::{CompletionDocumentation, LspFormatTarget, OpenLspBufferHandle}; @@ -111,6 +112,7 @@ use snippet_provider::SnippetProvider; use std::{ borrow::Cow, collections::BTreeMap, + ffi::OsString, ops::{Not as _, Range}, path::{Path, PathBuf}, pin::pin, @@ -3125,12 +3127,15 @@ impl Project { .get_mut(language_server_id) { if let Some(binary) = &update.binary { - language_server_status.binary = - Some(LanguageServerBinaryInfo { - path: binary.path.clone(), - arguments: binary.arguments.clone(), - env: None, - }); + language_server_status.binary = Some(LanguageServerBinary { + path: PathBuf::from(&binary.path), + arguments: binary + .arguments + .iter() + .map(OsString::from) + .collect(), + env: None, + }); } language_server_status.configuration = update From 9122dd2d701a8628b4fa7ef7c82897205df2d908 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Mon, 24 Nov 2025 22:17:48 -0800 Subject: [PATCH 0355/1030] Combine zeta and zeta2 edit prediction providers (#43284) We've realized that a lot of the logic within 
an `EditPredictionProvider` is not specific to a particular edit prediction
model / service. Rather, it is just the generic state management required to
perform edit predictions at all in Zed.

We want to move to a setup where there's one "built-in" edit prediction
provider in Zed, which can be pointed at different edit prediction models.
The only logic that is different for different models is how we construct the
prompt, send the request, and parse the output.

This PR also changes the behavior of the staff-only `zeta2` feature flag so
that it only gates your *ability* to use Zeta2, but you can still use your
local settings file to choose between different edit prediction
models/services: zeta1, zeta2, and sweep.

This PR also makes zeta1's outcome reporting and prediction-rating features
work with all prediction models, not just zeta1.

To do:

* [x] remove duplicated logic around sending cloud requests between zeta1 and zeta2
* [x] port the outcome reporting logic from zeta to zeta2.
* [x] get the "rate completions" modal working with all EP models
* [x] display edit prediction diff
* [x] show edit history events
* [x] remove the original `zeta` crate.

Release Notes:

- N/A

---------

Co-authored-by: Agus Zubiaga
Co-authored-by: Ben Kunkle
---
 Cargo.lock | 65 +-
 Cargo.toml | 2 -
 assets/keymaps/default-macos.json | 16 +-
 .../cloud_llm_client/src/predict_edits_v3.rs | 39 +-
 .../src/cloud_zeta2_prompt.rs | 4 +-
 crates/edit_prediction_button/Cargo.toml | 1 -
 .../src/edit_prediction_button.rs | 28 +-
 crates/language/src/buffer.rs | 46 +-
 crates/language/src/syntax_map.rs | 7 +
 .../settings/src/settings_content/language.rs | 29 +-
 crates/zed/Cargo.toml | 1 -
 .../zed/src/zed/edit_prediction_registry.rs | 118 +-
 crates/zeta/Cargo.toml | 37 +-
 .../{zeta2 => zeta}/src/assemble_excerpts.rs | 0
 crates/zeta/src/completion_diff_element.rs | 173 -
 crates/zeta/src/init.rs | 110 -
 crates/zeta/src/onboarding_modal.rs | 12 +-
 crates/zeta/src/onboarding_telemetry.rs | 9 -
 crates/{zeta2 => zeta}/src/prediction.rs | 44 +-
 crates/{zeta2 => zeta}/src/provider.rs | 14 +-
 ...tion_modal.rs => rate_prediction_modal.rs} | 426 +-
 .../{zeta2 => zeta}/src/retrieval_search.rs | 0
 crates/{zeta2 => zeta}/src/sweep_ai.rs | 31 +-
 crates/{zeta2 => zeta}/src/udiff.rs | 0
 crates/{zeta2 => zeta}/src/xml_edits.rs | 0
 crates/zeta/src/zeta.rs | 4925 ++++++++++-------
 crates/zeta/src/zeta1.rs | 500 ++
 crates/zeta/src/{ => zeta1}/input_excerpt.rs | 6 +-
 crates/zeta/src/zeta_tests.rs | 671 +++
 crates/zeta2/Cargo.toml | 61 -
 crates/zeta2/LICENSE-GPL | 1 -
 crates/zeta2/src/zeta2.rs | 2968 ----------
 crates/zeta2_tools/Cargo.toml | 5 +-
 crates/zeta2_tools/src/zeta2_context_view.rs | 2 +-
 crates/zeta2_tools/src/zeta2_tools.rs | 576 +-
 crates/zeta_cli/Cargo.toml | 3 +-
 crates/zeta_cli/src/evaluate.rs | 2 +-
 crates/zeta_cli/src/example.rs | 4 +-
 crates/zeta_cli/src/main.rs | 13 +-
 crates/zeta_cli/src/predict.rs | 42 +-
 crates/zeta_cli/src/syntax_retrieval_stats.rs | 4 +-
 41 files changed, 4900 insertions(+), 6095 deletions(-)
 rename crates/{zeta2 => zeta}/src/assemble_excerpts.rs (100%)
 delete mode 100644 crates/zeta/src/completion_diff_element.rs
 delete mode 100644 crates/zeta/src/init.rs
 delete mode 100644 crates/zeta/src/onboarding_telemetry.rs
 rename crates/{zeta2 => zeta}/src/prediction.rs (86%)
 rename crates/{zeta2 => zeta}/src/provider.rs (93%)
 rename crates/zeta/src/{rate_completion_modal.rs => rate_prediction_modal.rs} (60%)
 rename crates/{zeta2 => zeta}/src/retrieval_search.rs (100%)
 rename crates/{zeta2 => zeta}/src/sweep_ai.rs (77%)
rename crates/{zeta2 => zeta}/src/udiff.rs (100%) rename crates/{zeta2 => zeta}/src/xml_edits.rs (100%) create mode 100644 crates/zeta/src/zeta1.rs rename crates/zeta/src/{ => zeta1}/input_excerpt.rs (98%) create mode 100644 crates/zeta/src/zeta_tests.rs delete mode 100644 crates/zeta2/Cargo.toml delete mode 120000 crates/zeta2/LICENSE-GPL delete mode 100644 crates/zeta2/src/zeta2.rs diff --git a/Cargo.lock b/Cargo.lock index 63734b552d7475eacdb2ee3eac66371f7c029d28..93961b4181aa1ad721ba8d740736d86c2ae32ca2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5309,7 +5309,6 @@ dependencies = [ "workspace", "zed_actions", "zeta", - "zeta2", ] [[package]] @@ -21316,7 +21315,6 @@ dependencies = [ "zed_actions", "zed_env_vars", "zeta", - "zeta2", "zeta2_tools", "zlog", "zlog_settings", @@ -21636,48 +21634,52 @@ dependencies = [ "ai_onboarding", "anyhow", "arrayvec", - "call", + "brotli", + "buffer_diff", "client", "clock", "cloud_api_types", "cloud_llm_client", + "cloud_zeta2_prompt", "collections", "command_palette_hooks", "copilot", "ctor", "db", "edit_prediction", + "edit_prediction_context", "editor", "feature_flags", "fs", "futures 0.3.31", "gpui", - "http_client", "indoc", "itertools 0.14.0", "language", "language_model", "log", + "lsp", + "markdown", "menu", + "open_ai", "parking_lot", "postage", + "pretty_assertions", "project", "rand 0.9.2", "regex", "release_channel", - "reqwest_client", - "rpc", "semver", "serde", "serde_json", "settings", + "smol", + "strsim", "strum 0.27.2", "telemetry", "telemetry_events", "theme", "thiserror 2.0.17", - "tree-sitter-go", - "tree-sitter-rust", "ui", "util", "uuid", @@ -21687,53 +21689,11 @@ dependencies = [ "zlog", ] -[[package]] -name = "zeta2" -version = "0.1.0" -dependencies = [ - "anyhow", - "arrayvec", - "brotli", - "chrono", - "client", - "clock", - "cloud_llm_client", - "cloud_zeta2_prompt", - "collections", - "edit_prediction", - "edit_prediction_context", - "feature_flags", - "futures 0.3.31", - "gpui", - "indoc", - "language", - "language_model", - "log", - "lsp", - "open_ai", - "pretty_assertions", - "project", - "release_channel", - "semver", - "serde", - "serde_json", - "settings", - "smol", - "strsim", - "thiserror 2.0.17", - "util", - "uuid", - "workspace", - "worktree", - "zlog", -] - [[package]] name = "zeta2_tools" version = "0.1.0" dependencies = [ "anyhow", - "chrono", "clap", "client", "cloud_llm_client", @@ -21746,9 +21706,7 @@ dependencies = [ "gpui", "indoc", "language", - "log", "multi_buffer", - "ordered-float 2.10.1", "pretty_assertions", "project", "serde", @@ -21760,7 +21718,7 @@ dependencies = [ "ui_input", "util", "workspace", - "zeta2", + "zeta", "zlog", ] @@ -21810,7 +21768,6 @@ dependencies = [ "util", "watch", "zeta", - "zeta2", "zlog", ] diff --git a/Cargo.toml b/Cargo.toml index e3ba2cb817357f5733179864bc23161d01aa1123..ab18418939e1b7100684e3c0acec277e7ec75a88 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -201,7 +201,6 @@ members = [ "crates/zed_actions", "crates/zed_env_vars", "crates/zeta", - "crates/zeta2", "crates/zeta_cli", "crates/zlog", "crates/zlog_settings", @@ -433,7 +432,6 @@ zed = { path = "crates/zed" } zed_actions = { path = "crates/zed_actions" } zed_env_vars = { path = "crates/zed_env_vars" } zeta = { path = "crates/zeta" } -zeta2 = { path = "crates/zeta2" } zlog = { path = "crates/zlog" } zlog_settings = { path = "crates/zlog_settings" } diff --git a/assets/keymaps/default-macos.json b/assets/keymaps/default-macos.json index 
2f7c25a3560e09bccb9f45c64df38048eefdddd6..a298db28e63fd761f2f6d58827a7bcf5c8b39962 100644 --- a/assets/keymaps/default-macos.json +++ b/assets/keymaps/default-macos.json @@ -1218,23 +1218,23 @@ } }, { - "context": "RateCompletionModal", + "context": "RatePredictionsModal", "use_key_equivalents": true, "bindings": { - "cmd-shift-enter": "zeta::ThumbsUpActiveCompletion", - "cmd-shift-backspace": "zeta::ThumbsDownActiveCompletion", + "cmd-shift-enter": "zeta::ThumbsUpActivePrediction", + "cmd-shift-backspace": "zeta::ThumbsDownActivePrediction", "shift-down": "zeta::NextEdit", "shift-up": "zeta::PreviousEdit", - "right": "zeta::PreviewCompletion" + "right": "zeta::PreviewPrediction" } }, { - "context": "RateCompletionModal > Editor", + "context": "RatePredictionsModal > Editor", "use_key_equivalents": true, "bindings": { - "escape": "zeta::FocusCompletions", - "cmd-shift-enter": "zeta::ThumbsUpActiveCompletion", - "cmd-shift-backspace": "zeta::ThumbsDownActiveCompletion" + "escape": "zeta::FocusPredictions", + "cmd-shift-enter": "zeta::ThumbsUpActivePrediction", + "cmd-shift-backspace": "zeta::ThumbsDownActivePrediction" } }, { diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 32a5a34d9d3b63332008a9f7df84a1990f87f17c..47e5e71589c806f71725ee4f218ca4a86bee62d0 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use std::{ fmt::{Display, Write as _}, ops::{Add, Range, Sub}, - path::{Path, PathBuf}, + path::Path, sync::Arc, }; use strum::EnumIter; @@ -17,7 +17,7 @@ pub struct PlanContextRetrievalRequest { pub excerpt_path: Arc, pub excerpt_line_range: Range, pub cursor_file_max_row: Line, - pub events: Vec, + pub events: Vec>, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -36,7 +36,7 @@ pub struct PredictEditsRequest { pub signatures: Vec, #[serde(skip_serializing_if = "Vec::is_empty", default)] pub referenced_declarations: Vec, - pub events: Vec, + pub events: Vec>, #[serde(default)] pub can_collect_data: bool, #[serde(skip_serializing_if = "Vec::is_empty", default)] @@ -120,10 +120,11 @@ impl std::fmt::Display for PromptFormat { #[serde(tag = "event")] pub enum Event { BufferChange { - path: Option, - old_path: Option, + path: Arc, + old_path: Arc, diff: String, predicted: bool, + in_open_source_repo: bool, }, } @@ -135,23 +136,21 @@ impl Display for Event { old_path, diff, predicted, + .. 
} => { - let new_path = path.as_deref().unwrap_or(Path::new("untitled")); - let old_path = old_path.as_deref().unwrap_or(new_path); - if *predicted { write!( f, "// User accepted prediction:\n--- a/{}\n+++ b/{}\n{diff}", DiffPathFmt(old_path), - DiffPathFmt(new_path) + DiffPathFmt(path) ) } else { write!( f, "--- a/{}\n+++ b/{}\n{diff}", DiffPathFmt(old_path), - DiffPathFmt(new_path) + DiffPathFmt(path) ) } } @@ -300,10 +299,11 @@ mod tests { #[test] fn test_event_display() { let ev = Event::BufferChange { - path: None, - old_path: None, + path: Path::new("untitled").into(), + old_path: Path::new("untitled").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -317,10 +317,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("foo/bar.txt")), - old_path: Some(PathBuf::from("foo/bar.txt")), + path: Path::new("foo/bar.txt").into(), + old_path: Path::new("foo/bar.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -334,10 +335,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("abc.txt")), - old_path: Some(PathBuf::from("123.txt")), + path: Path::new("abc.txt").into(), + old_path: Path::new("123.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: false, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), @@ -351,10 +353,11 @@ mod tests { ); let ev = Event::BufferChange { - path: Some(PathBuf::from("abc.txt")), - old_path: Some(PathBuf::from("123.txt")), + path: Path::new("abc.txt").into(), + old_path: Path::new("123.txt").into(), diff: "@@ -1,2 +1,2 @@\n-a\n-b\n".into(), predicted: true, + in_open_source_repo: true, }; assert_eq!( ev.to_string(), diff --git a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs index 2ddabf750be763542bfc10b794afcb034ff08443..d67190c17556c5eb8b901e9baad73cc2691a9c78 100644 --- a/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs +++ b/crates/cloud_zeta2_prompt/src/cloud_zeta2_prompt.rs @@ -432,7 +432,7 @@ pub fn write_excerpts<'a>( } } -pub fn push_events(output: &mut String, events: &[predict_edits_v3::Event]) { +pub fn push_events(output: &mut String, events: &[Arc]) { if events.is_empty() { return; }; @@ -910,7 +910,7 @@ fn declaration_size(declaration: &ReferencedDeclaration, style: DeclarationStyle } struct PromptData { - events: Vec, + events: Vec>, cursor_point: Point, cursor_path: Arc, // TODO: make a common struct with cursor_point included_files: Vec, diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 9877b70161b3fdd16a0f667d85085520c9fe4f86..9062aca3c56f527385aecb000ebcd625f588eb9a 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -35,7 +35,6 @@ ui.workspace = true workspace.workspace = true zed_actions.workspace = true zeta.workspace = true -zeta2.workspace = true [dev-dependencies] copilot = { workspace = true, features = ["test-support"] } diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 051ca6e85ccb985ba6b325cda725f83029aa3193..254caa698aa05214f73a749e540233952db4978b 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -21,7 +21,9 @@ use language::{ use 
project::DisableAiSettings; use regex::Regex; use settings::{ - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, Settings, SettingsStore, update_settings_file, + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, Settings, SettingsStore, + update_settings_file, }; use std::{ sync::{Arc, LazyLock}, @@ -38,7 +40,7 @@ use workspace::{ }; use zed_actions::OpenBrowser; use zeta::RateCompletions; -use zeta2::SweepFeatureFlag; +use zeta::{SweepFeatureFlag, Zeta2FeatureFlag}; actions!( edit_prediction, @@ -300,10 +302,7 @@ impl Render for EditPredictionButton { .with_handle(self.popover_menu_handle.clone()), ) } - provider @ (EditPredictionProvider::Experimental( - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, - ) - | EditPredictionProvider::Zed) => { + provider @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { let enabled = self.editor_enabled.unwrap_or(true); let is_sweep = matches!( @@ -430,9 +429,7 @@ impl Render for EditPredictionButton { div().child(popover_menu.into_any_element()) } - EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { - div().hidden() - } + EditPredictionProvider::None => div().hidden(), } } } @@ -497,6 +494,12 @@ impl EditPredictionButton { )); } + if cx.has_flag::() { + providers.push(EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + )); + } + providers } @@ -554,7 +557,7 @@ impl EditPredictionButton { EditPredictionProvider::Experimental( EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, ) => { - let has_api_token = zeta2::Zeta::try_global(cx) + let has_api_token = zeta::Zeta::try_global(cx) .map_or(false, |zeta| zeta.read(cx).has_sweep_api_token()); let entry = ContextMenuEntry::new("Sweep") @@ -571,6 +574,11 @@ impl EditPredictionButton { menu.item(entry) } + EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + ) => menu.entry("Zeta2", None, move |_, cx| { + set_completion_provider(fs.clone(), cx, provider); + }), EditPredictionProvider::None | EditPredictionProvider::Experimental(_) => { continue; } diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index fd5e6fcaf6435a2836ab1ad828933a9d0763f5b9..c599a4751b60f150e31b7ddf6e32a6234a510c74 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -13,6 +13,7 @@ use crate::{ }, task_context::RunnableRange, text_diff::text_diff, + unified_diff, }; pub use crate::{ Grammar, Language, LanguageRegistry, @@ -745,6 +746,33 @@ pub struct EditPreview { } impl EditPreview { + pub fn as_unified_diff(&self, edits: &[(Range, impl AsRef)]) -> Option { + let (first, _) = edits.first()?; + let (last, _) = edits.last()?; + + let start = first.start.to_point(&self.old_snapshot); + let old_end = last.end.to_point(&self.old_snapshot); + let new_end = last + .end + .bias_right(&self.old_snapshot) + .to_point(&self.applied_edits_snapshot); + + let start = Point::new(start.row.saturating_sub(3), 0); + let old_end = Point::new(old_end.row + 3, 0).min(self.old_snapshot.max_point()); + let new_end = Point::new(new_end.row + 3, 0).min(self.applied_edits_snapshot.max_point()); + + Some(unified_diff( + &self + .old_snapshot + .text_for_range(start..old_end) + .collect::(), + &self + .applied_edits_snapshot + .text_for_range(start..new_end) + .collect::(), + )) + } + pub fn highlight_edits( &self, current_snapshot: &BufferSnapshot, @@ -758,6 +786,8 @@ impl EditPreview { let mut highlighted_text = 
HighlightedTextBuilder::default(); + let visible_range_in_preview_snapshot = + visible_range_in_preview_snapshot.to_offset(&self.applied_edits_snapshot); let mut offset_in_preview_snapshot = visible_range_in_preview_snapshot.start; let insertion_highlight_style = HighlightStyle { @@ -825,7 +855,19 @@ impl EditPreview { highlighted_text.build() } - fn compute_visible_range(&self, edits: &[(Range, T)]) -> Option> { + pub fn build_result_buffer(&self, cx: &mut App) -> Entity { + cx.new(|cx| { + let mut buffer = Buffer::local_normalized( + self.applied_edits_snapshot.as_rope().clone(), + self.applied_edits_snapshot.line_ending(), + cx, + ); + buffer.set_language(self.syntax_snapshot.root_language(), cx); + buffer + }) + } + + pub fn compute_visible_range(&self, edits: &[(Range, T)]) -> Option> { let (first, _) = edits.first()?; let (last, _) = edits.last()?; @@ -842,7 +884,7 @@ impl EditPreview { let range = Point::new(start.row, 0) ..Point::new(end.row, self.applied_edits_snapshot.line_len(end.row)); - Some(range.to_offset(&self.applied_edits_snapshot)) + Some(range) } } diff --git a/crates/language/src/syntax_map.rs b/crates/language/src/syntax_map.rs index a9ac2faad9da9d5e07261ec826dda138921717a6..33a652b6fdeb32a2adbc1743cf8a70fe453518f5 100644 --- a/crates/language/src/syntax_map.rs +++ b/crates/language/src/syntax_map.rs @@ -279,6 +279,13 @@ impl SyntaxSnapshot { self.layers.is_empty() } + pub fn root_language(&self) -> Option> { + match &self.layers.first()?.content { + SyntaxLayerContent::Parsed { language, .. } => Some(language.clone()), + SyntaxLayerContent::Pending { .. } => None, + } + } + pub fn update_count(&self) -> usize { self.update_count } diff --git a/crates/settings/src/settings_content/language.rs b/crates/settings/src/settings_content/language.rs index 78ecc270166483b13af7e169b2390ad9f76d595d..166444c44b28133cfe20933c5b12acc42edb2399 100644 --- a/crates/settings/src/settings_content/language.rs +++ b/crates/settings/src/settings_content/language.rs @@ -78,6 +78,7 @@ pub enum EditPredictionProvider { } pub const EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME: &str = "sweep"; +pub const EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME: &str = "zeta2"; impl<'de> Deserialize<'de> for EditPredictionProvider { fn deserialize(deserializer: D) -> Result @@ -101,17 +102,25 @@ impl<'de> Deserialize<'de> for EditPredictionProvider { Content::Supermaven => EditPredictionProvider::Supermaven, Content::Zed => EditPredictionProvider::Zed, Content::Codestral => EditPredictionProvider::Codestral, + Content::Experimental(name) + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME => + { + EditPredictionProvider::Experimental( + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + ) + } + Content::Experimental(name) + if name == EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME => + { + EditPredictionProvider::Experimental( + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, + ) + } Content::Experimental(name) => { - if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME { - EditPredictionProvider::Experimental( - EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, - ) - } else { - return Err(D::Error::custom(format!( - "Unknown experimental edit prediction provider: {}", - name - ))); - } + return Err(D::Error::custom(format!( + "Unknown experimental edit prediction provider: {}", + name + ))); } }) } diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 68ba338102202f1803ab97746ec8372adb45a66a..470f1ea28a3663838080b7e7bf98f58215a0a8fc 100644 --- 
a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -161,7 +161,6 @@ workspace.workspace = true zed_actions.workspace = true zed_env_vars.workspace = true zeta.workspace = true -zeta2.workspace = true zlog.workspace = true zlog_settings.workspace = true chrono.workspace = true diff --git a/crates/zed/src/zed/edit_prediction_registry.rs b/crates/zed/src/zed/edit_prediction_registry.rs index 577e81c6a9b36bc29a4b1d1f0cda63170c75d5a2..f413fd94cb1a48adb213120364ed2f59c4cf58e0 100644 --- a/crates/zed/src/zed/edit_prediction_registry.rs +++ b/crates/zed/src/zed/edit_prediction_registry.rs @@ -7,13 +7,14 @@ use feature_flags::FeatureFlagAppExt; use gpui::{AnyWindowHandle, App, AppContext as _, Context, Entity, WeakEntity}; use language::language_settings::{EditPredictionProvider, all_language_settings}; use language_models::MistralLanguageModelProvider; -use settings::{EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore}; +use settings::{ + EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME, + EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME, SettingsStore, +}; use std::{cell::RefCell, rc::Rc, sync::Arc}; use supermaven::{Supermaven, SupermavenCompletionProvider}; use ui::Window; -use zeta::ZetaEditPredictionProvider; -use zeta2::SweepFeatureFlag; -use zeta2::Zeta2FeatureFlag; +use zeta::{SweepFeatureFlag, Zeta2FeatureFlag, ZetaEditPredictionProvider}; pub fn init(client: Arc, user_store: Entity, cx: &mut App) { let editors: Rc, AnyWindowHandle>>> = Rc::default(); @@ -100,9 +101,7 @@ pub fn init(client: Arc, user_store: Entity, cx: &mut App) { } fn clear_zeta_edit_history(_: &zeta::ClearHistory, cx: &mut App) { - if let Some(zeta) = zeta::Zeta::global(cx) { - zeta.update(cx, |zeta, _| zeta.clear_history()); - } else if let Some(zeta) = zeta2::Zeta::try_global(cx) { + if let Some(zeta) = zeta::Zeta::try_global(cx) { zeta.update(cx, |zeta, _| zeta.clear_history()); } } @@ -204,86 +203,41 @@ fn assign_edit_prediction_provider( editor.set_edit_prediction_provider(Some(provider), window, cx); } value @ (EditPredictionProvider::Experimental(_) | EditPredictionProvider::Zed) => { - let zeta2 = zeta2::Zeta::global(client, &user_store, cx); - - if let Some(project) = editor.project() { - let mut worktree = None; - if let Some(buffer) = &singleton_buffer - && let Some(file) = buffer.read(cx).file() - { - let id = file.worktree_id(cx); - worktree = project.read(cx).worktree_for_id(id, cx); - } - - if let EditPredictionProvider::Experimental(name) = value - && name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME - && cx.has_flag::() - { - let provider = cx.new(|cx| { - zeta2::ZetaEditPredictionProvider::new( - project.clone(), - &client, - &user_store, - cx, - ) - }); - - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() - { - zeta2.update(cx, |zeta, cx| { - zeta.set_edit_prediction_model(zeta2::ZetaEditPredictionModel::Sweep); - zeta.register_buffer(buffer, project, cx); - }); - } - - editor.set_edit_prediction_provider(Some(provider), window, cx); - } else if user_store.read(cx).current_user().is_some() { - if cx.has_flag::() { - let zeta = zeta2::Zeta::global(client, &user_store, cx); - let provider = cx.new(|cx| { - zeta2::ZetaEditPredictionProvider::new( - project.clone(), - &client, - &user_store, - cx, - ) - }); - - // TODO [zeta2] handle multibuffers - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() + let zeta = zeta::Zeta::global(client, &user_store, cx); + + if let Some(project) = editor.project() + && let Some(buffer) = 
&singleton_buffer + && buffer.read(cx).file().is_some() + { + let has_model = zeta.update(cx, |zeta, cx| { + let model = if let EditPredictionProvider::Experimental(name) = value { + if name == EXPERIMENTAL_SWEEP_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() + { + zeta::ZetaEditPredictionModel::Sweep + } else if name == EXPERIMENTAL_ZETA2_EDIT_PREDICTION_PROVIDER_NAME + && cx.has_flag::() { - zeta.update(cx, |zeta, cx| { - zeta.set_edit_prediction_model( - zeta2::ZetaEditPredictionModel::ZedCloud, - ); - zeta.register_buffer(buffer, project, cx); - }); + zeta::ZetaEditPredictionModel::Zeta2 + } else { + return false; } - - editor.set_edit_prediction_provider(Some(provider), window, cx); + } else if user_store.read(cx).current_user().is_some() { + zeta::ZetaEditPredictionModel::Zeta1 } else { - let zeta = zeta::Zeta::register(worktree, client.clone(), user_store, cx); + return false; + }; - if let Some(buffer) = &singleton_buffer - && buffer.read(cx).file().is_some() - { - zeta.update(cx, |zeta, cx| { - zeta.register_buffer(buffer, project, cx); - }); - } + zeta.set_edit_prediction_model(model); + zeta.register_buffer(buffer, project, cx); + true + }); - let provider = cx.new(|cx| { - zeta::ZetaEditPredictionProvider::new( - zeta, - project.clone(), - singleton_buffer, - cx, - ) - }); - editor.set_edit_prediction_provider(Some(provider), window, cx); - } + if has_model { + let provider = cx.new(|cx| { + ZetaEditPredictionProvider::new(project.clone(), &client, &user_store, cx) + }); + editor.set_edit_prediction_provider(Some(provider), window, cx); } } } diff --git a/crates/zeta/Cargo.toml b/crates/zeta/Cargo.toml index df569c7bc39655d99ee01b464a05e0ef3873f8d6..61eeab16229d82dc01d800f37bf729aa11469afd 100644 --- a/crates/zeta/Cargo.toml +++ b/crates/zeta/Cargo.toml @@ -4,81 +4,80 @@ version = "0.1.0" edition.workspace = true publish.workspace = true license = "GPL-3.0-or-later" -exclude = ["fixtures"] [lints] workspace = true [lib] path = "src/zeta.rs" -doctest = false [features] -test-support = [] +eval-support = [] [dependencies] ai_onboarding.workspace = true anyhow.workspace = true arrayvec.workspace = true +brotli.workspace = true +buffer_diff.workspace = true client.workspace = true cloud_llm_client.workspace = true +cloud_zeta2_prompt.workspace = true +copilot.workspace = true collections.workspace = true command_palette_hooks.workspace = true -copilot.workspace = true db.workspace = true edit_prediction.workspace = true +edit_prediction_context.workspace = true editor.workspace = true feature_flags.workspace = true fs.workspace = true futures.workspace = true gpui.workspace = true -http_client.workspace = true indoc.workspace = true itertools.workspace = true language.workspace = true language_model.workspace = true log.workspace = true +lsp.workspace = true +markdown.workspace = true menu.workspace = true +open_ai.workspace = true +pretty_assertions.workspace = true postage.workspace = true project.workspace = true rand.workspace = true -regex.workspace = true release_channel.workspace = true +regex.workspace = true semver.workspace = true serde.workspace = true serde_json.workspace = true settings.workspace = true +smol.workspace = true +strsim.workspace = true strum.workspace = true telemetry.workspace = true telemetry_events.workspace = true theme.workspace = true thiserror.workspace = true -ui.workspace = true util.workspace = true +ui.workspace = true uuid.workspace = true workspace.workspace = true worktree.workspace = true zed_actions.workspace = true [dev-dependencies] 
-call = { workspace = true, features = ["test-support"] } -client = { workspace = true, features = ["test-support"] } clock = { workspace = true, features = ["test-support"] } cloud_api_types.workspace = true -collections = { workspace = true, features = ["test-support"] } +cloud_llm_client = { workspace = true, features = ["test-support"] } ctor.workspace = true -editor = { workspace = true, features = ["test-support"] } gpui = { workspace = true, features = ["test-support"] } -http_client = { workspace = true, features = ["test-support"] } indoc.workspace = true language = { workspace = true, features = ["test-support"] } +language_model = { workspace = true, features = ["test-support"] } +lsp.workspace = true parking_lot.workspace = true -reqwest_client = { workspace = true, features = ["test-support"] } -rpc = { workspace = true, features = ["test-support"] } +project = { workspace = true, features = ["test-support"] } settings = { workspace = true, features = ["test-support"] } -theme = { workspace = true, features = ["test-support"] } -tree-sitter-go.workspace = true -tree-sitter-rust.workspace = true -workspace = { workspace = true, features = ["test-support"] } -worktree = { workspace = true, features = ["test-support"] } zlog.workspace = true diff --git a/crates/zeta2/src/assemble_excerpts.rs b/crates/zeta/src/assemble_excerpts.rs similarity index 100% rename from crates/zeta2/src/assemble_excerpts.rs rename to crates/zeta/src/assemble_excerpts.rs diff --git a/crates/zeta/src/completion_diff_element.rs b/crates/zeta/src/completion_diff_element.rs deleted file mode 100644 index 73c3cb20cd7de5da92fbf6e5a32a8ca8d42a5933..0000000000000000000000000000000000000000 --- a/crates/zeta/src/completion_diff_element.rs +++ /dev/null @@ -1,173 +0,0 @@ -use std::cmp; - -use crate::EditPrediction; -use gpui::{ - AnyElement, App, BorderStyle, Bounds, Corners, Edges, HighlightStyle, Hsla, StyledText, - TextLayout, TextStyle, point, prelude::*, quad, size, -}; -use language::OffsetRangeExt; -use settings::Settings; -use theme::ThemeSettings; -use ui::prelude::*; - -pub struct CompletionDiffElement { - element: AnyElement, - text_layout: TextLayout, - cursor_offset: usize, -} - -impl CompletionDiffElement { - pub fn new(completion: &EditPrediction, cx: &App) -> Self { - let mut diff = completion - .snapshot - .text_for_range(completion.excerpt_range.clone()) - .collect::(); - - let mut cursor_offset_in_diff = None; - let mut delta = 0; - let mut diff_highlights = Vec::new(); - for (old_range, new_text) in completion.edits.iter() { - let old_range = old_range.to_offset(&completion.snapshot); - - if cursor_offset_in_diff.is_none() && completion.cursor_offset <= old_range.end { - cursor_offset_in_diff = - Some(completion.cursor_offset - completion.excerpt_range.start + delta); - } - - let old_start_in_diff = old_range.start - completion.excerpt_range.start + delta; - let old_end_in_diff = old_range.end - completion.excerpt_range.start + delta; - if old_start_in_diff < old_end_in_diff { - diff_highlights.push(( - old_start_in_diff..old_end_in_diff, - HighlightStyle { - background_color: Some(cx.theme().status().deleted_background), - strikethrough: Some(gpui::StrikethroughStyle { - thickness: px(1.), - color: Some(cx.theme().colors().text_muted), - }), - ..Default::default() - }, - )); - } - - if !new_text.is_empty() { - diff.insert_str(old_end_in_diff, new_text); - diff_highlights.push(( - old_end_in_diff..old_end_in_diff + new_text.len(), - HighlightStyle { - background_color: 
Some(cx.theme().status().created_background), - ..Default::default() - }, - )); - delta += new_text.len(); - } - } - - let cursor_offset_in_diff = cursor_offset_in_diff - .unwrap_or_else(|| completion.cursor_offset - completion.excerpt_range.start + delta); - - let settings = ThemeSettings::get_global(cx).clone(); - let text_style = TextStyle { - color: cx.theme().colors().editor_foreground, - font_size: settings.buffer_font_size(cx).into(), - font_family: settings.buffer_font.family, - font_features: settings.buffer_font.features, - font_fallbacks: settings.buffer_font.fallbacks, - line_height: relative(settings.buffer_line_height.value()), - font_weight: settings.buffer_font.weight, - font_style: settings.buffer_font.style, - ..Default::default() - }; - let element = StyledText::new(diff).with_default_highlights(&text_style, diff_highlights); - let text_layout = element.layout().clone(); - - CompletionDiffElement { - element: element.into_any_element(), - text_layout, - cursor_offset: cursor_offset_in_diff, - } - } -} - -impl IntoElement for CompletionDiffElement { - type Element = Self; - - fn into_element(self) -> Self { - self - } -} - -impl Element for CompletionDiffElement { - type RequestLayoutState = (); - type PrepaintState = (); - - fn id(&self) -> Option { - None - } - - fn source_location(&self) -> Option<&'static core::panic::Location<'static>> { - None - } - - fn request_layout( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - window: &mut Window, - cx: &mut App, - ) -> (gpui::LayoutId, Self::RequestLayoutState) { - (self.element.request_layout(window, cx), ()) - } - - fn prepaint( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - _bounds: gpui::Bounds, - _request_layout: &mut Self::RequestLayoutState, - window: &mut Window, - cx: &mut App, - ) -> Self::PrepaintState { - self.element.prepaint(window, cx); - } - - fn paint( - &mut self, - _id: Option<&gpui::GlobalElementId>, - _inspector_id: Option<&gpui::InspectorElementId>, - _bounds: gpui::Bounds, - _request_layout: &mut Self::RequestLayoutState, - _prepaint: &mut Self::PrepaintState, - window: &mut Window, - cx: &mut App, - ) { - if let Some(position) = self.text_layout.position_for_index(self.cursor_offset) { - let bounds = self.text_layout.bounds(); - let line_height = self.text_layout.line_height(); - let line_width = self - .text_layout - .line_layout_for_index(self.cursor_offset) - .map_or(bounds.size.width, |layout| layout.width()); - window.paint_quad(quad( - Bounds::new( - point(bounds.origin.x, position.y), - size(cmp::max(bounds.size.width, line_width), line_height), - ), - Corners::default(), - cx.theme().colors().editor_active_line_background, - Edges::default(), - Hsla::transparent_black(), - BorderStyle::default(), - )); - self.element.paint(window, cx); - window.paint_quad(quad( - Bounds::new(position, size(px(2.), line_height)), - Corners::default(), - cx.theme().players().local().cursor, - Edges::default(), - Hsla::transparent_black(), - BorderStyle::default(), - )); - } - } -} diff --git a/crates/zeta/src/init.rs b/crates/zeta/src/init.rs deleted file mode 100644 index 0167d878fa34976d7175a64269d9dfe29d18d8fe..0000000000000000000000000000000000000000 --- a/crates/zeta/src/init.rs +++ /dev/null @@ -1,110 +0,0 @@ -use std::any::{Any, TypeId}; - -use command_palette_hooks::CommandPaletteFilter; -use feature_flags::{FeatureFlagAppExt as _, PredictEditsRateCompletionsFeatureFlag}; -use 
gpui::actions; -use language::language_settings::EditPredictionProvider; -use project::DisableAiSettings; -use settings::{Settings, SettingsStore, update_settings_file}; -use ui::App; -use workspace::Workspace; - -use crate::{RateCompletionModal, onboarding_modal::ZedPredictModal}; - -actions!( - edit_prediction, - [ - /// Resets the edit prediction onboarding state. - ResetOnboarding, - /// Opens the rate completions modal. - RateCompletions - ] -); - -pub fn init(cx: &mut App) { - feature_gate_predict_edits_actions(cx); - - cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(|workspace, _: &RateCompletions, window, cx| { - if cx.has_flag::() { - RateCompletionModal::toggle(workspace, window, cx); - } - }); - - workspace.register_action( - move |workspace, _: &zed_actions::OpenZedPredictOnboarding, window, cx| { - ZedPredictModal::toggle( - workspace, - workspace.user_store().clone(), - workspace.client().clone(), - window, - cx, - ) - }, - ); - - workspace.register_action(|workspace, _: &ResetOnboarding, _window, cx| { - update_settings_file(workspace.app_state().fs.clone(), cx, move |settings, _| { - settings - .project - .all_languages - .features - .get_or_insert_default() - .edit_prediction_provider = Some(EditPredictionProvider::None) - }); - }); - }) - .detach(); -} - -fn feature_gate_predict_edits_actions(cx: &mut App) { - let rate_completion_action_types = [TypeId::of::()]; - let reset_onboarding_action_types = [TypeId::of::()]; - let zeta_all_action_types = [ - TypeId::of::(), - TypeId::of::(), - zed_actions::OpenZedPredictOnboarding.type_id(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - TypeId::of::(), - ]; - - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.hide_action_types(&rate_completion_action_types); - filter.hide_action_types(&reset_onboarding_action_types); - filter.hide_action_types(&[zed_actions::OpenZedPredictOnboarding.type_id()]); - }); - - cx.observe_global::(move |cx| { - let is_ai_disabled = DisableAiSettings::get_global(cx).disable_ai; - let has_feature_flag = cx.has_flag::(); - - CommandPaletteFilter::update_global(cx, |filter, _cx| { - if is_ai_disabled { - filter.hide_action_types(&zeta_all_action_types); - } else if has_feature_flag { - filter.show_action_types(&rate_completion_action_types); - } else { - filter.hide_action_types(&rate_completion_action_types); - } - }); - }) - .detach(); - - cx.observe_flag::(move |is_enabled, cx| { - if !DisableAiSettings::get_global(cx).disable_ai { - if is_enabled { - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.show_action_types(&rate_completion_action_types); - }); - } else { - CommandPaletteFilter::update_global(cx, |filter, _cx| { - filter.hide_action_types(&rate_completion_action_types); - }); - } - } - }) - .detach(); -} diff --git a/crates/zeta/src/onboarding_modal.rs b/crates/zeta/src/onboarding_modal.rs index 94480add3053bece5017cf478e9f74065491639b..ed7adfc75476afb07f9c56b9c9c03abbbcef1134 100644 --- a/crates/zeta/src/onboarding_modal.rs +++ b/crates/zeta/src/onboarding_modal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{ZedPredictUpsell, onboarding_event}; +use crate::ZedPredictUpsell; use ai_onboarding::EditPredictionOnboarding; use client::{Client, UserStore}; use db::kvp::Dismissable; @@ -14,6 +14,16 @@ use settings::update_settings_file; use ui::{Vector, VectorName, prelude::*}; use workspace::{ModalView, Workspace}; +#[macro_export] +macro_rules! 
onboarding_event { + ($name:expr) => { + telemetry::event!($name, source = "Edit Prediction Onboarding"); + }; + ($name:expr, $($key:ident $(= $value:expr)?),+ $(,)?) => { + telemetry::event!($name, source = "Edit Prediction Onboarding", $($key $(= $value)?),+); + }; +} + /// Introduces user to Zed's Edit Prediction feature pub struct ZedPredictModal { onboarding: Entity, diff --git a/crates/zeta/src/onboarding_telemetry.rs b/crates/zeta/src/onboarding_telemetry.rs deleted file mode 100644 index 3c7d5e1442947c3e8cea446ebf37597a3cce1f80..0000000000000000000000000000000000000000 --- a/crates/zeta/src/onboarding_telemetry.rs +++ /dev/null @@ -1,9 +0,0 @@ -#[macro_export] -macro_rules! onboarding_event { - ($name:expr) => { - telemetry::event!($name, source = "Edit Prediction Onboarding"); - }; - ($name:expr, $($key:ident $(= $value:expr)?),+ $(,)?) => { - telemetry::event!($name, source = "Edit Prediction Onboarding", $($key $(= $value)?),+); - }; -} diff --git a/crates/zeta2/src/prediction.rs b/crates/zeta/src/prediction.rs similarity index 86% rename from crates/zeta2/src/prediction.rs rename to crates/zeta/src/prediction.rs index e9f726ce00c36b5235919c0e185876996f4fda03..0125e739f335fc133cbff84dcd8b4c4bac3e6e7b 100644 --- a/crates/zeta2/src/prediction.rs +++ b/crates/zeta/src/prediction.rs @@ -1,7 +1,13 @@ -use std::{ops::Range, sync::Arc}; +use std::{ + ops::Range, + path::Path, + sync::Arc, + time::{Duration, Instant}, +}; use gpui::{AsyncApp, Entity, SharedString}; use language::{Anchor, Buffer, BufferSnapshot, EditPreview, OffsetRangeExt, TextBufferSnapshot}; +use serde::Serialize; #[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] pub struct EditPredictionId(pub SharedString); @@ -26,6 +32,17 @@ pub struct EditPrediction { pub edit_preview: EditPreview, // We keep a reference to the buffer so that we do not need to reload it from disk when applying the prediction. 
pub buffer: Entity, + pub buffer_snapshotted_at: Instant, + pub response_received_at: Instant, + pub inputs: EditPredictionInputs, +} + +#[derive(Debug, Clone, Serialize)] +pub struct EditPredictionInputs { + pub events: Vec>, + pub included_files: Vec, + pub cursor_point: cloud_llm_client::predict_edits_v3::Point, + pub cursor_path: Arc, } impl EditPrediction { @@ -33,14 +50,17 @@ impl EditPrediction { id: EditPredictionId, edited_buffer: &Entity, edited_buffer_snapshot: &BufferSnapshot, - edits: Vec<(Range, Arc)>, + edits: Arc<[(Range, Arc)]>, + buffer_snapshotted_at: Instant, + response_received_at: Instant, + inputs: EditPredictionInputs, cx: &mut AsyncApp, ) -> Option { let (edits, snapshot, edit_preview_task) = edited_buffer .read_with(cx, |buffer, cx| { let new_snapshot = buffer.snapshot(); let edits: Arc<[_]> = - interpolate_edits(&edited_buffer_snapshot, &new_snapshot, edits.into())?.into(); + interpolate_edits(&edited_buffer_snapshot, &new_snapshot, edits)?.into(); Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) }) @@ -53,7 +73,10 @@ impl EditPrediction { edits, snapshot, edit_preview, + inputs, buffer: edited_buffer.clone(), + buffer_snapshotted_at, + response_received_at, }) } @@ -67,6 +90,10 @@ impl EditPrediction { pub fn targets_buffer(&self, buffer: &Buffer) -> bool { self.snapshot.remote_id() == buffer.remote_id() } + + pub fn latency(&self) -> Duration { + self.response_received_at - self.buffer_snapshotted_at + } } impl std::fmt::Debug for EditPrediction { @@ -147,6 +174,17 @@ mod tests { snapshot: cx.read(|cx| buffer.read(cx).snapshot()), buffer: buffer.clone(), edit_preview, + inputs: EditPredictionInputs { + events: vec![], + included_files: vec![], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + line: cloud_llm_client::predict_edits_v3::Line(0), + column: 0, + }, + cursor_path: Path::new("path.txt").into(), + }, + buffer_snapshotted_at: Instant::now(), + response_received_at: Instant::now(), }; cx.update(|cx| { diff --git a/crates/zeta2/src/provider.rs b/crates/zeta/src/provider.rs similarity index 93% rename from crates/zeta2/src/provider.rs rename to crates/zeta/src/provider.rs index 768af6253fe1a2aa60ef9cb0a10fcee0035dc3e2..a2b3eed1b5efe953ebdf5a2448ca06e7866bea86 100644 --- a/crates/zeta2/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -131,8 +131,14 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { } fn discard(&mut self, cx: &mut Context) { - self.zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&self.project); + self.zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&self.project, cx); + }); + } + + fn did_show(&mut self, cx: &mut Context) { + self.zeta.update(cx, |zeta, cx| { + zeta.did_show_current_prediction(&self.project, cx); }); } @@ -162,8 +168,8 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { let snapshot = buffer.snapshot(); let Some(edits) = prediction.interpolate(&snapshot) else { - self.zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&self.project); + self.zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&self.project, cx); }); return None; }; diff --git a/crates/zeta/src/rate_completion_modal.rs b/crates/zeta/src/rate_prediction_modal.rs similarity index 60% rename from crates/zeta/src/rate_completion_modal.rs rename to crates/zeta/src/rate_prediction_modal.rs index a081538f5528946ea5b959981b7bd70d44b8b11b..0cceb86608ed609122c81d406c71280894789e88 100644 --- a/crates/zeta/src/rate_completion_modal.rs +++ 
b/crates/zeta/src/rate_prediction_modal.rs @@ -1,8 +1,18 @@ -use crate::{CompletionDiffElement, EditPrediction, EditPredictionRating, Zeta}; -use editor::Editor; -use gpui::{App, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable, actions, prelude::*}; -use language::language_settings; +use crate::{EditPrediction, EditPredictionRating, Zeta}; +use buffer_diff::{BufferDiff, BufferDiffSnapshot}; +use cloud_zeta2_prompt::write_codeblock; +use editor::{Editor, ExcerptRange, MultiBuffer}; +use gpui::{ + App, BorderStyle, DismissEvent, EdgesRefinement, Entity, EventEmitter, FocusHandle, Focusable, + Length, StyleRefinement, TextStyleRefinement, Window, actions, prelude::*, +}; +use language::{LanguageRegistry, Point, language_settings}; +use markdown::{Markdown, MarkdownStyle}; +use settings::Settings as _; +use std::fmt::Write; +use std::sync::Arc; use std::time::Duration; +use theme::ThemeSettings; use ui::{KeyBinding, List, ListItem, ListItemSpacing, Tooltip, prelude::*}; use workspace::{ModalView, Workspace}; @@ -10,41 +20,44 @@ actions!( zeta, [ /// Rates the active completion with a thumbs up. - ThumbsUpActiveCompletion, + ThumbsUpActivePrediction, /// Rates the active completion with a thumbs down. - ThumbsDownActiveCompletion, + ThumbsDownActivePrediction, /// Navigates to the next edit in the completion history. NextEdit, /// Navigates to the previous edit in the completion history. PreviousEdit, /// Focuses on the completions list. - FocusCompletions, + FocusPredictions, /// Previews the selected completion. - PreviewCompletion, + PreviewPrediction, ] ); -pub struct RateCompletionModal { +pub struct RatePredictionsModal { zeta: Entity, - active_completion: Option, + language_registry: Arc, + active_prediction: Option, selected_index: usize, + diff_editor: Entity, focus_handle: FocusHandle, _subscription: gpui::Subscription, - current_view: RateCompletionView, + current_view: RatePredictionView, } -struct ActiveCompletion { - completion: EditPrediction, +struct ActivePrediction { + prediction: EditPrediction, feedback_editor: Entity, + formatted_inputs: Entity, } #[derive(Debug, Clone, Copy, PartialEq, PartialOrd)] -enum RateCompletionView { +enum RatePredictionView { SuggestedEdits, RawInput, } -impl RateCompletionView { +impl RatePredictionView { pub fn name(&self) -> &'static str { match self { Self::SuggestedEdits => "Suggested Edits", @@ -53,25 +66,42 @@ impl RateCompletionView { } } -impl RateCompletionModal { +impl RatePredictionsModal { pub fn toggle(workspace: &mut Workspace, window: &mut Window, cx: &mut Context) { - if let Some(zeta) = Zeta::global(cx) { - workspace.toggle_modal(window, cx, |_window, cx| RateCompletionModal::new(zeta, cx)); + if let Some(zeta) = Zeta::try_global(cx) { + let language_registry = workspace.app_state().languages.clone(); + workspace.toggle_modal(window, cx, |window, cx| { + RatePredictionsModal::new(zeta, language_registry, window, cx) + }); - telemetry::event!("Rate Completion Modal Open", source = "Edit Prediction"); + telemetry::event!("Rate Prediction Modal Open", source = "Edit Prediction"); } } - pub fn new(zeta: Entity, cx: &mut Context) -> Self { + pub fn new( + zeta: Entity, + language_registry: Arc, + window: &mut Window, + cx: &mut Context, + ) -> Self { let subscription = cx.observe(&zeta, |_, _, cx| cx.notify()); Self { zeta, + language_registry, selected_index: 0, focus_handle: cx.focus_handle(), - active_completion: None, + active_prediction: None, _subscription: subscription, - current_view: 
RateCompletionView::SuggestedEdits, + diff_editor: cx.new(|cx| { + let multibuffer = cx.new(|_| MultiBuffer::new(language::Capability::ReadOnly)); + let mut editor = Editor::for_multibuffer(multibuffer, None, window, cx); + editor.disable_inline_diagnostics(); + editor.set_expand_all_diff_hunks(cx); + editor.set_show_git_diff_gutter(false, cx); + editor + }), + current_view: RatePredictionView::SuggestedEdits, } } @@ -83,7 +113,7 @@ impl RateCompletionModal { self.selected_index += 1; self.selected_index = usize::min( self.selected_index, - self.zeta.read(cx).shown_completions().count(), + self.zeta.read(cx).shown_predictions().count(), ); cx.notify(); } @@ -102,7 +132,7 @@ impl RateCompletionModal { let next_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .skip(self.selected_index) .enumerate() .skip(1) // Skip straight to the next item @@ -122,7 +152,7 @@ impl RateCompletionModal { let prev_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .rev() .skip((completions_len - 1) - self.selected_index) .enumerate() @@ -149,14 +179,14 @@ impl RateCompletionModal { pub fn thumbs_up_active( &mut self, - _: &ThumbsUpActiveCompletion, + _: &ThumbsUpActivePrediction, window: &mut Window, cx: &mut Context, ) { self.zeta.update(cx, |zeta, cx| { - if let Some(active) = &self.active_completion { - zeta.rate_completion( - &active.completion, + if let Some(active) = &self.active_prediction { + zeta.rate_prediction( + &active.prediction, EditPredictionRating::Positive, active.feedback_editor.read(cx).text(cx), cx, @@ -165,9 +195,9 @@ impl RateCompletionModal { }); let current_completion = self - .active_completion + .active_prediction .as_ref() - .map(|completion| completion.completion.clone()); + .map(|completion| completion.prediction.clone()); self.select_completion(current_completion, false, window, cx); self.select_next_edit(&Default::default(), window, cx); self.confirm(&Default::default(), window, cx); @@ -177,18 +207,18 @@ impl RateCompletionModal { pub fn thumbs_down_active( &mut self, - _: &ThumbsDownActiveCompletion, + _: &ThumbsDownActivePrediction, window: &mut Window, cx: &mut Context, ) { - if let Some(active) = &self.active_completion { + if let Some(active) = &self.active_prediction { if active.feedback_editor.read(cx).text(cx).is_empty() { return; } self.zeta.update(cx, |zeta, cx| { - zeta.rate_completion( - &active.completion, + zeta.rate_prediction( + &active.prediction, EditPredictionRating::Negative, active.feedback_editor.read(cx).text(cx), cx, @@ -197,9 +227,9 @@ impl RateCompletionModal { } let current_completion = self - .active_completion + .active_prediction .as_ref() - .map(|completion| completion.completion.clone()); + .map(|completion| completion.prediction.clone()); self.select_completion(current_completion, false, window, cx); self.select_next_edit(&Default::default(), window, cx); self.confirm(&Default::default(), window, cx); @@ -209,7 +239,7 @@ impl RateCompletionModal { fn focus_completions( &mut self, - _: &FocusCompletions, + _: &FocusPredictions, window: &mut Window, cx: &mut Context, ) { @@ -219,14 +249,14 @@ impl RateCompletionModal { fn preview_completion( &mut self, - _: &PreviewCompletion, + _: &PreviewPrediction, window: &mut Window, cx: &mut Context, ) { let completion = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .skip(self.selected_index) .take(1) .next() @@ -239,7 +269,7 @@ impl RateCompletionModal { let completion = self .zeta .read(cx) - .shown_completions() + .shown_predictions() 
.skip(self.selected_index) .take(1) .next() @@ -250,54 +280,145 @@ impl RateCompletionModal { pub fn select_completion( &mut self, - completion: Option, + prediction: Option, focus: bool, window: &mut Window, cx: &mut Context, ) { // Avoid resetting completion rating if it's already selected. - if let Some(completion) = completion.as_ref() { + if let Some(prediction) = prediction { self.selected_index = self .zeta .read(cx) - .shown_completions() + .shown_predictions() .enumerate() - .find(|(_, completion_b)| completion.id == completion_b.id) + .find(|(_, completion_b)| prediction.id == completion_b.id) .map(|(ix, _)| ix) .unwrap_or(self.selected_index); cx.notify(); - if let Some(prev_completion) = self.active_completion.as_ref() - && completion.id == prev_completion.completion.id + if let Some(prev_prediction) = self.active_prediction.as_ref() + && prediction.id == prev_prediction.prediction.id { if focus { - window.focus(&prev_completion.feedback_editor.focus_handle(cx)); + window.focus(&prev_prediction.feedback_editor.focus_handle(cx)); } return; } + + self.diff_editor.update(cx, |editor, cx| { + let new_buffer = prediction.edit_preview.build_result_buffer(cx); + let new_buffer_snapshot = new_buffer.read(cx).snapshot(); + let old_buffer_snapshot = prediction.snapshot.clone(); + let new_buffer_id = new_buffer_snapshot.remote_id(); + + let range = prediction + .edit_preview + .compute_visible_range(&prediction.edits) + .unwrap_or(Point::zero()..Point::zero()); + let start = Point::new(range.start.row.saturating_sub(5), 0); + let end = Point::new(range.end.row + 5, 0).min(new_buffer_snapshot.max_point()); + + let diff = cx.new::(|cx| { + let diff_snapshot = BufferDiffSnapshot::new_with_base_buffer( + new_buffer_snapshot.text.clone(), + Some(old_buffer_snapshot.text().into()), + old_buffer_snapshot.clone(), + cx, + ); + let diff = BufferDiff::new(&new_buffer_snapshot, cx); + cx.spawn(async move |diff, cx| { + let diff_snapshot = diff_snapshot.await; + diff.update(cx, |diff, cx| { + diff.set_snapshot(diff_snapshot, &new_buffer_snapshot.text, cx); + }) + }) + .detach(); + diff + }); + + editor.disable_header_for_buffer(new_buffer_id, cx); + editor.buffer().update(cx, |multibuffer, cx| { + multibuffer.clear(cx); + multibuffer.push_excerpts( + new_buffer, + vec![ExcerptRange { + context: start..end, + primary: start..end, + }], + cx, + ); + multibuffer.add_diff(diff, cx); + }); + }); + + let mut formatted_inputs = String::new(); + + write!(&mut formatted_inputs, "## Events\n\n").unwrap(); + + for event in &prediction.inputs.events { + write!(&mut formatted_inputs, "```diff\n{event}```\n\n").unwrap(); + } + + write!(&mut formatted_inputs, "## Included files\n\n").unwrap(); + + for included_file in &prediction.inputs.included_files { + let cursor_insertions = &[(prediction.inputs.cursor_point, "<|CURSOR|>")]; + + write!( + &mut formatted_inputs, + "### {}\n\n", + included_file.path.display() + ) + .unwrap(); + + write_codeblock( + &included_file.path, + &included_file.excerpts, + if included_file.path == prediction.inputs.cursor_path { + cursor_insertions + } else { + &[] + }, + included_file.max_row, + false, + &mut formatted_inputs, + ); + } + + self.active_prediction = Some(ActivePrediction { + prediction, + feedback_editor: cx.new(|cx| { + let mut editor = Editor::multi_line(window, cx); + editor.disable_scrollbars_and_minimap(window, cx); + editor.set_soft_wrap_mode(language_settings::SoftWrap::EditorWidth, cx); + editor.set_show_line_numbers(false, cx); + 
editor.set_show_git_diff_gutter(false, cx); + editor.set_show_code_actions(false, cx); + editor.set_show_runnables(false, cx); + editor.set_show_breakpoints(false, cx); + editor.set_show_wrap_guides(false, cx); + editor.set_show_indent_guides(false, cx); + editor.set_show_edit_predictions(Some(false), window, cx); + editor.set_placeholder_text("Add your feedback…", window, cx); + if focus { + cx.focus_self(window); + } + editor + }), + formatted_inputs: cx.new(|cx| { + Markdown::new( + formatted_inputs.into(), + Some(self.language_registry.clone()), + None, + cx, + ) + }), + }); + } else { + self.active_prediction = None; } - self.active_completion = completion.map(|completion| ActiveCompletion { - completion, - feedback_editor: cx.new(|cx| { - let mut editor = Editor::multi_line(window, cx); - editor.disable_scrollbars_and_minimap(window, cx); - editor.set_soft_wrap_mode(language_settings::SoftWrap::EditorWidth, cx); - editor.set_show_line_numbers(false, cx); - editor.set_show_git_diff_gutter(false, cx); - editor.set_show_code_actions(false, cx); - editor.set_show_runnables(false, cx); - editor.set_show_breakpoints(false, cx); - editor.set_show_wrap_guides(false, cx); - editor.set_show_indent_guides(false, cx); - editor.set_show_edit_predictions(Some(false), window, cx); - editor.set_placeholder_text("Add your feedback…", window, cx); - if focus { - cx.focus_self(window); - } - editor - }), - }); cx.notify(); } @@ -312,33 +433,31 @@ impl RateCompletionModal { .child( Button::new( ElementId::Name("suggested-edits".into()), - RateCompletionView::SuggestedEdits.name(), + RatePredictionView::SuggestedEdits.name(), ) .label_size(LabelSize::Small) .on_click(cx.listener(move |this, _, _window, cx| { - this.current_view = RateCompletionView::SuggestedEdits; + this.current_view = RatePredictionView::SuggestedEdits; cx.notify(); })) - .toggle_state(self.current_view == RateCompletionView::SuggestedEdits), + .toggle_state(self.current_view == RatePredictionView::SuggestedEdits), ) .child( Button::new( ElementId::Name("raw-input".into()), - RateCompletionView::RawInput.name(), + RatePredictionView::RawInput.name(), ) .label_size(LabelSize::Small) .on_click(cx.listener(move |this, _, _window, cx| { - this.current_view = RateCompletionView::RawInput; + this.current_view = RatePredictionView::RawInput; cx.notify(); })) - .toggle_state(self.current_view == RateCompletionView::RawInput), + .toggle_state(self.current_view == RatePredictionView::RawInput), ) } fn render_suggested_edits(&self, cx: &mut Context) -> Option> { - let active_completion = self.active_completion.as_ref()?; let bg_color = cx.theme().colors().editor_background; - Some( div() .id("diff") @@ -347,14 +466,18 @@ impl RateCompletionModal { .bg(bg_color) .overflow_scroll() .whitespace_nowrap() - .child(CompletionDiffElement::new( - &active_completion.completion, - cx, - )), + .child(self.diff_editor.clone()), ) } - fn render_raw_input(&self, cx: &mut Context) -> Option> { + fn render_raw_input( + &self, + window: &mut Window, + cx: &mut Context, + ) -> Option> { + let theme_settings = ThemeSettings::get_global(cx); + let buffer_font_size = theme_settings.buffer_font_size(cx); + Some( v_flex() .size_full() @@ -368,30 +491,81 @@ impl RateCompletionModal { .size_full() .bg(cx.theme().colors().editor_background) .overflow_scroll() - .child(if let Some(active_completion) = &self.active_completion { - format!( - "{}\n{}", - active_completion.completion.input_events, - active_completion.completion.input_excerpt + .child(if let 
Some(active_prediction) = &self.active_prediction { + markdown::MarkdownElement::new( + active_prediction.formatted_inputs.clone(), + MarkdownStyle { + base_text_style: window.text_style(), + syntax: cx.theme().syntax().clone(), + code_block: StyleRefinement { + text: Some(TextStyleRefinement { + font_family: Some( + theme_settings.buffer_font.family.clone(), + ), + font_size: Some(buffer_font_size.into()), + ..Default::default() + }), + padding: EdgesRefinement { + top: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + left: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + right: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + bottom: Some(DefiniteLength::Absolute( + AbsoluteLength::Pixels(px(8.)), + )), + }, + margin: EdgesRefinement { + top: Some(Length::Definite(px(8.).into())), + left: Some(Length::Definite(px(0.).into())), + right: Some(Length::Definite(px(0.).into())), + bottom: Some(Length::Definite(px(12.).into())), + }, + border_style: Some(BorderStyle::Solid), + border_widths: EdgesRefinement { + top: Some(AbsoluteLength::Pixels(px(1.))), + left: Some(AbsoluteLength::Pixels(px(1.))), + right: Some(AbsoluteLength::Pixels(px(1.))), + bottom: Some(AbsoluteLength::Pixels(px(1.))), + }, + border_color: Some(cx.theme().colors().border_variant), + background: Some( + cx.theme().colors().editor_background.into(), + ), + ..Default::default() + }, + ..Default::default() + }, ) + .into_any_element() } else { - "No active completion".to_string() + div() + .child("No active completion".to_string()) + .into_any_element() }), ) .id("raw-input-view"), ) } - fn render_active_completion(&mut self, cx: &mut Context) -> Option { - let active_completion = self.active_completion.as_ref()?; - let completion_id = active_completion.completion.id; + fn render_active_completion( + &mut self, + window: &mut Window, + cx: &mut Context, + ) -> Option { + let active_prediction = self.active_prediction.as_ref()?; + let completion_id = active_prediction.prediction.id.clone(); let focus_handle = &self.focus_handle(cx); let border_color = cx.theme().colors().border; let bg_color = cx.theme().colors().editor_background; - let rated = self.zeta.read(cx).is_completion_rated(completion_id); - let feedback_empty = active_completion + let rated = self.zeta.read(cx).is_prediction_rated(&completion_id); + let feedback_empty = active_prediction .feedback_editor .read(cx) .text(cx) @@ -412,10 +586,10 @@ impl RateCompletionModal { .child(self.render_view_nav(cx)) .when_some( match self.current_view { - RateCompletionView::SuggestedEdits => { + RatePredictionView::SuggestedEdits => { self.render_suggested_edits(cx) } - RateCompletionView::RawInput => self.render_raw_input(cx), + RatePredictionView::RawInput => self.render_raw_input(window, cx), }, |this, element| this.child(element), ), @@ -450,7 +624,7 @@ impl RateCompletionModal { .h_40() .pt_1() .bg(bg_color) - .child(active_completion.feedback_editor.clone()), + .child(active_prediction.feedback_editor.clone()), ) }) .child( @@ -472,7 +646,7 @@ impl RateCompletionModal { ) .child(Label::new("Rated completion.").color(Color::Muted)), ) - } else if active_completion.completion.edits.is_empty() { + } else if active_prediction.prediction.edits.is_empty() { Some( label_container .child( @@ -489,7 +663,7 @@ impl RateCompletionModal { h_flex() .gap_1() .child( - Button::new("bad", "Bad Completion") + Button::new("bad", "Bad Prediction") .icon(IconName::ThumbsDown) .icon_size(IconSize::Small) 
.icon_position(IconPosition::Start) @@ -500,14 +674,14 @@ impl RateCompletionModal { )) }) .key_binding(KeyBinding::for_action_in( - &ThumbsDownActiveCompletion, + &ThumbsDownActivePrediction, focus_handle, cx, )) .on_click(cx.listener(move |this, _, window, cx| { - if this.active_completion.is_some() { + if this.active_prediction.is_some() { this.thumbs_down_active( - &ThumbsDownActiveCompletion, + &ThumbsDownActivePrediction, window, cx, ); @@ -515,20 +689,20 @@ impl RateCompletionModal { })), ) .child( - Button::new("good", "Good Completion") + Button::new("good", "Good Prediction") .icon(IconName::ThumbsUp) .icon_size(IconSize::Small) .icon_position(IconPosition::Start) .disabled(rated) .key_binding(KeyBinding::for_action_in( - &ThumbsUpActiveCompletion, + &ThumbsUpActivePrediction, focus_handle, cx, )) .on_click(cx.listener(move |this, _, window, cx| { - if this.active_completion.is_some() { + if this.active_prediction.is_some() { this.thumbs_up_active( - &ThumbsUpActiveCompletion, + &ThumbsUpActivePrediction, window, cx, ); @@ -543,34 +717,32 @@ impl RateCompletionModal { fn render_shown_completions(&self, cx: &Context) -> impl Iterator { self.zeta .read(cx) - .shown_completions() + .shown_predictions() .cloned() .enumerate() .map(|(index, completion)| { let selected = self - .active_completion + .active_prediction .as_ref() - .is_some_and(|selected| selected.completion.id == completion.id); - let rated = self.zeta.read(cx).is_completion_rated(completion.id); + .is_some_and(|selected| selected.prediction.id == completion.id); + let rated = self.zeta.read(cx).is_prediction_rated(&completion.id); let (icon_name, icon_color, tooltip_text) = match (rated, completion.edits.is_empty()) { - (true, _) => (IconName::Check, Color::Success, "Rated Completion"), + (true, _) => (IconName::Check, Color::Success, "Rated Prediction"), (false, true) => (IconName::File, Color::Muted, "No Edits Produced"), (false, false) => (IconName::FileDiff, Color::Accent, "Edits Available"), }; - let file_name = completion - .path - .file_name() - .map(|f| f.to_string_lossy().into_owned()) - .unwrap_or("untitled".to_string()); - let file_path = completion - .path - .parent() - .map(|p| p.to_string_lossy().into_owned()); - - ListItem::new(completion.id) + let file = completion.buffer.read(cx).file(); + let file_name = file + .as_ref() + .map_or(SharedString::new_static("untitled"), |file| { + file.file_name(cx).to_string().into() + }); + let file_path = file.map(|file| file.path().as_unix_str().to_string()); + + ListItem::new(completion.id.clone()) .inset(true) .spacing(ListItemSpacing::Sparse) .focused(index == self.selected_index) @@ -615,12 +787,12 @@ impl RateCompletionModal { } } -impl Render for RateCompletionModal { +impl Render for RatePredictionsModal { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { let border_color = cx.theme().colors().border; h_flex() - .key_context("RateCompletionModal") + .key_context("RatePredictionModal") .track_focus(&self.focus_handle) .on_action(cx.listener(Self::dismiss)) .on_action(cx.listener(Self::confirm)) @@ -688,20 +860,20 @@ impl Render for RateCompletionModal { ), ), ) - .children(self.render_active_completion(cx)) + .children(self.render_active_completion(window, cx)) .on_mouse_down_out(cx.listener(|_, _, _, cx| cx.emit(DismissEvent))) } } -impl EventEmitter for RateCompletionModal {} +impl EventEmitter for RatePredictionsModal {} -impl Focusable for RateCompletionModal { +impl Focusable for RatePredictionsModal { fn focus_handle(&self, 
_cx: &App) -> FocusHandle { self.focus_handle.clone() } } -impl ModalView for RateCompletionModal {} +impl ModalView for RatePredictionsModal {} fn format_time_ago(elapsed: Duration) -> String { let seconds = elapsed.as_secs(); diff --git a/crates/zeta2/src/retrieval_search.rs b/crates/zeta/src/retrieval_search.rs similarity index 100% rename from crates/zeta2/src/retrieval_search.rs rename to crates/zeta/src/retrieval_search.rs diff --git a/crates/zeta2/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs similarity index 77% rename from crates/zeta2/src/sweep_ai.rs rename to crates/zeta/src/sweep_ai.rs index c56d7409fa212734c5f5a73a6b24319c27c7494f..0e226ab9df26ffc945a2d8e810790d0b00d0f198 100644 --- a/crates/zeta2/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -2,7 +2,6 @@ use std::fmt; use std::{path::Path, sync::Arc}; use serde::{Deserialize, Serialize}; -use util::rel_path::RelPath; #[derive(Debug, Clone, Serialize)] pub struct AutocompleteRequest { @@ -91,34 +90,24 @@ pub struct AdditionalCompletion { pub finish_reason: Option, } -pub(crate) fn write_event(event: crate::Event, f: &mut impl fmt::Write) -> fmt::Result { +pub(crate) fn write_event( + event: &cloud_llm_client::predict_edits_v3::Event, + f: &mut impl fmt::Write, +) -> fmt::Result { match event { - crate::Event::BufferChange { - old_snapshot, - new_snapshot, + cloud_llm_client::predict_edits_v3::Event::BufferChange { + old_path, + path, + diff, .. } => { - let old_path = old_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - let new_path = new_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - if old_path != new_path { + if old_path != path { // TODO confirm how to do this for sweep // writeln!(f, "User renamed {:?} to {:?}\n", old_path, new_path)?; } - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); if !diff.is_empty() { - write!( - f, - "File: {}:\n{}\n", - new_path.display(util::paths::PathStyle::Posix), - diff - )? + write!(f, "File: {}:\n{}\n", path.display(), diff)? 
} fmt::Result::Ok(())
diff --git a/crates/zeta2/src/udiff.rs b/crates/zeta/src/udiff.rs
similarity index 100%
rename from crates/zeta2/src/udiff.rs
rename to crates/zeta/src/udiff.rs
diff --git a/crates/zeta2/src/xml_edits.rs b/crates/zeta/src/xml_edits.rs
similarity index 100%
rename from crates/zeta2/src/xml_edits.rs
rename to crates/zeta/src/xml_edits.rs
diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs
index 5b2c3856eda2cd984e6675d671f8c99aa183e883..6464ce19ebaf1f95ad58e2954fb68e934600dac4 100644
--- a/crates/zeta/src/zeta.rs
+++ b/crates/zeta/src/zeta.rs
@@ -1,130 +1,178 @@
-mod completion_diff_element;
-mod init;
-mod input_excerpt;
-mod license_detection;
-mod onboarding_modal;
-mod onboarding_telemetry;
-mod rate_completion_modal;
-
-pub(crate) use completion_diff_element::*;
-use db::kvp::{Dismissable, KEY_VALUE_STORE};
-use db::smol::stream::StreamExt as _;
-use edit_prediction::DataCollectionState;
-use futures::channel::mpsc;
-pub use init::*;
-use license_detection::LicenseDetectionWatcher;
-pub use rate_completion_modal::*;
-
-use anyhow::{Context as _, Result, anyhow};
+use anyhow::{Context as _, Result, anyhow, bail};
 use arrayvec::ArrayVec;
 use client::{Client, EditPredictionUsage, UserStore};
+use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature};
 use cloud_llm_client::{
     AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejection,
     MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, MINIMUM_REQUIRED_VERSION_HEADER_NAME,
-    PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, RejectEditPredictionsBody,
-    ZED_VERSION_HEADER_NAME,
+    RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME,
 };
-use collections::{HashMap, HashSet, VecDeque};
-use futures::AsyncReadExt;
+use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery};
+use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES};
+use collections::{HashMap, HashSet};
+use command_palette_hooks::CommandPaletteFilter;
+use db::kvp::{Dismissable, KEY_VALUE_STORE};
+use edit_prediction_context::{
+    DeclarationId, DeclarationStyle, EditPredictionContext, EditPredictionContextOptions,
+    EditPredictionExcerpt, EditPredictionExcerptOptions, EditPredictionScoreOptions, Line,
+    SyntaxIndex, SyntaxIndexState,
+};
+use feature_flags::{FeatureFlag, FeatureFlagAppExt as _, PredictEditsRateCompletionsFeatureFlag};
+use futures::channel::{mpsc, oneshot};
+use futures::{AsyncReadExt as _, StreamExt as _};
 use gpui::{
-    App, AppContext as _, AsyncApp, Context, Entity, EntityId, Global, SharedString, Subscription,
-    Task, actions,
+    App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, actions,
+    http_client::{self, AsyncBody, Method},
+    prelude::*,
 };
-use http_client::{AsyncBody, HttpClient, Method, Request, Response};
-use input_excerpt::excerpt_for_cursor_position;
 use language::{
-    Anchor, Buffer, BufferSnapshot, EditPreview, File, OffsetRangeExt, ToOffset, ToPoint, text_diff,
+    Anchor, Buffer, DiagnosticSet, File, LanguageServerId, Point, ToOffset as _, ToPoint,
 };
+use language::{BufferSnapshot, OffsetRangeExt};
 use language_model::{LlmApiToken, RefreshLlmTokenListener};
-use project::{Project, ProjectPath};
+use lsp::DiagnosticSeverity;
+use open_ai::FunctionDefinition;
+use project::{DisableAiSettings, Project, ProjectPath, WorktreeId};
 use release_channel::AppVersion;
 use semver::Version;
-use settings::WorktreeId;
-use std::collections::hash_map;
-use std::mem;
-use std::str::FromStr;
-use std::{
-    cmp,
-    fmt::Write,
-
future::Future, - ops::Range, - path::Path, - rc::Rc, - sync::Arc, - time::{Duration, Instant}, -}; +use serde::de::DeserializeOwned; +use settings::{EditPredictionProvider, Settings as _, SettingsStore, update_settings_file}; +use std::any::{Any as _, TypeId}; +use std::collections::{VecDeque, hash_map}; use telemetry_events::EditPredictionRating; +use workspace::Workspace; + +use std::fmt::Write as _; +use std::ops::Range; +use std::path::Path; +use std::rc::Rc; +use std::str::FromStr as _; +use std::sync::{Arc, LazyLock}; +use std::time::{Duration, Instant}; +use std::{env, mem}; use thiserror::Error; -use util::ResultExt; -use util::rel_path::RelPath; -use uuid::Uuid; +use util::rel_path::RelPathBuf; +use util::{LogErrorFuture, RangeExt as _, ResultExt as _, TryFutureExt}; use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; -use worktree::Worktree; - -const CURSOR_MARKER: &str = "<|user_cursor_is_here|>"; -const START_OF_FILE_MARKER: &str = "<|start_of_file|>"; -const EDITABLE_REGION_START_MARKER: &str = "<|editable_region_start|>"; -const EDITABLE_REGION_END_MARKER: &str = "<|editable_region_end|>"; -const BUFFER_CHANGE_GROUPING_INTERVAL: Duration = Duration::from_secs(1); -const ZED_PREDICT_DATA_COLLECTION_CHOICE: &str = "zed_predict_data_collection_choice"; -const MAX_CONTEXT_TOKENS: usize = 150; -const MAX_REWRITE_TOKENS: usize = 350; -const MAX_EVENT_TOKENS: usize = 500; +pub mod assemble_excerpts; +mod license_detection; +mod onboarding_modal; +mod prediction; +mod provider; +mod rate_prediction_modal; +pub mod retrieval_search; +mod sweep_ai; +pub mod udiff; +mod xml_edits; +pub mod zeta1; -/// Maximum number of events to track. -const MAX_EVENT_COUNT: usize = 16; +#[cfg(test)] +mod zeta_tests; + +use crate::assemble_excerpts::assemble_excerpts; +use crate::license_detection::LicenseDetectionWatcher; +use crate::onboarding_modal::ZedPredictModal; +pub use crate::prediction::EditPrediction; +pub use crate::prediction::EditPredictionId; +pub use crate::prediction::EditPredictionInputs; +use crate::rate_prediction_modal::{ + NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, + ThumbsUpActivePrediction, +}; +use crate::zeta1::request_prediction_with_zeta1; +pub use provider::ZetaEditPredictionProvider; actions!( edit_prediction, [ + /// Resets the edit prediction onboarding state. + ResetOnboarding, + /// Opens the rate completions modal. + RateCompletions, /// Clears the edit prediction history. - ClearHistory + ClearHistory, ] ); -#[derive(Copy, Clone, Default, Debug, PartialEq, Eq, Hash)] -pub struct EditPredictionId(Uuid); +/// Maximum number of events to track. 
+const EVENT_COUNT_MAX: usize = 6; +const CHANGE_GROUPING_LINE_SPAN: u32 = 8; +const ZED_PREDICT_DATA_COLLECTION_CHOICE: &str = "zed_predict_data_collection_choice"; -impl From for gpui::ElementId { - fn from(value: EditPredictionId) -> Self { - gpui::ElementId::Uuid(value.0) - } -} +pub struct SweepFeatureFlag; -impl std::fmt::Display for EditPredictionId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } +impl FeatureFlag for SweepFeatureFlag { + const NAME: &str = "sweep-ai"; } +pub const DEFAULT_EXCERPT_OPTIONS: EditPredictionExcerptOptions = EditPredictionExcerptOptions { + max_bytes: 512, + min_bytes: 128, + target_before_cursor_over_total_bytes: 0.5, +}; -struct ZedPredictUpsell; +pub const DEFAULT_CONTEXT_OPTIONS: ContextMode = + ContextMode::Agentic(DEFAULT_AGENTIC_CONTEXT_OPTIONS); -impl Dismissable for ZedPredictUpsell { - const KEY: &'static str = "dismissed-edit-predict-upsell"; +pub const DEFAULT_AGENTIC_CONTEXT_OPTIONS: AgenticContextOptions = AgenticContextOptions { + excerpt: DEFAULT_EXCERPT_OPTIONS, +}; - fn dismissed() -> bool { - // To make this backwards compatible with older versions of Zed, we - // check if the user has seen the previous Edit Prediction Onboarding - // before, by checking the data collection choice which was written to - // the database once the user clicked on "Accept and Enable" - if KEY_VALUE_STORE - .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) - .log_err() - .is_some_and(|s| s.is_some()) - { - return true; +pub const DEFAULT_SYNTAX_CONTEXT_OPTIONS: EditPredictionContextOptions = + EditPredictionContextOptions { + use_imports: true, + max_retrieved_declarations: 0, + excerpt: DEFAULT_EXCERPT_OPTIONS, + score: EditPredictionScoreOptions { + omit_excerpt_overlaps: true, + }, + }; + +pub const DEFAULT_OPTIONS: ZetaOptions = ZetaOptions { + context: DEFAULT_CONTEXT_OPTIONS, + max_prompt_bytes: DEFAULT_MAX_PROMPT_BYTES, + max_diagnostic_bytes: 2048, + prompt_format: PromptFormat::DEFAULT, + file_indexing_parallelism: 1, + buffer_change_grouping_interval: Duration::from_secs(1), +}; + +static USE_OLLAMA: LazyLock = + LazyLock::new(|| env::var("ZED_ZETA2_OLLAMA").is_ok_and(|var| !var.is_empty())); +static CONTEXT_RETRIEVAL_MODEL_ID: LazyLock = LazyLock::new(|| { + env::var("ZED_ZETA2_CONTEXT_MODEL").unwrap_or(if *USE_OLLAMA { + "qwen3-coder:30b".to_string() + } else { + "yqvev8r3".to_string() + }) +}); +static EDIT_PREDICTIONS_MODEL_ID: LazyLock = LazyLock::new(|| { + match env::var("ZED_ZETA2_MODEL").as_deref() { + Ok("zeta2-exp") => "4w5n28vw", // Fine-tuned model @ Baseten + Ok(model) => model, + Err(_) if *USE_OLLAMA => "qwen3-coder:30b", + Err(_) => "yqvev8r3", // Vanilla qwen3-coder @ Baseten + } + .to_string() +}); +static PREDICT_EDITS_URL: LazyLock> = LazyLock::new(|| { + env::var("ZED_PREDICT_EDITS_URL").ok().or_else(|| { + if *USE_OLLAMA { + Some("http://localhost:11434/v1/chat/completions".into()) + } else { + None } + }) +}); - KEY_VALUE_STORE - .read_kvp(Self::KEY) - .log_err() - .is_some_and(|s| s.is_some()) - } -} +pub struct Zeta2FeatureFlag; -pub fn should_show_upsell_modal() -> bool { - !ZedPredictUpsell::dismissed() +impl FeatureFlag for Zeta2FeatureFlag { + const NAME: &'static str = "zeta2"; + + fn enabled_for_staff() -> bool { + false + } } #[derive(Clone)] @@ -132,108 +180,291 @@ struct ZetaGlobal(Entity); impl Global for ZetaGlobal {} -#[derive(Clone)] -pub struct EditPrediction { - id: EditPredictionId, - path: Arc, - excerpt_range: Range, - cursor_offset: usize, - edits: 
Arc<[(Range, Arc)]>, - snapshot: BufferSnapshot, - edit_preview: EditPreview, - input_outline: Arc, - input_events: Arc, - input_excerpt: Arc, - output_excerpt: Arc, - buffer_snapshotted_at: Instant, - response_received_at: Instant, +pub struct Zeta { + client: Arc, + user_store: Entity, + llm_token: LlmApiToken, + _llm_token_subscription: Subscription, + projects: HashMap, + options: ZetaOptions, + update_required: bool, + debug_tx: Option>, + #[cfg(feature = "eval-support")] + eval_cache: Option>, + edit_prediction_model: ZetaEditPredictionModel, + sweep_api_token: Option, + sweep_ai_debug_info: Arc, + data_collection_choice: DataCollectionChoice, + rejected_predictions: Vec, + reject_predictions_tx: mpsc::UnboundedSender<()>, + reject_predictions_debounce_task: Option>, + shown_predictions: VecDeque, + rated_predictions: HashSet, } -impl EditPrediction { - fn latency(&self) -> Duration { - self.response_received_at - .duration_since(self.buffer_snapshotted_at) - } +#[derive(Default, PartialEq, Eq)] +pub enum ZetaEditPredictionModel { + #[default] + Zeta1, + Zeta2, + Sweep, +} - fn interpolate(&self, new_snapshot: &BufferSnapshot) -> Option, Arc)>> { - edit_prediction::interpolate_edits(&self.snapshot, new_snapshot, &self.edits) - } +#[derive(Debug, Clone, PartialEq)] +pub struct ZetaOptions { + pub context: ContextMode, + pub max_prompt_bytes: usize, + pub max_diagnostic_bytes: usize, + pub prompt_format: predict_edits_v3::PromptFormat, + pub file_indexing_parallelism: usize, + pub buffer_change_grouping_interval: Duration, } -impl std::fmt::Debug for EditPrediction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("EditPrediction") - .field("id", &self.id) - .field("path", &self.path) - .field("edits", &self.edits) - .finish_non_exhaustive() +#[derive(Debug, Clone, PartialEq)] +pub enum ContextMode { + Agentic(AgenticContextOptions), + Syntax(EditPredictionContextOptions), +} + +#[derive(Debug, Clone, PartialEq)] +pub struct AgenticContextOptions { + pub excerpt: EditPredictionExcerptOptions, +} + +impl ContextMode { + pub fn excerpt(&self) -> &EditPredictionExcerptOptions { + match self { + ContextMode::Agentic(options) => &options.excerpt, + ContextMode::Syntax(options) => &options.excerpt, + } } } -pub struct Zeta { - projects: HashMap, - client: Arc, - shown_completions: VecDeque, - rated_completions: HashSet, - data_collection_choice: DataCollectionChoice, - discarded_completions: Vec, - llm_token: LlmApiToken, - _llm_token_subscription: Subscription, - /// Whether an update to a newer version of Zed is required to continue using Zeta. 
- update_required: bool, - user_store: Entity, - license_detection_watchers: HashMap>, - discard_completions_debounce_task: Option>, - discard_completions_tx: mpsc::UnboundedSender<()>, +#[derive(Debug)] +pub enum ZetaDebugInfo { + ContextRetrievalStarted(ZetaContextRetrievalStartedDebugInfo), + SearchQueriesGenerated(ZetaSearchQueryDebugInfo), + SearchQueriesExecuted(ZetaContextRetrievalDebugInfo), + ContextRetrievalFinished(ZetaContextRetrievalDebugInfo), + EditPredictionRequested(ZetaEditPredictionDebugInfo), +} + +#[derive(Debug)] +pub struct ZetaContextRetrievalStartedDebugInfo { + pub project: Entity, + pub timestamp: Instant, + pub search_prompt: String, +} + +#[derive(Debug)] +pub struct ZetaContextRetrievalDebugInfo { + pub project: Entity, + pub timestamp: Instant, +} + +#[derive(Debug)] +pub struct ZetaEditPredictionDebugInfo { + pub inputs: EditPredictionInputs, + pub retrieval_time: Duration, + pub buffer: WeakEntity, + pub position: language::Anchor, + pub local_prompt: Result, + pub response_rx: oneshot::Receiver<(Result, Duration)>, +} + +#[derive(Debug)] +pub struct ZetaSearchQueryDebugInfo { + pub project: Entity, + pub timestamp: Instant, + pub search_queries: Vec, } +pub type RequestDebugInfo = predict_edits_v3::DebugInfo; + struct ZetaProject { - events: VecDeque, + syntax_index: Option>, + events: VecDeque>, + last_event: Option, + recent_paths: VecDeque, registered_buffers: HashMap, + current_prediction: Option, + next_pending_prediction_id: usize, + pending_predictions: ArrayVec, + last_prediction_refresh: Option<(EntityId, Instant)>, + context: Option, Vec>>>, + refresh_context_task: Option>>>, + refresh_context_debounce_task: Option>>, + refresh_context_timestamp: Option, + license_detection_watchers: HashMap>, + _subscription: gpui::Subscription, } -impl Zeta { - pub fn global(cx: &mut App) -> Option> { - cx.try_global::().map(|global| global.0.clone()) +impl ZetaProject { + pub fn events(&self, cx: &App) -> Vec> { + self.events + .iter() + .cloned() + .chain( + self.last_event + .as_ref() + .and_then(|event| event.finalize(&self.license_detection_watchers, cx)), + ) + .collect() } +} - pub fn register( - worktree: Option>, - client: Arc, - user_store: Entity, - cx: &mut App, - ) -> Entity { - let this = Self::global(cx).unwrap_or_else(|| { - let entity = cx.new(|cx| Self::new(client, user_store, cx)); - cx.set_global(ZetaGlobal(entity.clone())); - entity - }); +#[derive(Debug, Clone)] +struct CurrentEditPrediction { + pub requested_by: PredictionRequestedBy, + pub prediction: EditPrediction, + pub was_shown: bool, +} - this.update(cx, move |this, cx| { - if let Some(worktree) = worktree { - let worktree_id = worktree.read(cx).id(); - this.license_detection_watchers - .entry(worktree_id) - .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); - } - }); +impl CurrentEditPrediction { + fn should_replace_prediction(&self, old_prediction: &Self, cx: &App) -> bool { + let Some(new_edits) = self + .prediction + .interpolate(&self.prediction.buffer.read(cx)) + else { + return false; + }; + + if self.prediction.buffer != old_prediction.prediction.buffer { + return true; + } + + let Some(old_edits) = old_prediction + .prediction + .interpolate(&old_prediction.prediction.buffer.read(cx)) + else { + return true; + }; - this + let requested_by_buffer_id = self.requested_by.buffer_id(); + + // This reduces the occurrence of UI thrash from replacing edits + // + // TODO: This is fairly arbitrary - should have a more general heuristic that handles multiple 
edits. + if requested_by_buffer_id == Some(self.prediction.buffer.entity_id()) + && requested_by_buffer_id == Some(old_prediction.prediction.buffer.entity_id()) + && old_edits.len() == 1 + && new_edits.len() == 1 + { + let (old_range, old_text) = &old_edits[0]; + let (new_range, new_text) = &new_edits[0]; + new_range == old_range && new_text.starts_with(old_text.as_ref()) + } else { + true + } } +} - pub fn clear_history(&mut self) { - for zeta_project in self.projects.values_mut() { - zeta_project.events.clear(); +#[derive(Debug, Clone)] +enum PredictionRequestedBy { + DiagnosticsUpdate, + Buffer(EntityId), +} + +impl PredictionRequestedBy { + pub fn buffer_id(&self) -> Option { + match self { + PredictionRequestedBy::DiagnosticsUpdate => None, + PredictionRequestedBy::Buffer(buffer_id) => Some(*buffer_id), } } +} - pub fn usage(&self, cx: &App) -> Option { - self.user_store.read(cx).edit_prediction_usage() +struct PendingPrediction { + id: usize, + task: Task>, +} + +/// A prediction from the perspective of a buffer. +#[derive(Debug)] +enum BufferEditPrediction<'a> { + Local { prediction: &'a EditPrediction }, + Jump { prediction: &'a EditPrediction }, +} + +struct RegisteredBuffer { + snapshot: BufferSnapshot, + _subscriptions: [gpui::Subscription; 2], +} + +struct LastEvent { + old_snapshot: BufferSnapshot, + new_snapshot: BufferSnapshot, + end_edit_anchor: Option, +} + +impl LastEvent { + pub fn finalize( + &self, + license_detection_watchers: &HashMap>, + cx: &App, + ) -> Option> { + let path = buffer_path_with_id_fallback(&self.new_snapshot, cx); + let old_path = buffer_path_with_id_fallback(&self.old_snapshot, cx); + + let file = self.new_snapshot.file(); + let old_file = self.old_snapshot.file(); + + let in_open_source_repo = [file, old_file].iter().all(|file| { + file.is_some_and(|file| { + license_detection_watchers + .get(&file.worktree_id(cx)) + .is_some_and(|watcher| watcher.is_project_open_source()) + }) + }); + + let diff = language::unified_diff(&self.old_snapshot.text(), &self.new_snapshot.text()); + + if path == old_path && diff.is_empty() { + None + } else { + Some(Arc::new(predict_edits_v3::Event::BufferChange { + old_path, + path, + diff, + in_open_source_repo, + // TODO: Actually detect if this edit was predicted or not + predicted: false, + })) + } + } +} + +fn buffer_path_with_id_fallback(snapshot: &BufferSnapshot, cx: &App) -> Arc { + if let Some(file) = snapshot.file() { + file.full_path(cx).into() + } else { + Path::new(&format!("untitled-{}", snapshot.remote_id())).into() + } +} + +impl Zeta { + pub fn try_global(cx: &App) -> Option> { + cx.try_global::().map(|global| global.0.clone()) + } + + pub fn global( + client: &Arc, + user_store: &Entity, + cx: &mut App, + ) -> Entity { + cx.try_global::() + .map(|global| global.0.clone()) + .unwrap_or_else(|| { + let zeta = cx.new(|cx| Self::new(client.clone(), user_store.clone(), cx)); + cx.set_global(ZetaGlobal(zeta.clone())); + zeta + }) } - fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { + pub fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx); let data_collection_choice = Self::load_data_collection_choice(); + let (reject_tx, mut reject_rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| { while let Some(()) = reject_rx.next().await { @@ -248,12 +479,8 @@ impl Zeta { Self { projects: HashMap::default(), client, - shown_completions: VecDeque::new(), - rated_completions: HashSet::default(), - 
discarded_completions: Vec::new(), - discard_completions_debounce_task: None, - discard_completions_tx: reject_tx, - data_collection_choice, + user_store, + options: DEFAULT_OPTIONS, llm_token: LlmApiToken::default(), _llm_token_subscription: cx.subscribe( &refresh_llm_token_listener, @@ -268,64 +495,85 @@ impl Zeta { }, ), update_required: false, - license_detection_watchers: HashMap::default(), - user_store, + debug_tx: None, + #[cfg(feature = "eval-support")] + eval_cache: None, + edit_prediction_model: ZetaEditPredictionModel::Zeta2, + sweep_api_token: std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN environment variable set") + .log_err(), + data_collection_choice, + sweep_ai_debug_info: sweep_ai::debug_info(cx), + rejected_predictions: Vec::new(), + reject_predictions_debounce_task: None, + reject_predictions_tx: reject_tx, + rated_predictions: Default::default(), + shown_predictions: Default::default(), } } - fn get_or_init_zeta_project( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> &mut ZetaProject { - let project_id = project.entity_id(); - match self.projects.entry(project_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - cx.observe_release(project, move |this, _, _cx| { - this.projects.remove(&project_id); - }) - .detach(); - entry.insert(ZetaProject { - events: VecDeque::with_capacity(MAX_EVENT_COUNT), - registered_buffers: HashMap::default(), - }) - } - } + pub fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { + self.edit_prediction_model = model; } - fn push_event(zeta_project: &mut ZetaProject, event: Event) { - let events = &mut zeta_project.events; + pub fn has_sweep_api_token(&self) -> bool { + self.sweep_api_token.is_some() + } - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - timestamp: last_timestamp, - .. - }) = events.back_mut() - { - // Coalesce edits for the same buffer when they happen one after the other. - let Event::BufferChange { - old_snapshot, - new_snapshot, - timestamp, - } = &event; - - if timestamp.duration_since(*last_timestamp) <= BUFFER_CHANGE_GROUPING_INTERVAL - && old_snapshot.remote_id() == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version - { - *last_new_snapshot = new_snapshot.clone(); - *last_timestamp = *timestamp; - return; - } + #[cfg(feature = "eval-support")] + pub fn with_eval_cache(&mut self, cache: Arc) { + self.eval_cache = Some(cache); + } + + pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver { + let (debug_watch_tx, debug_watch_rx) = mpsc::unbounded(); + self.debug_tx = Some(debug_watch_tx); + debug_watch_rx + } + + pub fn options(&self) -> &ZetaOptions { + &self.options + } + + pub fn set_options(&mut self, options: ZetaOptions) { + self.options = options; + } + + pub fn clear_history(&mut self) { + for zeta_project in self.projects.values_mut() { + zeta_project.events.clear(); } + } + + pub fn context_for_project( + &self, + project: &Entity, + ) -> impl Iterator, &[Range])> { + self.projects + .get(&project.entity_id()) + .and_then(|project| { + Some( + project + .context + .as_ref()? + .iter() + .map(|(buffer, ranges)| (buffer.clone(), ranges.as_slice())), + ) + }) + .into_iter() + .flatten() + } - if events.len() >= MAX_EVENT_COUNT { - // These are halved instead of popping to improve prompt caching. 
- events.drain(..MAX_EVENT_COUNT / 2); + pub fn usage(&self, cx: &App) -> Option { + if self.edit_prediction_model == ZetaEditPredictionModel::Zeta2 { + self.user_store.read(cx).edit_prediction_usage() + } else { + None } + } - events.push_back(event); + pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { + self.get_or_init_zeta_project(project, cx); } pub fn register_buffer( @@ -338,6 +586,69 @@ impl Zeta { Self::register_buffer_impl(zeta_project, buffer, project, cx); } + fn get_or_init_zeta_project( + &mut self, + project: &Entity, + cx: &mut Context, + ) -> &mut ZetaProject { + self.projects + .entry(project.entity_id()) + .or_insert_with(|| ZetaProject { + syntax_index: if let ContextMode::Syntax(_) = &self.options.context { + Some(cx.new(|cx| { + SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) + })) + } else { + None + }, + events: VecDeque::new(), + last_event: None, + recent_paths: VecDeque::new(), + registered_buffers: HashMap::default(), + current_prediction: None, + pending_predictions: ArrayVec::new(), + next_pending_prediction_id: 0, + last_prediction_refresh: None, + context: None, + refresh_context_task: None, + refresh_context_debounce_task: None, + refresh_context_timestamp: None, + license_detection_watchers: HashMap::default(), + _subscription: cx.subscribe(&project, Self::handle_project_event), + }) + } + + fn handle_project_event( + &mut self, + project: Entity, + event: &project::Event, + cx: &mut Context, + ) { + // TODO [zeta2] init with recent paths + match event { + project::Event::ActiveEntryChanged(Some(active_entry_id)) => { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + let path = project.read(cx).path_for_entry(*active_entry_id, cx); + if let Some(path) = path { + if let Some(ix) = zeta_project + .recent_paths + .iter() + .position(|probe| probe == &path) + { + zeta_project.recent_paths.remove(ix); + } + zeta_project.recent_paths.push_front(path); + } + } + project::Event::DiagnosticsUpdated { .. 
} => { + self.refresh_prediction_from_diagnostics(project, cx); + } + _ => (), + } + } + fn register_buffer_impl<'a>( zeta_project: &'a mut ZetaProject, buffer: &Entity, @@ -345,6 +656,28 @@ impl Zeta { cx: &mut Context, ) -> &'a mut RegisteredBuffer { let buffer_id = buffer.entity_id(); + + if let Some(file) = buffer.read(cx).file() { + let worktree_id = file.worktree_id(cx); + if let Some(worktree) = project.read(cx).worktree_for_id(worktree_id, cx) { + zeta_project + .license_detection_watchers + .entry(worktree_id) + .or_insert_with(|| { + let project_entity_id = project.entity_id(); + cx.observe_release(&worktree, move |this, _worktree, _cx| { + let Some(zeta_project) = this.projects.get_mut(&project_entity_id) + else { + return; + }; + zeta_project.license_detection_watchers.remove(&worktree_id); + }) + .detach(); + Rc::new(LicenseDetectionWatcher::new(&worktree, cx)) + }); + } + } + match zeta_project.registered_buffers.entry(buffer_id) { hash_map::Entry::Occupied(entry) => entry.into_mut(), hash_map::Entry::Vacant(entry) => { @@ -376,2037 +709,2755 @@ impl Zeta { } } - fn request_completion_impl( + fn report_changes_for_buffer( &mut self, - project: &Entity, buffer: &Entity, - cursor: language::Anchor, + project: &Entity, cx: &mut Context, - perform_predict_edits: F, - ) -> Task>> - where - F: FnOnce(PerformPredictEditsParams) -> R + 'static, - R: Future)>> - + Send - + 'static, - { - let buffer = buffer.clone(); - let buffer_snapshotted_at = Instant::now(); - let snapshot = self.report_changes_for_buffer(&buffer, project, cx); - let zeta = cx.entity(); - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - - let zeta_project = self.get_or_init_zeta_project(project, cx); - let mut events = Vec::with_capacity(zeta_project.events.len()); - events.extend(zeta_project.events.iter().cloned()); - let events = Arc::new(events); - - let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { - let can_collect_file = self.can_collect_file(file, cx); - let git_info = if can_collect_file { - git_info_for_file(project, &ProjectPath::from_file(file.as_ref(), cx), cx) - } else { - None - }; - (git_info, can_collect_file) - } else { - (None, false) - }; - - let full_path: Arc = snapshot - .file() - .map(|f| Arc::from(f.full_path(cx).as_path())) - .unwrap_or_else(|| Arc::from(Path::new("untitled"))); - let full_path_str = full_path.to_string_lossy().into_owned(); - let cursor_point = cursor.to_point(&snapshot); - let cursor_offset = cursor_point.to_offset(&snapshot); - let prompt_for_events = { - let events = events.clone(); - move || prompt_for_events_impl(&events, MAX_EVENT_TOKENS) - }; - let gather_task = gather_context( - full_path_str, - &snapshot, - cursor_point, - prompt_for_events, - cx, - ); - - cx.spawn(async move |this, cx| { - let GatherContextOutput { - mut body, - editable_range, - included_events_count, - } = gather_task.await?; - let done_gathering_context_at = Instant::now(); - - let included_events = &events[events.len() - included_events_count..events.len()]; - body.can_collect_data = can_collect_file - && this - .read_with(cx, |this, cx| this.can_collect_events(included_events, cx)) - .unwrap_or(false); - if body.can_collect_data { - body.git_info = git_info; - } - - log::debug!( - "Events:\n{}\nExcerpt:\n{:?}", - body.input_events, - body.input_excerpt - ); - - let input_outline = body.outline.clone().unwrap_or_default(); - let input_events = body.input_events.clone(); - let input_excerpt = 
body.input_excerpt.clone(); - - let response = perform_predict_edits(PerformPredictEditsParams { - client, - llm_token, - app_version, - body, - }) - .await; - let (response, usage) = match response { - Ok(response) => response, - Err(err) => { - if err.is::() { - cx.update(|cx| { - zeta.update(cx, |zeta, _cx| { - zeta.update_required = true; - }); - - let error_message: SharedString = err.to_string().into(); - show_app_notification( - NotificationId::unique::(), - cx, - move |cx| { - cx.new(|cx| { - ErrorMessagePrompt::new(error_message.clone(), cx) - .with_link_button( - "Update Zed", - "https://zed.dev/releases", - ) - }) - }, - ); - }) - .ok(); - } + ) { + let project_state = self.get_or_init_zeta_project(project, cx); + let registered_buffer = Self::register_buffer_impl(project_state, buffer, project, cx); - return Err(err); - } - }; + let new_snapshot = buffer.read(cx).snapshot(); + if new_snapshot.version == registered_buffer.snapshot.version { + return; + } - let received_response_at = Instant::now(); - log::debug!("completion response: {}", &response.output_excerpt); + let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); + let end_edit_anchor = new_snapshot + .anchored_edits_since::(&old_snapshot.version) + .last() + .map(|(_, range)| range.end); + let events = &mut project_state.events; - if let Some(usage) = usage { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); + if let Some(LastEvent { + new_snapshot: last_new_snapshot, + end_edit_anchor: last_end_edit_anchor, + .. + }) = project_state.last_event.as_mut() + { + let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() + == last_new_snapshot.remote_id() + && old_snapshot.version == last_new_snapshot.version; + + let should_coalesce = is_next_snapshot_of_same_buffer + && end_edit_anchor + .as_ref() + .zip(last_end_edit_anchor.as_ref()) + .is_some_and(|(a, b)| { + let a = a.to_point(&new_snapshot); + let b = b.to_point(&new_snapshot); + a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN }); - }) - .ok(); + + if should_coalesce { + *last_end_edit_anchor = end_edit_anchor; + *last_new_snapshot = new_snapshot; + return; } + } - let edit_prediction = Self::process_completion_response( - response, - buffer, - &snapshot, - editable_range, - cursor_offset, - full_path, - input_outline, - input_events, - input_excerpt, - buffer_snapshotted_at, - cx, - ) - .await; + if events.len() + 1 >= EVENT_COUNT_MAX { + events.pop_front(); + } - let finished_at = Instant::now(); - - // record latency for ~1% of requests - if rand::random::() <= 2 { - telemetry::event!( - "Edit Prediction Request", - context_latency = done_gathering_context_at - .duration_since(buffer_snapshotted_at) - .as_millis(), - request_latency = received_response_at - .duration_since(done_gathering_context_at) - .as_millis(), - process_latency = finished_at.duration_since(received_response_at).as_millis() - ); - } + if let Some(event) = project_state.last_event.take() { + events.extend(event.finalize(&project_state.license_detection_watchers, cx)); + } - edit_prediction - }) + project_state.last_event = Some(LastEvent { + old_snapshot, + new_snapshot, + end_edit_anchor, + }); } - #[cfg(any(test, feature = "test-support"))] - pub fn fake_completion( - &mut self, - project: &Entity, + fn current_prediction_for_buffer( + &self, buffer: &Entity, - position: language::Anchor, - response: PredictEditsResponse, - cx: &mut Context, - ) -> Task>> { - 
self.request_completion_impl(project, buffer, position, cx, |_params| { - std::future::ready(Ok((response, None))) - }) - } - - pub fn request_completion( - &mut self, project: &Entity, - buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - self.request_completion_impl(project, buffer, position, cx, Self::perform_predict_edits) - } - - pub fn perform_predict_edits( - params: PerformPredictEditsParams, - ) -> impl Future)>> { - async move { - let PerformPredictEditsParams { - client, - llm_token, - app_version, - body, - .. - } = params; - - let http_client = client.http_client(); - let mut token = llm_token.acquire(&client).await?; - let mut did_retry = false; - - loop { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = - if let Ok(predict_edits_url) = std::env::var("ZED_PREDICT_EDITS_URL") { - request_builder.uri(predict_edits_url) - } else { - request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/v2", &[])? - .as_ref(), - ) - }; - let request = request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body(serde_json::to_string(&body)?.into())?; + cx: &App, + ) -> Option> { + let project_state = self.projects.get(&project.entity_id())?; - let mut response = http_client.send(request).await?; + let CurrentEditPrediction { + requested_by, + prediction, + .. + } = project_state.current_prediction.as_ref()?; - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - { - anyhow::ensure!( - app_version >= minimum_required_version, - ZedUpdateRequiredError { - minimum_version: minimum_required_version - } - ); + if prediction.targets_buffer(buffer.read(cx)) { + Some(BufferEditPrediction::Local { prediction }) + } else { + let show_jump = match requested_by { + PredictionRequestedBy::Buffer(requested_by_buffer_id) => { + requested_by_buffer_id == &buffer.entity_id() } + PredictionRequestedBy::DiagnosticsUpdate => true, + }; - if response.status().is_success() { - let usage = EditPredictionUsage::from_headers(response.headers()).ok(); - - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - return Ok((serde_json::from_str(&body)?, usage)); - } else if !did_retry - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(&client).await?; - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - anyhow::bail!( - "error predicting edits.\nStatus: {:?}\nBody: {}", - response.status(), - body - ); - } + if show_jump { + Some(BufferEditPrediction::Jump { prediction }) + } else { + None } } } - fn accept_edit_prediction( - &mut self, - request_id: EditPredictionId, - cx: &mut Context, - ) -> Task> { + fn accept_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} + ZetaEditPredictionModel::Sweep => return, + } + + let Some(project_state) = self.projects.get_mut(&project.entity_id()) else { + return; + }; + + let Some(prediction) = project_state.current_prediction.take() else { + return; + }; + let request_id = prediction.prediction.id.to_string(); + for pending_prediction in mem::take(&mut 
project_state.pending_predictions) { + self.cancel_pending_prediction(pending_prediction, cx); + } + let client = self.client.clone(); let llm_token = self.llm_token.clone(); let app_version = AppVersion::global(cx); cx.spawn(async move |this, cx| { - let http_client = client.http_client(); - let mut response = llm_token_retry(&llm_token, &client, |token| { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = - if let Ok(accept_prediction_url) = std::env::var("ZED_ACCEPT_PREDICTION_URL") { - request_builder.uri(accept_prediction_url) - } else { - request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/accept", &[])? - .as_ref(), - ) - }; - Ok(request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body( - serde_json::to_string(&AcceptEditPredictionBody { - request_id: request_id.0.to_string(), - })? - .into(), - )?) - }) - .await?; - - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - && app_version < minimum_required_version - { - return Err(anyhow!(ZedUpdateRequiredError { - minimum_version: minimum_required_version - })); - } - - if response.status().is_success() { - if let Some(usage) = EditPredictionUsage::from_headers(response.headers()).ok() { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); - }); - })?; - } - - Ok(()) + let url = if let Ok(predict_edits_url) = env::var("ZED_ACCEPT_PREDICTION_URL") { + http_client::Url::parse(&predict_edits_url)? } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - Err(anyhow!( - "error accepting edit prediction.\nStatus: {:?}\nBody: {}", - response.status(), - body + client + .http_client() + .build_zed_llm_url("/predict_edits/accept", &[])? + }; + + let response = cx + .background_spawn(Self::send_api_request::<()>( + move |builder| { + let req = builder.uri(url.as_ref()).body( + serde_json::to_string(&AcceptEditPredictionBody { + request_id: request_id.clone(), + })? + .into(), + ); + Ok(req?) 
+ }, + client, + llm_token, + app_version, )) - } + .await; + + Self::handle_api_response(&this, response, cx)?; + anyhow::Ok(()) }) + .detach_and_log_err(cx); } fn reject_edit_predictions(&mut self, cx: &mut Context) -> Task> { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} + ZetaEditPredictionModel::Sweep => return Task::ready(anyhow::Ok(())), + } + let client = self.client.clone(); let llm_token = self.llm_token.clone(); let app_version = AppVersion::global(cx); - let last_rejection = self.discarded_completions.last().cloned(); - let body = serde_json::to_string(&RejectEditPredictionsBody { - rejections: self.discarded_completions.clone(), - }) - .ok(); - + let last_rejection = self.rejected_predictions.last().cloned(); let Some(last_rejection) = last_rejection else { return Task::ready(anyhow::Ok(())); }; + let body = serde_json::to_string(&RejectEditPredictionsBody { + rejections: self.rejected_predictions.clone(), + }) + .ok(); + cx.spawn(async move |this, cx| { - let http_client = client.http_client(); - let mut response = llm_token_retry(&llm_token, &client, |token| { - let request_builder = http_client::Request::builder().method(Method::POST); - let request_builder = request_builder.uri( - http_client - .build_zed_llm_url("/predict_edits/reject", &[])? - .as_ref(), - ); - Ok(request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()) - .body( - body.as_ref() - .context("failed to serialize body")? - .clone() - .into(), - )?) + let url = client + .http_client() + .build_zed_llm_url("/predict_edits/reject", &[])?; + + cx.background_spawn(Self::send_api_request::<()>( + move |builder| { + let req = builder.uri(url.as_ref()).body(body.clone().into()); + Ok(req?) 
+ }, + client, + llm_token, + app_version, + )) + .await + .context("Failed to reject edit predictions")?; + + this.update(cx, |this, _| { + if let Some(ix) = this + .rejected_predictions + .iter() + .position(|rejection| rejection.request_id == last_rejection.request_id) + { + this.rejected_predictions.drain(..ix + 1); + } }) - .await?; + }) + } - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - && app_version < minimum_required_version - { - return Err(anyhow!(ZedUpdateRequiredError { - minimum_version: minimum_required_version - })); + fn discard_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { + project_state.pending_predictions.clear(); + if let Some(prediction) = project_state.current_prediction.take() { + self.discard_prediction(prediction.prediction.id, prediction.was_shown, cx); } + }; + } - if response.status().is_success() { - this.update(cx, |this, _| { - if let Some(ix) = this - .discarded_completions - .iter() - .position(|rejection| rejection.request_id == last_rejection.request_id) - { - this.discarded_completions.drain(..ix + 1); + fn did_show_current_prediction(&mut self, project: &Entity, _cx: &mut Context) { + if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { + if let Some(current_prediction) = project_state.current_prediction.as_mut() { + if !current_prediction.was_shown { + current_prediction.was_shown = true; + self.shown_predictions + .push_front(current_prediction.prediction.clone()); + if self.shown_predictions.len() > 50 { + let completion = self.shown_predictions.pop_back().unwrap(); + self.rated_predictions.remove(&completion.id); } - }) - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - Err(anyhow!( - "error rejecting edit predictions.\nStatus: {:?}\nBody: {}", - response.status(), - body - )) + } + } + } + } + + fn discard_prediction( + &mut self, + prediction_id: EditPredictionId, + was_shown: bool, + cx: &mut Context, + ) { + self.rejected_predictions.push(EditPredictionRejection { + request_id: prediction_id.to_string(), + was_shown, + }); + + let reached_request_limit = + self.rejected_predictions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; + let reject_tx = self.reject_predictions_tx.clone(); + self.reject_predictions_debounce_task = Some(cx.spawn(async move |_this, cx| { + const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); + if !reached_request_limit { + cx.background_executor() + .timer(DISCARD_COMPLETIONS_DEBOUNCE) + .await; } + reject_tx.unbounded_send(()).log_err(); + })); + } + + fn cancel_pending_prediction( + &self, + pending_prediction: PendingPrediction, + cx: &mut Context, + ) { + cx.spawn(async move |this, cx| { + let Some(prediction_id) = pending_prediction.task.await else { + return; + }; + + this.update(cx, |this, cx| { + this.discard_prediction(prediction_id, false, cx); + }) + .ok(); }) + .detach() + } + + fn is_refreshing(&self, project: &Entity) -> bool { + self.projects + .get(&project.entity_id()) + .is_some_and(|project_state| !project_state.pending_predictions.is_empty()) } - fn process_completion_response( - prediction_response: PredictEditsResponse, + pub fn refresh_prediction_from_buffer( + &mut self, + project: Entity, buffer: Entity, - snapshot: &BufferSnapshot, - editable_range: Range, - 
cursor_offset: usize, - path: Arc, - input_outline: String, - input_events: String, - input_excerpt: String, - buffer_snapshotted_at: Instant, - cx: &AsyncApp, - ) -> Task>> { - let snapshot = snapshot.clone(); - let request_id = prediction_response.request_id; - let output_excerpt = prediction_response.output_excerpt; - cx.spawn(async move |cx| { - let output_excerpt: Arc = output_excerpt.into(); - - let edits: Arc<[(Range, Arc)]> = cx - .background_spawn({ - let output_excerpt = output_excerpt.clone(); - let editable_range = editable_range.clone(); - let snapshot = snapshot.clone(); - async move { Self::parse_edits(output_excerpt, editable_range, &snapshot) } + position: language::Anchor, + cx: &mut Context, + ) { + self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { + let Some(request_task) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &buffer, position, cx) }) - .await? - .into(); - - let Some((edits, snapshot, edit_preview)) = buffer.read_with(cx, { - let edits = edits.clone(); - move |buffer, cx| { - let new_snapshot = buffer.snapshot(); - let edits: Arc<[(Range, Arc)]> = - edit_prediction::interpolate_edits(&snapshot, &new_snapshot, &edits)? - .into(); - Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) - } - })? + .log_err() else { - return anyhow::Ok(None); + return Task::ready(anyhow::Ok(None)); }; - let request_id = Uuid::from_str(&request_id).context("failed to parse request id")?; - - let edit_preview = edit_preview.await; - - Ok(Some(EditPrediction { - id: EditPredictionId(request_id), - path, - excerpt_range: editable_range, - cursor_offset, - edits, - edit_preview, - snapshot, - input_outline: input_outline.into(), - input_events: input_events.into(), - input_excerpt: input_excerpt.into(), - output_excerpt, - buffer_snapshotted_at, - response_received_at: Instant::now(), - })) + let project = project.clone(); + cx.spawn(async move |cx| { + if let Some(prediction) = request_task.await? 
{ + let id = prediction.id.clone(); + this.update(cx, |this, cx| { + let project_state = this + .projects + .get_mut(&project.entity_id()) + .context("Project not found")?; + + let new_prediction = CurrentEditPrediction { + requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), + prediction: prediction, + was_shown: false, + }; + + if project_state + .current_prediction + .as_ref() + .is_none_or(|old_prediction| { + new_prediction.should_replace_prediction(&old_prediction, cx) + }) + { + project_state.current_prediction = Some(new_prediction); + cx.notify(); + } + anyhow::Ok(()) + })??; + Ok(Some(id)) + } else { + Ok(None) + } + }) }) } - fn parse_edits( - output_excerpt: Arc, - editable_range: Range, - snapshot: &BufferSnapshot, - ) -> Result, Arc)>> { - let content = output_excerpt.replace(CURSOR_MARKER, ""); - - let start_markers = content - .match_indices(EDITABLE_REGION_START_MARKER) - .collect::>(); - anyhow::ensure!( - start_markers.len() == 1, - "expected exactly one start marker, found {}", - start_markers.len() - ); + pub fn refresh_prediction_from_diagnostics( + &mut self, + project: Entity, + cx: &mut Context, + ) { + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; - let end_markers = content - .match_indices(EDITABLE_REGION_END_MARKER) - .collect::>(); - anyhow::ensure!( - end_markers.len() == 1, - "expected exactly one end marker, found {}", - end_markers.len() - ); - - let sof_markers = content - .match_indices(START_OF_FILE_MARKER) - .collect::>(); - anyhow::ensure!( - sof_markers.len() <= 1, - "expected at most one start-of-file marker, found {}", - sof_markers.len() - ); + // Prefer predictions from buffer + if zeta_project.current_prediction.is_some() { + return; + }; - let codefence_start = start_markers[0].0; - let content = &content[codefence_start..]; + self.queue_prediction_refresh(project.clone(), project.entity_id(), cx, move |this, cx| { + let Some(open_buffer_task) = project + .update(cx, |project, cx| { + project + .active_entry() + .and_then(|entry| project.path_for_entry(entry, cx)) + .map(|path| project.open_buffer(path, cx)) + }) + .log_err() + .flatten() + else { + return Task::ready(anyhow::Ok(None)); + }; - let newline_ix = content.find('\n').context("could not find newline")?; - let content = &content[newline_ix + 1..]; + cx.spawn(async move |cx| { + let active_buffer = open_buffer_task.await?; + let snapshot = active_buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; + + let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + active_buffer, + &snapshot, + Default::default(), + Default::default(), + &project, + cx, + ) + .await? + else { + return anyhow::Ok(None); + }; - let codefence_end = content - .rfind(&format!("\n{EDITABLE_REGION_END_MARKER}")) - .context("could not find end marker")?; - let new_text = &content[..codefence_end]; + let Some(prediction) = this + .update(cx, |this, cx| { + this.request_prediction(&project, &jump_buffer, jump_position, cx) + })? + .await? 
+ else { + return anyhow::Ok(None); + }; - let old_text = snapshot - .text_for_range(editable_range.clone()) - .collect::(); + let id = prediction.id.clone(); + this.update(cx, |this, cx| { + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + zeta_project.current_prediction.get_or_insert_with(|| { + cx.notify(); + CurrentEditPrediction { + requested_by: PredictionRequestedBy::DiagnosticsUpdate, + prediction, + was_shown: false, + } + }); + } + })?; - Ok(Self::compute_edits( - old_text, - new_text, - editable_range.start, - snapshot, - )) + anyhow::Ok(Some(id)) + }) + }); } - pub fn compute_edits( - old_text: String, - new_text: &str, - offset: usize, - snapshot: &BufferSnapshot, - ) -> Vec<(Range, Arc)> { - text_diff(&old_text, new_text) - .into_iter() - .map(|(mut old_range, new_text)| { - old_range.start += offset; - old_range.end += offset; + #[cfg(not(test))] + pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + #[cfg(test)] + pub const THROTTLE_TIMEOUT: Duration = Duration::ZERO; - let prefix_len = common_prefix( - snapshot.chars_for_range(old_range.clone()), - new_text.chars(), - ); - old_range.start += prefix_len; + fn queue_prediction_refresh( + &mut self, + project: Entity, + throttle_entity: EntityId, + cx: &mut Context, + do_refresh: impl FnOnce( + WeakEntity, + &mut AsyncApp, + ) -> Task>> + + 'static, + ) { + let zeta_project = self.get_or_init_zeta_project(&project, cx); + let pending_prediction_id = zeta_project.next_pending_prediction_id; + zeta_project.next_pending_prediction_id += 1; + let last_request = zeta_project.last_prediction_refresh; - let suffix_len = common_prefix( - snapshot.reversed_chars_for_range(old_range.clone()), - new_text[prefix_len..].chars().rev(), - ); - old_range.end = old_range.end.saturating_sub(suffix_len); + // TODO report cancelled requests like in zeta1 + let task = cx.spawn(async move |this, cx| { + if let Some((last_entity, last_timestamp)) = last_request + && throttle_entity == last_entity + && let Some(timeout) = + (last_timestamp + Self::THROTTLE_TIMEOUT).checked_duration_since(Instant::now()) + { + cx.background_executor().timer(timeout).await; + } - let new_text = new_text[prefix_len..new_text.len() - suffix_len].into(); - let range = if old_range.is_empty() { - let anchor = snapshot.anchor_after(old_range.start); - anchor..anchor - } else { - snapshot.anchor_after(old_range.start)..snapshot.anchor_before(old_range.end) - }; - (range, new_text) + let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); + + // When a prediction completes, remove it from the pending list, and cancel + // any pending predictions that were enqueued before it. 
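+ // Pending predictions are queued in request order, so anything enqueued before the completed one is stale; it is cancelled here and its result, once it resolves, is discarded as rejected.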
+ this.update(cx, |this, cx| { + let zeta_project = this.get_or_init_zeta_project(&project, cx); + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); + for (ix, pending_prediction) in pending_predictions.iter().enumerate() { + if pending_prediction.id == pending_prediction_id { + pending_predictions.remove(ix); + for pending_prediction in pending_predictions.drain(0..ix) { + this.cancel_pending_prediction(pending_prediction, cx) + } + break; + } + } + this.get_or_init_zeta_project(&project, cx) + .pending_predictions = pending_predictions; + cx.notify(); }) - .collect() - } + .ok(); - pub fn is_completion_rated(&self, completion_id: EditPredictionId) -> bool { - self.rated_completions.contains(&completion_id) - } + edit_prediction_id + }); - pub fn completion_shown(&mut self, completion: &EditPrediction, cx: &mut Context) { - self.shown_completions.push_front(completion.clone()); - if self.shown_completions.len() > 50 { - let completion = self.shown_completions.pop_back().unwrap(); - self.rated_completions.remove(&completion.id); + if zeta_project.pending_predictions.len() <= 1 { + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + task, + }); + } else if zeta_project.pending_predictions.len() == 2 { + let pending_prediction = zeta_project.pending_predictions.pop().unwrap(); + zeta_project.pending_predictions.push(PendingPrediction { + id: pending_prediction_id, + task, + }); + self.cancel_pending_prediction(pending_prediction, cx); } - cx.notify(); } - pub fn rate_completion( + pub fn request_prediction( &mut self, - completion: &EditPrediction, - rating: EditPredictionRating, - feedback: String, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, cx: &mut Context, - ) { - self.rated_completions.insert(completion.id); - telemetry::event!( - "Edit Prediction Rated", - rating, - input_events = completion.input_events, - input_excerpt = completion.input_excerpt, - input_outline = completion.input_outline, - output_excerpt = completion.output_excerpt, - feedback - ); - self.client.telemetry().flush_events().detach(); - cx.notify(); - } - - pub fn shown_completions(&self) -> impl DoubleEndedIterator { - self.shown_completions.iter() - } - - pub fn shown_completions_len(&self) -> usize { - self.shown_completions.len() + ) -> Task>> { + match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 => { + request_prediction_with_zeta1(self, project, active_buffer, position, cx) + } + ZetaEditPredictionModel::Zeta2 => { + self.request_prediction_with_zeta2(project, active_buffer, position, cx) + } + ZetaEditPredictionModel::Sweep => { + self.request_prediction_with_sweep(project, active_buffer, position, true, cx) + } + } } - fn report_changes_for_buffer( + fn request_prediction_with_sweep( &mut self, - buffer: &Entity, project: &Entity, + active_buffer: &Entity, + position: language::Anchor, + allow_jump: bool, cx: &mut Context, - ) -> BufferSnapshot { - let zeta_project = self.get_or_init_zeta_project(project, cx); - let registered_buffer = Self::register_buffer_impl(zeta_project, buffer, project, cx); + ) -> Task>> { + let snapshot = active_buffer.read(cx).snapshot(); + let debug_info = self.sweep_ai_debug_info.clone(); + let Some(api_token) = self.sweep_api_token.clone() else { + return Task::ready(Ok(None)); + }; + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = 
project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let project_state = self.get_or_init_zeta_project(project, cx); + let events = project_state.events(cx); + let has_events = !events.is_empty(); + let recent_buffers = project_state.recent_paths.iter().cloned(); + let http_client = cx.http_client(); + + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>(); - let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version != registered_buffer.snapshot.version { - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - Self::push_event( - zeta_project, - Event::BufferChange { - old_snapshot, - new_snapshot: new_snapshot.clone(), - timestamp: Instant::now(), - }, - ); - } + const DIAGNOSTIC_LINES_RANGE: u32 = 20; - new_snapshot - } + let cursor_point = position.to_point(&snapshot); + let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); + let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; + let diagnostic_search_range = + Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); + let buffer_snapshotted_at = Instant::now(); - fn can_collect_file(&self, file: &Arc, cx: &App) -> bool { - self.data_collection_choice.is_enabled() && self.is_file_open_source(file, cx) - } + let result = cx.background_spawn({ + let snapshot = snapshot.clone(); + let diagnostic_search_range = diagnostic_search_range.clone(); + async move { + let text = snapshot.text(); - fn can_collect_events(&self, events: &[Event], cx: &App) -> bool { - if !self.data_collection_choice.is_enabled() { - return false; - } - let mut last_checked_file = None; - for event in events { - match event { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - if let Some(old_file) = old_snapshot.file() - && let Some(new_file) = new_snapshot.file() - { - if let Some(last_checked_file) = last_checked_file - && Arc::ptr_eq(last_checked_file, old_file) - && Arc::ptr_eq(last_checked_file, new_file) - { - continue; - } - if !self.can_collect_file(old_file, cx) { - return false; - } - if !Arc::ptr_eq(old_file, new_file) && !self.can_collect_file(new_file, cx) - { - return false; + let mut recent_changes = String::new(); + for event in &events { + sweep_ai::write_event(event.as_ref(), &mut recent_changes).unwrap(); + } + + let mut file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = Point::new(30, 0).min(snapshot.max_point()); + sweep_ai::FileChunk { + content: snapshot.text_for_range(Point::zero()..end_point).collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + .to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? 
+ .0, + ) + }), } - last_checked_file = Some(new_file); - } else { - return false; - } + }) + .collect::>(); + + let diagnostic_entries = + snapshot.diagnostics_in_range(diagnostic_search_range, false); + let mut diagnostic_content = String::new(); + let mut diagnostic_count = 0; + + for entry in diagnostic_entries { + let start_point: Point = entry.range.start; + + let severity = match entry.diagnostic.severity { + DiagnosticSeverity::ERROR => "error", + DiagnosticSeverity::WARNING => "warning", + DiagnosticSeverity::INFORMATION => "info", + DiagnosticSeverity::HINT => "hint", + _ => continue, + }; + + diagnostic_count += 1; + + writeln!( + &mut diagnostic_content, + "{} at line {}: {}", + severity, + start_point.row + 1, + entry.diagnostic.message + )?; + } + + if !diagnostic_content.is_empty() { + file_chunks.push(sweep_ai::FileChunk { + file_path: format!("Diagnostics for {}", full_path.display()), + start_line: 0, + end_line: diagnostic_count, + content: diagnostic_content, + timestamp: None, + }); + } + + let request_body = sweep_ai::AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + let inputs = EditPredictionInputs { + events, + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: cloud_llm_client::predict_edits_v3::Line(0), + text: request_body.file_contents.into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path.clone(), + }; + + const SWEEP_API_URL: &str = + "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + let response_received_at = Instant::now(); + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok(( + response.autocomplete_id, + edits, + snapshot, + 
response_received_at, + inputs, + )) + } + }); + + let buffer = active_buffer.clone(); + let project = project.clone(); + let active_buffer = active_buffer.clone(); + + cx.spawn(async move |this, cx| { + let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + + if edits.is_empty() { + if has_events + && allow_jump + && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( + active_buffer, + &snapshot, + diagnostic_search_range, + cursor_point, + &project, + cx, + ) + .await? + { + return this + .update(cx, |this, cx| { + this.request_prediction_with_sweep( + &project, + &jump_buffer, + jump_position, + false, + cx, + ) + })? + .await; } + + return anyhow::Ok(None); } - } - true - } - fn is_file_open_source(&self, file: &Arc, cx: &App) -> bool { - if !file.is_local() || file.is_private() { - return false; - } - self.license_detection_watchers - .get(&file.worktree_id(cx)) - .is_some_and(|watcher| watcher.is_project_open_source()) + anyhow::Ok( + EditPrediction::new( + EditPredictionId(id.into()), + &buffer, + &old_snapshot, + edits.into(), + buffer_snapshotted_at, + response_received_at, + inputs, + cx, + ) + .await, + ) + }) } - fn load_data_collection_choice() -> DataCollectionChoice { - let choice = KEY_VALUE_STORE - .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) - .log_err() - .flatten(); + async fn next_diagnostic_location( + active_buffer: Entity, + active_buffer_snapshot: &BufferSnapshot, + active_buffer_diagnostic_search_range: Range, + active_buffer_cursor_point: Point, + project: &Entity, + cx: &mut AsyncApp, + ) -> Result, language::Anchor)>> { + // find the closest diagnostic to the cursor that wasn't close enough to be included in the last request + let mut jump_location = active_buffer_snapshot + .diagnostic_groups(None) + .into_iter() + .filter_map(|(_, group)| { + let range = &group.entries[group.primary_ix] + .range + .to_point(&active_buffer_snapshot); + if range.overlaps(&active_buffer_diagnostic_search_range) { + None + } else { + Some(range.start) + } + }) + .min_by_key(|probe| probe.row.abs_diff(active_buffer_cursor_point.row)) + .map(|position| { + ( + active_buffer.clone(), + active_buffer_snapshot.anchor_before(position), + ) + }); - match choice.as_deref() { - Some("true") => DataCollectionChoice::Enabled, - Some("false") => DataCollectionChoice::Disabled, - Some(_) => { - log::error!("unknown value in '{ZED_PREDICT_DATA_COLLECTION_CHOICE}'"); - DataCollectionChoice::NotAnswered + if jump_location.is_none() { + let active_buffer_path = active_buffer.read_with(cx, |buffer, cx| { + let file = buffer.file()?; + + Some(ProjectPath { + worktree_id: file.worktree_id(cx), + path: file.path().clone(), + }) + })?; + + let buffer_task = project.update(cx, |project, cx| { + let (path, _, _) = project + .diagnostic_summaries(false, cx) + .filter(|(path, _, _)| Some(path) != active_buffer_path.as_ref()) + .max_by_key(|(path, _, _)| { + // find the buffer with errors that shares most parent directories + path.path + .components() + .zip( + active_buffer_path + .as_ref() + .map(|p| p.path.components()) + .unwrap_or_default(), + ) + .take_while(|(a, b)| a == b) + .count() + })?; + + Some(project.open_buffer(path, cx)) + })?; + + if let Some(buffer_task) = buffer_task { + let closest_buffer = buffer_task.await?; + + jump_location = closest_buffer + .read_with(cx, |buffer, _cx| { + buffer + .buffer_diagnostics(None) + .into_iter() + .min_by_key(|entry| entry.diagnostic.severity) + .map(|entry| entry.range.start) + })? 
+ .map(|position| (closest_buffer, position)); } - None => DataCollectionChoice::NotAnswered, } - } - fn toggle_data_collection_choice(&mut self, cx: &mut Context) { - self.data_collection_choice = self.data_collection_choice.toggle(); - let new_choice = self.data_collection_choice; - db::write_and_log(cx, move || { - KEY_VALUE_STORE.write_kvp( - ZED_PREDICT_DATA_COLLECTION_CHOICE.into(), - new_choice.is_enabled().to_string(), - ) - }); + anyhow::Ok(jump_location) } - fn discard_completion( + fn request_prediction_with_zeta2( &mut self, - completion_id: EditPredictionId, - was_shown: bool, + project: &Entity, + active_buffer: &Entity, + position: language::Anchor, cx: &mut Context, - ) { - self.discarded_completions.push(EditPredictionRejection { - request_id: completion_id.to_string(), - was_shown, - }); + ) -> Task>> { + let project_state = self.projects.get(&project.entity_id()); - let reached_request_limit = - self.discarded_completions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; - let discard_completions_tx = self.discard_completions_tx.clone(); - self.discard_completions_debounce_task = Some(cx.spawn(async move |_this, cx| { - const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); - if !reached_request_limit { - cx.background_executor() - .timer(DISCARD_COMPLETIONS_DEBOUNCE) - .await; - } - discard_completions_tx.unbounded_send(()).log_err(); - })); - } -} + let index_state = project_state.and_then(|state| { + state + .syntax_index + .as_ref() + .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) + }); + let options = self.options.clone(); + let active_snapshot = active_buffer.read(cx).snapshot(); + let buffer_snapshotted_at = Instant::now(); + let Some(excerpt_path) = active_snapshot + .file() + .map(|path| -> Arc { path.full_path(cx).into() }) + else { + return Task::ready(Err(anyhow!("No file path for excerpt"))); + }; + let client = self.client.clone(); + let llm_token = self.llm_token.clone(); + let app_version = AppVersion::global(cx); + let worktree_snapshots = project + .read(cx) + .worktrees(cx) + .map(|worktree| worktree.read(cx).snapshot()) + .collect::>(); + let debug_tx = self.debug_tx.clone(); -pub struct PerformPredictEditsParams { - pub client: Arc, - pub llm_token: LlmApiToken, - pub app_version: Version, - pub body: PredictEditsBody, -} + let events = project_state + .map(|state| state.events(cx)) + .unwrap_or_default(); -#[derive(Error, Debug)] -#[error( - "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
-)] -pub struct ZedUpdateRequiredError { - minimum_version: Version, -} + let diagnostics = active_snapshot.diagnostic_sets().clone(); -fn common_prefix, T2: Iterator>(a: T1, b: T2) -> usize { - a.zip(b) - .take_while(|(a, b)| a == b) - .map(|(a, _)| a.len_utf8()) - .sum() -} + let file = active_buffer.read(cx).file(); + let parent_abs_path = project::File::from_dyn(file).and_then(|f| { + let mut path = f.worktree.read(cx).absolutize(&f.path); + if path.pop() { Some(path) } else { None } + }); -fn git_info_for_file( - project: &Entity, - project_path: &ProjectPath, - cx: &App, -) -> Option { - let git_store = project.read(cx).git_store().read(cx); - if let Some((repository, _repo_path)) = - git_store.repository_and_path_for_project_path(project_path, cx) - { - let repository = repository.read(cx); - let head_sha = repository - .head_commit + // TODO data collection + let can_collect_data = file .as_ref() - .map(|head_commit| head_commit.sha.to_string()); - let remote_origin_url = repository.remote_origin_url.clone(); - let remote_upstream_url = repository.remote_upstream_url.clone(); - if head_sha.is_none() && remote_origin_url.is_none() && remote_upstream_url.is_none() { - return None; - } - Some(PredictEditsGitInfo { - head_sha, - remote_origin_url, - remote_upstream_url, - }) - } else { - None - } -} + .map_or(false, |file| self.can_collect_file(project, file, cx)); + + let empty_context_files = HashMap::default(); + let context_files = project_state + .and_then(|project_state| project_state.context.as_ref()) + .unwrap_or(&empty_context_files); + + #[cfg(feature = "eval-support")] + let parsed_fut = futures::future::join_all( + context_files + .keys() + .map(|buffer| buffer.read(cx).parsing_idle()), + ); -pub struct GatherContextOutput { - pub body: PredictEditsBody, - pub editable_range: Range, - pub included_events_count: usize, -} + let mut included_files = context_files + .iter() + .filter_map(|(buffer_entity, ranges)| { + let buffer = buffer_entity.read(cx); + Some(( + buffer_entity.clone(), + buffer.snapshot(), + buffer.file()?.full_path(cx).into(), + ranges.clone(), + )) + }) + .collect::>(); -pub fn gather_context( - full_path_str: String, - snapshot: &BufferSnapshot, - cursor_point: language::Point, - prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static, - cx: &App, -) -> Task> { - cx.background_spawn({ - let snapshot = snapshot.clone(); - async move { - let input_excerpt = excerpt_for_cursor_position( - cursor_point, - &full_path_str, - &snapshot, - MAX_REWRITE_TOKENS, - MAX_CONTEXT_TOKENS, - ); - let (input_events, included_events_count) = prompt_for_events(); - let editable_range = input_excerpt.editable_range.to_offset(&snapshot); - - let body = PredictEditsBody { - input_events, - input_excerpt: input_excerpt.prompt, - can_collect_data: false, - diagnostic_groups: None, - git_info: None, - outline: None, - speculated_output: None, - }; + included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| { + (path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len())) + }); - Ok(GatherContextOutput { - body, - editable_range, - included_events_count, - }) - } - }) -} + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); -fn prompt_for_events_impl(events: &[Event], mut remaining_tokens: usize) -> (String, usize) { - let mut result = String::new(); - for (ix, event) in events.iter().rev().enumerate() { - let event_string = event.to_prompt(); - let event_tokens = guess_token_count(event_string.len()); - if event_tokens > 
remaining_tokens { - return (result, ix); - } + let request_task = cx.background_spawn({ + let active_buffer = active_buffer.clone(); + async move { + #[cfg(feature = "eval-support")] + parsed_fut.await; - if !result.is_empty() { - result.insert_str(0, "\n\n"); - } - result.insert_str(0, &event_string); - remaining_tokens -= event_tokens; - } - return (result, events.len()); -} + let index_state = if let Some(index_state) = index_state { + Some(index_state.lock_owned().await) + } else { + None + }; -struct RegisteredBuffer { - snapshot: BufferSnapshot, - _subscriptions: [gpui::Subscription; 2], -} + let cursor_offset = position.to_offset(&active_snapshot); + let cursor_point = cursor_offset.to_point(&active_snapshot); -#[derive(Clone)] -pub enum Event { - BufferChange { - old_snapshot: BufferSnapshot, - new_snapshot: BufferSnapshot, - timestamp: Instant, - }, -} + let before_retrieval = Instant::now(); -impl Event { - fn to_prompt(&self) -> String { - match self { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - let mut prompt = String::new(); - - let old_path = old_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - let new_path = new_snapshot - .file() - .map(|f| f.path().as_ref()) - .unwrap_or(RelPath::unix("untitled").unwrap()); - if old_path != new_path { - writeln!(prompt, "User renamed {:?} to {:?}\n", old_path, new_path).unwrap(); + let (diagnostic_groups, diagnostic_groups_truncated) = + Self::gather_nearby_diagnostics( + cursor_offset, + &diagnostics, + &active_snapshot, + options.max_diagnostic_bytes, + ); + + let cloud_request = match options.context { + ContextMode::Agentic(context_options) => { + let Some(excerpt) = EditPredictionExcerpt::select_from_buffer( + cursor_point, + &active_snapshot, + &context_options.excerpt, + index_state.as_deref(), + ) else { + return Ok((None, None)); + }; + + let excerpt_anchor_range = active_snapshot.anchor_after(excerpt.range.start) + ..active_snapshot.anchor_before(excerpt.range.end); + + if let Some(buffer_ix) = + included_files.iter().position(|(_, snapshot, _, _)| { + snapshot.remote_id() == active_snapshot.remote_id() + }) + { + let (_, buffer, _, ranges) = &mut included_files[buffer_ix]; + ranges.push(excerpt_anchor_range); + retrieval_search::merge_anchor_ranges(ranges, buffer); + let last_ix = included_files.len() - 1; + included_files.swap(buffer_ix, last_ix); + } else { + included_files.push(( + active_buffer.clone(), + active_snapshot.clone(), + excerpt_path.clone(), + vec![excerpt_anchor_range], + )); + } + + let included_files = included_files + .iter() + .map(|(_, snapshot, path, ranges)| { + let ranges = ranges + .iter() + .map(|range| { + let point_range = range.to_point(&snapshot); + Line(point_range.start.row)..Line(point_range.end.row) + }) + .collect::>(); + let excerpts = assemble_excerpts(&snapshot, ranges); + predict_edits_v3::IncludedFile { + path: path.clone(), + max_row: Line(snapshot.max_point().row), + excerpts, + } + }) + .collect::>(); + + predict_edits_v3::PredictEditsRequest { + excerpt_path, + excerpt: String::new(), + excerpt_line_range: Line(0)..Line(0), + excerpt_range: 0..0, + cursor_point: predict_edits_v3::Point { + line: predict_edits_v3::Line(cursor_point.row), + column: cursor_point.column, + }, + included_files, + referenced_declarations: vec![], + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + debug_info: debug_tx.is_some(), + prompt_max_bytes: Some(options.max_prompt_bytes), + prompt_format: 
options.prompt_format, + // TODO [zeta2] + signatures: vec![], + excerpt_parent: None, + git_info: None, + } + } + ContextMode::Syntax(context_options) => { + let Some(context) = EditPredictionContext::gather_context( + cursor_point, + &active_snapshot, + parent_abs_path.as_deref(), + &context_options, + index_state.as_deref(), + ) else { + return Ok((None, None)); + }; + + make_syntax_context_cloud_request( + excerpt_path, + context, + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + None, + debug_tx.is_some(), + &worktree_snapshots, + index_state.as_deref(), + Some(options.max_prompt_bytes), + options.prompt_format, + ) + } + }; + + let prompt_result = cloud_zeta2_prompt::build_prompt(&cloud_request); + + let inputs = EditPredictionInputs { + included_files: cloud_request.included_files, + events: cloud_request.events, + cursor_point: cloud_request.cursor_point, + cursor_path: cloud_request.excerpt_path, + }; + + let retrieval_time = Instant::now() - before_retrieval; + + let debug_response_tx = if let Some(debug_tx) = &debug_tx { + let (response_tx, response_rx) = oneshot::channel(); + + debug_tx + .unbounded_send(ZetaDebugInfo::EditPredictionRequested( + ZetaEditPredictionDebugInfo { + inputs: inputs.clone(), + retrieval_time, + buffer: active_buffer.downgrade(), + local_prompt: match prompt_result.as_ref() { + Ok((prompt, _)) => Ok(prompt.clone()), + Err(err) => Err(err.to_string()), + }, + position, + response_rx, + }, + )) + .ok(); + Some(response_tx) + } else { + None + }; + + if cfg!(debug_assertions) && env::var("ZED_ZETA2_SKIP_REQUEST").is_ok() { + if let Some(debug_response_tx) = debug_response_tx { + debug_response_tx + .send((Err("Request skipped".to_string()), Duration::ZERO)) + .ok(); + } + anyhow::bail!("Skipping request because ZED_ZETA2_SKIP_REQUEST is set") } - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); - if !diff.is_empty() { - write!( - prompt, - "User edited {:?}:\n```diff\n{}\n```", - new_path, diff - ) - .unwrap(); + let (prompt, _) = prompt_result?; + let generation_params = + cloud_zeta2_prompt::generation_params(cloud_request.prompt_format); + let request = open_ai::Request { + model: EDIT_PREDICTIONS_MODEL_ID.clone(), + messages: vec![open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(prompt), + }], + stream: false, + max_completion_tokens: None, + stop: generation_params.stop.unwrap_or_default(), + temperature: generation_params.temperature.unwrap_or(0.7), + tool_choice: None, + parallel_tool_calls: None, + tools: vec![], + prompt_cache_key: None, + reasoning_effort: None, + }; + + log::trace!("Sending edit prediction request"); + + let before_request = Instant::now(); + let response = Self::send_raw_llm_request( + request, + client, + llm_token, + app_version, + #[cfg(feature = "eval-support")] + eval_cache, + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Prediction, + ) + .await; + let received_response_at = Instant::now(); + let request_time = received_response_at - before_request; + + log::trace!("Got edit prediction response"); + + if let Some(debug_response_tx) = debug_response_tx { + debug_response_tx + .send(( + response + .as_ref() + .map_err(|err| err.to_string()) + .map(|response| response.0.clone()), + request_time, + )) + .ok(); } - prompt + let (res, usage) = response?; + let request_id = EditPredictionId(res.id.clone().into()); + let Some(mut output_text) = text_from_response(res) else { + return Ok((None, usage)); + }; + + if 
output_text.contains(CURSOR_MARKER) { + log::trace!("Stripping out {CURSOR_MARKER} from response"); + output_text = output_text.replace(CURSOR_MARKER, ""); + } + + let get_buffer_from_context = |path: &Path| { + included_files + .iter() + .find_map(|(_, buffer, probe_path, ranges)| { + if probe_path.as_ref() == path { + Some((buffer, ranges.as_slice())) + } else { + None + } + }) + }; + + let (edited_buffer_snapshot, edits) = match options.prompt_format { + PromptFormat::NumLinesUniDiff => { + // TODO: Implement parsing of multi-file diffs + crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? + } + PromptFormat::Minimal + | PromptFormat::MinimalQwen + | PromptFormat::SeedCoder1120 => { + if output_text.contains("--- a/\n+++ b/\nNo edits") { + let edits = vec![]; + (&active_snapshot, edits) + } else { + crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? + } + } + PromptFormat::OldTextNewText => { + crate::xml_edits::parse_xml_edits(&output_text, get_buffer_from_context) + .await? + } + _ => { + bail!("unsupported prompt format {}", options.prompt_format) + } + }; + + let edited_buffer = included_files + .iter() + .find_map(|(buffer, snapshot, _, _)| { + if snapshot.remote_id() == edited_buffer_snapshot.remote_id() { + Some(buffer.clone()) + } else { + None + } + }) + .context("Failed to find buffer in included_buffers")?; + + anyhow::Ok(( + Some(( + request_id, + inputs, + edited_buffer, + edited_buffer_snapshot.clone(), + edits, + received_response_at, + )), + usage, + )) } - } - } -} + }); -#[derive(Debug, Clone)] -struct CurrentEditPrediction { - buffer_id: EntityId, - completion: EditPrediction, - was_shown: bool, - was_accepted: bool, -} + cx.spawn({ + async move |this, cx| { + let Some(( + id, + inputs, + edited_buffer, + edited_buffer_snapshot, + edits, + received_response_at, + )) = Self::handle_api_response(&this, request_task.await, cx)? + else { + return Ok(None); + }; -impl CurrentEditPrediction { - fn should_replace_completion(&self, old_completion: &Self, snapshot: &BufferSnapshot) -> bool { - if self.buffer_id != old_completion.buffer_id { - return true; - } + // TODO telemetry: duration, etc + Ok(EditPrediction::new( + id, + &edited_buffer, + &edited_buffer_snapshot, + edits.into(), + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await) + } + }) + } - let Some(old_edits) = old_completion.completion.interpolate(snapshot) else { - return true; - }; - let Some(new_edits) = self.completion.interpolate(snapshot) else { - return false; + async fn send_raw_llm_request( + request: open_ai::Request, + client: Arc, + llm_token: LlmApiToken, + app_version: Version, + #[cfg(feature = "eval-support")] eval_cache: Option>, + #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, + ) -> Result<(open_ai::Response, Option)> { + let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() { + http_client::Url::parse(&predict_edits_url)? + } else { + client + .http_client() + .build_zed_llm_url("/predict_edits/raw", &[])? 
}; - if old_edits.len() == 1 && new_edits.len() == 1 { - let (old_range, old_text) = &old_edits[0]; - let (new_range, new_text) = &new_edits[0]; - new_range == old_range && new_text.starts_with(old_text.as_ref()) - } else { - true - } - } -} + #[cfg(feature = "eval-support")] + let cache_key = if let Some(cache) = eval_cache { + use collections::FxHasher; + use std::hash::{Hash, Hasher}; -struct PendingCompletion { - id: usize, - task: Task<()>, -} + let mut hasher = FxHasher::default(); + url.hash(&mut hasher); + let request_str = serde_json::to_string_pretty(&request)?; + request_str.hash(&mut hasher); + let hash = hasher.finish(); -#[derive(Debug, Clone, Copy)] -pub enum DataCollectionChoice { - NotAnswered, - Enabled, - Disabled, -} + let key = (eval_cache_kind, hash); + if let Some(response_str) = cache.read(key) { + return Ok((serde_json::from_str(&response_str)?, None)); + } -impl DataCollectionChoice { - pub fn is_enabled(self) -> bool { - match self { - Self::Enabled => true, - Self::NotAnswered | Self::Disabled => false, - } - } + Some((cache, request_str, key)) + } else { + None + }; - pub fn is_answered(self) -> bool { - match self { - Self::Enabled | Self::Disabled => true, - Self::NotAnswered => false, + let (response, usage) = Self::send_api_request( + |builder| { + let req = builder + .uri(url.as_ref()) + .body(serde_json::to_string(&request)?.into()); + Ok(req?) + }, + client, + llm_token, + app_version, + ) + .await?; + + #[cfg(feature = "eval-support")] + if let Some((cache, request, key)) = cache_key { + cache.write(key, &request, &serde_json::to_string_pretty(&response)?); } + + Ok((response, usage)) } - #[must_use] - pub fn toggle(&self) -> DataCollectionChoice { - match self { - Self::Enabled => Self::Disabled, - Self::Disabled => Self::Enabled, - Self::NotAnswered => Self::Enabled, + fn handle_api_response( + this: &WeakEntity, + response: Result<(T, Option)>, + cx: &mut gpui::AsyncApp, + ) -> Result { + match response { + Ok((data, usage)) => { + if let Some(usage) = usage { + this.update(cx, |this, cx| { + this.user_store.update(cx, |user_store, cx| { + user_store.update_edit_prediction_usage(usage, cx); + }); + }) + .ok(); + } + Ok(data) + } + Err(err) => { + if err.is::() { + cx.update(|cx| { + this.update(cx, |this, _cx| { + this.update_required = true; + }) + .ok(); + + let error_message: SharedString = err.to_string().into(); + show_app_notification( + NotificationId::unique::(), + cx, + move |cx| { + cx.new(|cx| { + ErrorMessagePrompt::new(error_message.clone(), cx) + .with_link_button("Update Zed", "https://zed.dev/releases") + }) + }, + ); + }) + .ok(); + } + Err(err) + } } } -} -impl From for DataCollectionChoice { - fn from(value: bool) -> Self { - match value { - true => DataCollectionChoice::Enabled, - false => DataCollectionChoice::Disabled, + async fn send_api_request( + build: impl Fn(http_client::http::request::Builder) -> Result>, + client: Arc, + llm_token: LlmApiToken, + app_version: Version, + ) -> Result<(Res, Option)> + where + Res: DeserializeOwned, + { + let http_client = client.http_client(); + let mut token = llm_token.acquire(&client).await?; + let mut did_retry = false; + + loop { + let request_builder = http_client::Request::builder().method(Method::POST); + + let request = build( + request_builder + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", token)) + .header(ZED_VERSION_HEADER_NAME, app_version.to_string()), + )?; + + let mut response = http_client.send(request).await?; + + if let 
Some(minimum_required_version) = response + .headers() + .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) + .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) + { + anyhow::ensure!( + app_version >= minimum_required_version, + ZedUpdateRequiredError { + minimum_version: minimum_required_version + } + ); + } + + if response.status().is_success() { + let usage = EditPredictionUsage::from_headers(response.headers()).ok(); + + let mut body = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + return Ok((serde_json::from_slice(&body)?, usage)); + } else if !did_retry + && response + .headers() + .get(EXPIRED_LLM_TOKEN_HEADER_NAME) + .is_some() + { + did_retry = true; + token = llm_token.refresh(&client).await?; + } else { + let mut body = String::new(); + response.body_mut().read_to_string(&mut body).await?; + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + body + ); + } } } -} -async fn llm_token_retry( - llm_token: &LlmApiToken, - client: &Arc, - build_request: impl Fn(String) -> Result>, -) -> Result> { - let mut did_retry = false; - let http_client = client.http_client(); - let mut token = llm_token.acquire(client).await?; - loop { - let request = build_request(token.clone())?; - let response = http_client.send(request).await?; - - if !did_retry - && !response.status().is_success() - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(client).await?; - continue; + pub const CONTEXT_RETRIEVAL_IDLE_DURATION: Duration = Duration::from_secs(10); + pub const CONTEXT_RETRIEVAL_DEBOUNCE_DURATION: Duration = Duration::from_secs(3); + + // Refresh the related excerpts when the user has just begun editing after + // an idle period, and after they pause editing. + fn refresh_context_if_needed( + &mut self, + project: &Entity, + buffer: &Entity, + cursor_position: language::Anchor, + cx: &mut Context, + ) { + if !matches!(&self.options().context, ContextMode::Agentic { .. }) { + return; } - return Ok(response); - } -} + let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { + return; + }; -pub struct ZetaEditPredictionProvider { - zeta: Entity, - singleton_buffer: Option>, - pending_completions: ArrayVec, - canceled_completions: HashMap>, - next_pending_completion_id: usize, - current_completion: Option, - last_request_timestamp: Instant, - project: Entity, -} + let now = Instant::now(); + let was_idle = zeta_project + .refresh_context_timestamp + .map_or(true, |timestamp| { + now - timestamp > Self::CONTEXT_RETRIEVAL_IDLE_DURATION + }); + zeta_project.refresh_context_timestamp = Some(now); + zeta_project.refresh_context_debounce_task = Some(cx.spawn({ + let buffer = buffer.clone(); + let project = project.clone(); + async move |this, cx| { + if was_idle { + log::debug!("refetching edit prediction context after idle"); + } else { + cx.background_executor() + .timer(Self::CONTEXT_RETRIEVAL_DEBOUNCE_DURATION) + .await; + log::debug!("refetching edit prediction context after pause"); + } + this.update(cx, |this, cx| { + let task = this.refresh_context(project.clone(), buffer, cursor_position, cx); -impl ZetaEditPredictionProvider { - pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); + if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { + zeta_project.refresh_context_task = Some(task.log_err()); + }; + }) + .ok() + } + })); + } - pub fn new( - zeta: Entity, + // Refresh the related excerpts asynchronously.
Ensure the task runs to completion, + // and avoid spawning more than one concurrent task. + pub fn refresh_context( + &mut self, project: Entity, - singleton_buffer: Option>, + buffer: Entity, + cursor_position: language::Anchor, cx: &mut Context, - ) -> Self { - cx.on_release(|this, cx| { - this.take_current_edit_prediction(cx); - }) - .detach(); + ) -> Task> { + let Some(zeta_project) = self.projects.get(&project.entity_id()) else { + return Task::ready(anyhow::Ok(())); + }; - Self { - zeta, - singleton_buffer, - pending_completions: ArrayVec::new(), - canceled_completions: HashMap::default(), - next_pending_completion_id: 0, - current_completion: None, - last_request_timestamp: Instant::now(), - project, + let ContextMode::Agentic(options) = &self.options().context else { + return Task::ready(anyhow::Ok(())); + }; + + let snapshot = buffer.read(cx).snapshot(); + let cursor_point = cursor_position.to_point(&snapshot); + let Some(cursor_excerpt) = EditPredictionExcerpt::select_from_buffer( + cursor_point, + &snapshot, + &options.excerpt, + None, + ) else { + return Task::ready(Ok(())); + }; + + let app_version = AppVersion::global(cx); + let client = self.client.clone(); + let llm_token = self.llm_token.clone(); + let debug_tx = self.debug_tx.clone(); + let current_file_path: Arc = snapshot + .file() + .map(|f| f.full_path(cx).into()) + .unwrap_or_else(|| Path::new("untitled").into()); + + let prompt = match cloud_zeta2_prompt::retrieval_prompt::build_prompt( + predict_edits_v3::PlanContextRetrievalRequest { + excerpt: cursor_excerpt.text(&snapshot).body, + excerpt_path: current_file_path, + excerpt_line_range: cursor_excerpt.line_range, + cursor_file_max_row: Line(snapshot.max_point().row), + events: zeta_project.events(cx), + }, + ) { + Ok(prompt) => prompt, + Err(err) => { + return Task::ready(Err(err)); + } + }; + + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::ContextRetrievalStarted( + ZetaContextRetrievalStartedDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + search_prompt: prompt.clone(), + }, + )) + .ok(); } - } - fn take_current_edit_prediction(&mut self, cx: &mut App) { - if let Some(completion) = self.current_completion.take() { - if !completion.was_accepted { - self.zeta.update(cx, |zeta, cx| { - zeta.discard_completion(completion.completion.id, completion.was_shown, cx); - }); + pub static TOOL_SCHEMA: LazyLock<(serde_json::Value, String)> = LazyLock::new(|| { + let schema = language_model::tool_schema::root_schema_for::( + language_model::LanguageModelToolSchemaFormat::JsonSchemaSubset, + ); + + let description = schema + .get("description") + .and_then(|description| description.as_str()) + .unwrap() + .to_string(); + + (schema.into(), description) + }); + + let (tool_schema, tool_description) = TOOL_SCHEMA.clone(); + + let request = open_ai::Request { + model: CONTEXT_RETRIEVAL_MODEL_ID.clone(), + messages: vec![open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(prompt), + }], + stream: false, + max_completion_tokens: None, + stop: Default::default(), + temperature: 0.7, + tool_choice: None, + parallel_tool_calls: None, + tools: vec![open_ai::ToolDefinition::Function { + function: FunctionDefinition { + name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME.to_string(), + description: Some(tool_description), + parameters: Some(tool_schema), + }, + }], + prompt_cache_key: None, + reasoning_effort: None, + }; + + #[cfg(feature = "eval-support")] + let eval_cache = self.eval_cache.clone(); + + 
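+ // The spawned task below sends the search-planning request, converts the model's tool calls into search queries, runs them against the project, and stores the resulting excerpts as the context used by later predictions.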
cx.spawn(async move |this, cx| { + log::trace!("Sending search planning request"); + let response = Self::send_raw_llm_request( + request, + client, + llm_token, + app_version, + #[cfg(feature = "eval-support")] + eval_cache.clone(), + #[cfg(feature = "eval-support")] + EvalCacheEntryKind::Context, + ) + .await; + let mut response = Self::handle_api_response(&this, response, cx)?; + log::trace!("Got search planning response"); + + let choice = response + .choices + .pop() + .context("No choices in retrieval response")?; + let open_ai::RequestMessage::Assistant { + content: _, + tool_calls, + } = choice.message + else { + anyhow::bail!("Retrieval response didn't include an assistant message"); + }; + + let mut queries: Vec = Vec::new(); + for tool_call in tool_calls { + let open_ai::ToolCallContent::Function { function } = tool_call.content; + if function.name != cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME { + log::warn!( + "Context retrieval response tried to call an unknown tool: {}", + function.name + ); + + continue; + } + + let input: SearchToolInput = serde_json::from_str(&function.arguments) + .with_context(|| format!("invalid search json {}", &function.arguments))?; + queries.extend(input.queries); } - } - } -} -impl edit_prediction::EditPredictionProvider for ZetaEditPredictionProvider { - fn name() -> &'static str { - "zed-predict" - } + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::SearchQueriesGenerated( + ZetaSearchQueryDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + search_queries: queries.clone(), + }, + )) + .ok(); + } - fn display_name() -> &'static str { - "Zed's Edit Predictions" - } + log::trace!("Running retrieval search: {queries:#?}"); - fn show_completions_in_menu() -> bool { - true - } + let related_excerpts_result = retrieval_search::run_retrieval_searches( + queries, + project.clone(), + #[cfg(feature = "eval-support")] + eval_cache, + cx, + ) + .await; - fn show_tab_accept_marker() -> bool { - true - } + log::trace!("Search queries executed"); + + if let Some(debug_tx) = &debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::SearchQueriesExecuted( + ZetaContextRetrievalDebugInfo { + project: project.clone(), + timestamp: Instant::now(), + }, + )) + .ok(); + } - fn data_collection_state(&self, cx: &App) -> DataCollectionState { - if let Some(buffer) = &self.singleton_buffer - && let Some(file) = buffer.read(cx).file() - { - let is_project_open_source = self.zeta.read(cx).is_file_open_source(file, cx); - if self.zeta.read(cx).data_collection_choice.is_enabled() { - DataCollectionState::Enabled { - is_project_open_source, + this.update(cx, |this, _cx| { + let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) else { + return Ok(()); + }; + zeta_project.refresh_context_task.take(); + if let Some(debug_tx) = &this.debug_tx { + debug_tx + .unbounded_send(ZetaDebugInfo::ContextRetrievalFinished( + ZetaContextRetrievalDebugInfo { + project, + timestamp: Instant::now(), + }, + )) + .ok(); } - } else { - DataCollectionState::Disabled { - is_project_open_source, + match related_excerpts_result { + Ok(excerpts) => { + zeta_project.context = Some(excerpts); + Ok(()) + } + Err(error) => Err(error), } - } - } else { - return DataCollectionState::Disabled { - is_project_open_source: false, - }; - } - } - - fn toggle_data_collection(&mut self, cx: &mut App) { - self.zeta - .update(cx, |zeta, cx| zeta.toggle_data_collection_choice(cx)); - } - - fn usage(&self, cx: &App) -> Option { - 
self.zeta.read(cx).usage(cx) - } - - fn is_enabled( - &self, - _buffer: &Entity, - _cursor_position: language::Anchor, - _cx: &App, - ) -> bool { - true - } - fn is_refreshing(&self, _cx: &App) -> bool { - !self.pending_completions.is_empty() + })? + }) } - fn refresh( + pub fn set_context( &mut self, - buffer: Entity, - position: language::Anchor, - _debounce: bool, - cx: &mut Context, + project: Entity, + context: HashMap, Vec>>, ) { - if self.zeta.read(cx).update_required { - return; + if let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) { + zeta_project.context = Some(context); } + } - if self - .zeta - .read(cx) - .user_store - .read_with(cx, |user_store, _cx| { - user_store.account_too_young() || user_store.has_overdue_invoices() - }) - { - return; + fn gather_nearby_diagnostics( + cursor_offset: usize, + diagnostic_sets: &[(LanguageServerId, DiagnosticSet)], + snapshot: &BufferSnapshot, + max_diagnostics_bytes: usize, + ) -> (Vec, bool) { + // TODO: Could make this more efficient + let mut diagnostic_groups = Vec::new(); + for (language_server_id, diagnostics) in diagnostic_sets { + let mut groups = Vec::new(); + diagnostics.groups(*language_server_id, &mut groups, &snapshot); + diagnostic_groups.extend( + groups + .into_iter() + .map(|(_, group)| group.resolve::(&snapshot)), + ); } - if let Some(current_completion) = self.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if current_completion - .completion - .interpolate(&snapshot) - .is_some() - { - return; + // sort by proximity to cursor + diagnostic_groups.sort_by_key(|group| { + let range = &group.entries[group.primary_ix].range; + if range.start >= cursor_offset { + range.start - cursor_offset + } else if cursor_offset >= range.end { + cursor_offset - range.end + } else { + (cursor_offset - range.start).min(range.end - cursor_offset) + } + }); + + let mut results = Vec::new(); + let mut diagnostic_groups_truncated = false; + let mut diagnostics_byte_count = 0; + for group in diagnostic_groups { + let raw_value = serde_json::value::to_raw_value(&group).unwrap(); + diagnostics_byte_count += raw_value.get().len(); + if diagnostics_byte_count > max_diagnostics_bytes { + diagnostic_groups_truncated = true; + break; } + results.push(predict_edits_v3::DiagnosticGroup(raw_value)); } - let pending_completion_id = self.next_pending_completion_id; - self.next_pending_completion_id += 1; - let last_request_timestamp = self.last_request_timestamp; + (results, diagnostic_groups_truncated) + } - let project = self.project.clone(); - let task = cx.spawn(async move |this, cx| { - if let Some(timeout) = (last_request_timestamp + Self::THROTTLE_TIMEOUT) - .checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } + // TODO: Dedupe with similar code in request_prediction? 
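+ // Builds a cloud prediction request from the gathered syntax context so it
+ // can be inspected via the zeta CLI; agentic context mode is not supported
+ // here yet.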
+ pub fn cloud_request_for_zeta_cli( + &mut self, + project: &Entity, + buffer: &Entity, + position: language::Anchor, + cx: &mut Context, + ) -> Task> { + let project_state = self.projects.get(&project.entity_id()); + + let index_state = project_state.and_then(|state| { + state + .syntax_index + .as_ref() + .map(|index| index.read_with(cx, |index, _cx| index.state().clone())) + }); + let options = self.options.clone(); + let snapshot = buffer.read(cx).snapshot(); + let Some(excerpt_path) = snapshot.file().map(|path| path.full_path(cx)) else { + return Task::ready(Err(anyhow!("No file path for excerpt"))); + }; + let worktree_snapshots = project + .read(cx) + .worktrees(cx) + .map(|worktree| worktree.read(cx).snapshot()) + .collect::>(); - let completion_request = this.update(cx, |this, cx| { - this.last_request_timestamp = Instant::now(); - this.zeta.update(cx, |zeta, cx| { - zeta.request_completion(&project, &buffer, position, cx) - }) - }); + let parent_abs_path = project::File::from_dyn(buffer.read(cx).file()).and_then(|f| { + let mut path = f.worktree.read(cx).absolutize(&f.path); + if path.pop() { Some(path) } else { None } + }); - let completion = match completion_request { - Ok(completion_request) => { - let completion_request = completion_request.await; - completion_request.map(|c| { - c.map(|completion| CurrentEditPrediction { - buffer_id: buffer.entity_id(), - completion, - was_shown: false, - was_accepted: false, - }) - }) - } - Err(error) => Err(error), + cx.background_spawn(async move { + let index_state = if let Some(index_state) = index_state { + Some(index_state.lock_owned().await) + } else { + None }; - let discarded = this - .update(cx, |this, cx| { - if this - .pending_completions - .first() - .is_some_and(|completion| completion.id == pending_completion_id) - { - this.pending_completions.remove(0); - } else { - if let Some(discarded) = this.pending_completions.drain(..).next() { - this.canceled_completions - .insert(discarded.id, discarded.task); - } - } - - let canceled = this.canceled_completions.remove(&pending_completion_id); + let cursor_point = position.to_point(&snapshot); - if canceled.is_some() - && let Ok(Some(new_completion)) = &completion - { - this.zeta.update(cx, |zeta, cx| { - zeta.discard_completion(new_completion.completion.id, false, cx); - }); - return true; + let debug_info = true; + EditPredictionContext::gather_context( + cursor_point, + &snapshot, + parent_abs_path.as_deref(), + match &options.context { + ContextMode::Agentic(_) => { + // TODO + panic!("Llm mode not supported in zeta cli yet"); } - - cx.notify(); - false - }) - .ok() - .unwrap_or(true); - - if discarded { - return; - } - - let Some(new_completion) = completion - .context("edit prediction failed") - .log_err() - .flatten() - else { - return; - }; - - this.update(cx, |this, cx| { - if let Some(old_completion) = this.current_completion.as_ref() { - let snapshot = buffer.read(cx).snapshot(); - if new_completion.should_replace_completion(old_completion, &snapshot) { - this.zeta.update(cx, |zeta, cx| { - zeta.completion_shown(&new_completion.completion, cx); - }); - this.take_current_edit_prediction(cx); - this.current_completion = Some(new_completion); + ContextMode::Syntax(edit_prediction_context_options) => { + edit_prediction_context_options } - } else { - this.zeta.update(cx, |zeta, cx| { - zeta.completion_shown(&new_completion.completion, cx); - }); - this.current_completion = Some(new_completion); - } - - cx.notify(); + }, + index_state.as_deref(), + ) + .context("Failed to 
select excerpt") + .map(|context| { + make_syntax_context_cloud_request( + excerpt_path.into(), + context, + // TODO pass everything + Vec::new(), + false, + Vec::new(), + false, + None, + debug_info, + &worktree_snapshots, + index_state.as_deref(), + Some(options.max_prompt_bytes), + options.prompt_format, + ) }) - .ok(); - }); - - // We always maintain at most two pending completions. When we already - // have two, we replace the newest one. - if self.pending_completions.len() <= 1 { - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - task, - }); - } else if self.pending_completions.len() == 2 { - if let Some(discarded) = self.pending_completions.pop() { - self.canceled_completions - .insert(discarded.id, discarded.task); - } - self.pending_completions.push(PendingCompletion { - id: pending_completion_id, - task, - }); - } + }) } - fn cycle( + pub fn wait_for_initial_indexing( &mut self, - _buffer: Entity, - _cursor_position: language::Anchor, - _direction: edit_prediction::Direction, - _cx: &mut Context, - ) { - // Right now we don't support cycling. + project: &Entity, + cx: &mut Context, + ) -> Task> { + let zeta_project = self.get_or_init_zeta_project(project, cx); + if let Some(syntax_index) = &zeta_project.syntax_index { + syntax_index.read(cx).wait_for_initial_file_indexing(cx) + } else { + Task::ready(Ok(())) + } } - fn accept(&mut self, cx: &mut Context) { - let completion = self.current_completion.as_mut(); - if let Some(completion) = completion { - completion.was_accepted = true; - self.zeta - .update(cx, |zeta, cx| { - zeta.accept_edit_prediction(completion.completion.id, cx) - }) - .detach(); + fn is_file_open_source( + &self, + project: &Entity, + file: &Arc, + cx: &App, + ) -> bool { + if !file.is_local() || file.is_private() { + return false; } - self.pending_completions.clear(); + let Some(zeta_project) = self.projects.get(&project.entity_id()) else { + return false; + }; + zeta_project + .license_detection_watchers + .get(&file.worktree_id(cx)) + .as_ref() + .is_some_and(|watcher| watcher.is_project_open_source()) } - fn discard(&mut self, cx: &mut Context) { - self.pending_completions.clear(); - self.take_current_edit_prediction(cx); + fn can_collect_file(&self, project: &Entity, file: &Arc, cx: &App) -> bool { + self.data_collection_choice.is_enabled() && self.is_file_open_source(project, file, cx) } - fn did_show(&mut self, _cx: &mut Context) { - if let Some(current_completion) = self.current_completion.as_mut() { - current_completion.was_shown = true; + fn can_collect_events(&self, events: &[Arc]) -> bool { + if !self.data_collection_choice.is_enabled() { + return false; } + events.iter().all(|event| { + matches!( + event.as_ref(), + Event::BufferChange { + in_open_source_repo: true, + .. + } + ) + }) } - fn suggest( - &mut self, - buffer: &Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) -> Option { - let CurrentEditPrediction { - buffer_id, - completion, - .. - } = self.current_completion.as_mut()?; - - // Invalidate previous completion if it was generated for a different buffer. 
- if *buffer_id != buffer.entity_id() { - self.take_current_edit_prediction(cx); - return None; - } - - let buffer = buffer.read(cx); - let Some(edits) = completion.interpolate(&buffer.snapshot()) else { - self.take_current_edit_prediction(cx); - return None; - }; - - let cursor_row = cursor_position.to_point(buffer).row; - let (closest_edit_ix, (closest_edit_range, _)) = - edits.iter().enumerate().min_by_key(|(_, (range, _))| { - let distance_from_start = cursor_row.abs_diff(range.start.to_point(buffer).row); - let distance_from_end = cursor_row.abs_diff(range.end.to_point(buffer).row); - cmp::min(distance_from_start, distance_from_end) - })?; + fn load_data_collection_choice() -> DataCollectionChoice { + let choice = KEY_VALUE_STORE + .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) + .log_err() + .flatten(); - let mut edit_start_ix = closest_edit_ix; - for (range, _) in edits[..edit_start_ix].iter().rev() { - let distance_from_closest_edit = - closest_edit_range.start.to_point(buffer).row - range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_start_ix -= 1; - } else { - break; + match choice.as_deref() { + Some("true") => DataCollectionChoice::Enabled, + Some("false") => DataCollectionChoice::Disabled, + Some(_) => { + log::error!("unknown value in '{ZED_PREDICT_DATA_COLLECTION_CHOICE}'"); + DataCollectionChoice::NotAnswered } + None => DataCollectionChoice::NotAnswered, } + } - let mut edit_end_ix = closest_edit_ix + 1; - for (range, _) in &edits[edit_end_ix..] { - let distance_from_closest_edit = - range.start.to_point(buffer).row - closest_edit_range.end.to_point(buffer).row; - if distance_from_closest_edit <= 1 { - edit_end_ix += 1; - } else { - break; - } - } + pub fn shown_predictions(&self) -> impl DoubleEndedIterator { + self.shown_predictions.iter() + } - Some(edit_prediction::EditPrediction::Local { - id: Some(completion.id.to_string().into()), - edits: edits[edit_start_ix..edit_end_ix].to_vec(), - edit_preview: Some(completion.edit_preview.clone()), - }) + pub fn shown_completions_len(&self) -> usize { + self.shown_predictions.len() } -} -/// Typical number of string bytes per token for the purposes of limiting model input. This is -/// intentionally low to err on the side of underestimating limits. 
-const BYTES_PER_TOKEN_GUESS: usize = 3; + pub fn is_prediction_rated(&self, id: &EditPredictionId) -> bool { + self.rated_predictions.contains(id) + } -fn guess_token_count(bytes: usize) -> usize { - bytes / BYTES_PER_TOKEN_GUESS + pub fn rate_prediction( + &mut self, + prediction: &EditPrediction, + rating: EditPredictionRating, + feedback: String, + cx: &mut Context, + ) { + self.rated_predictions.insert(prediction.id.clone()); + telemetry::event!( + "Edit Prediction Rated", + rating, + inputs = prediction.inputs, + output = prediction.edit_preview.as_unified_diff(&prediction.edits), + feedback + ); + self.client.telemetry().flush_events().detach(); + cx.notify(); + } } -#[cfg(test)] -mod tests { - use client::test::FakeServer; - use clock::{FakeSystemClock, ReplicaId}; - use cloud_api_types::{CreateLlmTokenResponse, LlmToken}; - use gpui::TestAppContext; - use http_client::FakeHttpClient; - use indoc::indoc; - use language::Point; - use parking_lot::Mutex; - use serde_json::json; - use settings::SettingsStore; - use util::{path, rel_path::rel_path}; - - use super::*; - - const BSD_0_TXT: &str = include_str!("../license_examples/0bsd.txt"); +pub fn text_from_response(mut res: open_ai::Response) -> Option { + let choice = res.choices.pop()?; + let output_text = match choice.message { + open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Plain(content)), + .. + } => content, + open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Multipart(mut content)), + .. + } => { + if content.is_empty() { + log::error!("No output from Baseten completion response"); + return None; + } - #[gpui::test] - async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) { - let buffer = cx.new(|cx| Buffer::local("Lorem ipsum dolor", cx)); - let edits: Arc<[(Range, Arc)]> = cx.update(|cx| { - to_completion_edits([(2..5, "REM".into()), (9..11, "".into())], &buffer, cx).into() - }); + match content.remove(0) { + open_ai::MessagePart::Text { text } => text, + open_ai::MessagePart::Image { .. } => { + log::error!("Expected text, got an image"); + return None; + } + } + } + _ => { + log::error!("Invalid response message: {:?}", choice.message); + return None; + } + }; + Some(output_text) +} - let edit_preview = cx - .read(|cx| buffer.read(cx).preview_edits(edits.clone(), cx)) - .await; +#[derive(Error, Debug)] +#[error( + "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
+)] +pub struct ZedUpdateRequiredError { + minimum_version: Version, +} - let completion = EditPrediction { - edits, - edit_preview, - path: Path::new("").into(), - snapshot: cx.read(|cx| buffer.read(cx).snapshot()), - id: EditPredictionId(Uuid::new_v4()), - excerpt_range: 0..0, - cursor_offset: 0, - input_outline: "".into(), - input_events: "".into(), - input_excerpt: "".into(), - output_excerpt: "".into(), - buffer_snapshotted_at: Instant::now(), - response_received_at: Instant::now(), +fn make_syntax_context_cloud_request( + excerpt_path: Arc, + context: EditPredictionContext, + events: Vec>, + can_collect_data: bool, + diagnostic_groups: Vec, + diagnostic_groups_truncated: bool, + git_info: Option, + debug_info: bool, + worktrees: &Vec, + index_state: Option<&SyntaxIndexState>, + prompt_max_bytes: Option, + prompt_format: PromptFormat, +) -> predict_edits_v3::PredictEditsRequest { + let mut signatures = Vec::new(); + let mut declaration_to_signature_index = HashMap::default(); + let mut referenced_declarations = Vec::new(); + + for snippet in context.declarations { + let project_entry_id = snippet.declaration.project_entry_id(); + let Some(path) = worktrees.iter().find_map(|worktree| { + worktree.entry_for_id(project_entry_id).map(|entry| { + let mut full_path = RelPathBuf::new(); + full_path.push(worktree.root_name()); + full_path.push(&entry.path); + full_path + }) + }) else { + continue; }; - cx.update(|cx| { - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..5, "REM".into()), (9..11, "".into())] - ); - - buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..2, "REM".into()), (6..8, "".into())] - ); - - buffer.update(cx, |buffer, cx| buffer.undo(cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(2..5, "REM".into()), (9..11, "".into())] - ); + let parent_index = index_state.and_then(|index_state| { + snippet.declaration.parent().and_then(|parent| { + add_signature( + parent, + &mut declaration_to_signature_index, + &mut signatures, + index_state, + ) + }) + }); - buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "R")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(3..3, "EM".into()), (7..9, "".into())] - ); + let (text, text_is_truncated) = snippet.declaration.item_text(); + referenced_declarations.push(predict_edits_v3::ReferencedDeclaration { + path: path.as_std_path().into(), + text: text.into(), + range: snippet.declaration.item_line_range(), + text_is_truncated, + signature_range: snippet.declaration.signature_range_in_item_text(), + parent_index, + signature_score: snippet.score(DeclarationStyle::Signature), + declaration_score: snippet.score(DeclarationStyle::Declaration), + score_components: snippet.components, + }); + } - buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "E")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into()), (8..10, "".into())] - ); + let excerpt_parent = index_state.and_then(|index_state| { + context + .excerpt + .parent_declarations + .last() + .and_then(|(parent, _)| { + add_signature( + *parent, + &mut 
declaration_to_signature_index, + &mut signatures, + index_state, + ) + }) + }); + + predict_edits_v3::PredictEditsRequest { + excerpt_path, + excerpt: context.excerpt_text.body, + excerpt_line_range: context.excerpt.line_range, + excerpt_range: context.excerpt.range, + cursor_point: predict_edits_v3::Point { + line: predict_edits_v3::Line(context.cursor_point.row), + column: context.cursor_point.column, + }, + referenced_declarations, + included_files: vec![], + signatures, + excerpt_parent, + events, + can_collect_data, + diagnostic_groups, + diagnostic_groups_truncated, + git_info, + debug_info, + prompt_max_bytes, + prompt_format, + } +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..4, "M")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(9..11, "".into())] - ); +fn add_signature( + declaration_id: DeclarationId, + declaration_to_signature_index: &mut HashMap, + signatures: &mut Vec, + index: &SyntaxIndexState, +) -> Option { + if let Some(signature_index) = declaration_to_signature_index.get(&declaration_id) { + return Some(*signature_index); + } + let Some(parent_declaration) = index.declaration(declaration_id) else { + log::error!("bug: missing parent declaration"); + return None; + }; + let parent_index = parent_declaration.parent().and_then(|parent| { + add_signature(parent, declaration_to_signature_index, signatures, index) + }); + let (text, text_is_truncated) = parent_declaration.signature_text(); + let signature_index = signatures.len(); + signatures.push(Signature { + text: text.into(), + text_is_truncated, + parent_index, + range: parent_declaration.signature_line_range(), + }); + declaration_to_signature_index.insert(declaration_id, signature_index); + Some(signature_index) +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into()), (8..10, "".into())] - ); +#[cfg(feature = "eval-support")] +pub type EvalCacheKey = (EvalCacheEntryKind, u64); - buffer.update(cx, |buffer, cx| buffer.edit([(8..10, "")], None, cx)); - assert_eq!( - from_completion_edits( - &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), - &buffer, - cx - ), - vec![(4..4, "M".into())] - ); +#[cfg(feature = "eval-support")] +#[derive(Debug, Clone, Copy, PartialEq)] +pub enum EvalCacheEntryKind { + Context, + Search, + Prediction, +} - buffer.update(cx, |buffer, cx| buffer.edit([(4..6, "")], None, cx)); - assert_eq!(completion.interpolate(&buffer.read(cx).snapshot()), None); - }) +#[cfg(feature = "eval-support")] +impl std::fmt::Display for EvalCacheEntryKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + EvalCacheEntryKind::Search => write!(f, "search"), + EvalCacheEntryKind::Context => write!(f, "context"), + EvalCacheEntryKind::Prediction => write!(f, "prediction"), + } } +} - #[gpui::test] - async fn test_clean_up_diff(cx: &mut TestAppContext) { - init_test(cx); - - assert_eq!( - apply_edit_prediction( - indoc! {" - fn main() { - let word_1 = \"lorem\"; - let range = word.len()..word.len(); - } - "}, - indoc! 
{" - <|editable_region_start|> - fn main() { - let word_1 = \"lorem\"; - let range = word_1.len()..word_1.len(); - } +#[cfg(feature = "eval-support")] +pub trait EvalCache: Send + Sync { + fn read(&self, key: EvalCacheKey) -> Option; + fn write(&self, key: EvalCacheKey, input: &str, value: &str); +} - <|editable_region_end|> - "}, - cx, - ) - .await, - indoc! {" - fn main() { - let word_1 = \"lorem\"; - let range = word_1.len()..word_1.len(); - } - "}, - ); +#[derive(Debug, Clone, Copy)] +pub enum DataCollectionChoice { + NotAnswered, + Enabled, + Disabled, +} - assert_eq!( - apply_edit_prediction( - indoc! {" - fn main() { - let story = \"the quick\" - } - "}, - indoc! {" - <|editable_region_start|> - fn main() { - let story = \"the quick brown fox jumps over the lazy dog\"; - } +impl DataCollectionChoice { + pub fn is_enabled(self) -> bool { + match self { + Self::Enabled => true, + Self::NotAnswered | Self::Disabled => false, + } + } - <|editable_region_end|> - "}, - cx, - ) - .await, - indoc! {" - fn main() { - let story = \"the quick brown fox jumps over the lazy dog\"; - } - "}, - ); + pub fn is_answered(self) -> bool { + match self { + Self::Enabled | Self::Disabled => true, + Self::NotAnswered => false, + } } - #[gpui::test] - async fn test_edit_prediction_end_of_buffer(cx: &mut TestAppContext) { - init_test(cx); - - let buffer_content = "lorem\n"; - let completion_response = indoc! {" - ```animals.js - <|start_of_file|> - <|editable_region_start|> - lorem - ipsum - <|editable_region_end|> - ```"}; + #[must_use] + pub fn toggle(&self) -> DataCollectionChoice { + match self { + Self::Enabled => Self::Disabled, + Self::Disabled => Self::Enabled, + Self::NotAnswered => Self::Enabled, + } + } +} - assert_eq!( - apply_edit_prediction(buffer_content, completion_response, cx).await, - "lorem\nipsum" - ); +impl From for DataCollectionChoice { + fn from(value: bool) -> Self { + match value { + true => DataCollectionChoice::Enabled, + false => DataCollectionChoice::Disabled, + } } +} - #[gpui::test] - async fn test_can_collect_data(cx: &mut TestAppContext) { - init_test(cx); +struct ZedPredictUpsell; - let fs = project::FakeFs::new(cx.executor()); - fs.insert_tree(path!("/project"), json!({ "LICENSE": BSD_0_TXT })) - .await; +impl Dismissable for ZedPredictUpsell { + const KEY: &'static str = "dismissed-edit-predict-upsell"; - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project - .update(cx, |project, cx| { - project.open_local_buffer(path!("/project/src/main.rs"), cx) - }) - .await - .unwrap(); + fn dismissed() -> bool { + // To make this backwards compatible with older versions of Zed, we + // check if the user has seen the previous Edit Prediction Onboarding + // before, by checking the data collection choice which was written to + // the database once the user clicked on "Accept and Enable" + if KEY_VALUE_STORE + .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE) + .log_err() + .is_some_and(|s| s.is_some()) + { + return true; + } - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled - }); + KEY_VALUE_STORE + .read_kvp(Self::KEY) + .log_err() + .is_some_and(|s| s.is_some()) + } +} - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); +pub fn should_show_upsell_modal() -> bool { + !ZedPredictUpsell::dismissed() +} + +pub fn init(cx: &mut App) 
{ + feature_gate_predict_edits_actions(cx); - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Disabled + cx.observe_new(move |workspace: &mut Workspace, _, _cx| { + workspace.register_action(|workspace, _: &RateCompletions, window, cx| { + if cx.has_flag::() { + RatePredictionsModal::toggle(workspace, window, cx); + } }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + workspace.register_action( + move |workspace, _: &zed_actions::OpenZedPredictOnboarding, window, cx| { + ZedPredictModal::toggle( + workspace, + workspace.user_store().clone(), + workspace.client().clone(), + window, + cx, + ) + }, ); - } - #[gpui::test] - async fn test_no_data_collection_for_remote_file(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [], cx).await; - - let buffer = cx.new(|_cx| { - Buffer::remote( - language::BufferId::new(1).unwrap(), - ReplicaId::new(1), - language::Capability::ReadWrite, - "fn main() {\n println!(\"Hello\");\n}", - ) + workspace.register_action(|workspace, _: &ResetOnboarding, _window, cx| { + update_settings_file(workspace.app_state().fs.clone(), cx, move |settings, _| { + settings + .project + .all_languages + .features + .get_or_insert_default() + .edit_prediction_provider = Some(EditPredictionProvider::None) + }); }); + }) + .detach(); +} - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled +fn feature_gate_predict_edits_actions(cx: &mut App) { + let rate_completion_action_types = [TypeId::of::()]; + let reset_onboarding_action_types = [TypeId::of::()]; + let zeta_all_action_types = [ + TypeId::of::(), + TypeId::of::(), + zed_actions::OpenZedPredictOnboarding.type_id(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + TypeId::of::(), + ]; + + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.hide_action_types(&rate_completion_action_types); + filter.hide_action_types(&reset_onboarding_action_types); + filter.hide_action_types(&[zed_actions::OpenZedPredictOnboarding.type_id()]); + }); + + cx.observe_global::(move |cx| { + let is_ai_disabled = DisableAiSettings::get_global(cx).disable_ai; + let has_feature_flag = cx.has_flag::(); + + CommandPaletteFilter::update_global(cx, |filter, _cx| { + if is_ai_disabled { + filter.hide_action_types(&zeta_all_action_types); + } else if has_feature_flag { + filter.show_action_types(&rate_completion_action_types); + } else { + filter.hide_action_types(&rate_completion_action_types); + } }); + }) + .detach(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); - } + cx.observe_flag::(move |is_enabled, cx| { + if !DisableAiSettings::get_global(cx).disable_ai { + if is_enabled { + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.show_action_types(&rate_completion_action_types); + }); + } else { + CommandPaletteFilter::update_global(cx, |filter, _cx| { + filter.hide_action_types(&rate_completion_action_types); + }); + } + } + }) + .detach(); +} - #[gpui::test] - async fn test_no_data_collection_for_private_file(cx: &mut TestAppContext) { - init_test(cx); +#[cfg(test)] +mod tests { + use std::{path::Path, sync::Arc}; + + use client::UserStore; + use clock::FakeSystemClock; + use 
cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; + use futures::{ + AsyncReadExt, StreamExt, + channel::{mpsc, oneshot}, + }; + use gpui::{ + Entity, TestAppContext, + http_client::{FakeHttpClient, Response}, + prelude::*, + }; + use indoc::indoc; + use language::OffsetRangeExt as _; + use open_ai::Usage; + use pretty_assertions::{assert_eq, assert_matches}; + use project::{FakeFs, Project}; + use serde_json::json; + use settings::SettingsStore; + use util::path; + use uuid::Uuid; - let fs = project::FakeFs::new(cx.executor()); + use crate::{BufferEditPrediction, Zeta}; + + #[gpui::test] + async fn test_current_state(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/project"), + "/root", json!({ - "LICENSE": BSD_0_TXT, - ".env": "SECRET_KEY=secret" + "1.txt": "Hello!\nHow\nBye\n", + "2.txt": "Hola!\nComo\nAdios\n" }), ) .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project + zeta.update(cx, |zeta, cx| { + zeta.register_project(&project, cx); + }); + + let buffer1 = project .update(cx, |project, cx| { - project.open_local_buffer("/project/.env", cx) + let path = project.find_project_path(path!("root/1.txt"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); + let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot1.anchor_before(language::Point::new(1, 3)); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled - }); - - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); - } + // Prediction for current file - #[gpui::test] - async fn test_no_data_collection_for_untitled_buffer(cx: &mut TestAppContext) { - init_test(cx); + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + + respond_tx + .send(model_response(indoc! {r" + --- a/root/1.txt + +++ b/root/1.txt + @@ ... @@ + Hello! + -How + +How are you? + Bye + "})) + .unwrap(); - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [], cx).await; - let buffer = cx.new(|cx| Buffer::local("", cx)); + cx.run_until_parked(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer1, &project, cx) + .unwrap(); + assert_matches!(prediction, BufferEditPrediction::Local { .. 
}); }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); - } + // Context refresh + let refresh_task = zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + respond_tx + .send(open_ai::Response { + id: Uuid::new_v4().to_string(), + object: "response".into(), + created: 0, + model: "model".into(), + choices: vec![open_ai::Choice { + index: 0, + message: open_ai::RequestMessage::Assistant { + content: None, + tool_calls: vec![open_ai::ToolCall { + id: "search".into(), + content: open_ai::ToolCallContent::Function { + function: open_ai::FunctionContent { + name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME + .to_string(), + arguments: serde_json::to_string(&SearchToolInput { + queries: Box::new([SearchToolQuery { + glob: "root/2.txt".to_string(), + syntax_node: vec![], + content: Some(".".into()), + }]), + }) + .unwrap(), + }, + }, + }], + }, + finish_reason: None, + }], + usage: Usage { + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0, + }, + }) + .unwrap(); + refresh_task.await.unwrap(); - #[gpui::test] - async fn test_no_data_collection_when_closed_source(cx: &mut TestAppContext) { - init_test(cx); + zeta.update(cx, |zeta, cx| { + zeta.discard_current_prediction(&project, cx); + }); - let fs = project::FakeFs::new(cx.executor()); - fs.insert_tree(path!("/project"), json!({ "main.rs": "fn main() {}" })) - .await; + // Prediction for another file + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) + }); + let (_request, respond_tx) = req_rx.next().await.unwrap(); + respond_tx + .send(model_response(indoc! {r#" + --- a/root/2.txt + +++ b/root/2.txt + Hola! + -Como + +Como estas? + Adios + "#})) + .unwrap(); + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer1, &project, cx) + .unwrap(); + assert_matches!( + prediction, + BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt")) + ); + }); - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = project + let buffer2 = project .update(cx, |project, cx| { - project.open_local_buffer("/project/main.rs", cx) + let path = project.find_project_path(path!("root/2.txt"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.read_with(cx, |zeta, cx| { + let prediction = zeta + .current_prediction_for_buffer(&buffer2, &project, cx) + .unwrap(); + assert_matches!(prediction, BufferEditPrediction::Local { .. 
}); }); - - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); } #[gpui::test] - async fn test_data_collection_status_changes_on_move(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); + async fn test_simple_request(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/open_source_worktree"), - json!({ "LICENSE": BSD_0_TXT, "main.rs": "" }), + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), ) .await; - fs.insert_tree(path!("/closed_source_worktree"), json!({ "main.rs": "" })) - .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test( - fs.clone(), - [ - path!("/open_source_worktree").as_ref(), - path!("/closed_source_worktree").as_ref(), - ], - cx, - ) - .await; let buffer = project .update(cx, |project, cx| { - project.open_local_buffer(path!("/open_source_worktree/main.rs"), cx) + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, &buffer, position, cx) }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); - - let closed_source_file = project - .update(cx, |project, cx| { - let worktree2 = project - .worktree_for_root_name("closed_source_worktree", cx) - .unwrap(); - worktree2.update(cx, |worktree2, cx| { - worktree2.load_file(rel_path("main.rs"), cx) - }) - }) - .await - .unwrap() - .file; + let (_, respond_tx) = req_rx.next().await.unwrap(); + + // TODO Put back when we have a structured request again + // assert_eq!( + // request.excerpt_path.as_ref(), + // Path::new(path!("root/foo.md")) + // ); + // assert_eq!( + // request.cursor_point, + // Point { + // line: Line(1), + // column: 3 + // } + // ); + + respond_tx + .send(model_response(indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? 
+ Bye + "})) + .unwrap(); - buffer.update(cx, |buffer, cx| { - buffer.file_updated(closed_source_file, cx); - }); + let prediction = prediction_task.await.unwrap().unwrap(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!(prediction.edits.len(), 1); assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + prediction.edits[0].0.to_point(&snapshot).start, + language::Point::new(1, 3) ); + assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); } #[gpui::test] - async fn test_no_data_collection_for_events_in_uncollectable_buffers(cx: &mut TestAppContext) { - init_test(cx); - - let fs = project::FakeFs::new(cx.executor()); + async fn test_request_events(cx: &mut TestAppContext) { + let (zeta, mut req_rx) = init_test(cx); + let fs = FakeFs::new(cx.executor()); fs.insert_tree( - path!("/worktree1"), - json!({ "LICENSE": BSD_0_TXT, "main.rs": "", "other.rs": "" }), + "/root", + json!({ + "foo.md": "Hello!\n\nBye\n" + }), ) .await; - fs.insert_tree(path!("/worktree2"), json!({ "private.rs": "" })) - .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - let project = Project::test( - fs.clone(), - [path!("/worktree1").as_ref(), path!("/worktree2").as_ref()], - cx, - ) - .await; let buffer = project .update(cx, |project, cx| { - project.open_local_buffer(path!("/worktree1/main.rs"), cx) - }) - .await - .unwrap(); - let private_buffer = project - .update(cx, |project, cx| { - project.open_local_buffer(path!("/worktree2/file.rs"), cx) + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) }) .await .unwrap(); - let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; - zeta.update(cx, |zeta, _cx| { - zeta.data_collection_choice = DataCollectionChoice::Enabled + zeta.update(cx, |zeta, cx| { + zeta.register_buffer(&buffer, &project, cx); }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true - ); + buffer.update(cx, |buffer, cx| { + buffer.edit(vec![(7..7, "How")], None, cx); + }); - // this has a side effect of registering the buffer to watch for edits - run_edit_prediction(&private_buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false - ); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); - private_buffer.update(cx, |private_buffer, cx| { - private_buffer.edit([(0..0, "An edit for the history!")], None, cx); + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, &buffer, position, cx) }); - run_edit_prediction(&buffer, &project, &zeta, cx).await; - assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - false + let (request, respond_tx) = req_rx.next().await.unwrap(); + + let prompt = prompt_from_request(&request); + assert!( + prompt.contains(indoc! {" + --- a/root/foo.md + +++ b/root/foo.md + @@ -1,3 +1,3 @@ + Hello! + - + +How + Bye + "}), + "{prompt}" ); - // make an edit that uses too many bytes, causing private_buffer edit to not be able to be - // included - buffer.update(cx, |buffer, cx| { - buffer.edit( - [(0..0, " ".repeat(MAX_EVENT_TOKENS * BYTES_PER_TOKEN_GUESS))], - None, - cx, - ); - }); + respond_tx + .send(model_response(indoc! {r#" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? 
+ Bye + "#})) + .unwrap(); + + let prediction = prediction_task.await.unwrap().unwrap(); - run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!(prediction.edits.len(), 1); assert_eq!( - captured_request.lock().clone().unwrap().can_collect_data, - true + prediction.edits[0].0.to_point(&snapshot).start, + language::Point::new(1, 3) ); + assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); + } + + // Skipped until we start including diagnostics in prompt + // #[gpui::test] + // async fn test_request_diagnostics(cx: &mut TestAppContext) { + // let (zeta, mut req_rx) = init_test(cx); + // let fs = FakeFs::new(cx.executor()); + // fs.insert_tree( + // "/root", + // json!({ + // "foo.md": "Hello!\nBye" + // }), + // ) + // .await; + // let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + // let path_to_buffer_uri = lsp::Uri::from_file_path(path!("/root/foo.md")).unwrap(); + // let diagnostic = lsp::Diagnostic { + // range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)), + // severity: Some(lsp::DiagnosticSeverity::ERROR), + // message: "\"Hello\" deprecated. Use \"Hi\" instead".to_string(), + // ..Default::default() + // }; + + // project.update(cx, |project, cx| { + // project.lsp_store().update(cx, |lsp_store, cx| { + // // Create some diagnostics + // lsp_store + // .update_diagnostics( + // LanguageServerId(0), + // lsp::PublishDiagnosticsParams { + // uri: path_to_buffer_uri.clone(), + // diagnostics: vec![diagnostic], + // version: None, + // }, + // None, + // language::DiagnosticSourceKind::Pushed, + // &[], + // cx, + // ) + // .unwrap(); + // }); + // }); + + // let buffer = project + // .update(cx, |project, cx| { + // let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + // project.open_buffer(path, cx) + // }) + // .await + // .unwrap(); + + // let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + // let position = snapshot.anchor_before(language::Point::new(0, 0)); + + // let _prediction_task = zeta.update(cx, |zeta, cx| { + // zeta.request_prediction(&project, &buffer, position, cx) + // }); + + // let (request, _respond_tx) = req_rx.next().await.unwrap(); + + // assert_eq!(request.diagnostic_groups.len(), 1); + // let value = serde_json::from_str::(request.diagnostic_groups[0].0.get()) + // .unwrap(); + // // We probably don't need all of this. TODO define a specific diagnostic type in predict_edits_v3 + // assert_eq!( + // value, + // json!({ + // "entries": [{ + // "range": { + // "start": 8, + // "end": 10 + // }, + // "diagnostic": { + // "source": null, + // "code": null, + // "code_description": null, + // "severity": 1, + // "message": "\"Hello\" deprecated. 
Use \"Hi\" instead", + // "markdown": null, + // "group_id": 0, + // "is_primary": true, + // "is_disk_based": false, + // "is_unnecessary": false, + // "source_kind": "Pushed", + // "data": null, + // "underline": true + // } + // }], + // "primary_ix": 0 + // }) + // ); + // } + + fn model_response(text: &str) -> open_ai::Response { + open_ai::Response { + id: Uuid::new_v4().to_string(), + object: "response".into(), + created: 0, + model: "model".into(), + choices: vec![open_ai::Choice { + index: 0, + message: open_ai::RequestMessage::Assistant { + content: Some(open_ai::MessageContent::Plain(text.to_string())), + tool_calls: vec![], + }, + finish_reason: None, + }], + usage: Usage { + prompt_tokens: 0, + completion_tokens: 0, + total_tokens: 0, + }, + } } - fn init_test(cx: &mut TestAppContext) { - cx.update(|cx| { - let settings_store = SettingsStore::test(cx); - cx.set_global(settings_store); - }); - } - - async fn apply_edit_prediction( - buffer_content: &str, - completion_response: &str, - cx: &mut TestAppContext, - ) -> String { - let fs = project::FakeFs::new(cx.executor()); - let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; - let buffer = cx.new(|cx| Buffer::local(buffer_content, cx)); - let (zeta, _, response) = make_test_zeta(&project, cx).await; - *response.lock() = completion_response.to_string(); - let edit_prediction = run_edit_prediction(&buffer, &project, &zeta, cx).await; - buffer.update(cx, |buffer, cx| { - buffer.edit(edit_prediction.edits.iter().cloned(), None, cx) - }); - buffer.read_with(cx, |buffer, _| buffer.text()) - } - - async fn run_edit_prediction( - buffer: &Entity, - project: &Entity, - zeta: &Entity, - cx: &mut TestAppContext, - ) -> EditPrediction { - let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 0))); - zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); - cx.background_executor.run_until_parked(); - let completion_task = zeta.update(cx, |zeta, cx| { - zeta.request_completion(&project, buffer, cursor, cx) - }); - completion_task.await.unwrap().unwrap() + fn prompt_from_request(request: &open_ai::Request) -> &str { + assert_eq!(request.messages.len(), 1); + let open_ai::RequestMessage::User { + content: open_ai::MessageContent::Plain(content), + .. + } = &request.messages[0] + else { + panic!( + "Request does not have single user message of type Plain. {:#?}", + request + ); + }; + content } - async fn make_test_zeta( - project: &Entity, + fn init_test( cx: &mut TestAppContext, ) -> ( Entity, - Arc>>, - Arc>, + mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, ) { - let default_response = indoc! 
{" - ```main.rs - <|start_of_file|> - <|editable_region_start|> - hello world - <|editable_region_end|> - ```" - }; - let captured_request: Arc>> = Arc::new(Mutex::new(None)); - let completion_response: Arc> = - Arc::new(Mutex::new(default_response.to_string())); - let http_client = FakeHttpClient::create({ - let captured_request = captured_request.clone(); - let completion_response = completion_response.clone(); - move |req| { - let captured_request = captured_request.clone(); - let completion_response = completion_response.clone(); - async move { - match (req.method(), req.uri().path()) { - (&Method::POST, "/client/llm_tokens") => { - Ok(http_client::Response::builder() - .status(200) - .body( - serde_json::to_string(&CreateLlmTokenResponse { - token: LlmToken("the-llm-token".to_string()), - }) - .unwrap() - .into(), - ) - .unwrap()) - } - (&Method::POST, "/predict_edits/v2") => { - let mut request_body = String::new(); - req.into_body().read_to_string(&mut request_body).await?; - *captured_request.lock() = - Some(serde_json::from_str(&request_body).unwrap()); - Ok(http_client::Response::builder() - .status(200) - .body( - serde_json::to_string(&PredictEditsResponse { - request_id: Uuid::new_v4().to_string(), - output_excerpt: completion_response.lock().clone(), - }) - .unwrap() - .into(), - ) - .unwrap()) - } - _ => Ok(http_client::Response::builder() - .status(404) - .body("Not Found".into()) - .unwrap()), + cx.update(move |cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + zlog::init_test(); + + let (req_tx, req_rx) = mpsc::unbounded(); + + let http_client = FakeHttpClient::create({ + move |req| { + let uri = req.uri().path().to_string(); + let mut body = req.into_body(); + let req_tx = req_tx.clone(); + async move { + let resp = match uri.as_str() { + "/client/llm_tokens" => serde_json::to_string(&json!({ + "token": "test" + })) + .unwrap(), + "/predict_edits/raw" => { + let mut buf = Vec::new(); + body.read_to_end(&mut buf).await.ok(); + let req = serde_json::from_slice(&buf).unwrap(); + + let (res_tx, res_rx) = oneshot::channel(); + req_tx.unbounded_send((req, res_tx)).unwrap(); + serde_json::to_string(&res_rx.await?).unwrap() + } + _ => { + panic!("Unexpected path: {}", uri) + } + }; + + Ok(Response::builder().body(resp.into()).unwrap()) } } - } - }); - - let client = cx.update(|cx| Client::new(Arc::new(FakeSystemClock::new()), http_client, cx)); - cx.update(|cx| { - RefreshLlmTokenListener::register(client.clone(), cx); - }); - let _server = FakeServer::for_client(42, &client, cx).await; - - let zeta = cx.new(|cx| { - let mut zeta = Zeta::new(client, project.read(cx).user_store(), cx); - - let worktrees = project.read(cx).worktrees(cx).collect::>(); - for worktree in worktrees { - let worktree_id = worktree.read(cx).id(); - zeta.license_detection_watchers - .entry(worktree_id) - .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); - } - - zeta - }); + }); - (zeta, captured_request, completion_response) - } + let client = client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx); + client.cloud_client().set_credentials(1, "test".into()); - fn to_completion_edits( - iterator: impl IntoIterator, Arc)>, - buffer: &Entity, - cx: &App, - ) -> Vec<(Range, Arc)> { - let buffer = buffer.read(cx); - iterator - .into_iter() - .map(|(range, text)| { - ( - buffer.anchor_after(range.start)..buffer.anchor_before(range.end), - text, - ) - }) - .collect() - } + language_model::init(client.clone(), cx); - fn 
from_completion_edits( - editor_edits: &[(Range, Arc)], - buffer: &Entity, - cx: &App, - ) -> Vec<(Range, Arc)> { - let buffer = buffer.read(cx); - editor_edits - .iter() - .map(|(range, text)| { - ( - range.start.to_offset(buffer)..range.end.to_offset(buffer), - text.clone(), - ) - }) - .collect() - } + let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); + let zeta = Zeta::global(&client, &user_store, cx); - #[ctor::ctor] - fn init_logger() { - zlog::init_test(); + (zeta, req_rx) + }) } } diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs new file mode 100644 index 0000000000000000000000000000000000000000..5a779cabeceac0bcb58340f7bbb98175409916e8 --- /dev/null +++ b/crates/zeta/src/zeta1.rs @@ -0,0 +1,500 @@ +mod input_excerpt; + +use std::{fmt::Write, ops::Range, path::Path, sync::Arc, time::Instant}; + +use crate::{ + EditPredictionId, ZedUpdateRequiredError, Zeta, + prediction::{EditPrediction, EditPredictionInputs}, +}; +use anyhow::{Context as _, Result}; +use cloud_llm_client::{ + PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, predict_edits_v3::Event, +}; +use gpui::{App, AppContext as _, AsyncApp, Context, Entity, SharedString, Task}; +use input_excerpt::excerpt_for_cursor_position; +use language::{ + Anchor, Buffer, BufferSnapshot, OffsetRangeExt as _, Point, ToPoint as _, text_diff, +}; +use project::{Project, ProjectPath}; +use release_channel::AppVersion; +use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; + +const CURSOR_MARKER: &str = "<|user_cursor_is_here|>"; +const START_OF_FILE_MARKER: &str = "<|start_of_file|>"; +const EDITABLE_REGION_START_MARKER: &str = "<|editable_region_start|>"; +const EDITABLE_REGION_END_MARKER: &str = "<|editable_region_end|>"; + +pub(crate) const MAX_CONTEXT_TOKENS: usize = 150; +pub(crate) const MAX_REWRITE_TOKENS: usize = 350; +pub(crate) const MAX_EVENT_TOKENS: usize = 500; + +pub(crate) fn request_prediction_with_zeta1( + zeta: &mut Zeta, + project: &Entity, + buffer: &Entity, + position: language::Anchor, + cx: &mut Context, +) -> Task>> { + let buffer = buffer.clone(); + let buffer_snapshotted_at = Instant::now(); + let snapshot = buffer.read(cx).snapshot(); + let client = zeta.client.clone(); + let llm_token = zeta.llm_token.clone(); + let app_version = AppVersion::global(cx); + + let zeta_project = zeta.get_or_init_zeta_project(project, cx); + let events = Arc::new(zeta_project.events(cx)); + + let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { + let can_collect_file = zeta.can_collect_file(project, file, cx); + let git_info = if can_collect_file { + git_info_for_file(project, &ProjectPath::from_file(file.as_ref(), cx), cx) + } else { + None + }; + (git_info, can_collect_file) + } else { + (None, false) + }; + + let full_path: Arc = snapshot + .file() + .map(|f| Arc::from(f.full_path(cx).as_path())) + .unwrap_or_else(|| Arc::from(Path::new("untitled"))); + let full_path_str = full_path.to_string_lossy().into_owned(); + let cursor_point = position.to_point(&snapshot); + let prompt_for_events = { + let events = events.clone(); + move || prompt_for_events_impl(&events, MAX_EVENT_TOKENS) + }; + let gather_task = gather_context( + full_path_str, + &snapshot, + cursor_point, + prompt_for_events, + cx, + ); + + cx.spawn(async move |this, cx| { + let GatherContextOutput { + mut body, + context_range, + editable_range, + included_events_count, + } = gather_task.await?; + let done_gathering_context_at = Instant::now(); + + let included_events = 
&events[events.len() - included_events_count..events.len()]; + body.can_collect_data = can_collect_file + && this + .read_with(cx, |this, _| this.can_collect_events(included_events)) + .unwrap_or(false); + if body.can_collect_data { + body.git_info = git_info; + } + + log::debug!( + "Events:\n{}\nExcerpt:\n{:?}", + body.input_events, + body.input_excerpt + ); + + let http_client = client.http_client(); + + let response = Zeta::send_api_request::( + |request| { + let uri = if let Ok(predict_edits_url) = std::env::var("ZED_PREDICT_EDITS_URL") { + predict_edits_url + } else { + http_client + .build_zed_llm_url("/predict_edits/v2", &[])? + .as_str() + .into() + }; + Ok(request + .uri(uri) + .body(serde_json::to_string(&body)?.into())?) + }, + client, + llm_token, + app_version, + ) + .await; + + let inputs = EditPredictionInputs { + events: included_events.into(), + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: cloud_llm_client::predict_edits_v3::Line(context_range.start.row), + text: snapshot + .text_for_range(context_range) + .collect::() + .into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path, + }; + + // let response = perform_predict_edits(PerformPredictEditsParams { + // client, + // llm_token, + // app_version, + // body, + // }) + // .await; + + let (response, usage) = match response { + Ok(response) => response, + Err(err) => { + if err.is::() { + cx.update(|cx| { + this.update(cx, |zeta, _cx| { + zeta.update_required = true; + }) + .ok(); + + let error_message: SharedString = err.to_string().into(); + show_app_notification( + NotificationId::unique::(), + cx, + move |cx| { + cx.new(|cx| { + ErrorMessagePrompt::new(error_message.clone(), cx) + .with_link_button("Update Zed", "https://zed.dev/releases") + }) + }, + ); + }) + .ok(); + } + + return Err(err); + } + }; + + let received_response_at = Instant::now(); + log::debug!("completion response: {}", &response.output_excerpt); + + if let Some(usage) = usage { + this.update(cx, |this, cx| { + this.user_store.update(cx, |user_store, cx| { + user_store.update_edit_prediction_usage(usage, cx); + }); + }) + .ok(); + } + + let edit_prediction = process_completion_response( + response, + buffer, + &snapshot, + editable_range, + inputs, + buffer_snapshotted_at, + received_response_at, + cx, + ) + .await; + + let finished_at = Instant::now(); + + // record latency for ~1% of requests + if rand::random::() <= 2 { + telemetry::event!( + "Edit Prediction Request", + context_latency = done_gathering_context_at + .duration_since(buffer_snapshotted_at) + .as_millis(), + request_latency = received_response_at + .duration_since(done_gathering_context_at) + .as_millis(), + process_latency = finished_at.duration_since(received_response_at).as_millis() + ); + } + + edit_prediction + }) +} + +fn process_completion_response( + prediction_response: PredictEditsResponse, + buffer: Entity, + snapshot: &BufferSnapshot, + editable_range: Range, + inputs: EditPredictionInputs, + buffer_snapshotted_at: Instant, + received_response_at: Instant, + cx: &AsyncApp, +) -> Task>> { + let snapshot = snapshot.clone(); + let request_id = prediction_response.request_id; + let output_excerpt = 
prediction_response.output_excerpt; + cx.spawn(async move |cx| { + let output_excerpt: Arc = output_excerpt.into(); + + let edits: Arc<[(Range, Arc)]> = cx + .background_spawn({ + let output_excerpt = output_excerpt.clone(); + let editable_range = editable_range.clone(); + let snapshot = snapshot.clone(); + async move { parse_edits(output_excerpt, editable_range, &snapshot) } + }) + .await? + .into(); + + Ok(EditPrediction::new( + EditPredictionId(request_id.into()), + &buffer, + &snapshot, + edits, + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await) + }) +} + +fn parse_edits( + output_excerpt: Arc, + editable_range: Range, + snapshot: &BufferSnapshot, +) -> Result, Arc)>> { + let content = output_excerpt.replace(CURSOR_MARKER, ""); + + let start_markers = content + .match_indices(EDITABLE_REGION_START_MARKER) + .collect::>(); + anyhow::ensure!( + start_markers.len() == 1, + "expected exactly one start marker, found {}", + start_markers.len() + ); + + let end_markers = content + .match_indices(EDITABLE_REGION_END_MARKER) + .collect::>(); + anyhow::ensure!( + end_markers.len() == 1, + "expected exactly one end marker, found {}", + end_markers.len() + ); + + let sof_markers = content + .match_indices(START_OF_FILE_MARKER) + .collect::>(); + anyhow::ensure!( + sof_markers.len() <= 1, + "expected at most one start-of-file marker, found {}", + sof_markers.len() + ); + + let codefence_start = start_markers[0].0; + let content = &content[codefence_start..]; + + let newline_ix = content.find('\n').context("could not find newline")?; + let content = &content[newline_ix + 1..]; + + let codefence_end = content + .rfind(&format!("\n{EDITABLE_REGION_END_MARKER}")) + .context("could not find end marker")?; + let new_text = &content[..codefence_end]; + + let old_text = snapshot + .text_for_range(editable_range.clone()) + .collect::(); + + Ok(compute_edits( + old_text, + new_text, + editable_range.start, + snapshot, + )) +} + +pub fn compute_edits( + old_text: String, + new_text: &str, + offset: usize, + snapshot: &BufferSnapshot, +) -> Vec<(Range, Arc)> { + text_diff(&old_text, new_text) + .into_iter() + .map(|(mut old_range, new_text)| { + old_range.start += offset; + old_range.end += offset; + + let prefix_len = common_prefix( + snapshot.chars_for_range(old_range.clone()), + new_text.chars(), + ); + old_range.start += prefix_len; + + let suffix_len = common_prefix( + snapshot.reversed_chars_for_range(old_range.clone()), + new_text[prefix_len..].chars().rev(), + ); + old_range.end = old_range.end.saturating_sub(suffix_len); + + let new_text = new_text[prefix_len..new_text.len() - suffix_len].into(); + let range = if old_range.is_empty() { + let anchor = snapshot.anchor_after(old_range.start); + anchor..anchor + } else { + snapshot.anchor_after(old_range.start)..snapshot.anchor_before(old_range.end) + }; + (range, new_text) + }) + .collect() +} + +fn common_prefix, T2: Iterator>(a: T1, b: T2) -> usize { + a.zip(b) + .take_while(|(a, b)| a == b) + .map(|(a, _)| a.len_utf8()) + .sum() +} + +fn git_info_for_file( + project: &Entity, + project_path: &ProjectPath, + cx: &App, +) -> Option { + let git_store = project.read(cx).git_store().read(cx); + if let Some((repository, _repo_path)) = + git_store.repository_and_path_for_project_path(project_path, cx) + { + let repository = repository.read(cx); + let head_sha = repository + .head_commit + .as_ref() + .map(|head_commit| head_commit.sha.to_string()); + let remote_origin_url = repository.remote_origin_url.clone(); + let 
remote_upstream_url = repository.remote_upstream_url.clone();
+        if head_sha.is_none() && remote_origin_url.is_none() && remote_upstream_url.is_none() {
+            return None;
+        }
+        Some(PredictEditsGitInfo {
+            head_sha,
+            remote_origin_url,
+            remote_upstream_url,
+        })
+    } else {
+        None
+    }
+}
+
+pub struct GatherContextOutput {
+    pub body: PredictEditsBody,
+    pub context_range: Range<Point>,
+    pub editable_range: Range<usize>,
+    pub included_events_count: usize,
+}
+
+pub fn gather_context(
+    full_path_str: String,
+    snapshot: &BufferSnapshot,
+    cursor_point: language::Point,
+    prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static,
+    cx: &App,
+) -> Task<Result<GatherContextOutput>> {
+    cx.background_spawn({
+        let snapshot = snapshot.clone();
+        async move {
+            let input_excerpt = excerpt_for_cursor_position(
+                cursor_point,
+                &full_path_str,
+                &snapshot,
+                MAX_REWRITE_TOKENS,
+                MAX_CONTEXT_TOKENS,
+            );
+            let (input_events, included_events_count) = prompt_for_events();
+            let editable_range = input_excerpt.editable_range.to_offset(&snapshot);
+
+            let body = PredictEditsBody {
+                input_events,
+                input_excerpt: input_excerpt.prompt,
+                can_collect_data: false,
+                diagnostic_groups: None,
+                git_info: None,
+                outline: None,
+                speculated_output: None,
+            };
+
+            Ok(GatherContextOutput {
+                body,
+                context_range: input_excerpt.context_range,
+                editable_range,
+                included_events_count,
+            })
+        }
+    })
+}
+
+fn prompt_for_events_impl(events: &[Arc<Event>], mut remaining_tokens: usize) -> (String, usize) {
+    let mut result = String::new();
+    for (ix, event) in events.iter().rev().enumerate() {
+        let event_string = format_event(event.as_ref());
+        let event_tokens = guess_token_count(event_string.len());
+        if event_tokens > remaining_tokens {
+            return (result, ix);
+        }
+
+        if !result.is_empty() {
+            result.insert_str(0, "\n\n");
+        }
+        result.insert_str(0, &event_string);
+        remaining_tokens -= event_tokens;
+    }
+    return (result, events.len());
+}
+
+pub fn format_event(event: &Event) -> String {
+    match event {
+        Event::BufferChange {
+            path,
+            old_path,
+            diff,
+            ..
+        } => {
+            let mut prompt = String::new();
+
+            if old_path != path {
+                writeln!(
+                    prompt,
+                    "User renamed {} to {}\n",
+                    old_path.display(),
+                    path.display()
+                )
+                .unwrap();
+            }
+
+            if !diff.is_empty() {
+                write!(
+                    prompt,
+                    "User edited {}:\n```diff\n{}\n```",
+                    path.display(),
+                    diff
+                )
+                .unwrap();
+            }
+
+            prompt
+        }
+    }
+}
+
+/// Typical number of string bytes per token for the purposes of limiting model input. This is
+/// intentionally low to err on the side of underestimating limits.
+pub(crate) const BYTES_PER_TOKEN_GUESS: usize = 3;
+
+fn guess_token_count(bytes: usize) -> usize {
+    bytes / BYTES_PER_TOKEN_GUESS
+}
diff --git a/crates/zeta/src/input_excerpt.rs b/crates/zeta/src/zeta1/input_excerpt.rs
similarity index 98%
rename from crates/zeta/src/input_excerpt.rs
rename to crates/zeta/src/zeta1/input_excerpt.rs
index 06bff5b1bea0f099b2ccd98605ac5de5bb5e6360..853d74da463c19de4f1d3915cb703a53b6c43c61 100644
--- a/crates/zeta/src/input_excerpt.rs
+++ b/crates/zeta/src/zeta1/input_excerpt.rs
@@ -1,4 +1,4 @@
-use crate::{
+use super::{
     CURSOR_MARKER, EDITABLE_REGION_END_MARKER, EDITABLE_REGION_START_MARKER, START_OF_FILE_MARKER,
     guess_token_count,
 };
@@ -7,6 +7,7 @@ use std::{fmt::Write, ops::Range};
 
 #[derive(Debug)]
 pub struct InputExcerpt {
+    pub context_range: Range<Point>,
     pub editable_range: Range<Point>,
     pub prompt: String,
 }
@@ -63,6 +64,7 @@ pub fn excerpt_for_cursor_position(
     write!(prompt, "\n```").unwrap();
 
     InputExcerpt {
+        context_range,
         editable_range,
         prompt,
     }
@@ -124,7 +126,7 @@ mod tests {
     use super::*;
     use gpui::{App, AppContext};
     use indoc::indoc;
-    use language::{Buffer, Language, LanguageConfig, LanguageMatcher};
+    use language::{Buffer, Language, LanguageConfig, LanguageMatcher, tree_sitter_rust};
     use std::sync::Arc;
 
     #[gpui::test]
diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs
new file mode 100644
index 0000000000000000000000000000000000000000..eb12f81af25d72b5e7003187ab0a9536622c9a74
--- /dev/null
+++ b/crates/zeta/src/zeta_tests.rs
@@ -0,0 +1,671 @@
+use client::test::FakeServer;
+use clock::{FakeSystemClock, ReplicaId};
+use cloud_api_types::{CreateLlmTokenResponse, LlmToken};
+use cloud_llm_client::{PredictEditsBody, PredictEditsResponse};
+use gpui::TestAppContext;
+use http_client::FakeHttpClient;
+use indoc::indoc;
+use language::Point;
+use parking_lot::Mutex;
+use serde_json::json;
+use settings::SettingsStore;
+use util::{path, rel_path::rel_path};
+
+use crate::zeta1::MAX_EVENT_TOKENS;
+
+use super::*;
+
+const BSD_0_TXT: &str = include_str!("../license_examples/0bsd.txt");
+
+#[gpui::test]
+async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) {
+    let buffer = cx.new(|cx| Buffer::local("Lorem ipsum dolor", cx));
+    let edits: Arc<[(Range<Anchor>, Arc<str>)]> = cx.update(|cx| {
+        to_completion_edits([(2..5, "REM".into()), (9..11, "".into())], &buffer, cx).into()
+    });
+
+    let edit_preview = cx
+        .read(|cx| buffer.read(cx).preview_edits(edits.clone(), cx))
+        .await;
+
+    let completion = EditPrediction {
+        edits,
+        edit_preview,
+        buffer: buffer.clone(),
+        snapshot: cx.read(|cx| buffer.read(cx).snapshot()),
+        id: EditPredictionId("the-id".into()),
+        inputs: EditPredictionInputs {
+            events: Default::default(),
+            included_files: Default::default(),
+            cursor_point: cloud_llm_client::predict_edits_v3::Point {
+                line: Line(0),
+                column: 0,
+            },
+            cursor_path: Path::new("").into(),
+        },
+        buffer_snapshotted_at: Instant::now(),
+        response_received_at: Instant::now(),
+    };
+
+    cx.update(|cx| {
+        assert_eq!(
+            from_completion_edits(
+                &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(),
+                &buffer,
+                cx
+            ),
+            vec![(2..5, "REM".into()), (9..11, "".into())]
+        );
+
+        buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "")], None, cx));
+        assert_eq!(
+            from_completion_edits(
+                &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(),
+                &buffer,
+                cx
+            ),
+            vec![(2..2, "REM".into()), (6..8, "".into())]
+        );
+
+        buffer.update(cx, |buffer, cx| buffer.undo(cx));
+        assert_eq!(
+            from_completion_edits(
&completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(2..5, "REM".into()), (9..11, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(2..5, "R")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(3..3, "EM".into()), (7..9, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(3..3, "E")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into()), (8..10, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..4, "M")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(9..11, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..5, "")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into()), (8..10, "".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(8..10, "")], None, cx)); + assert_eq!( + from_completion_edits( + &completion.interpolate(&buffer.read(cx).snapshot()).unwrap(), + &buffer, + cx + ), + vec![(4..4, "M".into())] + ); + + buffer.update(cx, |buffer, cx| buffer.edit([(4..6, "")], None, cx)); + assert_eq!(completion.interpolate(&buffer.read(cx).snapshot()), None); + }) +} + +#[gpui::test] +async fn test_clean_up_diff(cx: &mut TestAppContext) { + init_test(cx); + + assert_eq!( + apply_edit_prediction( + indoc! {" + fn main() { + let word_1 = \"lorem\"; + let range = word.len()..word.len(); + } + "}, + indoc! {" + <|editable_region_start|> + fn main() { + let word_1 = \"lorem\"; + let range = word_1.len()..word_1.len(); + } + + <|editable_region_end|> + "}, + cx, + ) + .await, + indoc! {" + fn main() { + let word_1 = \"lorem\"; + let range = word_1.len()..word_1.len(); + } + "}, + ); + + assert_eq!( + apply_edit_prediction( + indoc! {" + fn main() { + let story = \"the quick\" + } + "}, + indoc! {" + <|editable_region_start|> + fn main() { + let story = \"the quick brown fox jumps over the lazy dog\"; + } + + <|editable_region_end|> + "}, + cx, + ) + .await, + indoc! {" + fn main() { + let story = \"the quick brown fox jumps over the lazy dog\"; + } + "}, + ); +} + +#[gpui::test] +async fn test_edit_prediction_end_of_buffer(cx: &mut TestAppContext) { + init_test(cx); + + let buffer_content = "lorem\n"; + let completion_response = indoc! 
{" + ```animals.js + <|start_of_file|> + <|editable_region_start|> + lorem + ipsum + <|editable_region_end|> + ```"}; + + assert_eq!( + apply_edit_prediction(buffer_content, completion_response, cx).await, + "lorem\nipsum" + ); +} + +#[gpui::test] +async fn test_can_collect_data(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree(path!("/project"), json!({ "LICENSE": BSD_0_TXT })) + .await; + + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/project/src/main.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Disabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_remote_file(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [], cx).await; + + let buffer = cx.new(|_cx| { + Buffer::remote( + language::BufferId::new(1).unwrap(), + ReplicaId::new(1), + language::Capability::ReadWrite, + "fn main() {\n println!(\"Hello\");\n}", + ) + }); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_private_file(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/project"), + json!({ + "LICENSE": BSD_0_TXT, + ".env": "SECRET_KEY=secret" + }), + ) + .await; + + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer("/project/.env", cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_untitled_buffer(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [], cx).await; + let buffer = cx.new(|cx| Buffer::local("", cx)); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_when_closed_source(cx: &mut TestAppContext) { + init_test(cx); + + let fs = 
project::FakeFs::new(cx.executor()); + fs.insert_tree(path!("/project"), json!({ "main.rs": "fn main() {}" })) + .await; + + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer("/project/main.rs", cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_data_collection_status_changes_on_move(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/open_source_worktree"), + json!({ "LICENSE": BSD_0_TXT, "main.rs": "" }), + ) + .await; + fs.insert_tree(path!("/closed_source_worktree"), json!({ "main.rs": "" })) + .await; + + let project = Project::test( + fs.clone(), + [ + path!("/open_source_worktree").as_ref(), + path!("/closed_source_worktree").as_ref(), + ], + cx, + ) + .await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/open_source_worktree/main.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + let closed_source_file = project + .update(cx, |project, cx| { + let worktree2 = project + .worktree_for_root_name("closed_source_worktree", cx) + .unwrap(); + worktree2.update(cx, |worktree2, cx| { + worktree2.load_file(rel_path("main.rs"), cx) + }) + }) + .await + .unwrap() + .file; + + buffer.update(cx, |buffer, cx| { + buffer.file_updated(closed_source_file, cx); + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); +} + +#[gpui::test] +async fn test_no_data_collection_for_events_in_uncollectable_buffers(cx: &mut TestAppContext) { + init_test(cx); + + let fs = project::FakeFs::new(cx.executor()); + fs.insert_tree( + path!("/worktree1"), + json!({ "LICENSE": BSD_0_TXT, "main.rs": "", "other.rs": "" }), + ) + .await; + fs.insert_tree(path!("/worktree2"), json!({ "private.rs": "" })) + .await; + + let project = Project::test( + fs.clone(), + [path!("/worktree1").as_ref(), path!("/worktree2").as_ref()], + cx, + ) + .await; + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/worktree1/main.rs"), cx) + }) + .await + .unwrap(); + let private_buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(path!("/worktree2/file.rs"), cx) + }) + .await + .unwrap(); + + let (zeta, captured_request, _) = make_test_zeta(&project, cx).await; + zeta.update(cx, |zeta, _cx| { + zeta.data_collection_choice = DataCollectionChoice::Enabled + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); + + // this has a side effect of registering the buffer to watch for edits + run_edit_prediction(&private_buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); 
+ + private_buffer.update(cx, |private_buffer, cx| { + private_buffer.edit([(0..0, "An edit for the history!")], None, cx); + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + false + ); + + // make an edit that uses too many bytes, causing private_buffer edit to not be able to be + // included + buffer.update(cx, |buffer, cx| { + buffer.edit( + [( + 0..0, + " ".repeat(MAX_EVENT_TOKENS * zeta1::BYTES_PER_TOKEN_GUESS), + )], + None, + cx, + ); + }); + + run_edit_prediction(&buffer, &project, &zeta, cx).await; + assert_eq!( + captured_request.lock().clone().unwrap().can_collect_data, + true + ); +} + +fn init_test(cx: &mut TestAppContext) { + cx.update(|cx| { + let settings_store = SettingsStore::test(cx); + cx.set_global(settings_store); + }); +} + +async fn apply_edit_prediction( + buffer_content: &str, + completion_response: &str, + cx: &mut TestAppContext, +) -> String { + let fs = project::FakeFs::new(cx.executor()); + let project = Project::test(fs.clone(), [path!("/project").as_ref()], cx).await; + let buffer = cx.new(|cx| Buffer::local(buffer_content, cx)); + let (zeta, _, response) = make_test_zeta(&project, cx).await; + *response.lock() = completion_response.to_string(); + let edit_prediction = run_edit_prediction(&buffer, &project, &zeta, cx).await; + buffer.update(cx, |buffer, cx| { + buffer.edit(edit_prediction.edits.iter().cloned(), None, cx) + }); + buffer.read_with(cx, |buffer, _| buffer.text()) +} + +async fn run_edit_prediction( + buffer: &Entity, + project: &Entity, + zeta: &Entity, + cx: &mut TestAppContext, +) -> EditPrediction { + let cursor = buffer.read_with(cx, |buffer, _| buffer.anchor_before(Point::new(1, 0))); + zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); + cx.background_executor.run_until_parked(); + let prediction_task = zeta.update(cx, |zeta, cx| { + zeta.request_prediction(&project, buffer, cursor, cx) + }); + prediction_task.await.unwrap().unwrap() +} + +async fn make_test_zeta( + project: &Entity, + cx: &mut TestAppContext, +) -> ( + Entity, + Arc>>, + Arc>, +) { + let default_response = indoc! 
{" + ```main.rs + <|start_of_file|> + <|editable_region_start|> + hello world + <|editable_region_end|> + ```" + }; + let captured_request: Arc>> = Arc::new(Mutex::new(None)); + let completion_response: Arc> = + Arc::new(Mutex::new(default_response.to_string())); + let http_client = FakeHttpClient::create({ + let captured_request = captured_request.clone(); + let completion_response = completion_response.clone(); + let mut next_request_id = 0; + move |req| { + let captured_request = captured_request.clone(); + let completion_response = completion_response.clone(); + async move { + match (req.method(), req.uri().path()) { + (&Method::POST, "/client/llm_tokens") => Ok(http_client::Response::builder() + .status(200) + .body( + serde_json::to_string(&CreateLlmTokenResponse { + token: LlmToken("the-llm-token".to_string()), + }) + .unwrap() + .into(), + ) + .unwrap()), + (&Method::POST, "/predict_edits/v2") => { + let mut request_body = String::new(); + req.into_body().read_to_string(&mut request_body).await?; + *captured_request.lock() = + Some(serde_json::from_str(&request_body).unwrap()); + next_request_id += 1; + Ok(http_client::Response::builder() + .status(200) + .body( + serde_json::to_string(&PredictEditsResponse { + request_id: format!("request-{next_request_id}"), + output_excerpt: completion_response.lock().clone(), + }) + .unwrap() + .into(), + ) + .unwrap()) + } + _ => Ok(http_client::Response::builder() + .status(404) + .body("Not Found".into()) + .unwrap()), + } + } + } + }); + + let client = cx.update(|cx| Client::new(Arc::new(FakeSystemClock::new()), http_client, cx)); + cx.update(|cx| { + RefreshLlmTokenListener::register(client.clone(), cx); + }); + let _server = FakeServer::for_client(42, &client, cx).await; + + let zeta = cx.new(|cx| { + let mut zeta = Zeta::new(client, project.read(cx).user_store(), cx); + zeta.set_edit_prediction_model(ZetaEditPredictionModel::Zeta1); + + let worktrees = project.read(cx).worktrees(cx).collect::>(); + for worktree in worktrees { + let worktree_id = worktree.read(cx).id(); + zeta.get_or_init_zeta_project(project, cx) + .license_detection_watchers + .entry(worktree_id) + .or_insert_with(|| Rc::new(LicenseDetectionWatcher::new(&worktree, cx))); + } + + zeta + }); + + (zeta, captured_request, completion_response) +} + +fn to_completion_edits( + iterator: impl IntoIterator, Arc)>, + buffer: &Entity, + cx: &App, +) -> Vec<(Range, Arc)> { + let buffer = buffer.read(cx); + iterator + .into_iter() + .map(|(range, text)| { + ( + buffer.anchor_after(range.start)..buffer.anchor_before(range.end), + text, + ) + }) + .collect() +} + +fn from_completion_edits( + editor_edits: &[(Range, Arc)], + buffer: &Entity, + cx: &App, +) -> Vec<(Range, Arc)> { + let buffer = buffer.read(cx); + editor_edits + .iter() + .map(|(range, text)| { + ( + range.start.to_offset(buffer)..range.end.to_offset(buffer), + text.clone(), + ) + }) + .collect() +} + +#[ctor::ctor] +fn init_logger() { + zlog::init_test(); +} diff --git a/crates/zeta2/Cargo.toml b/crates/zeta2/Cargo.toml deleted file mode 100644 index 0b20f980feaa6c2e86b0d3a6b88150d27d06fab2..0000000000000000000000000000000000000000 --- a/crates/zeta2/Cargo.toml +++ /dev/null @@ -1,61 +0,0 @@ -[package] -name = "zeta2" -version = "0.1.0" -edition.workspace = true -publish.workspace = true -license = "GPL-3.0-or-later" - -[lints] -workspace = true - -[lib] -path = "src/zeta2.rs" - -[features] -eval-support = [] - -[dependencies] -anyhow.workspace = true -arrayvec.workspace = true -brotli.workspace = true -chrono.workspace 
= true -client.workspace = true -cloud_llm_client.workspace = true -cloud_zeta2_prompt.workspace = true -collections.workspace = true -edit_prediction.workspace = true -edit_prediction_context.workspace = true -feature_flags.workspace = true -futures.workspace = true -gpui.workspace = true -indoc.workspace = true -language.workspace = true -language_model.workspace = true -log.workspace = true -lsp.workspace = true -open_ai.workspace = true -pretty_assertions.workspace = true -project.workspace = true -release_channel.workspace = true -semver.workspace = true -serde.workspace = true -serde_json.workspace = true -smol.workspace = true -strsim.workspace = true -thiserror.workspace = true -util.workspace = true -uuid.workspace = true -workspace.workspace = true -worktree.workspace = true - -[dev-dependencies] -clock = { workspace = true, features = ["test-support"] } -cloud_llm_client = { workspace = true, features = ["test-support"] } -gpui = { workspace = true, features = ["test-support"] } -lsp.workspace = true -indoc.workspace = true -language = { workspace = true, features = ["test-support"] } -language_model = { workspace = true, features = ["test-support"] } -project = { workspace = true, features = ["test-support"] } -settings = { workspace = true, features = ["test-support"] } -zlog.workspace = true diff --git a/crates/zeta2/LICENSE-GPL b/crates/zeta2/LICENSE-GPL deleted file mode 120000 index 89e542f750cd3860a0598eff0dc34b56d7336dc4..0000000000000000000000000000000000000000 --- a/crates/zeta2/LICENSE-GPL +++ /dev/null @@ -1 +0,0 @@ -../../LICENSE-GPL \ No newline at end of file diff --git a/crates/zeta2/src/zeta2.rs b/crates/zeta2/src/zeta2.rs deleted file mode 100644 index 255b294d7cc25fade197c3a50d39130bc6bb99c5..0000000000000000000000000000000000000000 --- a/crates/zeta2/src/zeta2.rs +++ /dev/null @@ -1,2968 +0,0 @@ -use anyhow::{Context as _, Result, anyhow, bail}; -use arrayvec::ArrayVec; -use chrono::TimeDelta; -use client::{Client, EditPredictionUsage, UserStore}; -use cloud_llm_client::predict_edits_v3::{self, PromptFormat, Signature}; -use cloud_llm_client::{ - AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, MINIMUM_REQUIRED_VERSION_HEADER_NAME, - ZED_VERSION_HEADER_NAME, -}; -use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; -use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; -use collections::HashMap; -use edit_prediction_context::{ - DeclarationId, DeclarationStyle, EditPredictionContext, EditPredictionContextOptions, - EditPredictionExcerpt, EditPredictionExcerptOptions, EditPredictionScoreOptions, Line, - SyntaxIndex, SyntaxIndexState, -}; -use feature_flags::{FeatureFlag, FeatureFlagAppExt as _}; -use futures::AsyncReadExt as _; -use futures::channel::{mpsc, oneshot}; -use gpui::http_client::{AsyncBody, Method}; -use gpui::{ - App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, - http_client, prelude::*, -}; -use language::{Anchor, Buffer, DiagnosticSet, LanguageServerId, Point, ToOffset as _, ToPoint}; -use language::{BufferSnapshot, OffsetRangeExt}; -use language_model::{LlmApiToken, RefreshLlmTokenListener}; -use lsp::DiagnosticSeverity; -use open_ai::FunctionDefinition; -use project::{Project, ProjectPath}; -use release_channel::AppVersion; -use semver::Version; -use serde::de::DeserializeOwned; -use std::collections::{VecDeque, hash_map}; - -use std::fmt::Write; -use std::ops::Range; -use std::path::Path; -use std::str::FromStr; -use std::sync::{Arc, LazyLock}; -use 
std::time::{Duration, Instant}; -use std::{env, mem}; -use thiserror::Error; -use util::rel_path::RelPathBuf; -use util::{LogErrorFuture, RangeExt as _, ResultExt as _, TryFutureExt}; -use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification}; - -pub mod assemble_excerpts; -mod prediction; -mod provider; -pub mod retrieval_search; -mod sweep_ai; -pub mod udiff; -mod xml_edits; - -use crate::assemble_excerpts::assemble_excerpts; -pub use crate::prediction::EditPrediction; -pub use crate::prediction::EditPredictionId; -pub use provider::ZetaEditPredictionProvider; - -/// Maximum number of events to track. -const EVENT_COUNT_MAX_SWEEP: usize = 6; -const EVENT_COUNT_MAX_ZETA: usize = 16; -const CHANGE_GROUPING_LINE_SPAN: u32 = 8; - -pub struct SweepFeatureFlag; - -impl FeatureFlag for SweepFeatureFlag { - const NAME: &str = "sweep-ai"; -} -pub const DEFAULT_EXCERPT_OPTIONS: EditPredictionExcerptOptions = EditPredictionExcerptOptions { - max_bytes: 512, - min_bytes: 128, - target_before_cursor_over_total_bytes: 0.5, -}; - -pub const DEFAULT_CONTEXT_OPTIONS: ContextMode = - ContextMode::Agentic(DEFAULT_AGENTIC_CONTEXT_OPTIONS); - -pub const DEFAULT_AGENTIC_CONTEXT_OPTIONS: AgenticContextOptions = AgenticContextOptions { - excerpt: DEFAULT_EXCERPT_OPTIONS, -}; - -pub const DEFAULT_SYNTAX_CONTEXT_OPTIONS: EditPredictionContextOptions = - EditPredictionContextOptions { - use_imports: true, - max_retrieved_declarations: 0, - excerpt: DEFAULT_EXCERPT_OPTIONS, - score: EditPredictionScoreOptions { - omit_excerpt_overlaps: true, - }, - }; - -pub const DEFAULT_OPTIONS: ZetaOptions = ZetaOptions { - context: DEFAULT_CONTEXT_OPTIONS, - max_prompt_bytes: DEFAULT_MAX_PROMPT_BYTES, - max_diagnostic_bytes: 2048, - prompt_format: PromptFormat::DEFAULT, - file_indexing_parallelism: 1, - buffer_change_grouping_interval: Duration::from_secs(1), -}; - -static USE_OLLAMA: LazyLock = - LazyLock::new(|| env::var("ZED_ZETA2_OLLAMA").is_ok_and(|var| !var.is_empty())); -static CONTEXT_RETRIEVAL_MODEL_ID: LazyLock = LazyLock::new(|| { - env::var("ZED_ZETA2_CONTEXT_MODEL").unwrap_or(if *USE_OLLAMA { - "qwen3-coder:30b".to_string() - } else { - "yqvev8r3".to_string() - }) -}); -static EDIT_PREDICTIONS_MODEL_ID: LazyLock = LazyLock::new(|| { - match env::var("ZED_ZETA2_MODEL").as_deref() { - Ok("zeta2-exp") => "4w5n28vw", // Fine-tuned model @ Baseten - Ok(model) => model, - Err(_) if *USE_OLLAMA => "qwen3-coder:30b", - Err(_) => "yqvev8r3", // Vanilla qwen3-coder @ Baseten - } - .to_string() -}); -static PREDICT_EDITS_URL: LazyLock> = LazyLock::new(|| { - env::var("ZED_PREDICT_EDITS_URL").ok().or_else(|| { - if *USE_OLLAMA { - Some("http://localhost:11434/v1/chat/completions".into()) - } else { - None - } - }) -}); - -pub struct Zeta2FeatureFlag; - -impl FeatureFlag for Zeta2FeatureFlag { - const NAME: &'static str = "zeta2"; - - fn enabled_for_staff() -> bool { - false - } -} - -#[derive(Clone)] -struct ZetaGlobal(Entity); - -impl Global for ZetaGlobal {} - -pub struct Zeta { - client: Arc, - user_store: Entity, - llm_token: LlmApiToken, - _llm_token_subscription: Subscription, - projects: HashMap, - options: ZetaOptions, - update_required: bool, - debug_tx: Option>, - #[cfg(feature = "eval-support")] - eval_cache: Option>, - edit_prediction_model: ZetaEditPredictionModel, - sweep_api_token: Option, - sweep_ai_debug_info: Arc, -} - -#[derive(PartialEq, Eq)] -pub enum ZetaEditPredictionModel { - ZedCloud, - Sweep, -} - -#[derive(Debug, Clone, PartialEq)] -pub struct ZetaOptions { - pub 
context: ContextMode, - pub max_prompt_bytes: usize, - pub max_diagnostic_bytes: usize, - pub prompt_format: predict_edits_v3::PromptFormat, - pub file_indexing_parallelism: usize, - pub buffer_change_grouping_interval: Duration, -} - -#[derive(Debug, Clone, PartialEq)] -pub enum ContextMode { - Agentic(AgenticContextOptions), - Syntax(EditPredictionContextOptions), -} - -#[derive(Debug, Clone, PartialEq)] -pub struct AgenticContextOptions { - pub excerpt: EditPredictionExcerptOptions, -} - -impl ContextMode { - pub fn excerpt(&self) -> &EditPredictionExcerptOptions { - match self { - ContextMode::Agentic(options) => &options.excerpt, - ContextMode::Syntax(options) => &options.excerpt, - } - } -} - -#[derive(Debug)] -pub enum ZetaDebugInfo { - ContextRetrievalStarted(ZetaContextRetrievalStartedDebugInfo), - SearchQueriesGenerated(ZetaSearchQueryDebugInfo), - SearchQueriesExecuted(ZetaContextRetrievalDebugInfo), - ContextRetrievalFinished(ZetaContextRetrievalDebugInfo), - EditPredictionRequested(ZetaEditPredictionDebugInfo), -} - -#[derive(Debug)] -pub struct ZetaContextRetrievalStartedDebugInfo { - pub project: Entity, - pub timestamp: Instant, - pub search_prompt: String, -} - -#[derive(Debug)] -pub struct ZetaContextRetrievalDebugInfo { - pub project: Entity, - pub timestamp: Instant, -} - -#[derive(Debug)] -pub struct ZetaEditPredictionDebugInfo { - pub request: predict_edits_v3::PredictEditsRequest, - pub retrieval_time: TimeDelta, - pub buffer: WeakEntity, - pub position: language::Anchor, - pub local_prompt: Result, - pub response_rx: oneshot::Receiver<(Result, TimeDelta)>, -} - -#[derive(Debug)] -pub struct ZetaSearchQueryDebugInfo { - pub project: Entity, - pub timestamp: Instant, - pub search_queries: Vec, -} - -pub type RequestDebugInfo = predict_edits_v3::DebugInfo; - -struct ZetaProject { - syntax_index: Option>, - events: VecDeque, - recent_paths: VecDeque, - registered_buffers: HashMap, - current_prediction: Option, - next_pending_prediction_id: usize, - pending_predictions: ArrayVec, - last_prediction_refresh: Option<(EntityId, Instant)>, - context: Option, Vec>>>, - refresh_context_task: Option>>>, - refresh_context_debounce_task: Option>>, - refresh_context_timestamp: Option, - _subscription: gpui::Subscription, -} - -#[derive(Debug, Clone)] -struct CurrentEditPrediction { - pub requested_by: PredictionRequestedBy, - pub prediction: EditPrediction, -} - -impl CurrentEditPrediction { - fn should_replace_prediction(&self, old_prediction: &Self, cx: &App) -> bool { - let Some(new_edits) = self - .prediction - .interpolate(&self.prediction.buffer.read(cx)) - else { - return false; - }; - - if self.prediction.buffer != old_prediction.prediction.buffer { - return true; - } - - let Some(old_edits) = old_prediction - .prediction - .interpolate(&old_prediction.prediction.buffer.read(cx)) - else { - return true; - }; - - let requested_by_buffer_id = self.requested_by.buffer_id(); - - // This reduces the occurrence of UI thrash from replacing edits - // - // TODO: This is fairly arbitrary - should have a more general heuristic that handles multiple edits. 
- if requested_by_buffer_id == Some(self.prediction.buffer.entity_id()) - && requested_by_buffer_id == Some(old_prediction.prediction.buffer.entity_id()) - && old_edits.len() == 1 - && new_edits.len() == 1 - { - let (old_range, old_text) = &old_edits[0]; - let (new_range, new_text) = &new_edits[0]; - new_range == old_range && new_text.starts_with(old_text.as_ref()) - } else { - true - } - } -} - -#[derive(Debug, Clone)] -enum PredictionRequestedBy { - DiagnosticsUpdate, - Buffer(EntityId), -} - -impl PredictionRequestedBy { - pub fn buffer_id(&self) -> Option { - match self { - PredictionRequestedBy::DiagnosticsUpdate => None, - PredictionRequestedBy::Buffer(buffer_id) => Some(*buffer_id), - } - } -} - -struct PendingPrediction { - id: usize, - _task: Task<()>, -} - -/// A prediction from the perspective of a buffer. -#[derive(Debug)] -enum BufferEditPrediction<'a> { - Local { prediction: &'a EditPrediction }, - Jump { prediction: &'a EditPrediction }, -} - -struct RegisteredBuffer { - snapshot: BufferSnapshot, - _subscriptions: [gpui::Subscription; 2], -} - -#[derive(Clone)] -pub enum Event { - BufferChange { - old_snapshot: BufferSnapshot, - new_snapshot: BufferSnapshot, - end_edit_anchor: Option, - timestamp: Instant, - }, -} - -impl Event { - pub fn to_request_event(&self, cx: &App) -> Option { - match self { - Event::BufferChange { - old_snapshot, - new_snapshot, - .. - } => { - let path = new_snapshot.file().map(|f| f.full_path(cx)); - - let old_path = old_snapshot.file().and_then(|f| { - let old_path = f.full_path(cx); - if Some(&old_path) != path.as_ref() { - Some(old_path) - } else { - None - } - }); - - // TODO [zeta2] move to bg? - let diff = language::unified_diff(&old_snapshot.text(), &new_snapshot.text()); - - if path == old_path && diff.is_empty() { - None - } else { - Some(predict_edits_v3::Event::BufferChange { - old_path, - path, - diff, - //todo: Actually detect if this edit was predicted or not - predicted: false, - }) - } - } - } - } - - pub fn project_path(&self, cx: &App) -> Option { - match self { - Event::BufferChange { new_snapshot, .. 
} => new_snapshot - .file() - .map(|f| project::ProjectPath::from_file(f.as_ref(), cx)), - } - } -} - -impl Zeta { - pub fn try_global(cx: &App) -> Option> { - cx.try_global::().map(|global| global.0.clone()) - } - - pub fn global( - client: &Arc, - user_store: &Entity, - cx: &mut App, - ) -> Entity { - cx.try_global::() - .map(|global| global.0.clone()) - .unwrap_or_else(|| { - let zeta = cx.new(|cx| Self::new(client.clone(), user_store.clone(), cx)); - cx.set_global(ZetaGlobal(zeta.clone())); - zeta - }) - } - - pub fn new(client: Arc, user_store: Entity, cx: &mut Context) -> Self { - let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx); - - Self { - projects: HashMap::default(), - client, - user_store, - options: DEFAULT_OPTIONS, - llm_token: LlmApiToken::default(), - _llm_token_subscription: cx.subscribe( - &refresh_llm_token_listener, - |this, _listener, _event, cx| { - let client = this.client.clone(); - let llm_token = this.llm_token.clone(); - cx.spawn(async move |_this, _cx| { - llm_token.refresh(&client).await?; - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - }, - ), - update_required: false, - debug_tx: None, - #[cfg(feature = "eval-support")] - eval_cache: None, - edit_prediction_model: ZetaEditPredictionModel::ZedCloud, - sweep_api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), - sweep_ai_debug_info: sweep_ai::debug_info(cx), - } - } - - pub fn set_edit_prediction_model(&mut self, model: ZetaEditPredictionModel) { - self.edit_prediction_model = model; - } - - pub fn has_sweep_api_token(&self) -> bool { - self.sweep_api_token.is_some() - } - - #[cfg(feature = "eval-support")] - pub fn with_eval_cache(&mut self, cache: Arc) { - self.eval_cache = Some(cache); - } - - pub fn debug_info(&mut self) -> mpsc::UnboundedReceiver { - let (debug_watch_tx, debug_watch_rx) = mpsc::unbounded(); - self.debug_tx = Some(debug_watch_tx); - debug_watch_rx - } - - pub fn options(&self) -> &ZetaOptions { - &self.options - } - - pub fn set_options(&mut self, options: ZetaOptions) { - self.options = options; - } - - pub fn clear_history(&mut self) { - for zeta_project in self.projects.values_mut() { - zeta_project.events.clear(); - } - } - - pub fn history_for_project( - &self, - project: &Entity, - ) -> impl DoubleEndedIterator { - self.projects - .get(&project.entity_id()) - .map(|project| project.events.iter()) - .into_iter() - .flatten() - } - - pub fn context_for_project( - &self, - project: &Entity, - ) -> impl Iterator, &[Range])> { - self.projects - .get(&project.entity_id()) - .and_then(|project| { - Some( - project - .context - .as_ref()? 
- .iter() - .map(|(buffer, ranges)| (buffer.clone(), ranges.as_slice())), - ) - }) - .into_iter() - .flatten() - } - - pub fn usage(&self, cx: &App) -> Option { - if self.edit_prediction_model == ZetaEditPredictionModel::ZedCloud { - self.user_store.read(cx).edit_prediction_usage() - } else { - None - } - } - - pub fn register_project(&mut self, project: &Entity, cx: &mut Context) { - self.get_or_init_zeta_project(project, cx); - } - - pub fn register_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let zeta_project = self.get_or_init_zeta_project(project, cx); - Self::register_buffer_impl(zeta_project, buffer, project, cx); - } - - fn get_or_init_zeta_project( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> &mut ZetaProject { - self.projects - .entry(project.entity_id()) - .or_insert_with(|| ZetaProject { - syntax_index: if let ContextMode::Syntax(_) = &self.options.context { - Some(cx.new(|cx| { - SyntaxIndex::new(project, self.options.file_indexing_parallelism, cx) - })) - } else { - None - }, - events: VecDeque::new(), - recent_paths: VecDeque::new(), - registered_buffers: HashMap::default(), - current_prediction: None, - pending_predictions: ArrayVec::new(), - next_pending_prediction_id: 0, - last_prediction_refresh: None, - context: None, - refresh_context_task: None, - refresh_context_debounce_task: None, - refresh_context_timestamp: None, - _subscription: cx.subscribe(&project, Self::handle_project_event), - }) - } - - fn handle_project_event( - &mut self, - project: Entity, - event: &project::Event, - cx: &mut Context, - ) { - // TODO [zeta2] init with recent paths - match event { - project::Event::ActiveEntryChanged(Some(active_entry_id)) => { - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - let path = project.read(cx).path_for_entry(*active_entry_id, cx); - if let Some(path) = path { - if let Some(ix) = zeta_project - .recent_paths - .iter() - .position(|probe| probe == &path) - { - zeta_project.recent_paths.remove(ix); - } - zeta_project.recent_paths.push_front(path); - } - } - project::Event::DiagnosticsUpdated { .. 
} => { - self.refresh_prediction_from_diagnostics(project, cx); - } - _ => (), - } - } - - fn register_buffer_impl<'a>( - zeta_project: &'a mut ZetaProject, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) -> &'a mut RegisteredBuffer { - let buffer_id = buffer.entity_id(); - match zeta_project.registered_buffers.entry(buffer_id) { - hash_map::Entry::Occupied(entry) => entry.into_mut(), - hash_map::Entry::Vacant(entry) => { - let snapshot = buffer.read(cx).snapshot(); - let project_entity_id = project.entity_id(); - entry.insert(RegisteredBuffer { - snapshot, - _subscriptions: [ - cx.subscribe(buffer, { - let project = project.downgrade(); - move |this, buffer, event, cx| { - if let language::BufferEvent::Edited = event - && let Some(project) = project.upgrade() - { - this.report_changes_for_buffer(&buffer, &project, cx); - } - } - }), - cx.observe_release(buffer, move |this, _buffer, _cx| { - let Some(zeta_project) = this.projects.get_mut(&project_entity_id) - else { - return; - }; - zeta_project.registered_buffers.remove(&buffer_id); - }), - ], - }) - } - } - } - - fn report_changes_for_buffer( - &mut self, - buffer: &Entity, - project: &Entity, - cx: &mut Context, - ) { - let event_count_max = match self.edit_prediction_model { - ZetaEditPredictionModel::ZedCloud => EVENT_COUNT_MAX_ZETA, - ZetaEditPredictionModel::Sweep => EVENT_COUNT_MAX_SWEEP, - }; - - let sweep_ai_project = self.get_or_init_zeta_project(project, cx); - let registered_buffer = Self::register_buffer_impl(sweep_ai_project, buffer, project, cx); - - let new_snapshot = buffer.read(cx).snapshot(); - if new_snapshot.version == registered_buffer.snapshot.version { - return; - } - - let old_snapshot = mem::replace(&mut registered_buffer.snapshot, new_snapshot.clone()); - let end_edit_anchor = new_snapshot - .anchored_edits_since::(&old_snapshot.version) - .last() - .map(|(_, range)| range.end); - let events = &mut sweep_ai_project.events; - - if let Some(Event::BufferChange { - new_snapshot: last_new_snapshot, - end_edit_anchor: last_end_edit_anchor, - .. 
- }) = events.back_mut() - { - let is_next_snapshot_of_same_buffer = old_snapshot.remote_id() - == last_new_snapshot.remote_id() - && old_snapshot.version == last_new_snapshot.version; - - let should_coalesce = is_next_snapshot_of_same_buffer - && end_edit_anchor - .as_ref() - .zip(last_end_edit_anchor.as_ref()) - .is_some_and(|(a, b)| { - let a = a.to_point(&new_snapshot); - let b = b.to_point(&new_snapshot); - a.row.abs_diff(b.row) <= CHANGE_GROUPING_LINE_SPAN - }); - - if should_coalesce { - *last_end_edit_anchor = end_edit_anchor; - *last_new_snapshot = new_snapshot; - return; - } - } - - if events.len() >= event_count_max { - events.pop_front(); - } - - events.push_back(Event::BufferChange { - old_snapshot, - new_snapshot, - end_edit_anchor, - timestamp: Instant::now(), - }); - } - - fn current_prediction_for_buffer( - &self, - buffer: &Entity, - project: &Entity, - cx: &App, - ) -> Option> { - let project_state = self.projects.get(&project.entity_id())?; - - let CurrentEditPrediction { - requested_by, - prediction, - } = project_state.current_prediction.as_ref()?; - - if prediction.targets_buffer(buffer.read(cx)) { - Some(BufferEditPrediction::Local { prediction }) - } else { - let show_jump = match requested_by { - PredictionRequestedBy::Buffer(requested_by_buffer_id) => { - requested_by_buffer_id == &buffer.entity_id() - } - PredictionRequestedBy::DiagnosticsUpdate => true, - }; - - if show_jump { - Some(BufferEditPrediction::Jump { prediction }) - } else { - None - } - } - } - - fn accept_current_prediction(&mut self, project: &Entity, cx: &mut Context) { - if self.edit_prediction_model != ZetaEditPredictionModel::ZedCloud { - return; - } - - let Some(project_state) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - - let Some(prediction) = project_state.current_prediction.take() else { - return; - }; - let request_id = prediction.prediction.id.to_string(); - project_state.pending_predictions.clear(); - - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - cx.spawn(async move |this, cx| { - let url = if let Ok(predict_edits_url) = env::var("ZED_ACCEPT_PREDICTION_URL") { - http_client::Url::parse(&predict_edits_url)? - } else { - client - .http_client() - .build_zed_llm_url("/predict_edits/accept", &[])? - }; - - let response = cx - .background_spawn(Self::send_api_request::<()>( - move |builder| { - let req = builder.uri(url.as_ref()).body( - serde_json::to_string(&AcceptEditPredictionBody { - request_id: request_id.clone(), - })? - .into(), - ); - Ok(req?) 
- }, - client, - llm_token, - app_version, - )) - .await; - - Self::handle_api_response(&this, response, cx)?; - anyhow::Ok(()) - }) - .detach_and_log_err(cx); - } - - fn discard_current_prediction(&mut self, project: &Entity) { - if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { - project_state.current_prediction.take(); - project_state.pending_predictions.clear(); - }; - } - - fn is_refreshing(&self, project: &Entity) -> bool { - self.projects - .get(&project.entity_id()) - .is_some_and(|project_state| !project_state.pending_predictions.is_empty()) - } - - pub fn refresh_prediction_from_buffer( - &mut self, - project: Entity, - buffer: Entity, - position: language::Anchor, - cx: &mut Context, - ) { - self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { - let Some(request_task) = this - .update(cx, |this, cx| { - this.request_prediction(&project, &buffer, position, cx) - }) - .log_err() - else { - return Task::ready(anyhow::Ok(())); - }; - - let project = project.clone(); - cx.spawn(async move |cx| { - if let Some(prediction) = request_task.await? { - this.update(cx, |this, cx| { - let project_state = this - .projects - .get_mut(&project.entity_id()) - .context("Project not found")?; - - let new_prediction = CurrentEditPrediction { - requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), - prediction: prediction, - }; - - if project_state - .current_prediction - .as_ref() - .is_none_or(|old_prediction| { - new_prediction.should_replace_prediction(&old_prediction, cx) - }) - { - project_state.current_prediction = Some(new_prediction); - cx.notify(); - } - anyhow::Ok(()) - })??; - } - Ok(()) - }) - }) - } - - pub fn refresh_prediction_from_diagnostics( - &mut self, - project: Entity, - cx: &mut Context, - ) { - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - - // Prefer predictions from buffer - if zeta_project.current_prediction.is_some() { - return; - }; - - self.queue_prediction_refresh(project.clone(), project.entity_id(), cx, move |this, cx| { - let Some(open_buffer_task) = project - .update(cx, |project, cx| { - project - .active_entry() - .and_then(|entry| project.path_for_entry(entry, cx)) - .map(|path| project.open_buffer(path, cx)) - }) - .log_err() - .flatten() - else { - return Task::ready(anyhow::Ok(())); - }; - - cx.spawn(async move |cx| { - let active_buffer = open_buffer_task.await?; - let snapshot = active_buffer.read_with(cx, |buffer, _cx| buffer.snapshot())?; - - let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, - &snapshot, - Default::default(), - Default::default(), - &project, - cx, - ) - .await? - else { - return anyhow::Ok(()); - }; - - let Some(prediction) = this - .update(cx, |this, cx| { - this.request_prediction(&project, &jump_buffer, jump_position, cx) - })? - .await? 
- else { - return anyhow::Ok(()); - }; - - this.update(cx, |this, cx| { - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.current_prediction.get_or_insert_with(|| { - cx.notify(); - CurrentEditPrediction { - requested_by: PredictionRequestedBy::DiagnosticsUpdate, - prediction, - } - }); - } - })?; - - anyhow::Ok(()) - }) - }); - } - - #[cfg(not(test))] - pub const THROTTLE_TIMEOUT: Duration = Duration::from_millis(300); - #[cfg(test)] - pub const THROTTLE_TIMEOUT: Duration = Duration::ZERO; - - fn queue_prediction_refresh( - &mut self, - project: Entity, - throttle_entity: EntityId, - cx: &mut Context, - do_refresh: impl FnOnce(WeakEntity, &mut AsyncApp) -> Task> + 'static, - ) { - let zeta_project = self.get_or_init_zeta_project(&project, cx); - let pending_prediction_id = zeta_project.next_pending_prediction_id; - zeta_project.next_pending_prediction_id += 1; - let last_request = zeta_project.last_prediction_refresh; - - // TODO report cancelled requests like in zeta1 - let task = cx.spawn(async move |this, cx| { - if let Some((last_entity, last_timestamp)) = last_request - && throttle_entity == last_entity - && let Some(timeout) = - (last_timestamp + Self::THROTTLE_TIMEOUT).checked_duration_since(Instant::now()) - { - cx.background_executor().timer(timeout).await; - } - - do_refresh(this.clone(), cx).await.log_err(); - - this.update(cx, |this, cx| { - let zeta_project = this.get_or_init_zeta_project(&project, cx); - - if zeta_project.pending_predictions[0].id == pending_prediction_id { - zeta_project.pending_predictions.remove(0); - } else { - zeta_project.pending_predictions.clear(); - } - - cx.notify(); - }) - .ok(); - }); - - if zeta_project.pending_predictions.len() <= 1 { - zeta_project.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } else if zeta_project.pending_predictions.len() == 2 { - zeta_project.pending_predictions.pop(); - zeta_project.pending_predictions.push(PendingPrediction { - id: pending_prediction_id, - _task: task, - }); - } - } - - pub fn request_prediction( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - match self.edit_prediction_model { - ZetaEditPredictionModel::ZedCloud => { - self.request_prediction_with_zed_cloud(project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Sweep => { - self.request_prediction_with_sweep(project, active_buffer, position, true, cx) - } - } - } - - fn request_prediction_with_sweep( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - allow_jump: bool, - cx: &mut Context, - ) -> Task>> { - let snapshot = active_buffer.read(cx).snapshot(); - let debug_info = self.sweep_ai_debug_info.clone(); - let Some(api_token) = self.sweep_api_token.clone() else { - return Task::ready(Ok(None)); - }; - let full_path: Arc = snapshot - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| "untitled".into()) - .into(); - - let project_file = project::File::from_dyn(snapshot.file()); - let repo_name = project_file - .map(|file| file.worktree.read(cx).root_name_str()) - .unwrap_or("untitled") - .into(); - let offset = position.to_offset(&snapshot); - - let project_state = self.get_or_init_zeta_project(project, cx); - let events = project_state.events.clone(); - let has_events = !events.is_empty(); - let recent_buffers = project_state.recent_paths.iter().cloned(); - let http_client = cx.http_client(); - - let 
recent_buffer_snapshots = recent_buffers - .filter_map(|project_path| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - .collect::>(); - - const DIAGNOSTIC_LINES_RANGE: u32 = 20; - - let cursor_point = position.to_point(&snapshot); - let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); - let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; - let diagnostic_search_range = - Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); - - let result = cx.background_spawn({ - let snapshot = snapshot.clone(); - let diagnostic_search_range = diagnostic_search_range.clone(); - async move { - let text = snapshot.text(); - - let mut recent_changes = String::new(); - for event in events { - sweep_ai::write_event(event, &mut recent_changes).unwrap(); - } - - let mut file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = Point::new(30, 0).min(snapshot.max_point()); - sweep_ai::FileChunk { - content: snapshot.text_for_range(Point::zero()..end_point).collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), - start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? - .0, - ) - }), - } - }) - .collect::>(); - - let diagnostic_entries = - snapshot.diagnostics_in_range(diagnostic_search_range, false); - let mut diagnostic_content = String::new(); - let mut diagnostic_count = 0; - - for entry in diagnostic_entries { - let start_point: Point = entry.range.start; - - let severity = match entry.diagnostic.severity { - DiagnosticSeverity::ERROR => "error", - DiagnosticSeverity::WARNING => "warning", - DiagnosticSeverity::INFORMATION => "info", - DiagnosticSeverity::HINT => "hint", - _ => continue, - }; - - diagnostic_count += 1; - - writeln!( - &mut diagnostic_content, - "{} at line {}: {}", - severity, - start_point.row + 1, - entry.diagnostic.message - )?; - } - - if !diagnostic_content.is_empty() { - file_chunks.push(sweep_ai::FileChunk { - file_path: format!("Diagnostics for {}", full_path.display()), - start_line: 0, - end_line: diagnostic_count, - content: diagnostic_content, - timestamp: None, - }); - } - - let request_body = sweep_ai::AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; - - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); - - const SWEEP_API_URL: &str = - "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; - - let mut response = http_client.send(request).await?; - - let mut body: Vec = 
Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; - - let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = language::text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); - - anyhow::Ok((response.autocomplete_id, edits, snapshot)) - } - }); - - let buffer = active_buffer.clone(); - let project = project.clone(); - let active_buffer = active_buffer.clone(); - - cx.spawn(async move |this, cx| { - let (id, edits, old_snapshot) = result.await?; - - if edits.is_empty() { - if has_events - && allow_jump - && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, - &snapshot, - diagnostic_search_range, - cursor_point, - &project, - cx, - ) - .await? - { - return this - .update(cx, |this, cx| { - this.request_prediction_with_sweep( - &project, - &jump_buffer, - jump_position, - false, - cx, - ) - })? - .await; - } - - return anyhow::Ok(None); - } - - let Some((edits, new_snapshot, preview_task)) = - buffer.read_with(cx, |buffer, cx| { - let new_snapshot = buffer.snapshot(); - - let edits: Arc<[(Range, Arc)]> = - edit_prediction::interpolate_edits(&old_snapshot, &new_snapshot, &edits)? - .into(); - let preview_task = buffer.preview_edits(edits.clone(), cx); - - Some((edits, new_snapshot, preview_task)) - })? - else { - return anyhow::Ok(None); - }; - - let prediction = EditPrediction { - id: EditPredictionId(id.into()), - edits, - snapshot: new_snapshot, - edit_preview: preview_task.await, - buffer, - }; - - anyhow::Ok(Some(prediction)) - }) - } - - async fn next_diagnostic_location( - active_buffer: Entity, - active_buffer_snapshot: &BufferSnapshot, - active_buffer_diagnostic_search_range: Range, - active_buffer_cursor_point: Point, - project: &Entity, - cx: &mut AsyncApp, - ) -> Result, language::Anchor)>> { - // find the closest diagnostic to the cursor that wasn't close enough to be included in the last request - let mut jump_location = active_buffer_snapshot - .diagnostic_groups(None) - .into_iter() - .filter_map(|(_, group)| { - let range = &group.entries[group.primary_ix] - .range - .to_point(&active_buffer_snapshot); - if range.overlaps(&active_buffer_diagnostic_search_range) { - None - } else { - Some(range.start) - } - }) - .min_by_key(|probe| probe.row.abs_diff(active_buffer_cursor_point.row)) - .map(|position| { - ( - active_buffer.clone(), - active_buffer_snapshot.anchor_before(position), - ) - }); - - if jump_location.is_none() { - let active_buffer_path = active_buffer.read_with(cx, |buffer, cx| { - let file = buffer.file()?; - - Some(ProjectPath { - worktree_id: file.worktree_id(cx), - path: file.path().clone(), - }) - })?; - - let buffer_task = project.update(cx, |project, cx| { - let (path, _, _) = project - .diagnostic_summaries(false, cx) - .filter(|(path, _, _)| Some(path) != active_buffer_path.as_ref()) - .max_by_key(|(path, _, _)| { - // find the buffer with errors that shares most parent directories - path.path - .components() - .zip( - active_buffer_path - .as_ref() - .map(|p| p.path.components()) - .unwrap_or_default(), - ) - 
.take_while(|(a, b)| a == b) - .count() - })?; - - Some(project.open_buffer(path, cx)) - })?; - - if let Some(buffer_task) = buffer_task { - let closest_buffer = buffer_task.await?; - - jump_location = closest_buffer - .read_with(cx, |buffer, _cx| { - buffer - .buffer_diagnostics(None) - .into_iter() - .min_by_key(|entry| entry.diagnostic.severity) - .map(|entry| entry.range.start) - })? - .map(|position| (closest_buffer, position)); - } - } - - anyhow::Ok(jump_location) - } - - fn request_prediction_with_zed_cloud( - &mut self, - project: &Entity, - active_buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task>> { - let project_state = self.projects.get(&project.entity_id()); - - let index_state = project_state.and_then(|state| { - state - .syntax_index - .as_ref() - .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) - }); - let options = self.options.clone(); - let active_snapshot = active_buffer.read(cx).snapshot(); - let Some(excerpt_path) = active_snapshot - .file() - .map(|path| -> Arc { path.full_path(cx).into() }) - else { - return Task::ready(Err(anyhow!("No file path for excerpt"))); - }; - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let app_version = AppVersion::global(cx); - let worktree_snapshots = project - .read(cx) - .worktrees(cx) - .map(|worktree| worktree.read(cx).snapshot()) - .collect::>(); - let debug_tx = self.debug_tx.clone(); - - let events = project_state - .map(|state| { - state - .events - .iter() - .filter_map(|event| event.to_request_event(cx)) - .collect::>() - }) - .unwrap_or_default(); - - let diagnostics = active_snapshot.diagnostic_sets().clone(); - - let parent_abs_path = - project::File::from_dyn(active_buffer.read(cx).file()).and_then(|f| { - let mut path = f.worktree.read(cx).absolutize(&f.path); - if path.pop() { Some(path) } else { None } - }); - - // TODO data collection - let can_collect_data = cx.is_staff(); - - let empty_context_files = HashMap::default(); - let context_files = project_state - .and_then(|project_state| project_state.context.as_ref()) - .unwrap_or(&empty_context_files); - - #[cfg(feature = "eval-support")] - let parsed_fut = futures::future::join_all( - context_files - .keys() - .map(|buffer| buffer.read(cx).parsing_idle()), - ); - - let mut included_files = context_files - .iter() - .filter_map(|(buffer_entity, ranges)| { - let buffer = buffer_entity.read(cx); - Some(( - buffer_entity.clone(), - buffer.snapshot(), - buffer.file()?.full_path(cx).into(), - ranges.clone(), - )) - }) - .collect::>(); - - included_files.sort_by(|(_, _, path_a, ranges_a), (_, _, path_b, ranges_b)| { - (path_a, ranges_a.len()).cmp(&(path_b, ranges_b.len())) - }); - - #[cfg(feature = "eval-support")] - let eval_cache = self.eval_cache.clone(); - - let request_task = cx.background_spawn({ - let active_buffer = active_buffer.clone(); - async move { - #[cfg(feature = "eval-support")] - parsed_fut.await; - - let index_state = if let Some(index_state) = index_state { - Some(index_state.lock_owned().await) - } else { - None - }; - - let cursor_offset = position.to_offset(&active_snapshot); - let cursor_point = cursor_offset.to_point(&active_snapshot); - - let before_retrieval = chrono::Utc::now(); - - let (diagnostic_groups, diagnostic_groups_truncated) = - Self::gather_nearby_diagnostics( - cursor_offset, - &diagnostics, - &active_snapshot, - options.max_diagnostic_bytes, - ); - - let cloud_request = match options.context { - ContextMode::Agentic(context_options) => { 
- let Some(excerpt) = EditPredictionExcerpt::select_from_buffer( - cursor_point, - &active_snapshot, - &context_options.excerpt, - index_state.as_deref(), - ) else { - return Ok((None, None)); - }; - - let excerpt_anchor_range = active_snapshot.anchor_after(excerpt.range.start) - ..active_snapshot.anchor_before(excerpt.range.end); - - if let Some(buffer_ix) = - included_files.iter().position(|(_, snapshot, _, _)| { - snapshot.remote_id() == active_snapshot.remote_id() - }) - { - let (_, buffer, _, ranges) = &mut included_files[buffer_ix]; - ranges.push(excerpt_anchor_range); - retrieval_search::merge_anchor_ranges(ranges, buffer); - let last_ix = included_files.len() - 1; - included_files.swap(buffer_ix, last_ix); - } else { - included_files.push(( - active_buffer.clone(), - active_snapshot.clone(), - excerpt_path.clone(), - vec![excerpt_anchor_range], - )); - } - - let included_files = included_files - .iter() - .map(|(_, snapshot, path, ranges)| { - let ranges = ranges - .iter() - .map(|range| { - let point_range = range.to_point(&snapshot); - Line(point_range.start.row)..Line(point_range.end.row) - }) - .collect::>(); - let excerpts = assemble_excerpts(&snapshot, ranges); - predict_edits_v3::IncludedFile { - path: path.clone(), - max_row: Line(snapshot.max_point().row), - excerpts, - } - }) - .collect::>(); - - predict_edits_v3::PredictEditsRequest { - excerpt_path, - excerpt: String::new(), - excerpt_line_range: Line(0)..Line(0), - excerpt_range: 0..0, - cursor_point: predict_edits_v3::Point { - line: predict_edits_v3::Line(cursor_point.row), - column: cursor_point.column, - }, - included_files, - referenced_declarations: vec![], - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - debug_info: debug_tx.is_some(), - prompt_max_bytes: Some(options.max_prompt_bytes), - prompt_format: options.prompt_format, - // TODO [zeta2] - signatures: vec![], - excerpt_parent: None, - git_info: None, - } - } - ContextMode::Syntax(context_options) => { - let Some(context) = EditPredictionContext::gather_context( - cursor_point, - &active_snapshot, - parent_abs_path.as_deref(), - &context_options, - index_state.as_deref(), - ) else { - return Ok((None, None)); - }; - - make_syntax_context_cloud_request( - excerpt_path, - context, - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - None, - debug_tx.is_some(), - &worktree_snapshots, - index_state.as_deref(), - Some(options.max_prompt_bytes), - options.prompt_format, - ) - } - }; - - let prompt_result = cloud_zeta2_prompt::build_prompt(&cloud_request); - - let retrieval_time = chrono::Utc::now() - before_retrieval; - - let debug_response_tx = if let Some(debug_tx) = &debug_tx { - let (response_tx, response_rx) = oneshot::channel(); - - debug_tx - .unbounded_send(ZetaDebugInfo::EditPredictionRequested( - ZetaEditPredictionDebugInfo { - request: cloud_request.clone(), - retrieval_time, - buffer: active_buffer.downgrade(), - local_prompt: match prompt_result.as_ref() { - Ok((prompt, _)) => Ok(prompt.clone()), - Err(err) => Err(err.to_string()), - }, - position, - response_rx, - }, - )) - .ok(); - Some(response_tx) - } else { - None - }; - - if cfg!(debug_assertions) && env::var("ZED_ZETA2_SKIP_REQUEST").is_ok() { - if let Some(debug_response_tx) = debug_response_tx { - debug_response_tx - .send((Err("Request skipped".to_string()), TimeDelta::zero())) - .ok(); - } - anyhow::bail!("Skipping request because ZED_ZETA2_SKIP_REQUEST is set") - } - - let (prompt, _) = prompt_result?; - let 
generation_params = - cloud_zeta2_prompt::generation_params(cloud_request.prompt_format); - let request = open_ai::Request { - model: EDIT_PREDICTIONS_MODEL_ID.clone(), - messages: vec![open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(prompt), - }], - stream: false, - max_completion_tokens: None, - stop: generation_params.stop.unwrap_or_default(), - temperature: generation_params.temperature.unwrap_or(0.7), - tool_choice: None, - parallel_tool_calls: None, - tools: vec![], - prompt_cache_key: None, - reasoning_effort: None, - }; - - log::trace!("Sending edit prediction request"); - - let before_request = chrono::Utc::now(); - let response = Self::send_raw_llm_request( - request, - client, - llm_token, - app_version, - #[cfg(feature = "eval-support")] - eval_cache, - #[cfg(feature = "eval-support")] - EvalCacheEntryKind::Prediction, - ) - .await; - let request_time = chrono::Utc::now() - before_request; - - log::trace!("Got edit prediction response"); - - if let Some(debug_response_tx) = debug_response_tx { - debug_response_tx - .send(( - response - .as_ref() - .map_err(|err| err.to_string()) - .map(|response| response.0.clone()), - request_time, - )) - .ok(); - } - - let (res, usage) = response?; - let request_id = EditPredictionId(res.id.clone().into()); - let Some(mut output_text) = text_from_response(res) else { - return Ok((None, usage)); - }; - - if output_text.contains(CURSOR_MARKER) { - log::trace!("Stripping out {CURSOR_MARKER} from response"); - output_text = output_text.replace(CURSOR_MARKER, ""); - } - - let get_buffer_from_context = |path: &Path| { - included_files - .iter() - .find_map(|(_, buffer, probe_path, ranges)| { - if probe_path.as_ref() == path { - Some((buffer, ranges.as_slice())) - } else { - None - } - }) - }; - - let (edited_buffer_snapshot, edits) = match options.prompt_format { - PromptFormat::NumLinesUniDiff => { - // TODO: Implement parsing of multi-file diffs - crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? - } - PromptFormat::Minimal - | PromptFormat::MinimalQwen - | PromptFormat::SeedCoder1120 => { - if output_text.contains("--- a/\n+++ b/\nNo edits") { - let edits = vec![]; - (&active_snapshot, edits) - } else { - crate::udiff::parse_diff(&output_text, get_buffer_from_context).await? - } - } - PromptFormat::OldTextNewText => { - crate::xml_edits::parse_xml_edits(&output_text, get_buffer_from_context) - .await? - } - _ => { - bail!("unsupported prompt format {}", options.prompt_format) - } - }; - - let edited_buffer = included_files - .iter() - .find_map(|(buffer, snapshot, _, _)| { - if snapshot.remote_id() == edited_buffer_snapshot.remote_id() { - Some(buffer.clone()) - } else { - None - } - }) - .context("Failed to find buffer in included_buffers")?; - - anyhow::Ok(( - Some(( - request_id, - edited_buffer, - edited_buffer_snapshot.clone(), - edits, - )), - usage, - )) - } - }); - - cx.spawn({ - async move |this, cx| { - let Some((id, edited_buffer, edited_buffer_snapshot, edits)) = - Self::handle_api_response(&this, request_task.await, cx)? 
- else { - return Ok(None); - }; - - // TODO telemetry: duration, etc - Ok( - EditPrediction::new(id, &edited_buffer, &edited_buffer_snapshot, edits, cx) - .await, - ) - } - }) - } - - async fn send_raw_llm_request( - request: open_ai::Request, - client: Arc, - llm_token: LlmApiToken, - app_version: Version, - #[cfg(feature = "eval-support")] eval_cache: Option>, - #[cfg(feature = "eval-support")] eval_cache_kind: EvalCacheEntryKind, - ) -> Result<(open_ai::Response, Option)> { - let url = if let Some(predict_edits_url) = PREDICT_EDITS_URL.as_ref() { - http_client::Url::parse(&predict_edits_url)? - } else { - client - .http_client() - .build_zed_llm_url("/predict_edits/raw", &[])? - }; - - #[cfg(feature = "eval-support")] - let cache_key = if let Some(cache) = eval_cache { - use collections::FxHasher; - use std::hash::{Hash, Hasher}; - - let mut hasher = FxHasher::default(); - url.hash(&mut hasher); - let request_str = serde_json::to_string_pretty(&request)?; - request_str.hash(&mut hasher); - let hash = hasher.finish(); - - let key = (eval_cache_kind, hash); - if let Some(response_str) = cache.read(key) { - return Ok((serde_json::from_str(&response_str)?, None)); - } - - Some((cache, request_str, key)) - } else { - None - }; - - let (response, usage) = Self::send_api_request( - |builder| { - let req = builder - .uri(url.as_ref()) - .body(serde_json::to_string(&request)?.into()); - Ok(req?) - }, - client, - llm_token, - app_version, - ) - .await?; - - #[cfg(feature = "eval-support")] - if let Some((cache, request, key)) = cache_key { - cache.write(key, &request, &serde_json::to_string_pretty(&response)?); - } - - Ok((response, usage)) - } - - fn handle_api_response( - this: &WeakEntity, - response: Result<(T, Option)>, - cx: &mut gpui::AsyncApp, - ) -> Result { - match response { - Ok((data, usage)) => { - if let Some(usage) = usage { - this.update(cx, |this, cx| { - this.user_store.update(cx, |user_store, cx| { - user_store.update_edit_prediction_usage(usage, cx); - }); - }) - .ok(); - } - Ok(data) - } - Err(err) => { - if err.is::() { - cx.update(|cx| { - this.update(cx, |this, _cx| { - this.update_required = true; - }) - .ok(); - - let error_message: SharedString = err.to_string().into(); - show_app_notification( - NotificationId::unique::(), - cx, - move |cx| { - cx.new(|cx| { - ErrorMessagePrompt::new(error_message.clone(), cx) - .with_link_button("Update Zed", "https://zed.dev/releases") - }) - }, - ); - }) - .ok(); - } - Err(err) - } - } - } - - async fn send_api_request( - build: impl Fn(http_client::http::request::Builder) -> Result>, - client: Arc, - llm_token: LlmApiToken, - app_version: Version, - ) -> Result<(Res, Option)> - where - Res: DeserializeOwned, - { - let http_client = client.http_client(); - let mut token = llm_token.acquire(&client).await?; - let mut did_retry = false; - - loop { - let request_builder = http_client::Request::builder().method(Method::POST); - - let request = build( - request_builder - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", token)) - .header(ZED_VERSION_HEADER_NAME, app_version.to_string()), - )?; - - let mut response = http_client.send(request).await?; - - if let Some(minimum_required_version) = response - .headers() - .get(MINIMUM_REQUIRED_VERSION_HEADER_NAME) - .and_then(|version| Version::from_str(version.to_str().ok()?).ok()) - { - anyhow::ensure!( - app_version >= minimum_required_version, - ZedUpdateRequiredError { - minimum_version: minimum_required_version - } - ); - } - - if 
response.status().is_success() { - let usage = EditPredictionUsage::from_headers(response.headers()).ok(); - - let mut body = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - return Ok((serde_json::from_slice(&body)?, usage)); - } else if !did_retry - && response - .headers() - .get(EXPIRED_LLM_TOKEN_HEADER_NAME) - .is_some() - { - did_retry = true; - token = llm_token.refresh(&client).await?; - } else { - let mut body = String::new(); - response.body_mut().read_to_string(&mut body).await?; - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - body - ); - } - } - } - - pub const CONTEXT_RETRIEVAL_IDLE_DURATION: Duration = Duration::from_secs(10); - pub const CONTEXT_RETRIEVAL_DEBOUNCE_DURATION: Duration = Duration::from_secs(3); - - // Refresh the related excerpts when the user just beguns editing after - // an idle period, and after they pause editing. - fn refresh_context_if_needed( - &mut self, - project: &Entity, - buffer: &Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) { - if !matches!(&self.options().context, ContextMode::Agentic { .. }) { - return; - } - - let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) else { - return; - }; - - let now = Instant::now(); - let was_idle = zeta_project - .refresh_context_timestamp - .map_or(true, |timestamp| { - now - timestamp > Self::CONTEXT_RETRIEVAL_IDLE_DURATION - }); - zeta_project.refresh_context_timestamp = Some(now); - zeta_project.refresh_context_debounce_task = Some(cx.spawn({ - let buffer = buffer.clone(); - let project = project.clone(); - async move |this, cx| { - if was_idle { - log::debug!("refetching edit prediction context after idle"); - } else { - cx.background_executor() - .timer(Self::CONTEXT_RETRIEVAL_DEBOUNCE_DURATION) - .await; - log::debug!("refetching edit prediction context after pause"); - } - this.update(cx, |this, cx| { - let task = this.refresh_context(project.clone(), buffer, cursor_position, cx); - - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.refresh_context_task = Some(task.log_err()); - }; - }) - .ok() - } - })); - } - - // Refresh the related excerpts asynchronously. Ensure the task runs to completion, - // and avoid spawning more than one concurrent task. 
- pub fn refresh_context( - &mut self, - project: Entity, - buffer: Entity, - cursor_position: language::Anchor, - cx: &mut Context, - ) -> Task> { - let Some(zeta_project) = self.projects.get(&project.entity_id()) else { - return Task::ready(anyhow::Ok(())); - }; - - let ContextMode::Agentic(options) = &self.options().context else { - return Task::ready(anyhow::Ok(())); - }; - - let snapshot = buffer.read(cx).snapshot(); - let cursor_point = cursor_position.to_point(&snapshot); - let Some(cursor_excerpt) = EditPredictionExcerpt::select_from_buffer( - cursor_point, - &snapshot, - &options.excerpt, - None, - ) else { - return Task::ready(Ok(())); - }; - - let app_version = AppVersion::global(cx); - let client = self.client.clone(); - let llm_token = self.llm_token.clone(); - let debug_tx = self.debug_tx.clone(); - let current_file_path: Arc = snapshot - .file() - .map(|f| f.full_path(cx).into()) - .unwrap_or_else(|| Path::new("untitled").into()); - - let prompt = match cloud_zeta2_prompt::retrieval_prompt::build_prompt( - predict_edits_v3::PlanContextRetrievalRequest { - excerpt: cursor_excerpt.text(&snapshot).body, - excerpt_path: current_file_path, - excerpt_line_range: cursor_excerpt.line_range, - cursor_file_max_row: Line(snapshot.max_point().row), - events: zeta_project - .events - .iter() - .filter_map(|ev| ev.to_request_event(cx)) - .collect(), - }, - ) { - Ok(prompt) => prompt, - Err(err) => { - return Task::ready(Err(err)); - } - }; - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::ContextRetrievalStarted( - ZetaContextRetrievalStartedDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - search_prompt: prompt.clone(), - }, - )) - .ok(); - } - - pub static TOOL_SCHEMA: LazyLock<(serde_json::Value, String)> = LazyLock::new(|| { - let schema = language_model::tool_schema::root_schema_for::( - language_model::LanguageModelToolSchemaFormat::JsonSchemaSubset, - ); - - let description = schema - .get("description") - .and_then(|description| description.as_str()) - .unwrap() - .to_string(); - - (schema.into(), description) - }); - - let (tool_schema, tool_description) = TOOL_SCHEMA.clone(); - - let request = open_ai::Request { - model: CONTEXT_RETRIEVAL_MODEL_ID.clone(), - messages: vec![open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(prompt), - }], - stream: false, - max_completion_tokens: None, - stop: Default::default(), - temperature: 0.7, - tool_choice: None, - parallel_tool_calls: None, - tools: vec![open_ai::ToolDefinition::Function { - function: FunctionDefinition { - name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME.to_string(), - description: Some(tool_description), - parameters: Some(tool_schema), - }, - }], - prompt_cache_key: None, - reasoning_effort: None, - }; - - #[cfg(feature = "eval-support")] - let eval_cache = self.eval_cache.clone(); - - cx.spawn(async move |this, cx| { - log::trace!("Sending search planning request"); - let response = Self::send_raw_llm_request( - request, - client, - llm_token, - app_version, - #[cfg(feature = "eval-support")] - eval_cache.clone(), - #[cfg(feature = "eval-support")] - EvalCacheEntryKind::Context, - ) - .await; - let mut response = Self::handle_api_response(&this, response, cx)?; - log::trace!("Got search planning response"); - - let choice = response - .choices - .pop() - .context("No choices in retrieval response")?; - let open_ai::RequestMessage::Assistant { - content: _, - tool_calls, - } = choice.message - else { - anyhow::bail!("Retrieval response 
didn't include an assistant message"); - }; - - let mut queries: Vec = Vec::new(); - for tool_call in tool_calls { - let open_ai::ToolCallContent::Function { function } = tool_call.content; - if function.name != cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME { - log::warn!( - "Context retrieval response tried to call an unknown tool: {}", - function.name - ); - - continue; - } - - let input: SearchToolInput = serde_json::from_str(&function.arguments) - .with_context(|| format!("invalid search json {}", &function.arguments))?; - queries.extend(input.queries); - } - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::SearchQueriesGenerated( - ZetaSearchQueryDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - search_queries: queries.clone(), - }, - )) - .ok(); - } - - log::trace!("Running retrieval search: {queries:#?}"); - - let related_excerpts_result = retrieval_search::run_retrieval_searches( - queries, - project.clone(), - #[cfg(feature = "eval-support")] - eval_cache, - cx, - ) - .await; - - log::trace!("Search queries executed"); - - if let Some(debug_tx) = &debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::SearchQueriesExecuted( - ZetaContextRetrievalDebugInfo { - project: project.clone(), - timestamp: Instant::now(), - }, - )) - .ok(); - } - - this.update(cx, |this, _cx| { - let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) else { - return Ok(()); - }; - zeta_project.refresh_context_task.take(); - if let Some(debug_tx) = &this.debug_tx { - debug_tx - .unbounded_send(ZetaDebugInfo::ContextRetrievalFinished( - ZetaContextRetrievalDebugInfo { - project, - timestamp: Instant::now(), - }, - )) - .ok(); - } - match related_excerpts_result { - Ok(excerpts) => { - zeta_project.context = Some(excerpts); - Ok(()) - } - Err(error) => Err(error), - } - })? - }) - } - - pub fn set_context( - &mut self, - project: Entity, - context: HashMap, Vec>>, - ) { - if let Some(zeta_project) = self.projects.get_mut(&project.entity_id()) { - zeta_project.context = Some(context); - } - } - - fn gather_nearby_diagnostics( - cursor_offset: usize, - diagnostic_sets: &[(LanguageServerId, DiagnosticSet)], - snapshot: &BufferSnapshot, - max_diagnostics_bytes: usize, - ) -> (Vec, bool) { - // TODO: Could make this more efficient - let mut diagnostic_groups = Vec::new(); - for (language_server_id, diagnostics) in diagnostic_sets { - let mut groups = Vec::new(); - diagnostics.groups(*language_server_id, &mut groups, &snapshot); - diagnostic_groups.extend( - groups - .into_iter() - .map(|(_, group)| group.resolve::(&snapshot)), - ); - } - - // sort by proximity to cursor - diagnostic_groups.sort_by_key(|group| { - let range = &group.entries[group.primary_ix].range; - if range.start >= cursor_offset { - range.start - cursor_offset - } else if cursor_offset >= range.end { - cursor_offset - range.end - } else { - (cursor_offset - range.start).min(range.end - cursor_offset) - } - }); - - let mut results = Vec::new(); - let mut diagnostic_groups_truncated = false; - let mut diagnostics_byte_count = 0; - for group in diagnostic_groups { - let raw_value = serde_json::value::to_raw_value(&group).unwrap(); - diagnostics_byte_count += raw_value.get().len(); - if diagnostics_byte_count > max_diagnostics_bytes { - diagnostic_groups_truncated = true; - break; - } - results.push(predict_edits_v3::DiagnosticGroup(raw_value)); - } - - (results, diagnostic_groups_truncated) - } - - // TODO: Dedupe with similar code in request_prediction? 
- pub fn cloud_request_for_zeta_cli( - &mut self, - project: &Entity, - buffer: &Entity, - position: language::Anchor, - cx: &mut Context, - ) -> Task> { - let project_state = self.projects.get(&project.entity_id()); - - let index_state = project_state.and_then(|state| { - state - .syntax_index - .as_ref() - .map(|index| index.read_with(cx, |index, _cx| index.state().clone())) - }); - let options = self.options.clone(); - let snapshot = buffer.read(cx).snapshot(); - let Some(excerpt_path) = snapshot.file().map(|path| path.full_path(cx)) else { - return Task::ready(Err(anyhow!("No file path for excerpt"))); - }; - let worktree_snapshots = project - .read(cx) - .worktrees(cx) - .map(|worktree| worktree.read(cx).snapshot()) - .collect::>(); - - let parent_abs_path = project::File::from_dyn(buffer.read(cx).file()).and_then(|f| { - let mut path = f.worktree.read(cx).absolutize(&f.path); - if path.pop() { Some(path) } else { None } - }); - - cx.background_spawn(async move { - let index_state = if let Some(index_state) = index_state { - Some(index_state.lock_owned().await) - } else { - None - }; - - let cursor_point = position.to_point(&snapshot); - - let debug_info = true; - EditPredictionContext::gather_context( - cursor_point, - &snapshot, - parent_abs_path.as_deref(), - match &options.context { - ContextMode::Agentic(_) => { - // TODO - panic!("Llm mode not supported in zeta cli yet"); - } - ContextMode::Syntax(edit_prediction_context_options) => { - edit_prediction_context_options - } - }, - index_state.as_deref(), - ) - .context("Failed to select excerpt") - .map(|context| { - make_syntax_context_cloud_request( - excerpt_path.into(), - context, - // TODO pass everything - Vec::new(), - false, - Vec::new(), - false, - None, - debug_info, - &worktree_snapshots, - index_state.as_deref(), - Some(options.max_prompt_bytes), - options.prompt_format, - ) - }) - }) - } - - pub fn wait_for_initial_indexing( - &mut self, - project: &Entity, - cx: &mut Context, - ) -> Task> { - let zeta_project = self.get_or_init_zeta_project(project, cx); - if let Some(syntax_index) = &zeta_project.syntax_index { - syntax_index.read(cx).wait_for_initial_file_indexing(cx) - } else { - Task::ready(Ok(())) - } - } -} - -pub fn text_from_response(mut res: open_ai::Response) -> Option { - let choice = res.choices.pop()?; - let output_text = match choice.message { - open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Plain(content)), - .. - } => content, - open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Multipart(mut content)), - .. - } => { - if content.is_empty() { - log::error!("No output from Baseten completion response"); - return None; - } - - match content.remove(0) { - open_ai::MessagePart::Text { text } => text, - open_ai::MessagePart::Image { .. } => { - log::error!("Expected text, got an image"); - return None; - } - } - } - _ => { - log::error!("Invalid response message: {:?}", choice.message); - return None; - } - }; - Some(output_text) -} - -#[derive(Error, Debug)] -#[error( - "You must update to Zed version {minimum_version} or higher to continue using edit predictions." 
-)] -pub struct ZedUpdateRequiredError { - minimum_version: Version, -} - -fn make_syntax_context_cloud_request( - excerpt_path: Arc, - context: EditPredictionContext, - events: Vec, - can_collect_data: bool, - diagnostic_groups: Vec, - diagnostic_groups_truncated: bool, - git_info: Option, - debug_info: bool, - worktrees: &Vec, - index_state: Option<&SyntaxIndexState>, - prompt_max_bytes: Option, - prompt_format: PromptFormat, -) -> predict_edits_v3::PredictEditsRequest { - let mut signatures = Vec::new(); - let mut declaration_to_signature_index = HashMap::default(); - let mut referenced_declarations = Vec::new(); - - for snippet in context.declarations { - let project_entry_id = snippet.declaration.project_entry_id(); - let Some(path) = worktrees.iter().find_map(|worktree| { - worktree.entry_for_id(project_entry_id).map(|entry| { - let mut full_path = RelPathBuf::new(); - full_path.push(worktree.root_name()); - full_path.push(&entry.path); - full_path - }) - }) else { - continue; - }; - - let parent_index = index_state.and_then(|index_state| { - snippet.declaration.parent().and_then(|parent| { - add_signature( - parent, - &mut declaration_to_signature_index, - &mut signatures, - index_state, - ) - }) - }); - - let (text, text_is_truncated) = snippet.declaration.item_text(); - referenced_declarations.push(predict_edits_v3::ReferencedDeclaration { - path: path.as_std_path().into(), - text: text.into(), - range: snippet.declaration.item_line_range(), - text_is_truncated, - signature_range: snippet.declaration.signature_range_in_item_text(), - parent_index, - signature_score: snippet.score(DeclarationStyle::Signature), - declaration_score: snippet.score(DeclarationStyle::Declaration), - score_components: snippet.components, - }); - } - - let excerpt_parent = index_state.and_then(|index_state| { - context - .excerpt - .parent_declarations - .last() - .and_then(|(parent, _)| { - add_signature( - *parent, - &mut declaration_to_signature_index, - &mut signatures, - index_state, - ) - }) - }); - - predict_edits_v3::PredictEditsRequest { - excerpt_path, - excerpt: context.excerpt_text.body, - excerpt_line_range: context.excerpt.line_range, - excerpt_range: context.excerpt.range, - cursor_point: predict_edits_v3::Point { - line: predict_edits_v3::Line(context.cursor_point.row), - column: context.cursor_point.column, - }, - referenced_declarations, - included_files: vec![], - signatures, - excerpt_parent, - events, - can_collect_data, - diagnostic_groups, - diagnostic_groups_truncated, - git_info, - debug_info, - prompt_max_bytes, - prompt_format, - } -} - -fn add_signature( - declaration_id: DeclarationId, - declaration_to_signature_index: &mut HashMap, - signatures: &mut Vec, - index: &SyntaxIndexState, -) -> Option { - if let Some(signature_index) = declaration_to_signature_index.get(&declaration_id) { - return Some(*signature_index); - } - let Some(parent_declaration) = index.declaration(declaration_id) else { - log::error!("bug: missing parent declaration"); - return None; - }; - let parent_index = parent_declaration.parent().and_then(|parent| { - add_signature(parent, declaration_to_signature_index, signatures, index) - }); - let (text, text_is_truncated) = parent_declaration.signature_text(); - let signature_index = signatures.len(); - signatures.push(Signature { - text: text.into(), - text_is_truncated, - parent_index, - range: parent_declaration.signature_line_range(), - }); - declaration_to_signature_index.insert(declaration_id, signature_index); - Some(signature_index) -} - 
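The removed `add_signature` helper above builds the flat `signatures` list that the request payload expects: it walks a declaration's parent chain, pushes each ancestor's signature exactly once (parents before children), and memoizes the resulting index so that ancestors shared by many declarations are not duplicated. Below is a minimal sketch of that memoized recursion, using hypothetical simplified stand-ins (`Declaration`, `Signature`, a plain `HashMap` of declarations) in place of the crate's `SyntaxIndexState` and `predict_edits_v3` types; it illustrates the ordering guarantee rather than the actual API.

```rust
use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the real syntax-index types.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct DeclarationId(usize);

struct Declaration {
    parent: Option<DeclarationId>,
    signature_text: &'static str,
}

struct Signature {
    text: String,
    parent_index: Option<usize>,
}

/// Appends the signature of `declaration_id` (and, recursively, of all its
/// ancestors) to `signatures`, returning its index. The memo map guarantees
/// each declaration is emitted once and that a parent always precedes its
/// children in the output vector.
fn add_signature(
    declaration_id: DeclarationId,
    declarations: &HashMap<DeclarationId, Declaration>,
    memo: &mut HashMap<DeclarationId, usize>,
    signatures: &mut Vec<Signature>,
) -> Option<usize> {
    if let Some(&index) = memo.get(&declaration_id) {
        return Some(index);
    }
    let declaration = declarations.get(&declaration_id)?;
    // Recurse into the parent first so its signature lands at a lower index.
    let parent_index = declaration
        .parent
        .and_then(|parent| add_signature(parent, declarations, memo, signatures));
    let index = signatures.len();
    signatures.push(Signature {
        text: declaration.signature_text.to_string(),
        parent_index,
    });
    memo.insert(declaration_id, index);
    Some(index)
}

fn main() {
    // A tiny module -> impl -> method chain; both methods share the same ancestors.
    let declarations = HashMap::from([
        (DeclarationId(0), Declaration { parent: None, signature_text: "mod zeta" }),
        (DeclarationId(1), Declaration { parent: Some(DeclarationId(0)), signature_text: "impl Zeta" }),
        (DeclarationId(2), Declaration { parent: Some(DeclarationId(1)), signature_text: "fn request_prediction" }),
        (DeclarationId(3), Declaration { parent: Some(DeclarationId(1)), signature_text: "fn refresh_context" }),
    ]);

    let mut memo = HashMap::new();
    let mut signatures = Vec::new();
    add_signature(DeclarationId(2), &declarations, &mut memo, &mut signatures);
    add_signature(DeclarationId(3), &declarations, &mut memo, &mut signatures);

    // "mod zeta" and "impl Zeta" each appear once, before the two methods.
    for (i, signature) in signatures.iter().enumerate() {
        println!("{i}: {} (parent: {:?})", signature.text, signature.parent_index);
    }
}
```

The parent-before-child ordering appears to be why the helper recurses before pushing: each `parent_index` stored in the request can only refer to an entry that already exists earlier in the same vector.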
-#[cfg(feature = "eval-support")] -pub type EvalCacheKey = (EvalCacheEntryKind, u64); - -#[cfg(feature = "eval-support")] -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum EvalCacheEntryKind { - Context, - Search, - Prediction, -} - -#[cfg(feature = "eval-support")] -impl std::fmt::Display for EvalCacheEntryKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - EvalCacheEntryKind::Search => write!(f, "search"), - EvalCacheEntryKind::Context => write!(f, "context"), - EvalCacheEntryKind::Prediction => write!(f, "prediction"), - } - } -} - -#[cfg(feature = "eval-support")] -pub trait EvalCache: Send + Sync { - fn read(&self, key: EvalCacheKey) -> Option; - fn write(&self, key: EvalCacheKey, input: &str, value: &str); -} - -#[cfg(test)] -mod tests { - use std::{path::Path, sync::Arc}; - - use client::UserStore; - use clock::FakeSystemClock; - use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; - use futures::{ - AsyncReadExt, StreamExt, - channel::{mpsc, oneshot}, - }; - use gpui::{ - Entity, TestAppContext, - http_client::{FakeHttpClient, Response}, - prelude::*, - }; - use indoc::indoc; - use language::OffsetRangeExt as _; - use open_ai::Usage; - use pretty_assertions::{assert_eq, assert_matches}; - use project::{FakeFs, Project}; - use serde_json::json; - use settings::SettingsStore; - use util::path; - use uuid::Uuid; - - use crate::{BufferEditPrediction, Zeta}; - - #[gpui::test] - async fn test_current_state(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "1.txt": "Hello!\nHow\nBye\n", - "2.txt": "Hola!\nComo\nAdios\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - zeta.update(cx, |zeta, cx| { - zeta.register_project(&project, cx); - }); - - let buffer1 = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/1.txt"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - let snapshot1 = buffer1.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot1.anchor_before(language::Point::new(1, 3)); - - // Prediction for current file - - zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - - respond_tx - .send(model_response(indoc! {r" - --- a/root/1.txt - +++ b/root/1.txt - @@ ... @@ - Hello! - -How - +How are you? - Bye - "})) - .unwrap(); - - cx.run_until_parked(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer1, &project, cx) - .unwrap(); - assert_matches!(prediction, BufferEditPrediction::Local { .. 
}); - }); - - // Context refresh - let refresh_task = zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - respond_tx - .send(open_ai::Response { - id: Uuid::new_v4().to_string(), - object: "response".into(), - created: 0, - model: "model".into(), - choices: vec![open_ai::Choice { - index: 0, - message: open_ai::RequestMessage::Assistant { - content: None, - tool_calls: vec![open_ai::ToolCall { - id: "search".into(), - content: open_ai::ToolCallContent::Function { - function: open_ai::FunctionContent { - name: cloud_zeta2_prompt::retrieval_prompt::TOOL_NAME - .to_string(), - arguments: serde_json::to_string(&SearchToolInput { - queries: Box::new([SearchToolQuery { - glob: "root/2.txt".to_string(), - syntax_node: vec![], - content: Some(".".into()), - }]), - }) - .unwrap(), - }, - }, - }], - }, - finish_reason: None, - }], - usage: Usage { - prompt_tokens: 0, - completion_tokens: 0, - total_tokens: 0, - }, - }) - .unwrap(); - refresh_task.await.unwrap(); - - zeta.update(cx, |zeta, _cx| { - zeta.discard_current_prediction(&project); - }); - - // Prediction for another file - zeta.update(cx, |zeta, cx| { - zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) - }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); - respond_tx - .send(model_response(indoc! {r#" - --- a/root/2.txt - +++ b/root/2.txt - Hola! - -Como - +Como estas? - Adios - "#})) - .unwrap(); - cx.run_until_parked(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer1, &project, cx) - .unwrap(); - assert_matches!( - prediction, - BufferEditPrediction::Jump { prediction } if prediction.snapshot.file().unwrap().full_path(cx) == Path::new(path!("root/2.txt")) - ); - }); - - let buffer2 = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/2.txt"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - - zeta.read_with(cx, |zeta, cx| { - let prediction = zeta - .current_prediction_for_buffer(&buffer2, &project, cx) - .unwrap(); - assert_matches!(prediction, BufferEditPrediction::Local { .. }); - }); - } - - #[gpui::test] - async fn test_simple_request(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "foo.md": "Hello!\nHow\nBye\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - let buffer = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot.anchor_before(language::Point::new(1, 3)); - - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) - }); - - let (_, respond_tx) = req_rx.next().await.unwrap(); - - // TODO Put back when we have a structured request again - // assert_eq!( - // request.excerpt_path.as_ref(), - // Path::new(path!("root/foo.md")) - // ); - // assert_eq!( - // request.cursor_point, - // Point { - // line: Line(1), - // column: 3 - // } - // ); - - respond_tx - .send(model_response(indoc! { r" - --- a/root/foo.md - +++ b/root/foo.md - @@ ... @@ - Hello! - -How - +How are you? 
- Bye - "})) - .unwrap(); - - let prediction = prediction_task.await.unwrap().unwrap(); - - assert_eq!(prediction.edits.len(), 1); - assert_eq!( - prediction.edits[0].0.to_point(&snapshot).start, - language::Point::new(1, 3) - ); - assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); - } - - #[gpui::test] - async fn test_request_events(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); - let fs = FakeFs::new(cx.executor()); - fs.insert_tree( - "/root", - json!({ - "foo.md": "Hello!\n\nBye\n" - }), - ) - .await; - let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - let buffer = project - .update(cx, |project, cx| { - let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - project.open_buffer(path, cx) - }) - .await - .unwrap(); - - zeta.update(cx, |zeta, cx| { - zeta.register_buffer(&buffer, &project, cx); - }); - - buffer.update(cx, |buffer, cx| { - buffer.edit(vec![(7..7, "How")], None, cx); - }); - - let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - let position = snapshot.anchor_before(language::Point::new(1, 3)); - - let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) - }); - - let (request, respond_tx) = req_rx.next().await.unwrap(); - - let prompt = prompt_from_request(&request); - assert!( - prompt.contains(indoc! {" - --- a/root/foo.md - +++ b/root/foo.md - @@ -1,3 +1,3 @@ - Hello! - - - +How - Bye - "}), - "{prompt}" - ); - - respond_tx - .send(model_response(indoc! {r#" - --- a/root/foo.md - +++ b/root/foo.md - @@ ... @@ - Hello! - -How - +How are you? - Bye - "#})) - .unwrap(); - - let prediction = prediction_task.await.unwrap().unwrap(); - - assert_eq!(prediction.edits.len(), 1); - assert_eq!( - prediction.edits[0].0.to_point(&snapshot).start, - language::Point::new(1, 3) - ); - assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); - } - - // Skipped until we start including diagnostics in prompt - // #[gpui::test] - // async fn test_request_diagnostics(cx: &mut TestAppContext) { - // let (zeta, mut req_rx) = init_test(cx); - // let fs = FakeFs::new(cx.executor()); - // fs.insert_tree( - // "/root", - // json!({ - // "foo.md": "Hello!\nBye" - // }), - // ) - // .await; - // let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; - - // let path_to_buffer_uri = lsp::Uri::from_file_path(path!("/root/foo.md")).unwrap(); - // let diagnostic = lsp::Diagnostic { - // range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)), - // severity: Some(lsp::DiagnosticSeverity::ERROR), - // message: "\"Hello\" deprecated. 
Use \"Hi\" instead".to_string(), - // ..Default::default() - // }; - - // project.update(cx, |project, cx| { - // project.lsp_store().update(cx, |lsp_store, cx| { - // // Create some diagnostics - // lsp_store - // .update_diagnostics( - // LanguageServerId(0), - // lsp::PublishDiagnosticsParams { - // uri: path_to_buffer_uri.clone(), - // diagnostics: vec![diagnostic], - // version: None, - // }, - // None, - // language::DiagnosticSourceKind::Pushed, - // &[], - // cx, - // ) - // .unwrap(); - // }); - // }); - - // let buffer = project - // .update(cx, |project, cx| { - // let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); - // project.open_buffer(path, cx) - // }) - // .await - // .unwrap(); - - // let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); - // let position = snapshot.anchor_before(language::Point::new(0, 0)); - - // let _prediction_task = zeta.update(cx, |zeta, cx| { - // zeta.request_prediction(&project, &buffer, position, cx) - // }); - - // let (request, _respond_tx) = req_rx.next().await.unwrap(); - - // assert_eq!(request.diagnostic_groups.len(), 1); - // let value = serde_json::from_str::(request.diagnostic_groups[0].0.get()) - // .unwrap(); - // // We probably don't need all of this. TODO define a specific diagnostic type in predict_edits_v3 - // assert_eq!( - // value, - // json!({ - // "entries": [{ - // "range": { - // "start": 8, - // "end": 10 - // }, - // "diagnostic": { - // "source": null, - // "code": null, - // "code_description": null, - // "severity": 1, - // "message": "\"Hello\" deprecated. Use \"Hi\" instead", - // "markdown": null, - // "group_id": 0, - // "is_primary": true, - // "is_disk_based": false, - // "is_unnecessary": false, - // "source_kind": "Pushed", - // "data": null, - // "underline": true - // } - // }], - // "primary_ix": 0 - // }) - // ); - // } - - fn model_response(text: &str) -> open_ai::Response { - open_ai::Response { - id: Uuid::new_v4().to_string(), - object: "response".into(), - created: 0, - model: "model".into(), - choices: vec![open_ai::Choice { - index: 0, - message: open_ai::RequestMessage::Assistant { - content: Some(open_ai::MessageContent::Plain(text.to_string())), - tool_calls: vec![], - }, - finish_reason: None, - }], - usage: Usage { - prompt_tokens: 0, - completion_tokens: 0, - total_tokens: 0, - }, - } - } - - fn prompt_from_request(request: &open_ai::Request) -> &str { - assert_eq!(request.messages.len(), 1); - let open_ai::RequestMessage::User { - content: open_ai::MessageContent::Plain(content), - .. - } = &request.messages[0] - else { - panic!( - "Request does not have single user message of type Plain. 
{:#?}", - request - ); - }; - content - } - - fn init_test( - cx: &mut TestAppContext, - ) -> ( - Entity, - mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, - ) { - cx.update(move |cx| { - let settings_store = SettingsStore::test(cx); - cx.set_global(settings_store); - zlog::init_test(); - - let (req_tx, req_rx) = mpsc::unbounded(); - - let http_client = FakeHttpClient::create({ - move |req| { - let uri = req.uri().path().to_string(); - let mut body = req.into_body(); - let req_tx = req_tx.clone(); - async move { - let resp = match uri.as_str() { - "/client/llm_tokens" => serde_json::to_string(&json!({ - "token": "test" - })) - .unwrap(), - "/predict_edits/raw" => { - let mut buf = Vec::new(); - body.read_to_end(&mut buf).await.ok(); - let req = serde_json::from_slice(&buf).unwrap(); - - let (res_tx, res_rx) = oneshot::channel(); - req_tx.unbounded_send((req, res_tx)).unwrap(); - serde_json::to_string(&res_rx.await?).unwrap() - } - _ => { - panic!("Unexpected path: {}", uri) - } - }; - - Ok(Response::builder().body(resp.into()).unwrap()) - } - } - }); - - let client = client::Client::new(Arc::new(FakeSystemClock::new()), http_client, cx); - client.cloud_client().set_credentials(1, "test".into()); - - language_model::init(client.clone(), cx); - - let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); - let zeta = Zeta::global(&client, &user_store, cx); - - (zeta, req_rx) - }) - } -} diff --git a/crates/zeta2_tools/Cargo.toml b/crates/zeta2_tools/Cargo.toml index 3a9b1ccbf9340dfdaa06030e59c2112b9cda6307..607e24c895d96de1464ff1bfa2a4dfa01c5d9669 100644 --- a/crates/zeta2_tools/Cargo.toml +++ b/crates/zeta2_tools/Cargo.toml @@ -13,7 +13,6 @@ path = "src/zeta2_tools.rs" [dependencies] anyhow.workspace = true -chrono.workspace = true client.workspace = true cloud_llm_client.workspace = true cloud_zeta2_prompt.workspace = true @@ -24,9 +23,7 @@ feature_flags.workspace = true futures.workspace = true gpui.workspace = true language.workspace = true -log.workspace = true multi_buffer.workspace = true -ordered-float.workspace = true project.workspace = true serde.workspace = true serde_json.workspace = true @@ -36,7 +33,7 @@ ui.workspace = true ui_input.workspace = true util.workspace = true workspace.workspace = true -zeta2.workspace = true +zeta.workspace = true [dev-dependencies] clap.workspace = true diff --git a/crates/zeta2_tools/src/zeta2_context_view.rs b/crates/zeta2_tools/src/zeta2_context_view.rs index 759d0d0a3da1adbd9e61fa05b5d305ca9de1f823..54f1ea2d813f7c00d30b12e341fb3e5ac3f155dc 100644 --- a/crates/zeta2_tools/src/zeta2_context_view.rs +++ b/crates/zeta2_tools/src/zeta2_context_view.rs @@ -25,7 +25,7 @@ use ui::{ v_flex, }; use workspace::Item; -use zeta2::{ +use zeta::{ Zeta, ZetaContextRetrievalDebugInfo, ZetaContextRetrievalStartedDebugInfo, ZetaDebugInfo, ZetaSearchQueryDebugInfo, }; diff --git a/crates/zeta2_tools/src/zeta2_tools.rs b/crates/zeta2_tools/src/zeta2_tools.rs index 8758857e7cf50d6a5f2e5a4ea509293b18a8cb2c..6a6268f68ad0fa10e2379ac21e07d4fa530dddc1 100644 --- a/crates/zeta2_tools/src/zeta2_tools.rs +++ b/crates/zeta2_tools/src/zeta2_tools.rs @@ -1,30 +1,26 @@ mod zeta2_context_view; -use std::{cmp::Reverse, path::PathBuf, str::FromStr, sync::Arc}; +use std::{str::FromStr, sync::Arc, time::Duration}; -use chrono::TimeDelta; use client::{Client, UserStore}; -use cloud_llm_client::predict_edits_v3::{ - DeclarationScoreComponents, PredictEditsRequest, PromptFormat, -}; +use cloud_llm_client::predict_edits_v3::PromptFormat; use collections::HashMap; 
-use editor::{Editor, EditorEvent, EditorMode, ExcerptRange, MultiBuffer}; +use editor::{Editor, EditorEvent, EditorMode, MultiBuffer}; use feature_flags::FeatureFlagAppExt as _; use futures::{FutureExt, StreamExt as _, channel::oneshot, future::Shared}; use gpui::{ - CursorStyle, Empty, Entity, EventEmitter, FocusHandle, Focusable, Subscription, Task, - WeakEntity, actions, prelude::*, + Empty, Entity, EventEmitter, FocusHandle, Focusable, Subscription, Task, WeakEntity, actions, + prelude::*, }; -use language::{Buffer, DiskState}; -use ordered_float::OrderedFloat; -use project::{Project, WorktreeId, telemetry_snapshot::TelemetrySnapshot}; +use language::Buffer; +use project::{Project, telemetry_snapshot::TelemetrySnapshot}; use ui::{ButtonLike, ContextMenu, ContextMenuEntry, DropdownMenu, KeyBinding, prelude::*}; use ui_input::InputField; -use util::{ResultExt, paths::PathStyle, rel_path::RelPath}; +use util::ResultExt; use workspace::{Item, SplitDirection, Workspace}; -use zeta2::{ - AgenticContextOptions, ContextMode, DEFAULT_SYNTAX_CONTEXT_OPTIONS, Zeta, Zeta2FeatureFlag, - ZetaDebugInfo, ZetaEditPredictionDebugInfo, ZetaOptions, +use zeta::{ + AgenticContextOptions, ContextMode, DEFAULT_SYNTAX_CONTEXT_OPTIONS, EditPredictionInputs, Zeta, + Zeta2FeatureFlag, ZetaDebugInfo, ZetaEditPredictionDebugInfo, ZetaOptions, }; use edit_prediction_context::{EditPredictionContextOptions, EditPredictionExcerptOptions}; @@ -99,7 +95,6 @@ pub struct Zeta2Inspector { cursor_context_ratio_input: Entity, max_prompt_bytes_input: Entity, context_mode: ContextModeState, - active_view: ActiveView, zeta: Entity, _active_editor_subscription: Option, _update_state_task: Task<()>, @@ -113,21 +108,14 @@ pub enum ContextModeState { }, } -#[derive(PartialEq)] -enum ActiveView { - Context, - Inference, -} - struct LastPrediction { - context_editor: Entity, prompt_editor: Entity, - retrieval_time: TimeDelta, - request_time: Option, + retrieval_time: Duration, + request_time: Option, buffer: WeakEntity, position: language::Anchor, state: LastPredictionState, - request: PredictEditsRequest, + inputs: EditPredictionInputs, project_snapshot: Shared>>, _task: Option>, } @@ -175,7 +163,6 @@ impl Zeta2Inspector { focus_handle: cx.focus_handle(), project: project.clone(), last_prediction: None, - active_view: ActiveView::Inference, max_excerpt_bytes_input: Self::number_input("Max Excerpt Bytes", window, cx), min_excerpt_bytes_input: Self::number_input("Min Excerpt Bytes", window, cx), cursor_context_ratio_input: Self::number_input("Cursor Context Ratio", window, cx), @@ -305,7 +292,7 @@ impl Zeta2Inspector { ContextMode::Syntax(context_options) => { let max_retrieved_declarations = match &this.context_mode { ContextModeState::Llm => { - zeta2::DEFAULT_SYNTAX_CONTEXT_OPTIONS.max_retrieved_declarations + zeta::DEFAULT_SYNTAX_CONTEXT_OPTIONS.max_retrieved_declarations } ContextModeState::Syntax { max_retrieved_declarations, @@ -340,22 +327,10 @@ impl Zeta2Inspector { fn update_last_prediction( &mut self, - prediction: zeta2::ZetaDebugInfo, + prediction: zeta::ZetaDebugInfo, window: &mut Window, cx: &mut Context, ) { - let project = self.project.read(cx); - let path_style = project.path_style(cx); - let Some(worktree_id) = project - .worktrees(cx) - .next() - .map(|worktree| worktree.read(cx).id()) - else { - log::error!("Open a worktree to use edit prediction debug view"); - self.last_prediction.take(); - return; - }; - self._update_state_task = cx.spawn_in(window, { let language_registry = 
self.project.read(cx).languages().clone(); async move |this, cx| { @@ -364,11 +339,10 @@ impl Zeta2Inspector { return; }; for ext in prediction - .request - .referenced_declarations + .inputs + .included_files .iter() - .filter_map(|snippet| snippet.path.extension()) - .chain(prediction.request.excerpt_path.extension()) + .filter_map(|file| file.path.extension()) { if !languages.contains_key(ext) { // Most snippets are gonna be the same language, @@ -391,90 +365,6 @@ impl Zeta2Inspector { let json_language = language_registry.language_for_name("Json").await.log_err(); this.update_in(cx, |this, window, cx| { - let context_editor = cx.new(|cx| { - let mut excerpt_score_components = HashMap::default(); - - let multibuffer = cx.new(|cx| { - let mut multibuffer = MultiBuffer::new(language::Capability::ReadOnly); - let excerpt_file = Arc::new(ExcerptMetadataFile { - title: RelPath::unix("Cursor Excerpt").unwrap().into(), - path_style, - worktree_id, - }); - - let excerpt_buffer = cx.new(|cx| { - let mut buffer = - Buffer::local(prediction.request.excerpt.clone(), cx); - if let Some(language) = prediction - .request - .excerpt_path - .extension() - .and_then(|ext| languages.get(ext)) - { - buffer.set_language(language.clone(), cx); - } - buffer.file_updated(excerpt_file, cx); - buffer - }); - - multibuffer.push_excerpts( - excerpt_buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], - cx, - ); - - let mut declarations = - prediction.request.referenced_declarations.clone(); - declarations.sort_unstable_by_key(|declaration| { - Reverse(OrderedFloat(declaration.declaration_score)) - }); - - for snippet in &declarations { - let snippet_file = Arc::new(ExcerptMetadataFile { - title: RelPath::unix(&format!( - "{} (Score: {})", - snippet.path.display(), - snippet.declaration_score - )) - .unwrap() - .into(), - path_style, - worktree_id, - }); - - let excerpt_buffer = cx.new(|cx| { - let mut buffer = Buffer::local(snippet.text.clone(), cx); - buffer.file_updated(snippet_file, cx); - if let Some(ext) = snippet.path.extension() - && let Some(language) = languages.get(ext) - { - buffer.set_language(language.clone(), cx); - } - buffer - }); - - let excerpt_ids = multibuffer.push_excerpts( - excerpt_buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], - cx, - ); - let excerpt_id = excerpt_ids.first().unwrap(); - - excerpt_score_components - .insert(*excerpt_id, snippet.score_components.clone()); - } - - multibuffer - }); - - let mut editor = - Editor::new(EditorMode::full(), multibuffer, None, window, cx); - editor.register_addon(ZetaContextAddon { - excerpt_score_components, - }); - editor - }); - let ZetaEditPredictionDebugInfo { response_rx, position, @@ -606,7 +496,6 @@ impl Zeta2Inspector { let project_snapshot_task = TelemetrySnapshot::new(&this.project, cx); this.last_prediction = Some(LastPrediction { - context_editor, prompt_editor: cx.new(|cx| { let buffer = cx.new(|cx| { let mut buffer = @@ -632,7 +521,7 @@ impl Zeta2Inspector { .foreground_executor() .spawn(async move { Arc::new(project_snapshot_task.await) }) .shared(), - request: prediction.request, + inputs: prediction.inputs, _task: Some(task), }); cx.notify(); @@ -664,9 +553,6 @@ impl Zeta2Inspector { let Some(last_prediction) = self.last_prediction.as_mut() else { return; }; - if !last_prediction.request.can_collect_data { - return; - } let project_snapshot_task = last_prediction.project_snapshot.clone(); @@ -718,7 +604,7 @@ impl Zeta2Inspector { id = request_id, kind = kind, text = text, - request = 
last_prediction.request, + request = last_prediction.inputs, project_snapshot = project_snapshot, ); }) @@ -727,17 +613,6 @@ impl Zeta2Inspector { .detach(); } - fn focus_feedback(&mut self, window: &mut Window, cx: &mut Context) { - if let Some(last_prediction) = self.last_prediction.as_mut() { - if let LastPredictionState::Success { - feedback_editor, .. - } = &mut last_prediction.state - { - feedback_editor.focus_handle(cx).focus(window); - } - }; - } - fn render_options(&self, window: &mut Window, cx: &mut Context) -> Div { v_flex() .gap_2() @@ -747,11 +622,11 @@ impl Zeta2Inspector { .justify_between() .child( ui::Button::new("reset-options", "Reset") - .disabled(self.zeta.read(cx).options() == &zeta2::DEFAULT_OPTIONS) + .disabled(self.zeta.read(cx).options() == &zeta::DEFAULT_OPTIONS) .style(ButtonStyle::Outlined) .size(ButtonSize::Large) .on_click(cx.listener(|this, _, window, cx| { - this.set_options_state(&zeta2::DEFAULT_OPTIONS, window, cx); + this.set_options_state(&zeta::DEFAULT_OPTIONS, window, cx); })), ), ) @@ -915,42 +790,6 @@ impl Zeta2Inspector { ) } - fn render_tabs(&self, cx: &mut Context) -> Option { - if self.last_prediction.is_none() { - return None; - }; - - Some( - ui::ToggleButtonGroup::single_row( - "prediction", - [ - ui::ToggleButtonSimple::new( - "Context", - cx.listener(|this, _, _, cx| { - this.active_view = ActiveView::Context; - cx.notify(); - }), - ), - ui::ToggleButtonSimple::new( - "Inference", - cx.listener(|this, _, window, cx| { - this.active_view = ActiveView::Inference; - this.focus_feedback(window, cx); - cx.notify(); - }), - ), - ], - ) - .style(ui::ToggleButtonGroupStyle::Outlined) - .selected_index(if self.active_view == ActiveView::Context { - 0 - } else { - 1 - }) - .into_any_element(), - ) - } - fn render_stats(&self) -> Option
{ let Some(prediction) = self.last_prediction.as_ref() else { return None; @@ -970,15 +809,15 @@ impl Zeta2Inspector { ) } - fn render_duration(name: &'static str, time: Option) -> Div { + fn render_duration(name: &'static str, time: Option) -> Div { h_flex() .gap_1() .child(Label::new(name).color(Color::Muted).size(LabelSize::Small)) .child(match time { - Some(time) => Label::new(if time.num_microseconds().unwrap_or(0) >= 1000 { - format!("{} ms", time.num_milliseconds()) + Some(time) => Label::new(if time.as_micros() >= 1000 { + format!("{} ms", time.as_millis()) } else { - format!("{} µs", time.num_microseconds().unwrap_or(0)) + format!("{} µs", time.as_micros()) }) .size(LabelSize::Small), None => Label::new("...").size(LabelSize::Small), @@ -1006,144 +845,135 @@ impl Zeta2Inspector { } fn render_last_prediction(&self, prediction: &LastPrediction, cx: &mut Context) -> Div { - match &self.active_view { - ActiveView::Context => div().size_full().child(prediction.context_editor.clone()), - ActiveView::Inference => h_flex() - .items_start() - .w_full() - .flex_1() - .border_t_1() - .border_color(cx.theme().colors().border) - .bg(cx.theme().colors().editor_background) - .child( - v_flex() - .flex_1() - .gap_2() - .p_4() - .h_full() - .child( - h_flex() - .justify_between() - .child(ui::Headline::new("Prompt").size(ui::HeadlineSize::XSmall)) - .child(match prediction.state { - LastPredictionState::Requested - | LastPredictionState::Failed { .. } => ui::Chip::new("Local") - .bg_color(cx.theme().status().warning_background) - .label_color(Color::Success), - LastPredictionState::Success { .. } => ui::Chip::new("Cloud") - .bg_color(cx.theme().status().success_background) - .label_color(Color::Success), - }), - ) - .child(prediction.prompt_editor.clone()), - ) - .child(ui::vertical_divider()) - .child( - v_flex() - .flex_1() - .gap_2() - .h_full() - .child( + h_flex() + .items_start() + .w_full() + .flex_1() + .border_t_1() + .border_color(cx.theme().colors().border) + .bg(cx.theme().colors().editor_background) + .child( + v_flex() + .flex_1() + .gap_2() + .p_4() + .h_full() + .child( + h_flex() + .justify_between() + .child(ui::Headline::new("Prompt").size(ui::HeadlineSize::XSmall)) + .child(match prediction.state { + LastPredictionState::Requested + | LastPredictionState::Failed { .. } => ui::Chip::new("Local") + .bg_color(cx.theme().status().warning_background) + .label_color(Color::Success), + LastPredictionState::Success { .. } => ui::Chip::new("Cloud") + .bg_color(cx.theme().status().success_background) + .label_color(Color::Success), + }), + ) + .child(prediction.prompt_editor.clone()), + ) + .child(ui::vertical_divider()) + .child( + v_flex() + .flex_1() + .gap_2() + .h_full() + .child( + v_flex() + .flex_1() + .gap_2() + .p_4() + .child( + ui::Headline::new("Model Response").size(ui::HeadlineSize::XSmall), + ) + .child(match &prediction.state { + LastPredictionState::Success { + model_response_editor, + .. + } => model_response_editor.clone().into_any_element(), + LastPredictionState::Requested => v_flex() + .gap_2() + .child(Label::new("Loading...").buffer_font(cx)) + .into_any_element(), + LastPredictionState::Failed { message } => v_flex() + .gap_2() + .max_w_96() + .child(Label::new(message.clone()).buffer_font(cx)) + .into_any_element(), + }), + ) + .child(ui::divider()) + .child( + if let LastPredictionState::Success { + feedback_editor, + feedback: feedback_state, + .. 
+ } = &prediction.state + { v_flex() - .flex_1() + .key_context("Zeta2Feedback") + .on_action(cx.listener(Self::handle_rate_positive)) + .on_action(cx.listener(Self::handle_rate_negative)) .gap_2() - .p_4() + .p_2() + .child(feedback_editor.clone()) .child( - ui::Headline::new("Model Response") - .size(ui::HeadlineSize::XSmall), - ) - .child(match &prediction.state { - LastPredictionState::Success { - model_response_editor, - .. - } => model_response_editor.clone().into_any_element(), - LastPredictionState::Requested => v_flex() - .gap_2() - .child(Label::new("Loading...").buffer_font(cx)) - .into_any_element(), - LastPredictionState::Failed { message } => v_flex() - .gap_2() - .max_w_96() - .child(Label::new(message.clone()).buffer_font(cx)) - .into_any_element(), - }), - ) - .child(ui::divider()) - .child( - if prediction.request.can_collect_data - && let LastPredictionState::Success { - feedback_editor, - feedback: feedback_state, - .. - } = &prediction.state - { - v_flex() - .key_context("Zeta2Feedback") - .on_action(cx.listener(Self::handle_rate_positive)) - .on_action(cx.listener(Self::handle_rate_negative)) - .gap_2() - .p_2() - .child(feedback_editor.clone()) - .child( - h_flex() - .justify_end() - .w_full() - .child( - ButtonLike::new("rate-positive") - .when( - *feedback_state == Some(Feedback::Positive), - |this| this.style(ButtonStyle::Filled), - ) - .child( - KeyBinding::for_action( - &Zeta2RatePredictionPositive, - cx, - ) - .size(TextSize::Small.rems(cx)), - ) - .child(ui::Icon::new(ui::IconName::ThumbsUp)) - .on_click(cx.listener( - |this, _, window, cx| { - this.handle_rate_positive( - &Zeta2RatePredictionPositive, - window, - cx, - ); - }, - )), - ) - .child( - ButtonLike::new("rate-negative") - .when( - *feedback_state == Some(Feedback::Negative), - |this| this.style(ButtonStyle::Filled), + h_flex() + .justify_end() + .w_full() + .child( + ButtonLike::new("rate-positive") + .when( + *feedback_state == Some(Feedback::Positive), + |this| this.style(ButtonStyle::Filled), + ) + .child( + KeyBinding::for_action( + &Zeta2RatePredictionPositive, + cx, ) - .child( - KeyBinding::for_action( - &Zeta2RatePredictionNegative, - cx, - ) - .size(TextSize::Small.rems(cx)), + .size(TextSize::Small.rems(cx)), + ) + .child(ui::Icon::new(ui::IconName::ThumbsUp)) + .on_click(cx.listener(|this, _, window, cx| { + this.handle_rate_positive( + &Zeta2RatePredictionPositive, + window, + cx, + ); + })), + ) + .child( + ButtonLike::new("rate-negative") + .when( + *feedback_state == Some(Feedback::Negative), + |this| this.style(ButtonStyle::Filled), + ) + .child( + KeyBinding::for_action( + &Zeta2RatePredictionNegative, + cx, ) - .child(ui::Icon::new(ui::IconName::ThumbsDown)) - .on_click(cx.listener( - |this, _, window, cx| { - this.handle_rate_negative( - &Zeta2RatePredictionNegative, - window, - cx, - ); - }, - )), - ), - ) - .into_any() - } else { - Empty.into_any_element() - }, - ), - ), - } + .size(TextSize::Small.rems(cx)), + ) + .child(ui::Icon::new(ui::IconName::ThumbsDown)) + .on_click(cx.listener(|this, _, window, cx| { + this.handle_rate_negative( + &Zeta2RatePredictionNegative, + window, + cx, + ); + })), + ), + ) + .into_any() + } else { + Empty.into_any_element() + }, + ), + ) } } @@ -1178,8 +1008,7 @@ impl Render for Zeta2Inspector { .h_full() .justify_between() .child(self.render_options(window, cx)) - .gap_4() - .children(self.render_tabs(cx)), + .gap_4(), ) .child(ui::vertical_divider()) .children(self.render_stats()), @@ -1187,104 +1016,3 @@ impl Render for Zeta2Inspector { 
.child(self.render_content(window, cx)) } } - -// Using same approach as commit view - -struct ExcerptMetadataFile { - title: Arc, - worktree_id: WorktreeId, - path_style: PathStyle, -} - -impl language::File for ExcerptMetadataFile { - fn as_local(&self) -> Option<&dyn language::LocalFile> { - None - } - - fn disk_state(&self) -> DiskState { - DiskState::New - } - - fn path(&self) -> &Arc { - &self.title - } - - fn full_path(&self, _: &App) -> PathBuf { - self.title.as_std_path().to_path_buf() - } - - fn file_name<'a>(&'a self, _: &'a App) -> &'a str { - self.title.file_name().unwrap() - } - - fn path_style(&self, _: &App) -> PathStyle { - self.path_style - } - - fn worktree_id(&self, _: &App) -> WorktreeId { - self.worktree_id - } - - fn to_proto(&self, _: &App) -> language::proto::File { - unimplemented!() - } - - fn is_private(&self) -> bool { - false - } -} - -struct ZetaContextAddon { - excerpt_score_components: HashMap, -} - -impl editor::Addon for ZetaContextAddon { - fn to_any(&self) -> &dyn std::any::Any { - self - } - - fn render_buffer_header_controls( - &self, - excerpt_info: &multi_buffer::ExcerptInfo, - _window: &Window, - _cx: &App, - ) -> Option { - let score_components = self.excerpt_score_components.get(&excerpt_info.id)?.clone(); - - Some( - div() - .id(excerpt_info.id.to_proto() as usize) - .child(ui::Icon::new(IconName::Info)) - .cursor(CursorStyle::PointingHand) - .tooltip(move |_, cx| { - cx.new(|_| ScoreComponentsTooltip::new(&score_components)) - .into() - }) - .into_any(), - ) - } -} - -struct ScoreComponentsTooltip { - text: SharedString, -} - -impl ScoreComponentsTooltip { - fn new(components: &DeclarationScoreComponents) -> Self { - Self { - text: format!("{:#?}", components).into(), - } - } -} - -impl Render for ScoreComponentsTooltip { - fn render(&mut self, _: &mut Window, cx: &mut Context) -> impl IntoElement { - div().pl_2().pt_2p5().child( - div() - .elevation_2(cx) - .py_1() - .px_2() - .child(ui::Label::new(self.text.clone()).buffer_font(cx)), - ) - } -} diff --git a/crates/zeta_cli/Cargo.toml b/crates/zeta_cli/Cargo.toml index e18cf54787ca98e2be60db4977dd2de18e9c09e2..2dbca537f55377e84f306e13649dfb71ccf2f181 100644 --- a/crates/zeta_cli/Cargo.toml +++ b/crates/zeta_cli/Cargo.toml @@ -53,8 +53,7 @@ terminal_view.workspace = true toml.workspace = true util.workspace = true watch.workspace = true -zeta.workspace = true -zeta2 = { workspace = true, features = ["eval-support"] } +zeta = { workspace = true, features = ["eval-support"] } zlog.workspace = true [dev-dependencies] diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index a9d7acaee2287450eac828bd2d770b88a8150940..a0ebdf998595ccacec2dafecf51b6094e5e401b5 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -9,7 +9,7 @@ use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; use util::ResultExt as _; -use zeta2::{Zeta, udiff::DiffLine}; +use zeta::{Zeta, udiff::DiffLine}; use crate::{ EvaluateArguments, PredictionOptions, diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 67eed23f90dc1a5b48a53a2a7de07f500396ba9f..7dbe304a88b9ea024adab793fa782fd2f4bdf1c0 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -26,7 +26,7 @@ use project::{Project, ProjectPath}; use pulldown_cmark::CowStr; use serde::{Deserialize, Serialize}; use util::{paths::PathStyle, rel_path::RelPath}; -use zeta2::udiff::OpenedBuffers; +use zeta::udiff::OpenedBuffers; use 
crate::paths::{REPOS_DIR, WORKTREES_DIR}; @@ -557,7 +557,7 @@ impl NamedExample { project: &Entity, cx: &mut AsyncApp, ) -> Result> { - zeta2::udiff::apply_diff(&self.example.edit_history, project, cx).await + zeta::udiff::apply_diff(&self.example.edit_history, project, cx).await } } diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 914b141915cd3a89cd35a02bc6c9463094f0de96..f87563cc34ca7631baf8195e42e4e3473f522659 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -31,7 +31,7 @@ use serde_json::json; use std::io::{self}; use std::time::Duration; use std::{collections::HashSet, path::PathBuf, str::FromStr, sync::Arc}; -use zeta2::ContextMode; +use zeta::ContextMode; #[derive(Parser, Debug)] #[command(name = "zeta")] @@ -193,13 +193,14 @@ pub struct EvaluateArguments { #[derive(clap::ValueEnum, Default, Debug, Clone, Copy, PartialEq)] enum PredictionProvider { + Zeta1, #[default] Zeta2, Sweep, } -fn zeta2_args_to_options(args: &Zeta2Args, omit_excerpt_overlaps: bool) -> zeta2::ZetaOptions { - zeta2::ZetaOptions { +fn zeta2_args_to_options(args: &Zeta2Args, omit_excerpt_overlaps: bool) -> zeta::ZetaOptions { + zeta::ZetaOptions { context: ContextMode::Syntax(EditPredictionContextOptions { max_retrieved_declarations: args.max_retrieved_definitions, use_imports: !args.disable_imports_gathering, @@ -397,7 +398,7 @@ async fn zeta2_syntax_context( let output = cx .update(|cx| { let zeta = cx.new(|cx| { - zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) + zeta::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx) }); let indexing_done_task = zeta.update(cx, |zeta, cx| { zeta.set_options(zeta2_args_to_options(&args.zeta2_args, true)); @@ -435,7 +436,7 @@ async fn zeta1_context( args: ContextArgs, app_state: &Arc, cx: &mut AsyncApp, -) -> Result { +) -> Result { let LoadedContext { full_path_str, snapshot, @@ -450,7 +451,7 @@ async fn zeta1_context( let prompt_for_events = move || (events, 0); cx.update(|cx| { - zeta::gather_context( + zeta::zeta1::gather_context( full_path_str, &snapshot, clipped_cursor, diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index c792b318cec6de42e518793ed5400df0010ae5ea..a757a5faa0dbae95c4dcab58c76d50450b1d2e9f 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -21,7 +21,7 @@ use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; use std::time::{Duration, Instant}; -use zeta2::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; +use zeta::{EvalCache, EvalCacheEntryKind, EvalCacheKey, Zeta}; pub async fn run_predict( args: PredictArguments, @@ -47,12 +47,13 @@ pub fn setup_zeta( cx: &mut AsyncApp, ) -> Result> { let zeta = - cx.new(|cx| zeta2::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; + cx.new(|cx| zeta::Zeta::new(app_state.client.clone(), app_state.user_store.clone(), cx))?; zeta.update(cx, |zeta, _cx| { let model = match provider { - PredictionProvider::Zeta2 => zeta2::ZetaEditPredictionModel::ZedCloud, - PredictionProvider::Sweep => zeta2::ZetaEditPredictionModel::Sweep, + PredictionProvider::Zeta1 => zeta::ZetaEditPredictionModel::Zeta1, + PredictionProvider::Zeta2 => zeta::ZetaEditPredictionModel::Zeta2, + PredictionProvider::Sweep => zeta::ZetaEditPredictionModel::Sweep, }; zeta.set_edit_prediction_model(model); })?; @@ -142,25 +143,25 @@ pub async fn perform_predict( let mut search_queries_executed_at = None; while let Some(event) = debug_rx.next().await { match 
event { - zeta2::ZetaDebugInfo::ContextRetrievalStarted(info) => { + zeta::ZetaDebugInfo::ContextRetrievalStarted(info) => { start_time = Some(info.timestamp); fs::write( example_run_dir.join("search_prompt.md"), &info.search_prompt, )?; } - zeta2::ZetaDebugInfo::SearchQueriesGenerated(info) => { + zeta::ZetaDebugInfo::SearchQueriesGenerated(info) => { search_queries_generated_at = Some(info.timestamp); fs::write( example_run_dir.join("search_queries.json"), serde_json::to_string_pretty(&info.search_queries).unwrap(), )?; } - zeta2::ZetaDebugInfo::SearchQueriesExecuted(info) => { + zeta::ZetaDebugInfo::SearchQueriesExecuted(info) => { search_queries_executed_at = Some(info.timestamp); } - zeta2::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} - zeta2::ZetaDebugInfo::EditPredictionRequested(request) => { + zeta::ZetaDebugInfo::ContextRetrievalFinished(_info) => {} + zeta::ZetaDebugInfo::EditPredictionRequested(request) => { let prediction_started_at = Instant::now(); start_time.get_or_insert(prediction_started_at); let prompt = request.local_prompt.unwrap_or_default(); @@ -170,9 +171,9 @@ pub async fn perform_predict( let mut result = result.lock().unwrap(); result.prompt_len = prompt.chars().count(); - for included_file in request.request.included_files { + for included_file in request.inputs.included_files { let insertions = - vec![(request.request.cursor_point, CURSOR_MARKER)]; + vec![(request.inputs.cursor_point, CURSOR_MARKER)]; result.excerpts.extend(included_file.excerpts.iter().map( |excerpt| ActualExcerpt { path: included_file.path.components().skip(1).collect(), @@ -182,7 +183,7 @@ pub async fn perform_predict( write_codeblock( &included_file.path, included_file.excerpts.iter(), - if included_file.path == request.request.excerpt_path { + if included_file.path == request.inputs.cursor_path { &insertions } else { &[] @@ -196,7 +197,7 @@ pub async fn perform_predict( let response = request.response_rx.await?.0.map_err(|err| anyhow!(err))?; - let response = zeta2::text_from_response(response).unwrap_or_default(); + let response = zeta::text_from_response(response).unwrap_or_default(); let prediction_finished_at = Instant::now(); fs::write(example_run_dir.join("prediction_response.md"), &response)?; @@ -267,20 +268,7 @@ pub async fn perform_predict( let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); result.diff = prediction - .map(|prediction| { - let old_text = prediction.snapshot.text(); - let new_text = prediction - .buffer - .update(cx, |buffer, cx| { - let branch = buffer.branch(cx); - branch.update(cx, |branch, cx| { - branch.edit(prediction.edits.iter().cloned(), None, cx); - branch.text() - }) - }) - .unwrap(); - language::unified_diff(&old_text, &new_text) - }) + .and_then(|prediction| prediction.edit_preview.as_unified_diff(&prediction.edits)) .unwrap_or_default(); anyhow::Ok(result) diff --git a/crates/zeta_cli/src/syntax_retrieval_stats.rs b/crates/zeta_cli/src/syntax_retrieval_stats.rs index f2634b1323d92b7136c591627226161b2905a955..4c7506ff78952da79acfeae751959bfe8182b9d4 100644 --- a/crates/zeta_cli/src/syntax_retrieval_stats.rs +++ b/crates/zeta_cli/src/syntax_retrieval_stats.rs @@ -32,7 +32,7 @@ use std::{ time::Duration, }; use util::paths::PathStyle; -use zeta2::ContextMode; +use zeta::ContextMode; use crate::headless::ZetaCliAppState; use crate::source_location::SourceLocation; @@ -44,7 +44,7 @@ pub async fn retrieval_stats( only_extension: Option, file_limit: Option, skip_files: Option, - options: zeta2::ZetaOptions, + options: 
zeta::ZetaOptions, cx: &mut AsyncApp, ) -> Result { let ContextMode::Syntax(context_options) = options.context.clone() else { From 0e2041dd41a8c7252b53f1ae8772d3a8f1e7e169 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ole=20J=C3=B8rgen=20Br=C3=B8nner?= Date: Tue, 25 Nov 2025 08:21:18 +0100 Subject: [PATCH 0356/1030] multi_buffer: Fix `editor::ExpandExcerpts` failing when cursor is at excerpt start (#42324) The bug is easily verified by: 1. open any multi-buffer 2. place the cursor at the beginning of an excerpt 3. run the editor::ExpandExcerpts / editor: expand excerpts action 4. The excerpt is not expanded Since the `buffer_ids_for_range` function basically did the same and had even been changed the same way earlier I DRYed these functions as well. Note: I'm a rust novice, so keep an extra eye on rust technicalities when reviewing :) --- Release Notes: - Fix editor: expand excerpts failing when cursor is at excerpt start --------- Co-authored-by: Lukas Wirth --- crates/multi_buffer/src/multi_buffer.rs | 32 +++-- crates/multi_buffer/src/multi_buffer_tests.rs | 114 ++++++++++++++++++ 2 files changed, 129 insertions(+), 17 deletions(-) diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 7ecc09255b17ebbf2e68e21ab4c8d88f93d08d75..93fa26e02936884bc4b9dfd19bdea37455f1fd6e 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -3616,40 +3616,38 @@ impl MultiBufferSnapshot { }) } - pub fn excerpt_ids_for_range( + fn excerpts_for_range( &self, range: Range, - ) -> impl Iterator + '_ { + ) -> impl Iterator + '_ { let range = range.start.to_offset(self)..range.end.to_offset(self); let mut cursor = self.cursor::(); cursor.seek(&range.start); std::iter::from_fn(move || { let region = cursor.region()?; - if region.range.start >= range.end { + if region.range.start > range.end + || region.range.start == range.end && region.range.start > range.start + { return None; } cursor.next_excerpt(); - Some(region.excerpt.id) + Some(region.excerpt) }) } + pub fn excerpt_ids_for_range( + &self, + range: Range, + ) -> impl Iterator + '_ { + self.excerpts_for_range(range).map(|excerpt| excerpt.id) + } + pub fn buffer_ids_for_range( &self, range: Range, ) -> impl Iterator + '_ { - let range = range.start.to_offset(self)..range.end.to_offset(self); - let mut cursor = self.cursor::(); - cursor.seek(&range.start); - std::iter::from_fn(move || { - let region = cursor.region()?; - if region.range.start > range.end - || region.range.start == range.end && region.range.start > range.start - { - return None; - } - cursor.next_excerpt(); - Some(region.excerpt.buffer_id) - }) + self.excerpts_for_range(range) + .map(|excerpt| excerpt.buffer_id) } pub fn ranges_to_buffer_ranges( diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 0151805d065b779569b3a2f8f02157f3ce129295..526c77db85a3efabb0e64b184dbed6fa90097558 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -4095,3 +4095,117 @@ fn test_random_chunk_bitmaps_with_diffs(cx: &mut App, mut rng: StdRng) { } } } + +/// Tests `excerpt_containing` and `excerpts_for_range` (functions mapping multi-buffer text-coordinates to excerpts) +#[gpui::test] +fn test_excerpts_containment_functions(cx: &mut App) { + // Multibuffer content for these tests: + // 0123 + // 0: aa0 + // 1: aa1 + // ----- + // 2: bb0 + // 3: bb1 + // -----MultiBufferOffset(0).. 
+ // 4: cc0 + + let buffer_1 = cx.new(|cx| Buffer::local("aa0\naa1", cx)); + let buffer_2 = cx.new(|cx| Buffer::local("bb0\nbb1", cx)); + let buffer_3 = cx.new(|cx| Buffer::local("cc0", cx)); + + let multibuffer = cx.new(|_| MultiBuffer::new(Capability::ReadWrite)); + + let (excerpt_1_id, excerpt_2_id, excerpt_3_id) = multibuffer.update(cx, |multibuffer, cx| { + let excerpt_1_id = multibuffer.push_excerpts( + buffer_1.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 3))], + cx, + )[0]; + + let excerpt_2_id = multibuffer.push_excerpts( + buffer_2.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 3))], + cx, + )[0]; + + let excerpt_3_id = multibuffer.push_excerpts( + buffer_3.clone(), + [ExcerptRange::new(Point::new(0, 0)..Point::new(0, 3))], + cx, + )[0]; + + (excerpt_1_id, excerpt_2_id, excerpt_3_id) + }); + + let snapshot = multibuffer.read(cx).snapshot(cx); + + assert_eq!(snapshot.text(), "aa0\naa1\nbb0\nbb1\ncc0"); + + //// Test `excerpts_for_range` + + let p00 = snapshot.point_to_offset(Point::new(0, 0)); + let p10 = snapshot.point_to_offset(Point::new(1, 0)); + let p20 = snapshot.point_to_offset(Point::new(2, 0)); + let p23 = snapshot.point_to_offset(Point::new(2, 3)); + let p13 = snapshot.point_to_offset(Point::new(1, 3)); + let p40 = snapshot.point_to_offset(Point::new(4, 0)); + let p43 = snapshot.point_to_offset(Point::new(4, 3)); + + let excerpts: Vec<_> = snapshot.excerpts_for_range(p00..p00).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_1_id); + + // Cursor at very end of excerpt 3 + let excerpts: Vec<_> = snapshot.excerpts_for_range(p43..p43).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_3_id); + + let excerpts: Vec<_> = snapshot.excerpts_for_range(p00..p23).collect(); + assert_eq!(excerpts.len(), 2); + assert_eq!(excerpts[0].id, excerpt_1_id); + assert_eq!(excerpts[1].id, excerpt_2_id); + + // This range represent an selection with end-point just inside excerpt_2 + // Today we only expand the first excerpt, but another interpretation that + // we could consider is expanding both here + let excerpts: Vec<_> = snapshot.excerpts_for_range(p10..p20).collect(); + assert_eq!(excerpts.len(), 1); + assert_eq!(excerpts[0].id, excerpt_1_id); + + //// Test that `excerpts_for_range` and `excerpt_containing` agree for all single offsets (cursor positions) + for offset in 0..=snapshot.len().0 { + let offset = MultiBufferOffset(offset); + let excerpts_for_range: Vec<_> = snapshot.excerpts_for_range(offset..offset).collect(); + assert_eq!( + excerpts_for_range.len(), + 1, + "Expected exactly one excerpt for offset {offset}", + ); + + let excerpt_containing = snapshot.excerpt_containing(offset..offset); + assert!( + excerpt_containing.is_some(), + "Expected excerpt_containing to find excerpt for offset {offset}", + ); + + assert_eq!( + excerpts_for_range[0].id, + excerpt_containing.unwrap().id(), + "excerpts_for_range and excerpt_containing should agree for offset {offset}", + ); + } + + //// Test `excerpt_containing` behavior with ranges: + + // Ranges intersecting a single-excerpt + let containing = snapshot.excerpt_containing(p00..p13); + assert!(containing.is_some()); + assert_eq!(containing.unwrap().id(), excerpt_1_id); + + // Ranges intersecting multiple excerpts (should return None) + let containing = snapshot.excerpt_containing(p20..p40); + assert!( + containing.is_none(), + "excerpt_containing should return None for ranges spanning multiple excerpts" + ); +} From 
303c23cf1e7ce6226f99dd989b9173c3799622a2 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 09:34:23 +0200 Subject: [PATCH 0357/1030] Fix first window open not focusing the modals (#43180) Closes https://github.com/zed-industries/zed/issues/4357 Closes https://github.com/zed-industries/zed/issues/41278 Release Notes: - Fixed modals not getting focus on window reopen --------- Co-authored-by: Conrad Irwin --- crates/agent_ui/src/acp/thread_view.rs | 1 - crates/agent_ui/src/agent_panel.rs | 20 +++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 92765140f5101034a30fc95db675ff335f2cb324..fd0b1eedbdf80d1893760e6182cd2e57d96ef010 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -653,7 +653,6 @@ impl AcpThreadView { mode_selector, _subscriptions: subscriptions, }; - this.message_editor.focus_handle(cx).focus(window); this.profile_selector = this.as_native_thread(cx).map(|thread| { cx.new(|cx| { diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 6ff909389986ec27b998c4554fe2d86115ef1785..22eb11e24a8fd706c80aa65c3dcf5d8ae3876ddc 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -816,6 +816,7 @@ impl AgentPanel { window, cx, ), + true, window, cx, ); @@ -911,7 +912,12 @@ impl AgentPanel { ) }); - this.set_active_view(ActiveView::ExternalAgentThread { thread_view }, window, cx); + this.set_active_view( + ActiveView::ExternalAgentThread { thread_view }, + !loading, + window, + cx, + ); }) }) .detach_and_log_err(cx); @@ -953,10 +959,10 @@ impl AgentPanel { fn open_history(&mut self, window: &mut Window, cx: &mut Context) { if matches!(self.active_view, ActiveView::History) { if let Some(previous_view) = self.previous_view.take() { - self.set_active_view(previous_view, window, cx); + self.set_active_view(previous_view, true, window, cx); } } else { - self.set_active_view(ActiveView::History, window, cx); + self.set_active_view(ActiveView::History, true, window, cx); } cx.notify(); } @@ -1012,6 +1018,7 @@ impl AgentPanel { window, cx, ), + true, window, cx, ); @@ -1157,7 +1164,7 @@ impl AgentPanel { let context_server_store = self.project.read(cx).context_server_store(); let fs = self.fs.clone(); - self.set_active_view(ActiveView::Configuration, window, cx); + self.set_active_view(ActiveView::Configuration, true, window, cx); self.configuration = Some(cx.new(|cx| { AgentConfiguration::new( fs, @@ -1274,6 +1281,7 @@ impl AgentPanel { fn set_active_view( &mut self, new_view: ActiveView, + focus: bool, window: &mut Window, cx: &mut Context, ) { @@ -1312,7 +1320,9 @@ impl AgentPanel { self.active_view = new_view; } - self.focus_handle(cx).focus(window); + if focus { + self.focus_handle(cx).focus(window); + } } fn populate_recently_opened_menu_section( From e6fe95b4f2f676c7fc4a5f951ba7c721e7d22e8a Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Tue, 25 Nov 2025 10:25:49 +0200 Subject: [PATCH 0358/1030] Only show ssh logs when toggled (#43445) Same as in collab projects. 
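For context on the change below: the headless (SSH) host previously forwarded every language-server log entry downstream regardless of what the client had toggled; after this change, a log kind is only forwarded while the client has it toggled on, going through the same `toggle_lsp_logs` path that collab projects use. A minimal, self-contained sketch of that gating behavior (the `LogStore` and `ServerState` types below are simplified stand-ins, not the actual implementation in this patch):

```rust
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum LogKind {
    Logs,
    Trace,
    Rpc,
}

#[derive(Default)]
struct ServerState {
    toggled_log_kind: Option<LogKind>,
    rpc_trace_enabled: bool,
}

#[derive(Default)]
struct LogStore {
    servers: HashMap<u64, ServerState>,
}

impl LogStore {
    /// Record which log kind the client toggled for a server, and only keep
    /// the noisy RPC trace enabled while it is explicitly requested.
    fn toggle_lsp_logs(&mut self, server_id: u64, enabled: bool, kind: LogKind) {
        let state = self.servers.entry(server_id).or_default();
        state.toggled_log_kind = if enabled { Some(kind) } else { None };
        if kind == LogKind::Rpc {
            state.rpc_trace_enabled = enabled;
        }
    }

    /// A log entry is forwarded to the downstream client only while its kind
    /// is the one currently toggled on for that server.
    fn should_forward(&self, server_id: u64, kind: LogKind) -> bool {
        self.servers
            .get(&server_id)
            .is_some_and(|state| state.toggled_log_kind == Some(kind))
    }
}

fn main() {
    let mut store = LogStore::default();
    assert!(!store.should_forward(1, LogKind::Rpc));

    store.toggle_lsp_logs(1, true, LogKind::Rpc);
    assert!(store.should_forward(1, LogKind::Rpc));
    assert!(store.servers[&1].rpc_trace_enabled);

    store.toggle_lsp_logs(1, false, LogKind::Rpc);
    assert!(!store.should_forward(1, LogKind::Rpc));
    assert!(!store.servers[&1].rpc_trace_enabled);

    // Non-RPC kinds are gated the same way, without touching the RPC trace.
    store.toggle_lsp_logs(2, true, LogKind::Logs);
    assert!(store.should_forward(2, LogKind::Logs));
    assert!(!store.should_forward(2, LogKind::Trace));
}
```

The real store keys this state per `LanguageServerId` and forwards entries to the downstream client over proto; the sketch only models the toggle-and-forward decision.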
Release Notes: - N/A --- crates/project/src/lsp_store/log_store.rs | 44 +++++++++++--------- crates/remote_server/src/headless_project.rs | 40 +++++++++--------- 2 files changed, 45 insertions(+), 39 deletions(-) diff --git a/crates/project/src/lsp_store/log_store.rs b/crates/project/src/lsp_store/log_store.rs index 00098712bf0092a6795de2ed48c7ccf15925c555..92f8fecadd0236e899ef16781e55405dfe05f282 100644 --- a/crates/project/src/lsp_store/log_store.rs +++ b/crates/project/src/lsp_store/log_store.rs @@ -344,22 +344,7 @@ impl LogStore { enabled, toggled_log_kind, } => { - if let Some(server_state) = - log_store.get_language_server_state(*server_id) - { - if *enabled { - server_state.toggled_log_kind = Some(*toggled_log_kind); - } else { - server_state.toggled_log_kind = None; - } - } - if LogKind::Rpc == *toggled_log_kind { - if *enabled { - log_store.enable_rpc_trace_for_language_server(*server_id); - } else { - log_store.disable_rpc_trace_for_language_server(*server_id); - } - } + log_store.toggle_lsp_logs(*server_id, *enabled, *toggled_log_kind); } _ => {} } @@ -676,7 +661,6 @@ impl LogStore { } fn emit_event(&mut self, e: Event, cx: &mut Context) { - let on_headless_host = self.on_headless_host; match &e { Event::NewServerLogEntry { id, kind, text } => { if let Some(state) = self.get_language_server_state(*id) { @@ -690,9 +674,7 @@ impl LogStore { } .and_then(|lsp_store| lsp_store.read(cx).downstream_client()); if let Some((client, project_id)) = downstream_client { - if on_headless_host - || Some(LogKind::from_server_log_type(kind)) == state.toggled_log_kind - { + if Some(LogKind::from_server_log_type(kind)) == state.toggled_log_kind { client .send(proto::LanguageServerLog { project_id, @@ -709,4 +691,26 @@ impl LogStore { cx.emit(e); } + + pub fn toggle_lsp_logs( + &mut self, + server_id: LanguageServerId, + enabled: bool, + toggled_log_kind: LogKind, + ) { + if let Some(server_state) = self.get_language_server_state(server_id) { + if enabled { + server_state.toggled_log_kind = Some(toggled_log_kind); + } else { + server_state.toggled_log_kind = None; + } + } + if LogKind::Rpc == toggled_log_kind { + if enabled { + self.enable_rpc_trace_for_language_server(server_id); + } else { + self.disable_rpc_trace_for_language_server(server_id); + } + } + } } diff --git a/crates/remote_server/src/headless_project.rs b/crates/remote_server/src/headless_project.rs index 6d64202a038145fc32dc5e5896484e23f03dacef..f5cce907f956d7127aeb272cfef27ecb5f6375a7 100644 --- a/crates/remote_server/src/headless_project.rs +++ b/crates/remote_server/src/headless_project.rs @@ -17,7 +17,7 @@ use project::{ debugger::{breakpoint_store::BreakpointStore, dap_store::DapStore}, git_store::GitStore, image_store::ImageId, - lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind}, + lsp_store::log_store::{self, GlobalLogStore, LanguageServerKind, LogKind}, project_settings::SettingsObserver, search::SearchQuery, task_store::TaskStore, @@ -623,26 +623,28 @@ impl HeadlessProject { async fn handle_toggle_lsp_logs( _: Entity, envelope: TypedEnvelope, - mut cx: AsyncApp, + cx: AsyncApp, ) -> Result<()> { let server_id = LanguageServerId::from_proto(envelope.payload.server_id); - let lsp_logs = cx - .update(|cx| { - cx.try_global::() - .map(|lsp_logs| lsp_logs.0.clone()) - })? - .context("lsp logs store is missing")?; - - lsp_logs.update(&mut cx, |lsp_logs, _| { - // RPC logs are very noisy and we need to toggle it on the headless server too. 
- // The rest of the logs for the ssh project are very important to have toggled always, - // to e.g. send language server error logs to the client before anything is toggled. - if envelope.payload.enabled { - lsp_logs.enable_rpc_trace_for_language_server(server_id); - } else { - lsp_logs.disable_rpc_trace_for_language_server(server_id); - } - })?; + cx.update(|cx| { + let log_store = cx + .try_global::() + .map(|global_log_store| global_log_store.0.clone()) + .context("lsp logs store is missing")?; + let toggled_log_kind = + match proto::toggle_lsp_logs::LogType::from_i32(envelope.payload.log_type) + .context("invalid log type")? + { + proto::toggle_lsp_logs::LogType::Log => LogKind::Logs, + proto::toggle_lsp_logs::LogType::Trace => LogKind::Trace, + proto::toggle_lsp_logs::LogType::Rpc => LogKind::Rpc, + }; + log_store.update(cx, |log_store, _| { + log_store.toggle_lsp_logs(server_id, envelope.payload.enabled, toggled_log_kind); + }); + anyhow::Ok(()) + })??; + Ok(()) } From c0e85481b09da81c852a7fc3f793a82fe125fe97 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 25 Nov 2025 10:11:43 +0100 Subject: [PATCH 0359/1030] lsp: Fix potential double didClose notification when renaming a file (#43448) Closes #42709 Release Notes: - N/A --- crates/project/src/lsp_store.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 349bfa9ed00223ea71d4d77dd32bdf433c39c784..f5d931737dff9a873fc5d63e5445b2b5d49bab56 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -2684,10 +2684,15 @@ impl LocalLspStore { cx: &mut App, ) { buffer.update(cx, |buffer, cx| { - let _ = self.buffer_snapshots.remove(&buffer.remote_id()); + let mut snapshots = self.buffer_snapshots.remove(&buffer.remote_id()); for (_, language_server) in self.language_servers_for_buffer(buffer, cx) { - language_server.unregister_buffer(file_url.clone()); + if snapshots + .as_mut() + .is_some_and(|map| map.remove(&language_server.server_id()).is_some()) + { + language_server.unregister_buffer(file_url.clone()); + } } }); } From 5139cc2bfb6380ef0520727f2da57771018529e9 Mon Sep 17 00:00:00 2001 From: AidanV <84053180+AidanV@users.noreply.github.com> Date: Tue, 25 Nov 2025 01:20:01 -0800 Subject: [PATCH 0360/1030] helix: Fix `Vim::NextWordEnd` off-by-one in `HelixSelect` (#43234) Closes #43209 Closes #38121 Starting on the first character. Running `v e` before changes: image Running `v e` after changes: image Change Notes: - Added helix selection sanitation code that directly mirrors the code in the Vim [`visual_motion`](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/visual.rs#L237) method. I kept the comments from the Vim section that explains its purpose. - The above change converted the problem from fixing `v e` to fixing `v w`. Since `w` is treated differently in Helix than in Vim (i.e. `w` in Vim goes to the first character of a word and `w` in Helix goes to the character before a word. Commented [here](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/helix.rs#L132)), the code treats `w` in `HelixSelect` as a motion that differs from the Vim motion in the same way that the function [`helix_move_cursor`](https://github.com/AidanV/zed/blob/b6728c080c5d14ded7002d0276deb5c19d42ed8a/crates/vim/src/helix.rs#L353) separates these behaviors. 
- Added a regression test Release Notes: - Fixes bug where `Vim::NextWordEnd` in `HelixSelect` would not select whole word. --- crates/vim/src/helix.rs | 185 ++++++++++++++++++++++++++++++---------- 1 file changed, 139 insertions(+), 46 deletions(-) diff --git a/crates/vim/src/helix.rs b/crates/vim/src/helix.rs index 67c99ff6aea249692bddc38d3681be5c491a7437..fae2bda578c6844c33290d059248b895ebde4c3d 100644 --- a/crates/vim/src/helix.rs +++ b/crates/vim/src/helix.rs @@ -109,19 +109,76 @@ impl Vim { }; s.move_with(|map, selection| { - let current_head = selection.head(); - - let Some((new_head, goal)) = motion.move_point( - map, - current_head, - selection.goal, - times, - &text_layout_details, - ) else { - return; + let was_reversed = selection.reversed; + let mut current_head = selection.head(); + + // our motions assume the current character is after the cursor, + // but in (forward) visual mode the current character is just + // before the end of the selection. + + // If the file ends with a newline (which is common) we don't do this. + // so that if you go to the end of such a file you can use "up" to go + // to the previous line and have it work somewhat as expected. + if !selection.reversed + && !selection.is_empty() + && !(selection.end.column() == 0 && selection.end == map.max_point()) + { + current_head = movement::left(map, selection.end) + } + + let (new_head, goal) = match motion { + // Going to next word start is special cased + // since Vim differs from Helix in that motion + // Vim: `w` goes to the first character of a word + // Helix: `w` goes to the character before a word + Motion::NextWordStart { ignore_punctuation } => { + let mut head = movement::right(map, current_head); + let classifier = + map.buffer_snapshot().char_classifier_at(head.to_point(map)); + for _ in 0..times.unwrap_or(1) { + let (_, new_head) = + movement::find_boundary_trail(map, head, |left, right| { + Self::is_boundary_right(ignore_punctuation)( + left, + right, + &classifier, + ) + }); + head = new_head; + } + head = movement::left(map, head); + (head, SelectionGoal::None) + } + _ => motion + .move_point( + map, + current_head, + selection.goal, + times, + &text_layout_details, + ) + .unwrap_or((current_head, selection.goal)), }; selection.set_head(new_head, goal); + + // ensure the current character is included in the selection. + if !selection.reversed { + let next_point = movement::right(map, selection.end); + + if !(next_point.column() == 0 && next_point == map.max_point()) { + selection.end = next_point; + } + } + + // vim always ensures the anchor character stays selected. + // if our selection has reversed, we need to move the opposite end + // to ensure the anchor is still selected. 
+ if was_reversed && !selection.reversed { + selection.start = movement::left(map, selection.start); + } else if !was_reversed && selection.reversed { + selection.end = movement::right(map, selection.end); + } }) }); }); @@ -255,6 +312,30 @@ impl Vim { }); } + fn is_boundary_right( + ignore_punctuation: bool, + ) -> impl FnMut(char, char, &CharClassifier) -> bool { + move |left, right, classifier| { + let left_kind = classifier.kind_with(left, ignore_punctuation); + let right_kind = classifier.kind_with(right, ignore_punctuation); + let at_newline = (left == '\n') ^ (right == '\n'); + + (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline + } + } + + fn is_boundary_left( + ignore_punctuation: bool, + ) -> impl FnMut(char, char, &CharClassifier) -> bool { + move |left, right, classifier| { + let left_kind = classifier.kind_with(left, ignore_punctuation); + let right_kind = classifier.kind_with(right, ignore_punctuation); + let at_newline = (left == '\n') ^ (right == '\n'); + + (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline + } + } + pub fn helix_move_cursor( &mut self, motion: Motion, @@ -263,6 +344,30 @@ impl Vim { cx: &mut Context, ) { match motion { + Motion::NextWordStart { ignore_punctuation } => self.helix_find_range_forward( + times, + window, + cx, + Self::is_boundary_right(ignore_punctuation), + ), + Motion::NextWordEnd { ignore_punctuation } => self.helix_find_range_forward( + times, + window, + cx, + Self::is_boundary_left(ignore_punctuation), + ), + Motion::PreviousWordStart { ignore_punctuation } => self.helix_find_range_backward( + times, + window, + cx, + Self::is_boundary_left(ignore_punctuation), + ), + Motion::PreviousWordEnd { ignore_punctuation } => self.helix_find_range_backward( + times, + window, + cx, + Self::is_boundary_right(ignore_punctuation), + ), Motion::EndOfLine { .. } => { // In Helix mode, EndOfLine should position cursor ON the last character, // not after it. We therefore need special handling for it. 
@@ -288,42 +393,6 @@ impl Vim { }); }); } - Motion::NextWordStart { ignore_punctuation } => { - self.helix_find_range_forward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::NextWordEnd { ignore_punctuation } => { - self.helix_find_range_forward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::PreviousWordStart { ignore_punctuation } => { - self.helix_find_range_backward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && left_kind != CharKind::Whitespace) || at_newline - }) - } - Motion::PreviousWordEnd { ignore_punctuation } => { - self.helix_find_range_backward(times, window, cx, |left, right, classifier| { - let left_kind = classifier.kind_with(left, ignore_punctuation); - let right_kind = classifier.kind_with(right, ignore_punctuation); - let at_newline = (left == '\n') ^ (right == '\n'); - - (left_kind != right_kind && right_kind != CharKind::Whitespace) || at_newline - }) - } Motion::FindForward { before, char, @@ -1394,6 +1463,30 @@ mod test { cx.assert_state("«one ˇ»two", Mode::HelixNormal); } + #[gpui::test] + async fn test_helix_select_motion(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + cx.enable_helix(); + + cx.set_state("«ˇ»one two three", Mode::HelixSelect); + cx.simulate_keystrokes("w"); + cx.assert_state("«one ˇ»two three", Mode::HelixSelect); + + cx.set_state("«ˇ»one two three", Mode::HelixSelect); + cx.simulate_keystrokes("e"); + cx.assert_state("«oneˇ» two three", Mode::HelixSelect); + } + + #[gpui::test] + async fn test_helix_full_cursor_selection(cx: &mut gpui::TestAppContext) { + let mut cx = VimTestContext::new(cx, true).await; + cx.enable_helix(); + + cx.set_state("ˇone two three", Mode::HelixNormal); + cx.simulate_keystrokes("l l v h h h"); + cx.assert_state("«ˇone» two three", Mode::HelixSelect); + } + #[gpui::test] async fn test_helix_select_regex(cx: &mut gpui::TestAppContext) { let mut cx = VimTestContext::new(cx, true).await; From 7651854bbd892e2938673b8a5eed0d7f53278103 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Tue, 25 Nov 2025 10:22:00 +0100 Subject: [PATCH 0361/1030] ci: Do not show output of failed tests at the end too (#43449) This reverts #39643, effectively For the record, @SomeoneToIgnore found it quite cumbersome to scroll through logs just to see which tests have failed. I kinda see the argument. At the same time, I wish nextest could do both: it could aggregate logs of failed tests and then print out the summary. 
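The change itself is just the command-string swap in the workflow files and in the xtask generator below. To make the two output modes under discussion concrete, here is a hypothetical sketch; the `FailureOutput` enum and `cargo_nextest_command` helper are invented for illustration and are not part of this patch or of the real xtask API (which returns a `Step` via `named::run`):

```rust
/// Hypothetical helper mirroring the shape of tooling/xtask's `cargo_nextest`
/// step generator; the real code builds a workflow `Step`, while this sketch
/// only builds the command string.
enum FailureOutput {
    /// Omit the flag and rely on nextest's default output handling.
    Default,
    /// The reverted behavior: also repeat every failure's output at the end.
    ImmediateFinal,
}

fn cargo_nextest_command(failure_output: FailureOutput) -> String {
    let base = "cargo nextest run --workspace --no-fail-fast";
    match failure_output {
        FailureOutput::Default => base.to_string(),
        FailureOutput::ImmediateFinal => format!("{base} --failure-output immediate-final"),
    }
}

fn main() {
    // What CI runs after this change:
    assert_eq!(
        cargo_nextest_command(FailureOutput::Default),
        "cargo nextest run --workspace --no-fail-fast"
    );
    // What it ran before the revert:
    println!("{}", cargo_nextest_command(FailureOutput::ImmediateFinal));
}
```

With `--failure-output immediate-final`, nextest prints a failing test's output immediately and then repeats it all at the end of the run, which is what made the CI logs cumbersome to scroll; dropping the flag keeps only the immediate output.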
Release Notes: - N/A --- .github/workflows/extension_tests.yml | 2 +- .github/workflows/release.yml | 6 +++--- .github/workflows/release_nightly.yml | 2 +- .github/workflows/run_tests.yml | 6 +++--- tooling/xtask/src/tasks/workflows/steps.rs | 5 +---- 5 files changed, 9 insertions(+), 12 deletions(-) diff --git a/.github/workflows/extension_tests.yml b/.github/workflows/extension_tests.yml index 89289fbea20999ada413ef1801bb428f03c82c6b..e579c6739dd3201d37b8029fcbc205f28f9bafd9 100644 --- a/.github/workflows/extension_tests.yml +++ b/.github/workflows/extension_tests.yml @@ -77,7 +77,7 @@ jobs: uses: taiki-e/install-action@nextest - name: steps::cargo_nextest if: inputs.run_tests - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} timeout-minutes: 3 check_extension: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 90d105880f94ee428f01746ed627f5c6f7d4e246..7afac285b5a34df2aadd04952400809059e12222 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -33,7 +33,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -80,7 +80,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -112,7 +112,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() diff --git a/.github/workflows/release_nightly.yml b/.github/workflows/release_nightly.yml index bb327f2c5527d353c9aad01c3e26edcf5baac78c..d76244175accc3e816cbd7d5dc322d2529a0a236 100644 --- a/.github/workflows/release_nightly.yml +++ b/.github/workflows/release_nightly.yml @@ -51,7 +51,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 323dd7fd1b52eb43400658470ee7d7c986f219fa..8ed11788d44317d93899f629956567228fbb55fe 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -117,7 +117,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than.ps1 250 shell: pwsh - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: pwsh - name: steps::cleanup_cargo_config if: always() @@ -166,7 +166,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 250 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo 
nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() @@ -200,7 +200,7 @@ jobs: run: ./script/clear-target-dir-if-larger-than 300 shell: bash -euxo pipefail {0} - name: steps::cargo_nextest - run: cargo nextest run --workspace --no-fail-fast --failure-output immediate-final + run: cargo nextest run --workspace --no-fail-fast shell: bash -euxo pipefail {0} - name: steps::cleanup_cargo_config if: always() diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs index c5edbdf8439675b4264388001322c12f4f3026e9..910b344cb7319e4f58911b3025632e560553716a 100644 --- a/tooling/xtask/src/tasks/workflows/steps.rs +++ b/tooling/xtask/src/tasks/workflows/steps.rs @@ -53,10 +53,7 @@ pub fn cargo_install_nextest() -> Step { } pub fn cargo_nextest(platform: Platform) -> Step { - named::run( - platform, - "cargo nextest run --workspace --no-fail-fast --failure-output immediate-final", - ) + named::run(platform, "cargo nextest run --workspace --no-fail-fast") } pub fn setup_cargo_config(platform: Platform) -> Step { From a359a5a1f248616f4bece8d082fab0c6fe60cc9c Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Tue, 25 Nov 2025 13:49:27 +0100 Subject: [PATCH 0362/1030] Add performance doc (#43265) Release Notes: - N/A --- docs/src/SUMMARY.md | 1 + docs/src/performance.md | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) create mode 100644 docs/src/performance.md diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index dc42cfbdbb89d06162016f8ec2548ad630d20bc9..7425e77fb42af3922ca50fbb8fae7cd8f75d9313 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -167,6 +167,7 @@ - [FreeBSD](./development/freebsd.md) - [Local Collaboration](./development/local-collaboration.md) - [Using Debuggers](./development/debuggers.md) + - [Performance](./performance.md) - [Glossary](./development/glossary.md) - [Release Notes](./development/release-notes.md) - [Debugging Crashes](./development/debugging-crashes.md) diff --git a/docs/src/performance.md b/docs/src/performance.md new file mode 100644 index 0000000000000000000000000000000000000000..bcd9da1a59533469758d309cb19b8dec30bab012 --- /dev/null +++ b/docs/src/performance.md @@ -0,0 +1,32 @@ +How to use our internal tools to profile and keep Zed fast. + +# Tracy + +Get a profile of the Zed foreground executor. + +The profiler always runs in the background. You can save a trace from its UI or look at the results live. + +## Setup/Building the importer: + +- Clone the repo at git@github.com:zed-industries/tracy.git on the v0.12.2 branch +- `cd profiler && mkdir build && cd build` +- Run cmake to generate build files: `cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ..` +- Build the importer: `ninja` +- Run the importer on the trace file: `./tracy-import-miniprofiler /path/to/trace.miniprof /path/to/output.tracy` +- Open the trace in tracy: + - If you're on Windows, download the v0.12.2 version from the releases on the upstream repo + - If you're on other platforms, open it on the website: https://tracy.nereid.pl/ (the version might mismatch, so your luck might vary; ideally we need to host our own) + +## To Save a Trace: + +- Run the action: `zed open performance profiler` +- Hit the save button. This opens a save dialog; if that fails to open, the trace gets saved in your working directory.
+- Convert the profile so it can be imported in tracy using the importer: `./tracy-import-miniprofiler output.tracy` +- Go to hit the 'power button' in the top left and then open saved trace. +- Now zoom in to see the tasks and how long they took + +# Warn if function is slow + +```rust +let _timer = zlog::time!("my_function_name").warn_if_gt(std::time::Duration::from_millis(100)); +``` From f8965317c394b758106bd83aebb94ddaea0ef00e Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 25 Nov 2025 14:41:19 +0100 Subject: [PATCH 0363/1030] multi_buffer: Fix up some anchor checks (#43454) Release Notes: - N/A *or* Added/Fixed/Improved ... --- Cargo.lock | 1 + crates/agent_ui/src/inline_assistant.rs | 1 + crates/assistant_text_thread/Cargo.toml | 1 + .../src/assistant_text_thread_tests.rs | 7 ++- .../assistant_text_thread/src/text_thread.rs | 12 +++-- crates/buffer_diff/src/buffer_diff.rs | 1 + crates/editor/src/selections_collection.rs | 23 ++++++++++ crates/multi_buffer/src/anchor.rs | 44 +++++++++++++++---- crates/multi_buffer/src/multi_buffer.rs | 43 +++++++++++++----- crates/multi_buffer/src/multi_buffer_tests.rs | 2 + crates/multi_buffer/src/path_key.rs | 13 ++++-- crates/remote/src/transport/wsl.rs | 25 +++++------ crates/rope/src/rope.rs | 6 +-- crates/text/src/anchor.rs | 20 ++++++++- crates/text/src/text.rs | 13 +++--- 15 files changed, 154 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 93961b4181aa1ad721ba8d740736d86c2ae32ca2..2698d882403b159f8ed350c59cc8e98ab467360d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -884,6 +884,7 @@ dependencies = [ "fuzzy", "gpui", "indoc", + "itertools 0.14.0", "language", "language_model", "log", diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index f822c79f2589c757173bcd2699ef6abf2ac51027..81242135757561a6c829cc9cabf8893294d9e875 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -1445,6 +1445,7 @@ impl InlineAssistant { multi_buffer.update(cx, |multi_buffer, cx| { multi_buffer.push_excerpts( old_buffer.clone(), + // todo(lw): buffer_start and buffer_end might come from different snapshots! 
Some(ExcerptRange::new(buffer_start..buffer_end)), cx, ); diff --git a/crates/assistant_text_thread/Cargo.toml b/crates/assistant_text_thread/Cargo.toml index 8dfdfa3828340217456088a246eee5b1568a7a77..7c8fcca3bfa81f6f2de570fa68ecc795cb81b257 100644 --- a/crates/assistant_text_thread/Cargo.toml +++ b/crates/assistant_text_thread/Cargo.toml @@ -29,6 +29,7 @@ fs.workspace = true futures.workspace = true fuzzy.workspace = true gpui.workspace = true +itertools.workspace = true language.workspace = true language_model.workspace = true log.workspace = true diff --git a/crates/assistant_text_thread/src/assistant_text_thread_tests.rs b/crates/assistant_text_thread/src/assistant_text_thread_tests.rs index 75a414dfc4428b3c101a72454bb185b5a171d692..0743641bf5ce33850f28987d834b2e79771cff6f 100644 --- a/crates/assistant_text_thread/src/assistant_text_thread_tests.rs +++ b/crates/assistant_text_thread/src/assistant_text_thread_tests.rs @@ -880,10 +880,9 @@ async fn test_random_context_collaboration(cx: &mut TestAppContext, mut rng: Std let num_sections = rng.random_range(0..=3); let mut section_start = 0; for _ in 0..num_sections { - let mut section_end = rng.random_range(section_start..=output_text.len()); - while !output_text.is_char_boundary(section_end) { - section_end += 1; - } + let section_end = output_text.floor_char_boundary( + rng.random_range(section_start..=output_text.len()), + ); events.push(Ok(SlashCommandEvent::StartSection { icon: IconName::Ai, label: "section".into(), diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index a50e410ab7d1bd1eb34ba367dfbfd36a7b2ec826..2bc4ceec4c243a654abf04b19b4e2ba93a1fef4f 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -16,6 +16,7 @@ use gpui::{ App, AppContext as _, Context, Entity, EventEmitter, RenderImage, SharedString, Subscription, Task, }; +use itertools::Itertools as _; use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset}; use language_model::{ LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent, @@ -1853,14 +1854,17 @@ impl TextThread { } if ensure_trailing_newline - && buffer.contains_str_at(command_range_end, "\n") + && buffer + .chars_at(command_range_end) + .next() + .is_some_and(|c| c == '\n') { - let newline_offset = insert_position.saturating_sub(1); - if buffer.contains_str_at(newline_offset, "\n") + if let Some((prev_char, '\n')) = + buffer.reversed_chars_at(insert_position).next_tuple() && last_section_range.is_none_or(|last_section_range| { !last_section_range .to_offset(buffer) - .contains(&newline_offset) + .contains(&(insert_position - prev_char.len_utf8())) }) { deletions.push((command_range_end..command_range_end + 1, "")); diff --git a/crates/buffer_diff/src/buffer_diff.rs b/crates/buffer_diff/src/buffer_diff.rs index d6ae5545200bb47976554814e346be3039fa276e..52c6463b9bcccd242ef18e5f3dcb518bd335686d 100644 --- a/crates/buffer_diff/src/buffer_diff.rs +++ b/crates/buffer_diff/src/buffer_diff.rs @@ -147,6 +147,7 @@ impl std::fmt::Debug for BufferDiffInner { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BufferDiffSnapshot") .field("hunks", &self.hunks) + .field("remote_id", &self.base_text.remote_id()) .finish() } } diff --git a/crates/editor/src/selections_collection.rs b/crates/editor/src/selections_collection.rs index 
c1b8d11db94de7394b36ec706f42622993b63785..f8ff9da763403b0946e99a4e39c934ff43ad6634 100644 --- a/crates/editor/src/selections_collection.rs +++ b/crates/editor/src/selections_collection.rs @@ -415,6 +415,29 @@ impl SelectionsCollection { !mutable_collection.disjoint.is_empty() || mutable_collection.pending.is_some(), "There must be at least one selection" ); + if cfg!(debug_assertions) { + mutable_collection.disjoint.iter().for_each(|selection| { + assert!( + snapshot.can_resolve(&selection.start), + "disjoint selection start is not resolvable for the given snapshot:\n{selection:?}", + ); + assert!( + snapshot.can_resolve(&selection.end), + "disjoint selection end is not resolvable for the given snapshot: {selection:?}", + ); + }); + if let Some(pending) = &mutable_collection.pending { + let selection = &pending.selection; + assert!( + snapshot.can_resolve(&selection.start), + "pending selection start is not resolvable for the given snapshot: {pending:?}", + ); + assert!( + snapshot.can_resolve(&selection.end), + "pending selection end is not resolvable for the given snapshot: {pending:?}", + ); + } + } (mutable_collection.selections_changed, result) } diff --git a/crates/multi_buffer/src/anchor.rs b/crates/multi_buffer/src/anchor.rs index 57b5244b3f276265c31f1431701a2bd7d8e59aef..b8c1680574a86354d92f39c544c202642293f619 100644 --- a/crates/multi_buffer/src/anchor.rs +++ b/crates/multi_buffer/src/anchor.rs @@ -9,14 +9,33 @@ use std::{ use sum_tree::Bias; use text::BufferId; -#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] pub struct Anchor { + /// Invariant: If buffer id is `None`, excerpt id must be `ExcerptId::min()` or `ExcerptId::max()`. pub buffer_id: Option, pub excerpt_id: ExcerptId, pub text_anchor: text::Anchor, pub diff_base_anchor: Option, } +impl std::fmt::Debug for Anchor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if *self == Self::min() { + return f.write_str("Anchor::MIN"); + } + if *self == Self::max() { + return f.write_str("Anchor::MAX"); + } + + f.debug_struct("Anchor") + .field("buffer_id", &self.buffer_id) + .field("excerpt_id", &self.excerpt_id) + .field("text_anchor", &self.text_anchor) + .field("diff_base_anchor", &self.diff_base_anchor) + .finish() + } +} + impl Anchor { pub fn with_diff_base_anchor(self, diff_base_anchor: text::Anchor) -> Self { Self { @@ -30,6 +49,10 @@ impl Anchor { buffer_id: BufferId, text_anchor: text::Anchor, ) -> Self { + debug_assert!( + text_anchor.buffer_id.is_none_or(|id| id == buffer_id), + "buffer id does not match the one in the text anchor: {buffer_id:?} {text_anchor:?}", + ); Self { buffer_id: Some(buffer_id), excerpt_id, @@ -77,7 +100,12 @@ impl Anchor { if excerpt_id_cmp.is_ne() { return excerpt_id_cmp; } - if self_excerpt_id == ExcerptId::min() || self_excerpt_id == ExcerptId::max() { + if self_excerpt_id == ExcerptId::max() + && self.text_anchor == text::Anchor::MAX + && self.text_anchor == text::Anchor::MAX + && self.diff_base_anchor.is_none() + && other.diff_base_anchor.is_none() + { return Ordering::Equal; } if let Some(excerpt) = snapshot.excerpt(self_excerpt_id) { @@ -119,8 +147,8 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: self.buffer_id, - excerpt_id: self.excerpt_id, + buffer_id: Some(excerpt.buffer_id), + excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_left(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { if let Some(base_text) = snapshot @@ -143,8 
+171,8 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: self.buffer_id, - excerpt_id: self.excerpt_id, + buffer_id: Some(excerpt.buffer_id), + excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_right(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { if let Some(base_text) = snapshot @@ -174,8 +202,8 @@ impl Anchor { } pub fn is_valid(&self, snapshot: &MultiBufferSnapshot) -> bool { - if *self == Anchor::min() || *self == Anchor::max() { - true + if *self == Anchor::min() || self.excerpt_id == ExcerptId::max() { + !snapshot.is_empty() } else if let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { (self.text_anchor == excerpt.range.context.start || self.text_anchor == excerpt.range.context.end diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 93fa26e02936884bc4b9dfd19bdea37455f1fd6e..7922692d30eb3a79e835f5e4b94313c3ea886a7c 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -5076,8 +5076,7 @@ impl MultiBufferSnapshot { excerpt_id: ExcerptId, text_anchor: Range, ) -> Option> { - let excerpt_id = self.latest_excerpt_id(excerpt_id); - let excerpt = self.excerpt(excerpt_id)?; + let excerpt = self.excerpt(self.latest_excerpt_id(excerpt_id))?; Some( self.anchor_in_excerpt_(excerpt, text_anchor.start)? @@ -5092,8 +5091,7 @@ impl MultiBufferSnapshot { excerpt_id: ExcerptId, text_anchor: text::Anchor, ) -> Option { - let excerpt_id = self.latest_excerpt_id(excerpt_id); - let excerpt = self.excerpt(excerpt_id)?; + let excerpt = self.excerpt(self.latest_excerpt_id(excerpt_id))?; self.anchor_in_excerpt_(excerpt, text_anchor) } @@ -5130,7 +5128,8 @@ impl MultiBufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - if anchor.excerpt_id == ExcerptId::min() || anchor.excerpt_id == ExcerptId::max() { + if *anchor == Anchor::min() || anchor.excerpt_id == ExcerptId::max() { + // todo(lw): should be `!self.is_empty()` true } else if let Some(excerpt) = self.excerpt(anchor.excerpt_id) { excerpt.buffer.can_resolve(&anchor.text_anchor) @@ -5791,8 +5790,8 @@ impl MultiBufferSnapshot { .and_then(|(buffer, _)| buffer.file()) } - pub fn language_at(&self, point: T) -> Option<&Arc> { - self.point_to_buffer_offset(point) + pub fn language_at(&self, offset: T) -> Option<&Arc> { + self.point_to_buffer_offset(offset) .and_then(|(buffer, offset)| buffer.language_at(offset)) } @@ -5992,13 +5991,27 @@ impl MultiBufferSnapshot { theme: Option<&SyntaxTheme>, ) -> Option<(BufferId, Vec>)> { let anchor = self.anchor_before(offset); - let excerpt_id = anchor.excerpt_id; - let excerpt = self.excerpt(excerpt_id)?; - let buffer_id = excerpt.buffer_id; + let excerpt @ &Excerpt { + id: excerpt_id, + buffer_id, + ref buffer, + .. + } = self.excerpt(anchor.excerpt_id)?; + if cfg!(debug_assertions) { + match anchor.buffer_id { + // we clearly are hitting this according to sentry, but in what situations can this occur? + Some(anchor_buffer_id) => { + assert_eq!( + anchor_buffer_id, buffer_id, + "anchor {anchor:?} does not match with resolved excerpt {excerpt:?}" + ) + } + None => assert_eq!(anchor, Anchor::max()), + } + }; Some(( buffer_id, - excerpt - .buffer + buffer .symbols_containing(anchor.text_anchor, theme) .into_iter() .flat_map(|item| { @@ -6114,6 +6127,12 @@ impl MultiBufferSnapshot { } } + /// Returns the excerpt for the given id. 
The returned excerpt is guaranteed + /// to have the same excerpt id as the one passed in, with the exception of + /// `ExcerptId::max()`. + /// + /// Callers of this function should generally use the resulting excerpt's `id` field + /// afterwards. fn excerpt(&self, excerpt_id: ExcerptId) -> Option<&Excerpt> { let mut cursor = self.excerpts.cursor::>(()); let locator = self.excerpt_locator_for_id(excerpt_id); diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 526c77db85a3efabb0e64b184dbed6fa90097558..9517f1f76ece2f34aa5c95eb27b408e1ef004b99 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -3050,7 +3050,9 @@ async fn test_random_multibuffer(cx: &mut TestAppContext, mut rng: StdRng) { for _ in 0..10 { let end_ix = rng.random_range(0..=text_rope.len()); + let end_ix = text_rope.floor_char_boundary(end_ix); let start_ix = rng.random_range(0..=end_ix); + let start_ix = text_rope.floor_char_boundary(start_ix); assert_eq!( snapshot .bytes_in_range(MultiBufferOffset(start_ix)..MultiBufferOffset(end_ix)) diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 926ceff202837d13fe14350ee0334cbf4036bd89..530bb4aa6435fb9a3aa768d84a2bbcf829eb72c6 100644 --- a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -57,7 +57,7 @@ impl MultiBuffer { let snapshot = self.read(cx); let excerpt = snapshot.excerpt(*excerpt_id)?; Some(Anchor::in_buffer( - *excerpt_id, + excerpt.id, excerpt.buffer_id, excerpt.range.context.start, )) @@ -182,11 +182,16 @@ impl MultiBuffer { }; let ids_to_expand = HashSet::from_iter(ids); + let mut excerpt_id_ = None; let expanded_ranges = excerpt_ids.iter().filter_map(|excerpt_id| { let excerpt = snapshot.excerpt(*excerpt_id)?; + let excerpt_id = excerpt.id; + if excerpt_id_.is_none() { + excerpt_id_ = Some(excerpt_id); + } let mut context = excerpt.range.context.to_point(&excerpt.buffer); - if ids_to_expand.contains(excerpt_id) { + if ids_to_expand.contains(&excerpt_id) { match direction { ExpandExcerptDirection::Up => { context.start.row = context.start.row.saturating_sub(line_count); @@ -222,10 +227,10 @@ impl MultiBuffer { } merged_ranges.push(range) } - let Some(excerpt_id) = excerpt_ids.first() else { + let Some(excerpt_id) = excerpt_id_ else { continue; }; - let Some(buffer_id) = &snapshot.buffer_id_for_excerpt(*excerpt_id) else { + let Some(buffer_id) = &snapshot.buffer_id_for_excerpt(excerpt_id) else { continue; }; diff --git a/crates/remote/src/transport/wsl.rs b/crates/remote/src/transport/wsl.rs index 9fdf14d9fed6e6caf108171e292d4c2f33709ce7..3239f8813159a42a95c607a1b893845d4d5ae3c8 100644 --- a/crates/remote/src/transport/wsl.rs +++ b/crates/remote/src/transport/wsl.rs @@ -141,10 +141,6 @@ impl WslRemoteConnection { windows_path_to_wsl_path_impl(&self.connection_options, source, self.can_exec).await } - fn wsl_command(&self, program: &str, args: &[impl AsRef]) -> process::Command { - wsl_command_impl(&self.connection_options, program, args, self.can_exec) - } - async fn run_wsl_command(&self, program: &str, args: &[&str]) -> Result { run_wsl_command_impl(&self.connection_options, program, args, self.can_exec).await } @@ -345,16 +341,17 @@ impl RemoteConnection for WslRemoteConnection { if reconnect { proxy_args.push("--reconnect".to_owned()); } - let proxy_process = match self - .wsl_command("env", &proxy_args) - .kill_on_drop(true) - .spawn() - { - Ok(process) => process, - 
Err(error) => { - return Task::ready(Err(anyhow!("failed to spawn remote server: {}", error))); - } - }; + + let proxy_process = + match wsl_command_impl(&self.connection_options, "env", &proxy_args, self.can_exec) + .kill_on_drop(true) + .spawn() + { + Ok(process) => process, + Err(error) => { + return Task::ready(Err(anyhow!("failed to spawn remote server: {}", error))); + } + }; super::handle_rpc_messages_over_child_process_stdio( proxy_process, diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index ad39022c0d6181bd5d5f4fdfc1b84ea4a667340d..32894fb84469287fb1474efc57d8180bdee13466 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -715,10 +715,8 @@ impl<'a> Chunks<'a> { range.start }; let chunk_offset = offset - chunks.start(); - if let Some(chunk) = chunks.item() - && !chunk.text.is_char_boundary(chunk_offset) - { - panic!("byte index {} is not a char boundary", offset); + if let Some(chunk) = chunks.item() { + chunk.assert_char_boundary(chunk_offset); } Self { chunks, diff --git a/crates/text/src/anchor.rs b/crates/text/src/anchor.rs index cf2febdfc505b426fd8d224a2dc29f18d22cd1a8..63a9ff6f1863041594fba7ebea0b3feaba6b8db7 100644 --- a/crates/text/src/anchor.rs +++ b/crates/text/src/anchor.rs @@ -6,7 +6,7 @@ use std::{cmp::Ordering, fmt::Debug, ops::Range}; use sum_tree::{Bias, Dimensions}; /// A timestamped position in a buffer -#[derive(Copy, Clone, Eq, PartialEq, Debug, Hash)] +#[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct Anchor { pub timestamp: clock::Lamport, /// The byte offset in the buffer @@ -16,6 +16,24 @@ pub struct Anchor { pub buffer_id: Option, } +impl Debug for Anchor { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if *self == Self::MIN { + return f.write_str("Anchor::MIN"); + } + if *self == Self::MAX { + return f.write_str("Anchor::MAX"); + } + + f.debug_struct("Anchor") + .field("timestamp", &self.timestamp) + .field("offset", &self.offset) + .field("bias", &self.bias) + .field("buffer_id", &self.buffer_id) + .finish() + } +} + impl Anchor { pub const MIN: Self = Self { timestamp: clock::Lamport::MIN, diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index fe9fe26f1bcc89b66753703e03f0a8bfeec628bd..5f87e5441d2bb97863b0086ac273e4d4d8acfdc9 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -2444,7 +2444,9 @@ impl BufferSnapshot { } else if bias == Bias::Right && offset == self.len() { Anchor::MAX } else { - if offset > self.visible_text.len() { + if cfg!(debug_assertions) { + self.visible_text.assert_char_boundary(offset); + } else if offset > self.visible_text.len() { panic!("offset {} is out of bounds", offset) } let (start, _, item) = self.fragments.find::(&None, &offset, bias); @@ -3137,12 +3139,9 @@ impl ToOffset for Point { impl ToOffset for usize { fn to_offset(&self, snapshot: &BufferSnapshot) -> usize { - assert!( - *self <= snapshot.len(), - "offset {} is out of range, snapshot length is {}", - self, - snapshot.len() - ); + if cfg!(debug_assertions) { + snapshot.as_rope().assert_char_boundary(*self); + } *self } } From 1cbb49864c32feae332c082f7f10aa0ab17750bc Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Tue, 25 Nov 2025 16:01:38 +0100 Subject: [PATCH 0364/1030] document how to do flamecharts in an easy way (#43461) Release Notes: - N/A --- docs/src/performance.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/src/performance.md b/docs/src/performance.md index 
bcd9da1a59533469758d309cb19b8dec30bab012..9dff1d7f5ff0961d33169ee5c8761016d8fb7564 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -1,10 +1,19 @@ How to use our internal tools to profile and keep Zed fast. -# Tracy +# Flamechart/CPU profiling -Get a profile of the zed foreground executor +See what the CPU spends the most time on. Strongly recommend you use +[samply](https://github.com/mstange/samply). It opens an interactive profile in +the browser. See its README on how to install and run. -The profiler always runs in the background. You can save a trace from its UI or look at the results live. +# Task/Async profiling + +Get a profile of the zed foreground executor and background executors. Check if +anything is blocking the foreground too long or taking too much (clock) time in +the background. + +The profiler always runs in the background. You can save a trace from its UI or +look at the results live. ## Setup/Building the importer: From f58de2106885b121aaae6473d8e3511dcb65eabc Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Tue, 25 Nov 2025 23:08:49 +0800 Subject: [PATCH 0365/1030] miniprofiler_ui: Improve MiniProfiler to use uniform list (#43457) Release Notes: - N/A --- - Apply uniform_list for timing list for performance. - Add paddings for window. - Add space to `ms`, before: `100ms` after `100 ms`. ## Before image ## After image --- crates/gpui/src/elements/uniform_list.rs | 7 +- crates/miniprofiler_ui/src/miniprofiler_ui.rs | 144 ++++++++++-------- 2 files changed, 84 insertions(+), 67 deletions(-) diff --git a/crates/gpui/src/elements/uniform_list.rs b/crates/gpui/src/elements/uniform_list.rs index ba002a67f3c614e614dd591d795f839e7f1ea73d..72843ea6330aaa24d9e1d6bf34d024cdeb54ad4a 100644 --- a/crates/gpui/src/elements/uniform_list.rs +++ b/crates/gpui/src/elements/uniform_list.rs @@ -11,7 +11,7 @@ use crate::{ StyleRefinement, Styled, Window, point, size, }; use smallvec::SmallVec; -use std::{cell::RefCell, cmp, ops::Range, rc::Rc}; +use std::{cell::RefCell, cmp, ops::Range, rc::Rc, usize}; use super::ListHorizontalSizingBehavior; @@ -235,6 +235,11 @@ impl UniformListScrollHandle { false } } + + /// Scroll to the bottom of the list. 
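The `scroll_to_bottom` helper added here, together with the switch to `uniform_list` in the miniprofiler hunks further down, relies on two ideas: rows of a known, uniform height can be virtualized with plain arithmetic, and autoscroll should stay engaged only while the view is already pinned to the bottom. The sketch below illustrates both ideas in plain Rust with no gpui dependency; every name in it is invented for the example and it is not the actual `UniformList` implementation.

```rust
/// Minimal, framework-free sketch of uniform-row virtualization with
/// "stick to bottom" autoscroll. Illustrative only; not gpui code.
#[derive(Debug)]
struct UniformListState {
    item_count: usize,
    item_height: f32,
    viewport_height: f32,
    scroll_offset: f32, // distance scrolled from the top, in pixels
    autoscroll: bool,   // keep pinned to the bottom while new items arrive
}

impl UniformListState {
    /// Because every row has the same height, the visible range is pure
    /// arithmetic: no per-item measurement is needed, which is what makes
    /// rendering only the visible rows cheap.
    fn visible_range(&self) -> std::ops::Range<usize> {
        let first = (self.scroll_offset / self.item_height).floor() as usize;
        let visible = (self.viewport_height / self.item_height).ceil() as usize + 1;
        let last = (first + visible).min(self.item_count);
        first.min(self.item_count)..last
    }

    fn max_offset(&self) -> f32 {
        (self.item_count as f32 * self.item_height - self.viewport_height).max(0.0)
    }

    /// A manual scroll disengages autoscroll unless the user is already
    /// within one row of the bottom.
    fn on_scroll(&mut self, new_offset: f32) {
        self.scroll_offset = new_offset.clamp(0.0, self.max_offset());
        self.autoscroll = self.max_offset() - self.scroll_offset < self.item_height;
    }

    /// New data arrived; if autoscroll is engaged, pin the viewport to the end.
    fn on_items_appended(&mut self, added: usize) {
        self.item_count += added;
        if self.autoscroll {
            self.scroll_offset = self.max_offset();
        }
    }
}

fn main() {
    let mut list = UniformListState {
        item_count: 100,
        item_height: 20.0,
        viewport_height: 200.0,
        scroll_offset: 0.0,
        autoscroll: false,
    };
    list.on_scroll(list.max_offset()); // user scrolls to the bottom -> autoscroll engages
    list.on_items_appended(50); // new timings arrive -> view stays pinned to the end
    println!("visible rows: {:?}", list.visible_range());
}
```

In the patch itself, the same engage/disengage decision is made against the scroll handle's `offset()` and `max_offset()`, and new entries re-pin the view via the `scroll_to_bottom` helper defined here.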
+ pub fn scroll_to_bottom(&self) { + self.scroll_to_item(usize::MAX, ScrollStrategy::Bottom); + } } impl Styled for UniformList { diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs index 5fb80b6307ba3b93b3a9c5def7b8da620fdd738c..93ccfc559c6eedc5e1be1c3ca68355aeba878a76 100644 --- a/crates/miniprofiler_ui/src/miniprofiler_ui.rs +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -1,21 +1,22 @@ use std::{ ops::Range, path::PathBuf, + rc::Rc, time::{Duration, Instant}, }; use gpui::{ - App, AppContext, ClipboardItem, Context, Entity, Hsla, InteractiveElement, IntoElement, - ParentElement, Render, ScrollHandle, SerializedTaskTiming, SharedString, - StatefulInteractiveElement, Styled, Task, TaskTiming, TitlebarOptions, WindowBounds, - WindowHandle, WindowOptions, div, prelude::FluentBuilder, px, relative, size, + App, AppContext, ClipboardItem, Context, Div, Entity, Hsla, InteractiveElement, + ParentElement as _, Render, SerializedTaskTiming, SharedString, StatefulInteractiveElement, + Styled, Task, TaskTiming, TitlebarOptions, UniformListScrollHandle, WindowBounds, WindowHandle, + WindowOptions, div, prelude::FluentBuilder, px, relative, size, uniform_list, }; use util::ResultExt; use workspace::{ Workspace, ui::{ - ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, ToggleState, Tooltip, - WithScrollbar, h_flex, v_flex, + ActiveTheme, Button, ButtonCommon, ButtonStyle, Checkbox, Clickable, Divider, + ScrollableHandle as _, ToggleState, Tooltip, WithScrollbar, h_flex, v_flex, }, }; use zed_actions::OpenPerformanceProfiler; @@ -95,7 +96,7 @@ pub struct ProfilerWindow { data: DataMode, include_self_timings: ToggleState, autoscroll: bool, - scroll_handle: ScrollHandle, + scroll_handle: UniformListScrollHandle, workspace: Option>, _refresh: Option>, } @@ -111,7 +112,7 @@ impl ProfilerWindow { data: DataMode::Realtime(None), include_self_timings: ToggleState::Unselected, autoscroll: true, - scroll_handle: ScrollHandle::new(), + scroll_handle: UniformListScrollHandle::default(), workspace: workspace_handle, _refresh: Some(Self::begin_listen(cx)), }); @@ -128,16 +129,7 @@ impl ProfilerWindow { .get_current_thread_timings(); this.update(cx, |this: &mut ProfilerWindow, cx| { - let scroll_offset = this.scroll_handle.offset(); - let max_offset = this.scroll_handle.max_offset(); - this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); - this.data = DataMode::Realtime(Some(data)); - - if this.autoscroll { - this.scroll_handle.scroll_to_bottom(); - } - cx.notify(); }) .ok(); @@ -157,12 +149,7 @@ impl ProfilerWindow { } } - fn render_timing( - &self, - value_range: Range, - item: TimingBar, - cx: &App, - ) -> impl IntoElement { + fn render_timing(value_range: Range, item: TimingBar, cx: &App) -> Div { let time_ms = item.end.duration_since(item.start).as_secs_f32() * 1000f32; let remap = value_range @@ -227,10 +214,10 @@ impl ProfilerWindow { ) .child( div() - .min_w(px(60.0)) + .min_w(px(70.)) .flex_shrink_0() .text_right() - .child(format!("{:.1}ms", time_ms)), + .child(format!("{:.1} ms", time_ms)), ) } } @@ -241,15 +228,23 @@ impl Render for ProfilerWindow { window: &mut gpui::Window, cx: &mut gpui::Context, ) -> impl gpui::IntoElement { + let scroll_offset = self.scroll_handle.offset(); + let max_offset = self.scroll_handle.max_offset(); + self.autoscroll = -scroll_offset.y >= (max_offset.height - px(24.)); + if self.autoscroll { + self.scroll_handle.scroll_to_bottom(); + } + v_flex() .id("profiler") .w_full() 
.h_full() - .gap_2() .bg(cx.theme().colors().surface_background) .text_color(cx.theme().colors().text) .child( h_flex() + .py_2() + .px_4() .w_full() .justify_between() .child( @@ -346,53 +341,70 @@ impl Render for ProfilerWindow { let min = e[0].start; let max = e[e.len() - 1].end.unwrap_or_else(|| Instant::now()); - div.child( + let timings = Rc::new( + e.into_iter() + .filter(|timing| { + timing + .end + .unwrap_or_else(|| Instant::now()) + .duration_since(timing.start) + .as_millis() + >= 1 + }) + .filter(|timing| { + if self.include_self_timings.selected() { + true + } else { + !timing.location.file().ends_with("miniprofiler_ui.rs") + } + }) + .cloned() + .collect::>(), + ); + + div.child(Divider::horizontal()).child( v_flex() .id("timings.bars") - .overflow_scroll() .w_full() .h_full() .gap_2() - .track_scroll(&self.scroll_handle) - .on_scroll_wheel(cx.listener(|this, _, _, _cx| { - let scroll_offset = this.scroll_handle.offset(); - let max_offset = this.scroll_handle.max_offset(); - this.autoscroll = -scroll_offset.y >= (max_offset.height - px(5.0)); - })) - .children( - e.iter() - .filter(|timing| { - timing - .end - .unwrap_or_else(|| Instant::now()) - .duration_since(timing.start) - .as_millis() - >= 1 - }) - .filter(|timing| { - if self.include_self_timings.selected() { - true - } else { - !timing.location.file().ends_with("miniprofiler_ui.rs") + .child( + uniform_list("list", timings.len(), { + let timings = timings.clone(); + move |visible_range, _, cx| { + let mut items = vec![]; + for i in visible_range { + let timing = &timings[i]; + let value_range = + max.checked_sub(Duration::from_secs(10)).unwrap_or(min) + ..max; + items.push(Self::render_timing( + value_range, + TimingBar { + location: timing.location, + start: timing.start, + end: timing.end.unwrap_or_else(|| Instant::now()), + color: cx + .theme() + .accents() + .color_for_index(i as u32), + }, + cx, + )); } - }) - .enumerate() - .map(|(i, timing)| { - self.render_timing( - max.checked_sub(Duration::from_secs(10)).unwrap_or(min) - ..max, - TimingBar { - location: timing.location, - start: timing.start, - end: timing.end.unwrap_or_else(|| Instant::now()), - color: cx.theme().accents().color_for_index(i as u32), - }, - cx, - ) - }), - ), + items + } + }) + .p_4() + .on_scroll_wheel(cx.listener(|this, _, _, cx| { + this.autoscroll = false; + cx.notify(); + })) + .track_scroll(self.scroll_handle.clone()) + .size_full(), + ) + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) }) } } From 9cae39449a69a7dae284c98d5bd0b55657b1cfc0 Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Tue, 25 Nov 2025 10:53:47 -0500 Subject: [PATCH 0366/1030] Restructure collaboration docs (#43464) Overview - Channels - Private calls --- Up next would be to - [ ] Update any zed.dev links to point to items in this structure - [ ] Update content in these docs (would prefer to do that in a separate PR from this one) Release Notes: - N/A --- docs/src/SUMMARY.md | 5 +++-- docs/src/authentication.md | 2 +- docs/src/{ => collaboration}/channels.md | 4 +--- docs/src/collaboration/overview.md | 17 +++++++++++++++++ .../private-calls.md} | 8 ++------ docs/src/toolchains.md | 2 +- 6 files changed, 25 insertions(+), 13 deletions(-) rename docs/src/{ => collaboration}/channels.md (92%) create mode 100644 docs/src/collaboration/overview.md rename docs/src/{collaboration.md => collaboration/private-calls.md} (91%) diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 7425e77fb42af3922ca50fbb8fae7cd8f75d9313..2f8bcd2ce8be00790866025d5de687d32aee7dcf 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -34,8 +34,9 @@ - [Command-line Interface](./command-line-interface.md) - [Outline Panel](./outline-panel.md) - [Code Completions](./completions.md) -- [Channels](./channels.md) -- [Collaboration](./collaboration.md) +- [Collaboration](./collaboration/overview.md) + - [Channels](./collaboration/channels.md) + - [Private Calls](./collaboration/private-calls.md) - [Git](./git.md) - [Debugger](./debugger.md) - [Diagnostics](./diagnostics.md) diff --git a/docs/src/authentication.md b/docs/src/authentication.md index 6d05567e3198ed5180b65dc0fb5f470baa679f9e..0ea97040a0ae2023143beb5a83d15cd9e28c9786 100644 --- a/docs/src/authentication.md +++ b/docs/src/authentication.md @@ -4,7 +4,7 @@ Signing in to Zed is not required. You can use most features you'd expect in a c ## What Features Require Signing In? -1. All real-time [collaboration features](./collaboration.md). +1. All real-time [collaboration features](./collaboration/overview.md). 2. [LLM-powered features](./ai/overview.md), if you are using Zed as the provider of your LLM models. To use AI without signing in, you can [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys). ## Signing In diff --git a/docs/src/channels.md b/docs/src/collaboration/channels.md similarity index 92% rename from docs/src/channels.md rename to docs/src/collaboration/channels.md index afd97cdabc51f8c54ffd3f85d02c7aa0764d2f8b..bc723d73dedf16d2f75179f9203cdbf473bebbbb 100644 --- a/docs/src/channels.md +++ b/docs/src/collaboration/channels.md @@ -1,7 +1,5 @@ # Channels -At Zed we believe that great things are built by great people working together. We have designed Zed to help every individual work faster and to help teams of people work together more effectively. - ## Overview Channels provide a way to streamline collaborating for software engineers in many ways, but particularly: @@ -27,7 +25,7 @@ After joining a channel, you can `Share` a project with the other people there. When you are editing someone else’s project, you still have the full power of the editor at your fingertips, you can jump to definitions, use the AI assistant, and see any diagnostic errors. This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. And, because you have your own config running, it feels like you’re using your own machine. -See [our collaboration documentation](./collaboration.md) for more details about how this works. 
+See [our collaboration documentation](./private-calls.md) for more details about how this works. ## Notes diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md new file mode 100644 index 0000000000000000000000000000000000000000..8acbecc372cecee7fb87d40685b3a08eb6e046f6 --- /dev/null +++ b/docs/src/collaboration/overview.md @@ -0,0 +1,17 @@ +# Collaboration + +At Zed, we believe that great things are built by great people working together. +We have designed Zed to help individuals work faster and help teams of people work together more effectively. +Zed has two mechanisms for collaborating: + +1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. +1. [Private Calls](./private-calls.md): Ad-hoc private collaboration with those in your contacts list. + +You will need to [sign in](../authentication.md#signing-in) in order to begin using Zed's collaboration features. + +--- + +> Note: Only collaborate with people that you trust. +> Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. +> +> In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. diff --git a/docs/src/collaboration.md b/docs/src/collaboration/private-calls.md similarity index 91% rename from docs/src/collaboration.md rename to docs/src/collaboration/private-calls.md index 8992c7d6ca0185e08ac3923359b0dee9a2fadbfe..8ea4790688f055074c5afcf4eb1d6d63ee49d868 100644 --- a/docs/src/collaboration.md +++ b/docs/src/collaboration/private-calls.md @@ -1,8 +1,4 @@ -# Collaboration - -Only collaborate with people that you trust. Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. - -In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. +# Private Calls ## Adding a collaborator to a call @@ -28,7 +24,7 @@ If someone you want to collaborate with has not yet signed up for Zed, they will ### Voice chat -When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](./configuring-zed.md#calls) setting. +When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](../configuring-zed.md#calls) setting. ## Collaborating on a project diff --git a/docs/src/toolchains.md b/docs/src/toolchains.md index 68e7baa8cf225d85862eadb0ef02674f84b59fd2..f9f5f3fe0e8164b0580786795df0b286a2a7760a 100644 --- a/docs/src/toolchains.md +++ b/docs/src/toolchains.md @@ -8,7 +8,7 @@ With toolchain selector, you don't need to spend time configuring your language You can even select different toolchains for different subprojects within your Zed project. A definition of a subproject is language-specific. In collaborative scenarios, only the project owner can see and modify an active toolchain. 
-In [remote projects](./remote-development.md), you can use the toolchain selector to control the active toolchain on the SSH host. When [sharing your project](./collaboration.md), the toolchain selector is not available to guests. +In [remote projects](./remote-development.md), you can use the toolchain selector to control the active toolchain on the SSH host. When [sharing your project](./collaboration/overview.md), the toolchain selector is not available to guests. ## Why do we need toolchains? From ab80ef18458417ac010f5b6de9c51be3bca9fac3 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Tue, 25 Nov 2025 17:03:21 +0100 Subject: [PATCH 0367/1030] mcp: Fix `source` property showing up as undefined in settings (#43417) Follow up to #39021. image - Add migration to remove `source` tag because `ContextServerSettings` is now untagged - Fix typos in context server modal - PR seems to have removed the `test_action_namespaces` test, which I brought back in this PR Release Notes: - Fixed an issue where the `source` property of MCP settings would show up as unrecognised --- crates/agent/src/tests/mod.rs | 2 +- .../configure_context_server_modal.rs | 11 +- .../src/wasm_host/wit/since_v0_6_0.rs | 2 +- crates/migrator/src/migrations.rs | 6 + .../src/migrations/m_2025_11_25/settings.rs | 17 +++ crates/migrator/src/migrator.rs | 55 +++++++- crates/project/src/context_server_store.rs | 14 +- crates/project/src/project_settings.rs | 38 +++--- .../settings/src/settings_content/project.rs | 4 +- crates/settings/src/vscode_import.rs | 2 +- crates/zed/src/zed.rs | 127 ++++++++++++++++++ docs/src/ai/mcp.md | 4 +- 12 files changed, 237 insertions(+), 45 deletions(-) create mode 100644 crates/migrator/src/migrations/m_2025_11_25/settings.rs diff --git a/crates/agent/src/tests/mod.rs b/crates/agent/src/tests/mod.rs index efba471f1a927446aa96b1c1426c60b42b725b89..b33080671980eb28c7900aea4bb0942d152a054a 100644 --- a/crates/agent/src/tests/mod.rs +++ b/crates/agent/src/tests/mod.rs @@ -2553,7 +2553,7 @@ fn setup_context_server( let mut settings = ProjectSettings::get_global(cx).clone(); settings.context_servers.insert( name.into(), - project::project_settings::ContextServerSettings::Custom { + project::project_settings::ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index ebea8c25fb68a8a5055d4ccaa8b9068583c4b91c..a93df3839d98d95e2f91833078dbe96bc3fb8889 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -182,7 +182,7 @@ impl ConfigurationSource { parse_input(&editor.read(cx).text(cx)).map(|(id, command)| { ( id, - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command, }, @@ -403,7 +403,7 @@ impl ConfigureContextServerModal { window.spawn(cx, async move |cx| { let target = match settings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: _, command, } => Some(ConfigurationTarget::Existing { @@ -635,7 +635,6 @@ impl ConfigureContextServerModal { } fn render_modal_content(&self, cx: &App) -> AnyElement { - // All variants now use single editor approach let editor = match &self.source { ConfigurationSource::New { editor, .. } => editor, ConfigurationSource::Existing { editor, .. 
} => editor, @@ -712,12 +711,12 @@ impl ConfigureContextServerModal { ) } else if let ConfigurationSource::New { is_http, .. } = &self.source { let label = if *is_http { - "Run command" + "Configure Local" } else { - "Connect via HTTP" + "Configure Remote" }; let tooltip = if *is_http { - "Configure an MCP serevr that runs on stdin/stdout." + "Configure an MCP server that runs on stdin/stdout." } else { "Configure an MCP server that you connect to over HTTP" }; diff --git a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs index dd0548d9d182e4b81e8490476eef2420f0e6c13d..c96e5216c4703df2a73e1a0bc27c90d13adbb782 100644 --- a/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs +++ b/crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs @@ -972,7 +972,7 @@ impl ExtensionImports for WasmState { }); match settings { - project::project_settings::ContextServerSettings::Custom { + project::project_settings::ContextServerSettings::Stdio { enabled: _, command, } => Ok(serde_json::to_string(&settings::ContextServerSettings { diff --git a/crates/migrator/src/migrations.rs b/crates/migrator/src/migrations.rs index 8a481c734f9efcce4f6342789df6ff1d7fc01562..07b7d3f0afb141d4dde77b883ca97f4df67cdd6c 100644 --- a/crates/migrator/src/migrations.rs +++ b/crates/migrator/src/migrations.rs @@ -147,3 +147,9 @@ pub(crate) mod m_2025_11_20 { pub(crate) use settings::SETTINGS_PATTERNS; } + +pub(crate) mod m_2025_11_25 { + mod settings; + + pub(crate) use settings::remove_context_server_source; +} diff --git a/crates/migrator/src/migrations/m_2025_11_25/settings.rs b/crates/migrator/src/migrations/m_2025_11_25/settings.rs new file mode 100644 index 0000000000000000000000000000000000000000..944eee8a119714b7d9839e2ddf13ec61db4c18d2 --- /dev/null +++ b/crates/migrator/src/migrations/m_2025_11_25/settings.rs @@ -0,0 +1,17 @@ +use anyhow::Result; +use serde_json::Value; + +pub fn remove_context_server_source(settings: &mut Value) -> Result<()> { + if let Some(obj) = settings.as_object_mut() { + if let Some(context_servers) = obj.get_mut("context_servers") { + if let Some(servers) = context_servers.as_object_mut() { + for (_, server) in servers.iter_mut() { + if let Some(server_obj) = server.as_object_mut() { + server_obj.remove("source"); + } + } + } + } + } + Ok(()) +} diff --git a/crates/migrator/src/migrator.rs b/crates/migrator/src/migrator.rs index fd30bf24982d2625e4f40669aa2e0142b8634186..444ebadfb615628e91422ed62c351722d8cb9300 100644 --- a/crates/migrator/src/migrator.rs +++ b/crates/migrator/src/migrator.rs @@ -223,6 +223,7 @@ pub fn migrate_settings(text: &str) -> Result> { migrations::m_2025_11_20::SETTINGS_PATTERNS, &SETTINGS_QUERY_2025_11_20, ), + MigrationType::Json(migrations::m_2025_11_25::remove_context_server_source), ]; run_migrations(text, migrations) } @@ -1334,7 +1335,6 @@ mod tests { r#"{ "context_servers": { "some-mcp-server": { - "source": "custom", "command": { "path": "npx", "args": [ @@ -1354,7 +1354,6 @@ mod tests { r#"{ "context_servers": { "some-mcp-server": { - "source": "custom", "command": "npx", "args": [ "-y", @@ -1376,7 +1375,6 @@ mod tests { r#"{ "context_servers": { "server-with-extras": { - "source": "custom", "command": { "path": "/usr/bin/node", "args": ["server.js"] @@ -1389,7 +1387,6 @@ mod tests { r#"{ "context_servers": { "server-with-extras": { - "source": "custom", "command": "/usr/bin/node", "args": ["server.js"], "settings": {} @@ -1404,7 +1401,6 @@ mod tests { r#"{ "context_servers": { "simple-server": 
{ - "source": "custom", "command": { "path": "simple-mcp-server" } @@ -1415,7 +1411,6 @@ mod tests { r#"{ "context_servers": { "simple-server": { - "source": "custom", "command": "simple-mcp-server" } } @@ -2311,4 +2306,52 @@ mod tests { ), ); } + + #[test] + fn test_remove_context_server_source() { + assert_migrate_settings( + &r#" + { + "context_servers": { + "extension_server": { + "source": "extension", + "settings": { + "foo": "bar" + } + }, + "custom_server": { + "source": "custom", + "command": "foo", + "args": ["bar"], + "env": { + "FOO": "BAR" + } + }, + } + } + "# + .unindent(), + Some( + &r#" + { + "context_servers": { + "extension_server": { + "settings": { + "foo": "bar" + } + }, + "custom_server": { + "command": "foo", + "args": ["bar"], + "env": { + "FOO": "BAR" + } + }, + } + } + "# + .unindent(), + ), + ); + } } diff --git a/crates/project/src/context_server_store.rs b/crates/project/src/context_server_store.rs index efc2bbf686a273fe18ca3a34f071176d07532981..342a59ab7d5530e8f2268f1c4b72ea44f302f807 100644 --- a/crates/project/src/context_server_store.rs +++ b/crates/project/src/context_server_store.rs @@ -122,7 +122,7 @@ impl ContextServerConfiguration { cx: &AsyncApp, ) -> Option { match settings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: _, command, } => Some(ContextServerConfiguration::Custom { command }), @@ -1003,7 +1003,7 @@ mod tests { ), ( server_2_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1044,7 +1044,7 @@ mod tests { ), ( server_2_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1127,7 +1127,7 @@ mod tests { json!({"code.rs": ""}), vec![( SERVER_1_ID.into(), - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1180,7 +1180,7 @@ mod tests { set_context_server_configuration( vec![( server_1_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: false, command: ContextServerCommand { path: "somebinary".into(), @@ -1209,7 +1209,7 @@ mod tests { set_context_server_configuration( vec![( server_1_id.0.clone(), - settings::ContextServerSettingsContent::Custom { + settings::ContextServerSettingsContent::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), @@ -1328,7 +1328,7 @@ mod tests { } fn dummy_server_settings() -> ContextServerSettings { - ContextServerSettings::Custom { + ContextServerSettings::Stdio { enabled: true, command: ContextServerCommand { path: "somebinary".into(), diff --git a/crates/project/src/project_settings.rs b/crates/project/src/project_settings.rs index 05d5612f7db5b35e3c2fe6513cc45a05ddaac68c..b7dadc52f74f4800741f5cf537ac9f52c09643e3 100644 --- a/crates/project/src/project_settings.rs +++ b/crates/project/src/project_settings.rs @@ -117,7 +117,7 @@ pub struct GlobalLspSettings { #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, Debug)] #[serde(tag = "source", rename_all = "snake_case")] pub enum ContextServerSettings { - Custom { + Stdio { /// Whether the context server is enabled. 
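A note on the serialization shape in this hunk and the one below: `ContextServerSettings` here still uses an internally tagged representation (`#[serde(tag = "source")]`), while the settings-file type `ContextServerSettingsContent` is untagged, so a `source` key left behind in `settings.json` no longer corresponds to any field, which is what the new migration cleans up. The stand-alone sketch below uses invented stand-in types and only `serde`/`serde_json` to show the difference between the two representations; it is not the real Zed types.

```rust
use serde::{Deserialize, Serialize};

// Stand-in for the internally tagged form: the variant name is written into
// the JSON object under the "source" key.
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "source", rename_all = "snake_case")]
enum Tagged {
    Stdio { command: String },
    Http { url: String },
}

// Stand-in for the untagged form: the variant is chosen from the fields alone.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum Untagged {
    Stdio { command: String },
    Http { url: String },
}

fn main() -> serde_json::Result<()> {
    // The internally tagged form writes the discriminant into the object itself...
    let tagged = serde_json::to_string(&Tagged::Stdio { command: "npx".into() })?;
    assert_eq!(tagged, r#"{"source":"stdio","command":"npx"}"#);

    // ...while the untagged form carries no discriminant, so "source" is simply
    // not part of the format anymore.
    let untagged = serde_json::to_string(&Untagged::Http { url: "https://example.com".into() })?;
    assert_eq!(untagged, r#"{"url":"https://example.com"}"#);

    // An old entry that still carries "source" deserializes fine (unknown keys are
    // ignored here), but the key no longer matches any field, which is why it is
    // reported as unrecognized and why this patch migrates it away.
    let old: Untagged = serde_json::from_str(r#"{"source":"custom","command":"npx"}"#)?;
    println!("{old:?}");
    Ok(())
}
```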
#[serde(default = "default_true")] enabled: bool, @@ -125,16 +125,6 @@ pub enum ContextServerSettings { #[serde(flatten)] command: ContextServerCommand, }, - Extension { - /// Whether the context server is enabled. - #[serde(default = "default_true")] - enabled: bool, - /// The settings for this context server specified by the extension. - /// - /// Consult the documentation for the context server to see what settings - /// are supported. - settings: serde_json::Value, - }, Http { /// Whether the context server is enabled. #[serde(default = "default_true")] @@ -145,13 +135,23 @@ pub enum ContextServerSettings { #[serde(skip_serializing_if = "HashMap::is_empty", default)] headers: HashMap, }, + Extension { + /// Whether the context server is enabled. + #[serde(default = "default_true")] + enabled: bool, + /// The settings for this context server specified by the extension. + /// + /// Consult the documentation for the context server to see what settings + /// are supported. + settings: serde_json::Value, + }, } impl From for ContextServerSettings { fn from(value: settings::ContextServerSettingsContent) -> Self { match value { - settings::ContextServerSettingsContent::Custom { enabled, command } => { - ContextServerSettings::Custom { enabled, command } + settings::ContextServerSettingsContent::Stdio { enabled, command } => { + ContextServerSettings::Stdio { enabled, command } } settings::ContextServerSettingsContent::Extension { enabled, settings } => { ContextServerSettings::Extension { enabled, settings } @@ -171,8 +171,8 @@ impl From for ContextServerSettings { impl Into for ContextServerSettings { fn into(self) -> settings::ContextServerSettingsContent { match self { - ContextServerSettings::Custom { enabled, command } => { - settings::ContextServerSettingsContent::Custom { enabled, command } + ContextServerSettings::Stdio { enabled, command } => { + settings::ContextServerSettingsContent::Stdio { enabled, command } } ContextServerSettings::Extension { enabled, settings } => { settings::ContextServerSettingsContent::Extension { enabled, settings } @@ -200,17 +200,17 @@ impl ContextServerSettings { pub fn enabled(&self) -> bool { match self { - ContextServerSettings::Custom { enabled, .. } => *enabled, - ContextServerSettings::Extension { enabled, .. } => *enabled, + ContextServerSettings::Stdio { enabled, .. } => *enabled, ContextServerSettings::Http { enabled, .. } => *enabled, + ContextServerSettings::Extension { enabled, .. } => *enabled, } } pub fn set_enabled(&mut self, enabled: bool) { match self { - ContextServerSettings::Custom { enabled: e, .. } => *e = enabled, - ContextServerSettings::Extension { enabled: e, .. } => *e = enabled, + ContextServerSettings::Stdio { enabled: e, .. } => *e = enabled, ContextServerSettings::Http { enabled: e, .. } => *e = enabled, + ContextServerSettings::Extension { enabled: e, .. } => *e = enabled, } } } diff --git a/crates/settings/src/settings_content/project.rs b/crates/settings/src/settings_content/project.rs index ccad50ce8827f6d7d59a45b0fb2efd4abb5257b7..0076721228b3b8c6b8d5e6bfd85fc1d25f00c5e3 100644 --- a/crates/settings/src/settings_content/project.rs +++ b/crates/settings/src/settings_content/project.rs @@ -192,7 +192,7 @@ pub struct SessionSettingsContent { #[derive(Deserialize, Serialize, Clone, PartialEq, Eq, JsonSchema, MergeFrom, Debug)] #[serde(untagged, rename_all = "snake_case")] pub enum ContextServerSettingsContent { - Custom { + Stdio { /// Whether the context server is enabled. 
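For reference, here is a trimmed, stand-alone version of the `remove_context_server_source` migration introduced earlier in this patch. It needs only `serde_json`, drops the `Result` plumbing of the real function, and is meant purely to show the shape of the JSON rewrite.

```rust
use serde_json::{Value, json};

// Walk the user's settings JSON, visit every entry under "context_servers",
// and drop the now-unused "source" key.
fn remove_context_server_source(settings: &mut Value) {
    let Some(servers) = settings
        .get_mut("context_servers")
        .and_then(Value::as_object_mut)
    else {
        return;
    };
    for server in servers.values_mut() {
        if let Some(server) = server.as_object_mut() {
            server.remove("source");
        }
    }
}

fn main() {
    let mut settings = json!({
        "context_servers": {
            "custom_server": { "source": "custom", "command": "foo", "args": ["bar"] },
            "extension_server": { "source": "extension", "settings": { "foo": "bar" } }
        }
    });
    remove_context_server_source(&mut settings);
    assert!(settings["context_servers"]["custom_server"].get("source").is_none());
    assert!(settings["context_servers"]["extension_server"].get("source").is_none());
    println!("{}", serde_json::to_string_pretty(&settings).unwrap());
}
```

In the patch, the real version is registered through the `MigrationType::Json` hook in `migrator.rs` shown above, so it runs alongside the pattern-based settings migrations.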
#[serde(default = "default_true")] enabled: bool, @@ -225,7 +225,7 @@ pub enum ContextServerSettingsContent { impl ContextServerSettingsContent { pub fn set_enabled(&mut self, enabled: bool) { match self { - ContextServerSettingsContent::Custom { + ContextServerSettingsContent::Stdio { enabled: custom_enabled, .. } => { diff --git a/crates/settings/src/vscode_import.rs b/crates/settings/src/vscode_import.rs index 22081727d8ff767b861a776f0a821e3b4a8d5fdf..7ba07395964266e303965733bdccda42ba7df60e 100644 --- a/crates/settings/src/vscode_import.rs +++ b/crates/settings/src/vscode_import.rs @@ -568,7 +568,7 @@ impl VsCodeSettings { .filter_map(|(k, v)| { Some(( k.clone().into(), - ContextServerSettingsContent::Custom { + ContextServerSettingsContent::Stdio { enabled: true, command: serde_json::from_value::(v.clone()) .ok() diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index f6348a8cf22bda6441bca6d31abe8823c1d2215a..180f53a46b93eaa93ea355ece256807a16d03f43 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -4686,6 +4686,133 @@ mod tests { }); } + /// Checks that action namespaces are the expected set. The purpose of this is to prevent typos + /// and let you know when introducing a new namespace. + #[gpui::test] + async fn test_action_namespaces(cx: &mut gpui::TestAppContext) { + use itertools::Itertools; + + init_keymap_test(cx); + cx.update(|cx| { + let all_actions = cx.all_action_names(); + + let mut actions_without_namespace = Vec::new(); + let all_namespaces = all_actions + .iter() + .filter_map(|action_name| { + let namespace = action_name + .split("::") + .collect::>() + .into_iter() + .rev() + .skip(1) + .rev() + .join("::"); + if namespace.is_empty() { + actions_without_namespace.push(*action_name); + } + if &namespace == "test_only" || &namespace == "stories" { + None + } else { + Some(namespace) + } + }) + .sorted() + .dedup() + .collect::>(); + assert_eq!(actions_without_namespace, Vec::<&str>::new()); + + let expected_namespaces = vec![ + "action", + "activity_indicator", + "agent", + #[cfg(not(target_os = "macos"))] + "app_menu", + "assistant", + "assistant2", + "auto_update", + "bedrock", + "branches", + "buffer_search", + "channel_modal", + "cli", + "client", + "collab", + "collab_panel", + "command_palette", + "console", + "context_server", + "copilot", + "debug_panel", + "debugger", + "dev", + "diagnostics", + "edit_prediction", + "editor", + "feedback", + "file_finder", + "git", + "git_onboarding", + "git_panel", + "go_to_line", + "icon_theme_selector", + "journal", + "keymap_editor", + "keystroke_input", + "language_selector", + "line_ending_selector", + "lsp_tool", + "markdown", + "menu", + "notebook", + "notification_panel", + "onboarding", + "outline", + "outline_panel", + "pane", + "panel", + "picker", + "project_panel", + "project_search", + "project_symbols", + "projects", + "repl", + "rules_library", + "search", + "settings_editor", + "settings_profile_selector", + "snippets", + "stash_picker", + "supermaven", + "svg", + "syntax_tree_view", + "tab_switcher", + "task", + "terminal", + "terminal_panel", + "theme_selector", + "toast", + "toolchain", + "variable_list", + "vim", + "window", + "workspace", + "zed", + "zed_actions", + "zed_predict_onboarding", + "zeta", + ]; + assert_eq!( + all_namespaces, + expected_namespaces + .into_iter() + .map(|namespace| namespace.to_string()) + .sorted() + .collect::>() + ); + }); + } + #[gpui::test] fn test_bundled_settings_and_themes(cx: &mut App) { cx.text_system() diff --git a/docs/src/ai/mcp.md 
b/docs/src/ai/mcp.md index d8d2de2a014459ddeed0f2a0fe92c2cbe84045e4..956477a1c2872d9371f770c3a767e5a77bead9fa 100644 --- a/docs/src/ai/mcp.md +++ b/docs/src/ai/mcp.md @@ -40,12 +40,12 @@ You can connect them by adding their commands directly to your `settings.json`, ```json [settings] { "context_servers": { - "run-command": { + "local-mcp-server": { "command": "some-command", "args": ["arg-1", "arg-2"], "env": {} }, - "over-http": { + "remote-mcp-server": { "url": "custom", "headers": { "Authorization": "Bearer " } } From fafe1afa61b6e1c6a02644a3ff889cd7c0aecda5 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Tue, 25 Nov 2025 17:13:16 +0100 Subject: [PATCH 0368/1030] multi_buffer: Remove redundant buffer id field (#43459) It is easy for us to get the two fields out of sync causing weird problems, there is no reason to have both here so. Release Notes: - N/A *or* Added/Fixed/Improved ... Co-authored by: Antonio Scandurra --- crates/acp_thread/src/acp_thread.rs | 6 +- crates/acp_thread/src/diff.rs | 18 ++- crates/action_log/src/action_log.rs | 30 +++-- crates/agent/src/edit_agent.rs | 33 +++-- crates/agent/src/tools/read_file_tool.rs | 4 +- crates/agent_ui/src/acp/thread_view.rs | 15 ++- crates/agent_ui/src/agent_diff.rs | 2 +- crates/agent_ui/src/inline_assistant.rs | 1 - .../assistant_text_thread/src/text_thread.rs | 15 +-- crates/buffer_diff/src/buffer_diff.rs | 70 +++++++--- crates/collab/src/tests/editor_tests.rs | 10 +- crates/diagnostics/src/diagnostic_renderer.rs | 2 +- crates/diagnostics/src/diagnostics.rs | 2 +- crates/editor/src/display_map/block_map.rs | 2 +- crates/editor/src/editor.rs | 35 ++--- crates/editor/src/editor_tests.rs | 9 +- crates/editor/src/element.rs | 2 +- crates/editor/src/inlays/inlay_hints.rs | 19 ++- crates/editor/src/items.rs | 49 +++---- crates/editor/src/linked_editing_ranges.rs | 4 +- crates/editor/src/lsp_ext.rs | 2 +- crates/editor/src/rust_analyzer_ext.rs | 18 ++- crates/editor/src/test/editor_test_context.rs | 12 +- crates/git_ui/src/commit_view.rs | 6 +- crates/git_ui/src/project_diff.rs | 14 +- crates/language/src/buffer_tests.rs | 2 +- crates/language/src/syntax_map.rs | 6 +- crates/multi_buffer/src/anchor.rs | 56 ++++---- crates/multi_buffer/src/multi_buffer.rs | 123 ++++++++---------- crates/multi_buffer/src/multi_buffer_tests.rs | 7 +- crates/multi_buffer/src/path_key.rs | 7 +- crates/outline_panel/src/outline_panel.rs | 3 +- crates/project/src/lsp_store.rs | 2 +- crates/project/src/project.rs | 3 +- crates/sum_tree/src/tree_map.rs | 6 + crates/text/src/anchor.rs | 52 +++++++- crates/text/src/text.rs | 25 ++-- crates/vim/src/motion.rs | 10 +- crates/vim/src/state.rs | 2 +- 39 files changed, 378 insertions(+), 306 deletions(-) diff --git a/crates/acp_thread/src/acp_thread.rs b/crates/acp_thread/src/acp_thread.rs index 56e33fda47f095eef1873f7a0724b021e88a0bdc..a42eaa491f7f98e9965cd3aba801690ed996a39a 100644 --- a/crates/acp_thread/src/acp_thread.rs +++ b/crates/acp_thread/src/acp_thread.rs @@ -347,13 +347,13 @@ impl ToolCall { let buffer = buffer.await.log_err()?; let position = buffer .update(cx, |buffer, _| { + let snapshot = buffer.snapshot(); if let Some(row) = location.line { - let snapshot = buffer.snapshot(); let column = snapshot.indent_size_for_line(row).len; let point = snapshot.clip_point(Point::new(row, column), Bias::Left); snapshot.anchor_before(point) } else { - Anchor::MIN + Anchor::min_for_buffer(snapshot.remote_id()) } }) .ok()?; @@ -2120,7 +2120,7 @@ impl AcpThread { position: edits .last() .map(|(range, _)| range.end) - 
.unwrap_or(Anchor::MIN), + .unwrap_or(Anchor::min_for_buffer(buffer.read(cx).remote_id())), }), cx, ); diff --git a/crates/acp_thread/src/diff.rs b/crates/acp_thread/src/diff.rs index 055b2f7fb86ffe9d7f12459b6b16405ce77815a0..f17e9d0fce404483ae99efc95bf666586c1f644b 100644 --- a/crates/acp_thread/src/diff.rs +++ b/crates/acp_thread/src/diff.rs @@ -50,9 +50,14 @@ impl Diff { let hunk_ranges = { let buffer = buffer.read(cx); let diff = diff.read(cx); - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer, cx) - .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) - .collect::>() + diff.hunks_intersecting_range( + Anchor::min_for_buffer(buffer.remote_id()) + ..Anchor::max_for_buffer(buffer.remote_id()), + buffer, + cx, + ) + .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) + .collect::>() }; multibuffer.set_excerpts_for_path( @@ -316,7 +321,12 @@ impl PendingDiff { let buffer = self.new_buffer.read(cx); let diff = self.diff.read(cx); let mut ranges = diff - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer, cx) + .hunks_intersecting_range( + Anchor::min_for_buffer(buffer.remote_id()) + ..Anchor::max_for_buffer(buffer.remote_id()), + buffer, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range.to_point(buffer)) .collect::>(); ranges.extend( diff --git a/crates/action_log/src/action_log.rs b/crates/action_log/src/action_log.rs index 78265007a5abe3e724166610013ade776d82dbeb..80c9438bc9f8051cb58357e56a82b5307fd20b75 100644 --- a/crates/action_log/src/action_log.rs +++ b/crates/action_log/src/action_log.rs @@ -409,9 +409,11 @@ impl ActionLog { let new_diff_base = new_diff_base.clone(); async move { let mut unreviewed_edits = Patch::default(); - for hunk in diff_snapshot - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer_snapshot) - { + for hunk in diff_snapshot.hunks_intersecting_range( + Anchor::min_for_buffer(buffer_snapshot.remote_id()) + ..Anchor::max_for_buffer(buffer_snapshot.remote_id()), + &buffer_snapshot, + ) { let old_range = new_diff_base .offset_to_point(hunk.diff_base_byte_range.start) ..new_diff_base.offset_to_point(hunk.diff_base_byte_range.end); @@ -732,12 +734,10 @@ impl ActionLog { cx: &mut Context, ) -> Task<()> { let futures = self.changed_buffers(cx).into_keys().map(|buffer| { - let reject = self.reject_edits_in_ranges( - buffer, - vec![Anchor::MIN..Anchor::MAX], - telemetry.clone(), - cx, - ); + let buffer_ranges = vec![Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + )]; + let reject = self.reject_edits_in_ranges(buffer, buffer_ranges, telemetry.clone(), cx); async move { reject.await.log_err(); @@ -2010,7 +2010,8 @@ mod tests { // User accepts the single hunk action_log.update(cx, |log, cx| { - log.keep_edits_in_range(buffer.clone(), Anchor::MIN..Anchor::MAX, None, cx) + let buffer_range = Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()); + log.keep_edits_in_range(buffer.clone(), buffer_range, None, cx) }); cx.run_until_parked(); assert_eq!(unreviewed_hunks(&action_log, cx), vec![]); @@ -2031,7 +2032,14 @@ mod tests { // User rejects the hunk action_log .update(cx, |log, cx| { - log.reject_edits_in_ranges(buffer.clone(), vec![Anchor::MIN..Anchor::MAX], None, cx) + log.reject_edits_in_ranges( + buffer.clone(), + vec![Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + )], + None, + cx, + ) }) .await .unwrap(); diff --git a/crates/agent/src/edit_agent.rs b/crates/agent/src/edit_agent.rs index e5b1d1e3871ecb0070f60f5f382196482e24963a..5ea04729a49afae944c5e7ca88ad67791e18b6f3 100644 --- 
a/crates/agent/src/edit_agent.rs +++ b/crates/agent/src/edit_agent.rs @@ -172,14 +172,14 @@ impl EditAgent { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX, + position: language::Anchor::max_for_buffer(buffer.read(cx).remote_id()), }), cx, ) }); output_events_tx .unbounded_send(EditAgentOutputEvent::Edited( - language::Anchor::MIN..language::Anchor::MAX, + Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()), )) .ok(); })?; @@ -187,7 +187,7 @@ impl EditAgent { while let Some(event) = parse_rx.next().await { match event? { CreateFileParserEvent::NewTextChunk { chunk } => { - cx.update(|cx| { + let buffer_id = cx.update(|cx| { buffer.update(cx, |buffer, cx| buffer.append(chunk, cx)); self.action_log .update(cx, |log, cx| log.buffer_edited(buffer.clone(), cx)); @@ -195,15 +195,18 @@ impl EditAgent { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX, + position: language::Anchor::max_for_buffer( + buffer.read(cx).remote_id(), + ), }), cx, ) }); + buffer.read(cx).remote_id() })?; output_events_tx .unbounded_send(EditAgentOutputEvent::Edited( - language::Anchor::MIN..language::Anchor::MAX, + Anchor::min_max_range_for_buffer(buffer_id), )) .ok(); } @@ -1200,7 +1203,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1218,7 +1223,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1236,7 +1243,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1254,7 +1263,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); @@ -1269,7 +1280,9 @@ mod tests { project.read_with(cx, |project, _| project.agent_location()), Some(AgentLocation { buffer: buffer.downgrade(), - position: language::Anchor::MAX + position: language::Anchor::max_for_buffer( + cx.update(|cx| buffer.read(cx).remote_id()) + ), }) ); } diff --git a/crates/agent/src/tools/read_file_tool.rs b/crates/agent/src/tools/read_file_tool.rs index eccb40737c744d57792655cadb925e18a68d2835..77852c5fda674c55b324af8bae90d7d6a57bcff0 100644 --- a/crates/agent/src/tools/read_file_tool.rs +++ b/crates/agent/src/tools/read_file_tool.rs @@ -275,7 +275,9 @@ impl AgentTool for ReadFileTool { project.set_agent_location( Some(AgentLocation { buffer: buffer.downgrade(), - position: anchor.unwrap_or(text::Anchor::MIN), + position: anchor.unwrap_or_else(|| { + text::Anchor::min_for_buffer(buffer.read(cx).remote_id()) + }), }), cx, ); diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index fd0b1eedbdf80d1893760e6182cd2e57d96ef010..1c9e3f83e383658051f7799a7e3096f532addbe1 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ 
b/crates/agent_ui/src/acp/thread_view.rs @@ -4103,7 +4103,9 @@ impl AcpThreadView { action_log .reject_edits_in_ranges( buffer.clone(), - vec![Anchor::MIN..Anchor::MAX], + vec![Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + )], Some(telemetry.clone()), cx, ) @@ -4124,7 +4126,9 @@ impl AcpThreadView { action_log.update(cx, |action_log, cx| { action_log.keep_edits_in_range( buffer.clone(), - Anchor::MIN..Anchor::MAX, + Anchor::min_max_range_for_buffer( + buffer.read(cx).remote_id(), + ), Some(telemetry.clone()), cx, ); @@ -4743,11 +4747,8 @@ impl AcpThreadView { let buffer = multibuffer.as_singleton(); if agent_location.buffer.upgrade() == buffer { let excerpt_id = multibuffer.excerpt_ids().first().cloned(); - let anchor = editor::Anchor::in_buffer( - excerpt_id.unwrap(), - buffer.unwrap().read(cx).remote_id(), - agent_location.position, - ); + let anchor = + editor::Anchor::in_buffer(excerpt_id.unwrap(), agent_location.position); editor.change_selections(Default::default(), window, cx, |selections| { selections.select_anchor_ranges([anchor..anchor]); }) diff --git a/crates/agent_ui/src/agent_diff.rs b/crates/agent_ui/src/agent_diff.rs index 53e7a2f46d37e4cd2f0688d5af2a7d4a01174801..8aece1984ad597e629cd966c0e61d6a5681d7020 100644 --- a/crates/agent_ui/src/agent_diff.rs +++ b/crates/agent_ui/src/agent_diff.rs @@ -145,7 +145,7 @@ impl AgentDiffPane { let diff_hunk_ranges = diff .hunks_intersecting_range( - language::Anchor::MIN..language::Anchor::MAX, + language::Anchor::min_max_range_for_buffer(snapshot.remote_id()), &snapshot, cx, ) diff --git a/crates/agent_ui/src/inline_assistant.rs b/crates/agent_ui/src/inline_assistant.rs index 81242135757561a6c829cc9cabf8893294d9e875..0f617044546f186bddb2be5de3983edf9dad2e0c 100644 --- a/crates/agent_ui/src/inline_assistant.rs +++ b/crates/agent_ui/src/inline_assistant.rs @@ -440,7 +440,6 @@ impl InlineAssistant { { let anchor_range = Anchor::range_in_buffer( excerpt_id, - buffer.remote_id(), buffer.anchor_before(buffer_range.start)..buffer.anchor_after(buffer_range.end), ); diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 2bc4ceec4c243a654abf04b19b4e2ba93a1fef4f..613c9b862e8a0b055465a73fe34c541ecb18d4a1 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -797,7 +797,7 @@ impl TextThread { }); let message = MessageAnchor { id: first_message_id, - start: language::Anchor::MIN, + start: language::Anchor::min_for_buffer(this.buffer.read(cx).remote_id()), }; this.messages_metadata.insert( first_message_id, @@ -1147,12 +1147,10 @@ impl TextThread { cx: &App, ) -> bool { let version = &self.buffer.read(cx).version; - let observed_start = range.start == language::Anchor::MIN - || range.start == language::Anchor::MAX - || version.observed(range.start.timestamp); - let observed_end = range.end == language::Anchor::MIN - || range.end == language::Anchor::MAX - || version.observed(range.end.timestamp); + let observed_start = + range.start.is_min() || range.start.is_max() || version.observed(range.start.timestamp); + let observed_end = + range.end.is_min() || range.end.is_max() || version.observed(range.end.timestamp); observed_start && observed_end } @@ -2858,7 +2856,8 @@ impl TextThread { messages.next(); } } - let message_end_anchor = message_end.unwrap_or(language::Anchor::MAX); + let message_end_anchor = + message_end.unwrap_or(language::Anchor::max_for_buffer(buffer.remote_id())); let message_end = 
message_end_anchor.to_offset(buffer); return Some(Message { diff --git a/crates/buffer_diff/src/buffer_diff.rs b/crates/buffer_diff/src/buffer_diff.rs index 52c6463b9bcccd242ef18e5f3dcb518bd335686d..38b9b8e4baa1d0789bf64ae53f28bc28bfe6bd98 100644 --- a/crates/buffer_diff/src/buffer_diff.rs +++ b/crates/buffer_diff/src/buffer_diff.rs @@ -153,6 +153,10 @@ impl std::fmt::Debug for BufferDiffInner { } impl BufferDiffSnapshot { + pub fn buffer_diff_id(&self) -> BufferId { + self.inner.base_text.remote_id() + } + fn empty(buffer: &text::BufferSnapshot, cx: &mut App) -> BufferDiffSnapshot { BufferDiffSnapshot { inner: BufferDiffInner { @@ -340,7 +344,7 @@ impl BufferDiffInner { }; let hunk = PendingHunk { - buffer_range: Anchor::MIN..Anchor::MAX, + buffer_range: Anchor::min_max_range_for_buffer(buffer.remote_id()), diff_base_byte_range: 0..index_text.map_or(0, |rope| rope.len()), buffer_version: buffer.version().clone(), new_status, @@ -780,7 +784,7 @@ fn compute_hunks( } else { tree.push( InternalDiffHunk { - buffer_range: Anchor::MIN..Anchor::MAX, + buffer_range: Anchor::min_max_range_for_buffer(buffer.remote_id()), diff_base_byte_range: 0..0, }, &buffer, @@ -941,10 +945,10 @@ impl BufferDiff { pub fn clear_pending_hunks(&mut self, cx: &mut Context) { if self.secondary_diff.is_some() { self.inner.pending_hunks = SumTree::from_summary(DiffHunkSummary { - buffer_range: Anchor::MIN..Anchor::MIN, + buffer_range: Anchor::min_min_range_for_buffer(self.buffer_id), }); cx.emit(BufferDiffEvent::DiffChanged { - changed_range: Some(Anchor::MIN..Anchor::MAX), + changed_range: Some(Anchor::min_max_range_for_buffer(self.buffer_id)), }); } } @@ -1065,7 +1069,10 @@ impl BufferDiff { { (false, new_state.compare(state, buffer)) } - _ => (true, Some(text::Anchor::MIN..text::Anchor::MAX)), + _ => ( + true, + Some(text::Anchor::min_max_range_for_buffer(self.buffer_id)), + ), }; if let Some(secondary_changed_range) = secondary_diff_change @@ -1126,7 +1133,11 @@ impl BufferDiff { buffer_snapshot: &'a text::BufferSnapshot, cx: &'a App, ) -> impl 'a + Iterator { - self.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, buffer_snapshot, cx) + self.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer_snapshot.remote_id()), + buffer_snapshot, + cx, + ) } pub fn hunks_intersecting_range<'a>( @@ -1222,7 +1233,9 @@ impl BufferDiff { impl DiffHunk { pub fn is_created_file(&self) -> bool { - self.diff_base_byte_range == (0..0) && self.buffer_range == (Anchor::MIN..Anchor::MAX) + self.diff_base_byte_range == (0..0) + && self.buffer_range.start.is_min() + && self.buffer_range.end.is_min() } pub fn status(&self) -> DiffHunkStatus { @@ -1389,7 +1402,10 @@ mod tests { let mut buffer = Buffer::new(ReplicaId::LOCAL, BufferId::new(1).unwrap(), buffer_text); let mut diff = BufferDiffSnapshot::new_sync(buffer.clone(), diff_base.clone(), cx); assert_hunks( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[(1..2, "two\n", "HELLO\n", DiffHunkStatus::modified_none())], @@ -1398,7 +1414,10 @@ mod tests { buffer.edit([(0..0, "point five\n")]); diff = BufferDiffSnapshot::new_sync(buffer.clone(), diff_base.clone(), cx); assert_hunks( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[ @@ -1409,7 +1428,10 @@ mod tests { diff = cx.update(|cx| 
BufferDiffSnapshot::empty(&buffer, cx)); assert_hunks::<&str, _>( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &diff_base, &[], @@ -1483,7 +1505,10 @@ mod tests { ]; assert_hunks( - uncommitted_diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer), + uncommitted_diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer, + ), &buffer, &head_text, &expected_hunks, @@ -1542,8 +1567,11 @@ mod tests { }) .await; assert_eq!( - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &buffer) - .count(), + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(buffer.remote_id()), + &buffer + ) + .count(), 8 ); @@ -2155,8 +2183,12 @@ mod tests { let mut diff = uncommitted_diff(&working_copy, &index_text, head_text.clone(), cx); let mut hunks = diff.update(cx, |diff, cx| { - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &working_copy, cx) - .collect::>() + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &working_copy, + cx, + ) + .collect::>() }); if hunks.is_empty() { return; @@ -2185,8 +2217,12 @@ mod tests { diff = uncommitted_diff(&working_copy, &index_text, head_text.clone(), cx); let found_hunks = diff.update(cx, |diff, cx| { - diff.hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &working_copy, cx) - .collect::>() + diff.hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &working_copy, + cx, + ) + .collect::>() }); assert_eq!(hunks.len(), found_hunks.len()); diff --git a/crates/collab/src/tests/editor_tests.rs b/crates/collab/src/tests/editor_tests.rs index fe20ab935c9fb2ffd2c18962953f9d62ca06fb16..e5d3661aaf1aa0c74a4204e0989018121f5eb64a 100644 --- a/crates/collab/src/tests/editor_tests.rs +++ b/crates/collab/src/tests/editor_tests.rs @@ -1581,7 +1581,10 @@ async fn test_share_project( buffer_a.read_with(cx_a, |buffer, _| { buffer .snapshot() - .selections_in_range(text::Anchor::MIN..text::Anchor::MAX, false) + .selections_in_range( + text::Anchor::min_max_range_for_buffer(buffer.remote_id()), + false, + ) .count() == 1 }); @@ -1622,7 +1625,10 @@ async fn test_share_project( buffer_a.read_with(cx_a, |buffer, _| { buffer .snapshot() - .selections_in_range(text::Anchor::MIN..text::Anchor::MAX, false) + .selections_in_range( + text::Anchor::min_max_range_for_buffer(buffer.remote_id()), + false, + ) .count() == 0 }); diff --git a/crates/diagnostics/src/diagnostic_renderer.rs b/crates/diagnostics/src/diagnostic_renderer.rs index 2636b1aadc9708ff6832a5baa212277672dd305f..72ad7b591413832183bb85d58d188e692d46ffad 100644 --- a/crates/diagnostics/src/diagnostic_renderer.rs +++ b/crates/diagnostics/src/diagnostic_renderer.rs @@ -284,7 +284,7 @@ impl DiagnosticBlock { if range.context.overlaps(&diagnostic.range, &snapshot) { Self::jump_to( editor, - Anchor::range_in_buffer(excerpt_id, buffer_id, diagnostic.range), + Anchor::range_in_buffer(excerpt_id, diagnostic.range), window, cx, ); diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index b24a63b830b93cdbe14e2329abe524f6523cbbd6..413b73d1b6f679fa464d378760e37c773e1583e7 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -308,7 +308,7 @@ impl ProjectDiagnosticsEditor { .selections .all_anchors(&snapshot) .iter() - .filter_map(|anchor| anchor.start.buffer_id) + .filter_map(|anchor| 
anchor.start.text_anchor.buffer_id) .collect::>() }); for buffer_id in buffer_ids { diff --git a/crates/editor/src/display_map/block_map.rs b/crates/editor/src/display_map/block_map.rs index b55c5330dd398428c549ae1932c1f0a25c8e1436..58dea4010caaec98cdc14c9dc0e2b02af8ef1712 100644 --- a/crates/editor/src/display_map/block_map.rs +++ b/crates/editor/src/display_map/block_map.rs @@ -2976,7 +2976,7 @@ mod tests { ); } - #[gpui::test(iterations = 100)] + #[gpui::test(iterations = 60)] fn test_random_blocks(cx: &mut gpui::TestAppContext, mut rng: StdRng) { cx.update(init_test); diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 08627f1bd64be6e62581014628c57306df43623e..cd7a872f8c129c3b67b544ed2ba78d7fde104b48 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -1780,7 +1780,7 @@ impl Editor { let start_row = (multi_buffer_visible_start.row).min(max_row); let end_row = (multi_buffer_visible_start.row + 10).min(max_row); - if let Some((excerpt_id, buffer_id, buffer)) = multi_buffer.read(cx).as_singleton() { + if let Some((excerpt_id, _, buffer)) = multi_buffer.read(cx).as_singleton() { let outline_items = buffer .outline_items_containing( Point::new(start_row, 0)..Point::new(end_row, 0), @@ -1790,10 +1790,9 @@ impl Editor { .into_iter() .map(|outline_item| OutlineItem { depth: outline_item.depth, - range: Anchor::range_in_buffer(*excerpt_id, buffer_id, outline_item.range), + range: Anchor::range_in_buffer(*excerpt_id, outline_item.range), source_range_for_text: Anchor::range_in_buffer( *excerpt_id, - buffer_id, outline_item.source_range_for_text, ), text: outline_item.text, @@ -1801,10 +1800,10 @@ impl Editor { name_ranges: outline_item.name_ranges, body_range: outline_item .body_range - .map(|range| Anchor::range_in_buffer(*excerpt_id, buffer_id, range)), + .map(|range| Anchor::range_in_buffer(*excerpt_id, range)), annotation_range: outline_item .annotation_range - .map(|range| Anchor::range_in_buffer(*excerpt_id, buffer_id, range)), + .map(|range| Anchor::range_in_buffer(*excerpt_id, range)), }); return Some(outline_items.collect()); } @@ -3259,7 +3258,7 @@ impl Editor { } if local { - if let Some(buffer_id) = new_cursor_position.buffer_id { + if let Some(buffer_id) = new_cursor_position.text_anchor.buffer_id { self.register_buffer(buffer_id, cx); } @@ -4198,8 +4197,8 @@ impl Editor { continue; } if self.selections.disjoint_anchor_ranges().any(|s| { - if s.start.buffer_id != selection.start.buffer_id - || s.end.buffer_id != selection.end.buffer_id + if s.start.text_anchor.buffer_id != selection.start.buffer_id + || s.end.text_anchor.buffer_id != selection.end.buffer_id { return false; } @@ -5484,6 +5483,7 @@ impl Editor { } let buffer_position = multibuffer_snapshot.anchor_before(position); let Some(buffer) = buffer_position + .text_anchor .buffer_id .and_then(|buffer_id| self.buffer.read(cx).buffer(buffer_id)) else { @@ -6923,8 +6923,7 @@ impl Editor { continue; } - let range = - Anchor::range_in_buffer(excerpt_id, buffer_id, *start..*end); + let range = Anchor::range_in_buffer(excerpt_id, *start..*end); if highlight.kind == lsp::DocumentHighlightKind::WRITE { write_ranges.push(range); } else { @@ -7033,11 +7032,8 @@ impl Editor { .anchor_after(search_range.start + match_range.start); let match_end = buffer_snapshot .anchor_before(search_range.start + match_range.end); - let match_anchor_range = Anchor::range_in_buffer( - excerpt_id, - buffer_snapshot.remote_id(), - match_start..match_end, - ); + let match_anchor_range = + 
Anchor::range_in_buffer(excerpt_id, match_start..match_end); (match_anchor_range != query_range).then_some(match_anchor_range) }), ); @@ -8212,8 +8208,7 @@ impl Editor { cx, ); for (breakpoint, state) in breakpoints { - let multi_buffer_anchor = - Anchor::in_buffer(excerpt_id, buffer_snapshot.remote_id(), breakpoint.position); + let multi_buffer_anchor = Anchor::in_buffer(excerpt_id, breakpoint.position); let position = multi_buffer_anchor .to_point(&multi_buffer_snapshot) .to_display_point(&snapshot); @@ -20804,8 +20799,7 @@ impl Editor { let start = highlight.range.start.to_display_point(&snapshot); let end = highlight.range.end.to_display_point(&snapshot); let start_row = start.row().0; - let end_row = if highlight.range.end.text_anchor != text::Anchor::MAX - && end.column() == 0 + let end_row = if !highlight.range.end.text_anchor.is_max() && end.column() == 0 { end.row().0.saturating_sub(1) } else { @@ -21361,7 +21355,7 @@ impl Editor { .for_each(|hint| { let inlay = Inlay::debugger( post_inc(&mut editor.next_inlay_id), - Anchor::in_buffer(excerpt_id, buffer_id, hint.position), + Anchor::in_buffer(excerpt_id, hint.position), hint.text(), ); if !inlay.text().chars().contains(&'\n') { @@ -24105,7 +24099,6 @@ impl EditorSnapshot { display_row_range: hunk_display_start.row()..end_row, multi_buffer_range: Anchor::range_in_buffer( hunk.excerpt_id, - hunk.buffer_id, hunk.buffer_range, ), is_created_file, diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 119c7058e061406b4a75017b6c5c8717f9f250c0..f68b15b6b258a5ab730a13af9d7ecc62763321ea 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -21550,10 +21550,9 @@ async fn test_adjacent_diff_hunks(executor: BackgroundExecutor, cx: &mut TestApp .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 2); @@ -21641,10 +21640,9 @@ async fn test_adjacent_diff_hunks(executor: BackgroundExecutor, cx: &mut TestApp .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 2); @@ -21707,10 +21705,9 @@ async fn test_toggle_deletion_hunk_at_start_of_file( .diff_hunks_in_ranges(&[Anchor::min()..Anchor::max()], &snapshot.buffer_snapshot()) .collect::>(); let excerpt_id = editor.buffer.read(cx).excerpt_ids()[0]; - let buffer_id = hunks[0].buffer_id; hunks .into_iter() - .map(|hunk| Anchor::range_in_buffer(excerpt_id, buffer_id, hunk.buffer_range)) + .map(|hunk| Anchor::range_in_buffer(excerpt_id, hunk.buffer_range)) .collect::>() }); assert_eq!(hunk_ranges.len(), 1); diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 71c76c0cb3eba0e70da140191ab5eb8daa5735bc..4ea12f0a21295d97cdcff565c484750e14334223 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -9283,7 +9283,7 @@ impl Element for EditorElement { HashMap::default(); for selection 
in all_anchor_selections.iter() { let head = selection.head(); - if let Some(buffer_id) = head.buffer_id { + if let Some(buffer_id) = head.text_anchor.buffer_id { anchors_by_buffer .entry(buffer_id) .and_modify(|(latest_id, latest_anchor)| { diff --git a/crates/editor/src/inlays/inlay_hints.rs b/crates/editor/src/inlays/inlay_hints.rs index cd9456be7a109ce5c2535339bc153bb5434ab94f..4379276707ceacd07935d660d54ac3b52216a720 100644 --- a/crates/editor/src/inlays/inlay_hints.rs +++ b/crates/editor/src/inlays/inlay_hints.rs @@ -584,8 +584,11 @@ impl Editor { }) .max_by_key(|hint| hint.id) { - if let Some(ResolvedHint::Resolved(cached_hint)) = - hovered_hint.position.buffer_id.and_then(|buffer_id| { + if let Some(ResolvedHint::Resolved(cached_hint)) = hovered_hint + .position + .text_anchor + .buffer_id + .and_then(|buffer_id| { lsp_store.update(cx, |lsp_store, cx| { lsp_store.resolved_hint(buffer_id, hovered_hint.id, cx) }) @@ -757,7 +760,7 @@ impl Editor { let visible_inlay_hint_ids = self .visible_inlay_hints(cx) .iter() - .filter(|inlay| inlay.position.buffer_id == Some(buffer_id)) + .filter(|inlay| inlay.position.text_anchor.buffer_id == Some(buffer_id)) .map(|inlay| inlay.id) .collect::>(); let Some(inlay_hints) = &mut self.inlay_hints else { @@ -858,9 +861,13 @@ impl Editor { self.visible_inlay_hints(cx) .iter() .filter(|inlay| { - inlay.position.buffer_id.is_none_or(|buffer_id| { - invalidate_hints_for_buffers.contains(&buffer_id) - }) + inlay + .position + .text_anchor + .buffer_id + .is_none_or(|buffer_id| { + invalidate_hints_for_buffers.contains(&buffer_id) + }) }) .map(|inlay| inlay.id), ); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 157ad84d053b9125dfd59243098deb680be7b264..7e82336b4403cc8142983ef3802a9cdb9ca9cf2b 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -455,21 +455,13 @@ async fn update_editor_from_message( })??; // Deserialize the editor state. - let (selections, pending_selection, scroll_top_anchor) = this.update(cx, |editor, cx| { - let buffer = editor.buffer.read(cx).read(cx); - let selections = message - .selections - .into_iter() - .filter_map(|selection| deserialize_selection(&buffer, selection)) - .collect::>(); - let pending_selection = message - .pending_selection - .and_then(|selection| deserialize_selection(&buffer, selection)); - let scroll_top_anchor = message - .scroll_top_anchor - .and_then(|anchor| deserialize_anchor(&buffer, anchor)); - anyhow::Ok((selections, pending_selection, scroll_top_anchor)) - })??; + let selections = message + .selections + .into_iter() + .filter_map(deserialize_selection) + .collect::>(); + let pending_selection = message.pending_selection.and_then(deserialize_selection); + let scroll_top_anchor = message.scroll_top_anchor.and_then(deserialize_anchor); // Wait until the buffer has received all of the operations referenced by // the editor's new state. 
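[Editor's note, not part of the patch] The editor-side hunks above all apply the same mechanical rewrite: `Anchor::in_buffer` and `Anchor::range_in_buffer` lose their `buffer_id` argument, and code that read `anchor.buffer_id` now goes through the wrapped text anchor. A minimal sketch of that call-site migration, assuming the `multi_buffer` and `text` crates as modified by this commit; the helper names below are illustrative and do not appear in the diff:

```
use multi_buffer::{Anchor, ExcerptId};

// Illustrative helper showing the new constructor shape.
fn anchor_in_excerpt(excerpt_id: ExcerptId, text_anchor: text::Anchor) -> Anchor {
    // Before this commit: Anchor::in_buffer(excerpt_id, buffer_id, text_anchor)
    // After: the buffer id travels inside `text_anchor.buffer_id`, so the
    // extra argument is gone.
    Anchor::in_buffer(excerpt_id, text_anchor)
}

// Code that previously read `anchor.buffer_id` now reads it off the text anchor.
fn buffer_id_of(anchor: &Anchor) -> Option<text::BufferId> {
    anchor.text_anchor.buffer_id
}
```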
@@ -563,24 +555,20 @@ fn deserialize_excerpt_range( )) } -fn deserialize_selection( - buffer: &MultiBufferSnapshot, - selection: proto::Selection, -) -> Option> { +fn deserialize_selection(selection: proto::Selection) -> Option> { Some(Selection { id: selection.id as usize, - start: deserialize_anchor(buffer, selection.start?)?, - end: deserialize_anchor(buffer, selection.end?)?, + start: deserialize_anchor(selection.start?)?, + end: deserialize_anchor(selection.end?)?, reversed: selection.reversed, goal: SelectionGoal::None, }) } -fn deserialize_anchor(buffer: &MultiBufferSnapshot, anchor: proto::EditorAnchor) -> Option { +fn deserialize_anchor(anchor: proto::EditorAnchor) -> Option { let excerpt_id = ExcerptId::from_proto(anchor.excerpt_id); Some(Anchor::in_buffer( excerpt_id, - buffer.buffer_id_for_excerpt(excerpt_id)?, language::proto::deserialize_anchor(anchor.anchor?)?, )) } @@ -1374,7 +1362,7 @@ impl ProjectItem for Editor { cx: &mut Context, ) -> Self { let mut editor = Self::for_buffer(buffer.clone(), Some(project), window, cx); - if let Some((excerpt_id, buffer_id, snapshot)) = + if let Some((excerpt_id, _, snapshot)) = editor.buffer().read(cx).snapshot(cx).as_singleton() && WorkspaceSettings::get(None, cx).restore_on_file_reopen && let Some(restoration_data) = Self::project_item_kind() @@ -1397,11 +1385,8 @@ impl ProjectItem for Editor { }); } let (top_row, offset) = restoration_data.scroll_position; - let anchor = Anchor::in_buffer( - *excerpt_id, - buffer_id, - snapshot.anchor_before(Point::new(top_row, 0)), - ); + let anchor = + Anchor::in_buffer(*excerpt_id, snapshot.anchor_before(Point::new(top_row, 0))); editor.set_scroll_anchor(ScrollAnchor { anchor, offset }, window, cx); } @@ -1783,11 +1768,7 @@ impl SearchableItem for Editor { .anchor_after(search_range.start + match_range.start); let end = search_buffer .anchor_before(search_range.start + match_range.end); - Anchor::range_in_buffer( - excerpt_id, - search_buffer.remote_id(), - start..end, - ) + Anchor::range_in_buffer(excerpt_id, start..end) } }), ); diff --git a/crates/editor/src/linked_editing_ranges.rs b/crates/editor/src/linked_editing_ranges.rs index 33635a2ae2009031220ab0a58e99f8b07957de94..ff3096961d646a2a98458319d927a4e2723d0602 100644 --- a/crates/editor/src/linked_editing_ranges.rs +++ b/crates/editor/src/linked_editing_ranges.rs @@ -70,8 +70,8 @@ pub(super) fn refresh_linked_ranges( let cursor_position = selection.head(); let start_position = snapshot.anchor_before(cursor_position); let end_position = snapshot.anchor_after(selection.tail()); - if start_position.buffer_id != end_position.buffer_id - || end_position.buffer_id.is_none() + if start_position.text_anchor.buffer_id != end_position.text_anchor.buffer_id + || end_position.text_anchor.buffer_id.is_none() { // Throw away selections spanning multiple buffers. 
continue; diff --git a/crates/editor/src/lsp_ext.rs b/crates/editor/src/lsp_ext.rs index 36353e8d42527cd59043ab3cf2b6105c534412d9..37cc734ab1ef0a0b677b3e405ff70b461d349a1c 100644 --- a/crates/editor/src/lsp_ext.rs +++ b/crates/editor/src/lsp_ext.rs @@ -37,7 +37,7 @@ where .selections .disjoint_anchors_arc() .iter() - .filter_map(|selection| Some((selection.head(), selection.head().buffer_id?))) + .filter_map(|selection| Some((selection.head(), selection.head().text_anchor.buffer_id?))) .unique_by(|(_, buffer_id)| *buffer_id) .find_map(|(trigger_anchor, buffer_id)| { let buffer = editor.buffer().read(cx).buffer(buffer_id)?; diff --git a/crates/editor/src/rust_analyzer_ext.rs b/crates/editor/src/rust_analyzer_ext.rs index ffa0c017c0eb157df776cc49e0dba51e617e3379..f548db75ad5d8cfe32a59a798b6d23931c34f215 100644 --- a/crates/editor/src/rust_analyzer_ext.rs +++ b/crates/editor/src/rust_analyzer_ext.rs @@ -322,7 +322,11 @@ fn cancel_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? @@ -347,7 +351,11 @@ fn run_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? @@ -372,7 +380,11 @@ fn clear_flycheck_action( .disjoint_anchors_arc() .iter() .find_map(|selection| { - let buffer_id = selection.start.buffer_id.or(selection.end.buffer_id)?; + let buffer_id = selection + .start + .text_anchor + .buffer_id + .or(selection.end.text_anchor.buffer_id)?; let project = project.read(cx); let entry_id = project .buffer_for_id(buffer_id, cx)? 
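[Editor's note, not part of the patch] The many `Anchor::MIN..Anchor::MAX` replacements above route through the new buffer-scoped range constructors that this same commit adds to `crates/text/src/anchor.rs` further down. A hedged sketch of the intended usage; the wrapper function and the `snapshot` parameter are illustrative:

```
use text::{Anchor, BufferSnapshot};

// Build a whole-buffer range whose anchors carry the owning buffer's id,
// instead of the id-less Anchor::MIN..Anchor::MAX used previously.
fn whole_buffer_range(snapshot: &BufferSnapshot) -> std::ops::Range<Anchor> {
    Anchor::min_max_range_for_buffer(snapshot.remote_id())
}
```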
diff --git a/crates/editor/src/test/editor_test_context.rs b/crates/editor/src/test/editor_test_context.rs index 5793bcf576c7ed0e1604c30aada0fb362f65bb9f..cd45a6ec47ad7631404189194a6a0291a6240647 100644 --- a/crates/editor/src/test/editor_test_context.rs +++ b/crates/editor/src/test/editor_test_context.rs @@ -490,11 +490,7 @@ impl EditorTestContext { ); assert_eq!( multibuffer_snapshot - .text_for_range(Anchor::range_in_buffer( - excerpt_id, - snapshot.remote_id(), - range.context.clone() - )) + .text_for_range(Anchor::range_in_buffer(excerpt_id, range.context.clone())) .collect::(), expected_text, "{}", @@ -675,11 +671,7 @@ impl std::fmt::Display for FormatMultiBufferAsMarkedText { } let mut text = multibuffer_snapshot - .text_for_range(Anchor::range_in_buffer( - *excerpt_id, - snapshot.remote_id(), - range.context.clone(), - )) + .text_for_range(Anchor::range_in_buffer(*excerpt_id, range.context.clone())) .collect::(); let selections = selections diff --git a/crates/git_ui/src/commit_view.rs b/crates/git_ui/src/commit_view.rs index 3251ab43f71292d2d46503ef83f61692f385dc76..41fd99982c97967c016d9a59199f22ea7ba6115c 100644 --- a/crates/git_ui/src/commit_view.rs +++ b/crates/git_ui/src/commit_view.rs @@ -223,7 +223,11 @@ impl CommitView { let snapshot = buffer.read(cx).snapshot(); let diff = buffer_diff.read(cx); let diff_hunk_ranges = diff - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &snapshot, cx) + .hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff.buffer_id), + &snapshot, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range.to_point(&snapshot)) .collect::>(); let path = snapshot.file().unwrap().path().clone(); diff --git a/crates/git_ui/src/project_diff.rs b/crates/git_ui/src/project_diff.rs index 715b74db333e78081a245f2fb362426591db79d9..a6de68b789c33ff75b8e6f474f31b7f9f6d8399c 100644 --- a/crates/git_ui/src/project_diff.rs +++ b/crates/git_ui/src/project_diff.rs @@ -383,12 +383,8 @@ impl ProjectDiff { .collect::>(); if !ranges.iter().any(|range| range.start != range.end) { selection = false; - if let Some((excerpt_id, buffer, range)) = self.editor.read(cx).active_excerpt(cx) { - ranges = vec![multi_buffer::Anchor::range_in_buffer( - excerpt_id, - buffer.read(cx).remote_id(), - range, - )]; + if let Some((excerpt_id, _, range)) = self.editor.read(cx).active_excerpt(cx) { + ranges = vec![multi_buffer::Anchor::range_in_buffer(excerpt_id, range)]; } else { ranges = Vec::default(); } @@ -488,7 +484,11 @@ impl ProjectDiff { let snapshot = buffer.read(cx).snapshot(); let diff_read = diff.read(cx); let diff_hunk_ranges = diff_read - .hunks_intersecting_range(Anchor::MIN..Anchor::MAX, &snapshot, cx) + .hunks_intersecting_range( + Anchor::min_max_range_for_buffer(diff_read.buffer_id), + &snapshot, + cx, + ) .map(|diff_hunk| diff_hunk.buffer_range); let conflicts = conflict_addon .conflict_set(snapshot.remote_id()) diff --git a/crates/language/src/buffer_tests.rs b/crates/language/src/buffer_tests.rs index 05402abcad478e2eedb17d31853ab0bc2bd3702c..efef0a08127bc66f9c6d8f21fe5a545dbee20fb1 100644 --- a/crates/language/src/buffer_tests.rs +++ b/crates/language/src/buffer_tests.rs @@ -3427,7 +3427,7 @@ fn test_random_collaboration(cx: &mut App, mut rng: StdRng) { for buffer in &buffers { let buffer = buffer.read(cx).snapshot(); let actual_remote_selections = buffer - .selections_in_range(Anchor::MIN..Anchor::MAX, false) + .selections_in_range(Anchor::min_max_range_for_buffer(buffer.remote_id()), false) .map(|(replica_id, _, _, selections)| (replica_id, selections.collect::>())) 
.collect::>(); let expected_remote_selections = active_selections diff --git a/crates/language/src/syntax_map.rs b/crates/language/src/syntax_map.rs index 33a652b6fdeb32a2adbc1743cf8a70fe453518f5..8574d52ff900563ddfb733c09204caab5eb6ae44 100644 --- a/crates/language/src/syntax_map.rs +++ b/crates/language/src/syntax_map.rs @@ -330,7 +330,7 @@ impl SyntaxSnapshot { let slice = cursor.slice( &SyntaxLayerPosition { depth: depth + 1, - range: Anchor::MIN..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), language: None, }, Bias::Left, @@ -493,7 +493,7 @@ impl SyntaxSnapshot { start_point: Point::zero().to_ts_point(), end_point: text.max_point().to_ts_point(), }], - range: Anchor::MIN..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), mode: ParseMode::Single, }); @@ -515,7 +515,7 @@ impl SyntaxSnapshot { } else { SyntaxLayerPosition { depth: max_depth + 1, - range: Anchor::MAX..Anchor::MAX, + range: Anchor::min_max_range_for_buffer(text.remote_id()), language: None, } }; diff --git a/crates/multi_buffer/src/anchor.rs b/crates/multi_buffer/src/anchor.rs index b8c1680574a86354d92f39c544c202642293f619..51696ba09e4bdb1c6be065f63d3ee7ff634e6b1a 100644 --- a/crates/multi_buffer/src/anchor.rs +++ b/crates/multi_buffer/src/anchor.rs @@ -7,12 +7,9 @@ use std::{ ops::{AddAssign, Range, Sub}, }; use sum_tree::Bias; -use text::BufferId; #[derive(Clone, Copy, Eq, PartialEq, Hash)] pub struct Anchor { - /// Invariant: If buffer id is `None`, excerpt id must be `ExcerptId::min()` or `ExcerptId::max()`. - pub buffer_id: Option, pub excerpt_id: ExcerptId, pub text_anchor: text::Anchor, pub diff_base_anchor: Option, @@ -20,15 +17,14 @@ pub struct Anchor { impl std::fmt::Debug for Anchor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if *self == Self::min() { - return f.write_str("Anchor::MIN"); + if self.is_min() { + return write!(f, "Anchor::min({:?})", self.text_anchor.buffer_id); } - if *self == Self::max() { - return f.write_str("Anchor::MAX"); + if self.is_max() { + return write!(f, "Anchor::max({:?})", self.text_anchor.buffer_id); } f.debug_struct("Anchor") - .field("buffer_id", &self.buffer_id) .field("excerpt_id", &self.excerpt_id) .field("text_anchor", &self.text_anchor) .field("diff_base_anchor", &self.diff_base_anchor) @@ -44,35 +40,20 @@ impl Anchor { } } - pub fn in_buffer( - excerpt_id: ExcerptId, - buffer_id: BufferId, - text_anchor: text::Anchor, - ) -> Self { - debug_assert!( - text_anchor.buffer_id.is_none_or(|id| id == buffer_id), - "buffer id does not match the one in the text anchor: {buffer_id:?} {text_anchor:?}", - ); + pub fn in_buffer(excerpt_id: ExcerptId, text_anchor: text::Anchor) -> Self { Self { - buffer_id: Some(buffer_id), excerpt_id, text_anchor, diff_base_anchor: None, } } - pub fn range_in_buffer( - excerpt_id: ExcerptId, - buffer_id: BufferId, - range: Range, - ) -> Range { - Self::in_buffer(excerpt_id, buffer_id, range.start) - ..Self::in_buffer(excerpt_id, buffer_id, range.end) + pub fn range_in_buffer(excerpt_id: ExcerptId, range: Range) -> Range { + Self::in_buffer(excerpt_id, range.start)..Self::in_buffer(excerpt_id, range.end) } pub fn min() -> Self { Self { - buffer_id: None, excerpt_id: ExcerptId::min(), text_anchor: text::Anchor::MIN, diff_base_anchor: None, @@ -81,13 +62,24 @@ impl Anchor { pub fn max() -> Self { Self { - buffer_id: None, excerpt_id: ExcerptId::max(), text_anchor: text::Anchor::MAX, diff_base_anchor: None, } } + pub fn is_min(&self) -> bool { + self.excerpt_id == 
ExcerptId::min() + && self.text_anchor.is_min() + && self.diff_base_anchor.is_none() + } + + pub fn is_max(&self) -> bool { + self.excerpt_id == ExcerptId::max() + && self.text_anchor.is_max() + && self.diff_base_anchor.is_none() + } + pub fn cmp(&self, other: &Anchor, snapshot: &MultiBufferSnapshot) -> Ordering { if self == other { return Ordering::Equal; @@ -101,8 +93,8 @@ impl Anchor { return excerpt_id_cmp; } if self_excerpt_id == ExcerptId::max() - && self.text_anchor == text::Anchor::MAX - && self.text_anchor == text::Anchor::MAX + && self.text_anchor.is_max() + && self.text_anchor.is_max() && self.diff_base_anchor.is_none() && other.diff_base_anchor.is_none() { @@ -147,7 +139,6 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: Some(excerpt.buffer_id), excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_left(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { @@ -171,7 +162,6 @@ impl Anchor { && let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { return Self { - buffer_id: Some(excerpt.buffer_id), excerpt_id: excerpt.id, text_anchor: self.text_anchor.bias_right(&excerpt.buffer), diff_base_anchor: self.diff_base_anchor.map(|a| { @@ -202,8 +192,8 @@ impl Anchor { } pub fn is_valid(&self, snapshot: &MultiBufferSnapshot) -> bool { - if *self == Anchor::min() || self.excerpt_id == ExcerptId::max() { - !snapshot.is_empty() + if self.is_min() || self.is_max() { + true } else if let Some(excerpt) = snapshot.excerpt(self.excerpt_id) { (self.text_anchor == excerpt.range.context.start || self.text_anchor == excerpt.range.context.end diff --git a/crates/multi_buffer/src/multi_buffer.rs b/crates/multi_buffer/src/multi_buffer.rs index 7922692d30eb3a79e835f5e4b94313c3ea886a7c..2e59eaa621c79bc8d0d0a149704cb55314e9b70d 100644 --- a/crates/multi_buffer/src/multi_buffer.rs +++ b/crates/multi_buffer/src/multi_buffer.rs @@ -158,12 +158,13 @@ impl MultiBufferDiffHunk { pub fn is_created_file(&self) -> bool { self.diff_base_byte_range == (BufferOffset(0)..BufferOffset(0)) - && self.buffer_range == (text::Anchor::MIN..text::Anchor::MAX) + && self.buffer_range.start.is_min() + && self.buffer_range.end.is_max() } pub fn multi_buffer_range(&self) -> Range { - let start = Anchor::in_buffer(self.excerpt_id, self.buffer_id, self.buffer_range.start); - let end = Anchor::in_buffer(self.excerpt_id, self.buffer_id, self.buffer_range.end); + let start = Anchor::in_buffer(self.excerpt_id, self.buffer_range.start); + let end = Anchor::in_buffer(self.excerpt_id, self.buffer_range.end); start..end } } @@ -1028,9 +1029,12 @@ impl MultiBuffer { }, ); this.singleton = true; + let buffer_id = buffer.read(cx).remote_id(); this.push_excerpts( buffer, - [ExcerptRange::new(text::Anchor::MIN..text::Anchor::MAX)], + [ExcerptRange::new(text::Anchor::min_max_range_for_buffer( + buffer_id, + ))], cx, ); this @@ -1912,7 +1916,7 @@ impl MultiBuffer { } pub fn buffer_for_anchor(&self, anchor: Anchor, cx: &App) -> Option> { - if let Some(buffer_id) = anchor.buffer_id { + if let Some(buffer_id) = anchor.text_anchor.buffer_id { self.buffer(buffer_id) } else { let (_, buffer, _) = self.excerpt_containing(anchor, cx)?; @@ -1975,7 +1979,7 @@ impl MultiBuffer { found.map(|(point, excerpt_id)| { let text_anchor = snapshot.anchor_after(point); - Anchor::in_buffer(excerpt_id, snapshot.remote_id(), text_anchor) + Anchor::in_buffer(excerpt_id, text_anchor) }) } @@ -1990,7 +1994,7 @@ impl MultiBuffer { if range.context.start.cmp(&anchor, &snapshot).is_le() && 
range.context.end.cmp(&anchor, &snapshot).is_ge() { - return Some(Anchor::in_buffer(excerpt_id, snapshot.remote_id(), anchor)); + return Some(Anchor::in_buffer(excerpt_id, anchor)); } } @@ -2112,7 +2116,7 @@ impl MultiBuffer { let mut error = None; let mut futures = Vec::new(); for anchor in anchors { - if let Some(buffer_id) = anchor.buffer_id { + if let Some(buffer_id) = anchor.text_anchor.buffer_id { if let Some(buffer) = self.buffers.get(&buffer_id) { buffer.buffer.update(cx, |buffer, _| { futures.push(buffer.wait_for_anchors([anchor.text_anchor])) @@ -2143,7 +2147,11 @@ impl MultiBuffer { ) -> Option<(Entity, language::Anchor)> { let snapshot = self.read(cx); let anchor = snapshot.anchor_before(position); - let buffer = self.buffers.get(&anchor.buffer_id?)?.buffer.clone(); + let buffer = self + .buffers + .get(&anchor.text_anchor.buffer_id?)? + .buffer + .clone(); Some((buffer, anchor.text_anchor)) } @@ -2205,7 +2213,7 @@ impl MultiBuffer { .get(&buffer_id) .is_none_or(|old_diff| !new_diff.base_texts_eq(old_diff)); - snapshot.diffs.insert(buffer_id, new_diff); + snapshot.diffs.insert_or_replace(buffer_id, new_diff); let mut excerpt_edits = Vec::new(); for locator in &buffer_state.excerpts { @@ -2402,7 +2410,11 @@ impl MultiBuffer { pub fn add_diff(&mut self, diff: Entity, cx: &mut Context) { let buffer_id = diff.read(cx).buffer_id; - self.buffer_diff_changed(diff.clone(), text::Anchor::MIN..text::Anchor::MAX, cx); + self.buffer_diff_changed( + diff.clone(), + text::Anchor::min_max_range_for_buffer(buffer_id), + cx, + ); self.diffs.insert(buffer_id, DiffState::new(diff, cx)); } @@ -2500,16 +2512,8 @@ impl MultiBuffer { if last_hunk_row.is_some_and(|row| row >= diff_hunk.row_range.start) { continue; } - let start = Anchor::in_buffer( - diff_hunk.excerpt_id, - diff_hunk.buffer_id, - diff_hunk.buffer_range.start, - ); - let end = Anchor::in_buffer( - diff_hunk.excerpt_id, - diff_hunk.buffer_id, - diff_hunk.buffer_range.end, - ); + let start = Anchor::in_buffer(diff_hunk.excerpt_id, diff_hunk.buffer_range.start); + let end = Anchor::in_buffer(diff_hunk.excerpt_id, diff_hunk.buffer_range.end); let start = snapshot.excerpt_offset_for_anchor(&start); let end = snapshot.excerpt_offset_for_anchor(&end); last_hunk_row = Some(diff_hunk.row_range.start); @@ -3945,9 +3949,7 @@ impl MultiBufferSnapshot { if hunk_end >= current_position { continue; } - let start = - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, hunk.buffer_range.start) - .to_point(self); + let start = Anchor::in_buffer(excerpt.id, hunk.buffer_range.start).to_point(self); return Some(MultiBufferRow(start.row)); } } @@ -3964,8 +3966,7 @@ impl MultiBufferSnapshot { let Some(hunk) = hunks.next() else { continue; }; - let start = Anchor::in_buffer(excerpt.id, excerpt.buffer_id, hunk.buffer_range.start) - .to_point(self); + let start = Anchor::in_buffer(excerpt.id, hunk.buffer_range.start).to_point(self); return Some(MultiBufferRow(start.row)); } } @@ -4955,7 +4956,7 @@ impl MultiBufferSnapshot { { text_anchor = excerpt.range.context.end; } - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor) + Anchor::in_buffer(excerpt.id, text_anchor) } else if let Some(excerpt) = prev_excerpt { let mut text_anchor = excerpt .range @@ -4968,7 +4969,7 @@ impl MultiBufferSnapshot { { text_anchor = excerpt.range.context.start; } - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor) + Anchor::in_buffer(excerpt.id, text_anchor) } else if anchor.text_anchor.bias == Bias::Left { Anchor::min() } else { @@ -5050,7 +5051,7 @@ impl 
MultiBufferSnapshot { let buffer_start = excerpt.range.context.start.to_offset(&excerpt.buffer); let text_anchor = excerpt.clip_anchor(excerpt.buffer.anchor_at(buffer_start + overshoot, bias)); - let anchor = Anchor::in_buffer(excerpt.id, excerpt.buffer_id, text_anchor); + let anchor = Anchor::in_buffer(excerpt.id, text_anchor); match diff_base_anchor { Some(diff_base_anchor) => anchor.with_diff_base_anchor(diff_base_anchor), None => anchor, @@ -5066,7 +5067,11 @@ impl MultiBufferSnapshot { /// Wraps the [`text::Anchor`] in a [`multi_buffer::Anchor`] if this multi-buffer is a singleton. pub fn as_singleton_anchor(&self, text_anchor: text::Anchor) -> Option { let (excerpt, buffer, _) = self.as_singleton()?; - Some(Anchor::in_buffer(*excerpt, buffer, text_anchor)) + if text_anchor.buffer_id.is_none_or(|id| id == buffer) { + Some(Anchor::in_buffer(*excerpt, text_anchor)) + } else { + None + } } /// Returns an anchor for the given excerpt and text anchor, @@ -5099,12 +5104,8 @@ impl MultiBufferSnapshot { match text_anchor.buffer_id { Some(buffer_id) if buffer_id == excerpt.buffer_id => (), Some(_) => return None, - None if text_anchor == text::Anchor::MAX || text_anchor == text::Anchor::MIN => { - return Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - text_anchor, - )); + None if text_anchor.is_max() || text_anchor.is_min() => { + return Some(Anchor::in_buffer(excerpt.id, text_anchor)); } None => return None, } @@ -5116,11 +5117,7 @@ impl MultiBufferSnapshot { return None; } - Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - text_anchor, - )) + Some(Anchor::in_buffer(excerpt.id, text_anchor)) } pub fn context_range_for_excerpt(&self, excerpt_id: ExcerptId) -> Option> { @@ -5128,7 +5125,7 @@ impl MultiBufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - if *anchor == Anchor::min() || anchor.excerpt_id == ExcerptId::max() { + if anchor.is_min() || anchor.is_max() { // todo(lw): should be `!self.is_empty()` true } else if let Some(excerpt) = self.excerpt(anchor.excerpt_id) { @@ -5998,7 +5995,7 @@ impl MultiBufferSnapshot { .. } = self.excerpt(anchor.excerpt_id)?; if cfg!(debug_assertions) { - match anchor.buffer_id { + match anchor.text_anchor.buffer_id { // we clearly are hitting this according to sentry, but in what situations can this occur? 
Some(anchor_buffer_id) => { assert_eq!( @@ -6006,7 +6003,7 @@ impl MultiBufferSnapshot { "anchor {anchor:?} does not match with resolved excerpt {excerpt:?}" ) } - None => assert_eq!(anchor, Anchor::max()), + None => assert!(anchor.is_max()), } }; Some(( @@ -6019,19 +6016,18 @@ impl MultiBufferSnapshot { depth: item.depth, source_range_for_text: Anchor::range_in_buffer( excerpt_id, - buffer_id, item.source_range_for_text, ), - range: Anchor::range_in_buffer(excerpt_id, buffer_id, item.range), + range: Anchor::range_in_buffer(excerpt_id, item.range), text: item.text, highlight_ranges: item.highlight_ranges, name_ranges: item.name_ranges, - body_range: item.body_range.map(|body_range| { - Anchor::range_in_buffer(excerpt_id, buffer_id, body_range) - }), - annotation_range: item.annotation_range.map(|body_range| { - Anchor::range_in_buffer(excerpt_id, buffer_id, body_range) - }), + body_range: item + .body_range + .map(|body_range| Anchor::range_in_buffer(excerpt_id, body_range)), + annotation_range: item + .annotation_range + .map(|body_range| Anchor::range_in_buffer(excerpt_id, body_range)), }) }) .collect(), @@ -6180,7 +6176,7 @@ impl MultiBufferSnapshot { } pub fn buffer_id_for_anchor(&self, anchor: Anchor) -> Option { - if let Some(id) = anchor.buffer_id { + if let Some(id) = anchor.text_anchor.buffer_id { return Some(id); } let excerpt = self.excerpt_containing(anchor..anchor)?; @@ -6212,10 +6208,8 @@ impl MultiBufferSnapshot { .selections_in_range(query_range, include_local) .flat_map(move |(replica_id, line_mode, cursor_shape, selections)| { selections.map(move |selection| { - let mut start = - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, selection.start); - let mut end = - Anchor::in_buffer(excerpt.id, excerpt.buffer_id, selection.end); + let mut start = Anchor::in_buffer(excerpt.id, selection.start); + let mut end = Anchor::in_buffer(excerpt.id, selection.end); if range.start.cmp(&start, self).is_gt() { start = range.start; } @@ -6687,7 +6681,8 @@ impl Excerpt { } fn contains(&self, anchor: &Anchor) -> bool { - (anchor.buffer_id == None || anchor.buffer_id == Some(self.buffer_id)) + (anchor.text_anchor.buffer_id == None + || anchor.text_anchor.buffer_id == Some(self.buffer_id)) && self .range .context @@ -6723,19 +6718,11 @@ impl<'a> MultiBufferExcerpt<'a> { } pub fn start_anchor(&self) -> Anchor { - Anchor::in_buffer( - self.excerpt.id, - self.excerpt.buffer_id, - self.excerpt.range.context.start, - ) + Anchor::in_buffer(self.excerpt.id, self.excerpt.range.context.start) } pub fn end_anchor(&self) -> Anchor { - Anchor::in_buffer( - self.excerpt.id, - self.excerpt.buffer_id, - self.excerpt.range.context.end, - ) + Anchor::in_buffer(self.excerpt.id, self.excerpt.range.context.end) } pub fn buffer(&self) -> &'a BufferSnapshot { diff --git a/crates/multi_buffer/src/multi_buffer_tests.rs b/crates/multi_buffer/src/multi_buffer_tests.rs index 9517f1f76ece2f34aa5c95eb27b408e1ef004b99..e95d222c651999645a6966195be2da31347f1409 100644 --- a/crates/multi_buffer/src/multi_buffer_tests.rs +++ b/crates/multi_buffer/src/multi_buffer_tests.rs @@ -3401,14 +3401,11 @@ fn test_summaries_for_anchors(cx: &mut TestAppContext) { ), ); - let id_1 = buffer_1.read_with(cx, |buffer, _| buffer.remote_id()); - let id_2 = buffer_2.read_with(cx, |buffer, _| buffer.remote_id()); - - let anchor_1 = Anchor::in_buffer(ids[0], id_1, text::Anchor::MIN); + let anchor_1 = Anchor::in_buffer(ids[0], text::Anchor::MIN); let point_1 = snapshot.summaries_for_anchors::([&anchor_1])[0]; assert_eq!(point_1, Point::new(0, 0)); 
- let anchor_2 = Anchor::in_buffer(ids[1], id_2, text::Anchor::MIN); + let anchor_2 = Anchor::in_buffer(ids[1], text::Anchor::MIN); let point_2 = snapshot.summaries_for_anchors::([&anchor_2])[0]; assert_eq!(point_2, Point::new(3, 0)); } diff --git a/crates/multi_buffer/src/path_key.rs b/crates/multi_buffer/src/path_key.rs index 530bb4aa6435fb9a3aa768d84a2bbcf829eb72c6..5d9b653a2b8c9df8c854ca01c47c57b42c159f1e 100644 --- a/crates/multi_buffer/src/path_key.rs +++ b/crates/multi_buffer/src/path_key.rs @@ -56,11 +56,7 @@ impl MultiBuffer { let excerpt_id = self.excerpts_by_path.get(path)?.first()?; let snapshot = self.read(cx); let excerpt = snapshot.excerpt(*excerpt_id)?; - Some(Anchor::in_buffer( - excerpt.id, - excerpt.buffer_id, - excerpt.range.context.start, - )) + Some(Anchor::in_buffer(excerpt.id, excerpt.range.context.start)) } pub fn excerpt_paths(&self) -> impl Iterator { @@ -263,7 +259,6 @@ impl MultiBuffer { for range in ranges.by_ref().take(range_count) { let range = Anchor::range_in_buffer( excerpt_id, - buffer_snapshot.remote_id(), buffer_snapshot.anchor_before(&range.primary.start) ..buffer_snapshot.anchor_after(&range.primary.end), ); diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index 36cd9d076bb428f37c898a142fa7f3d1da887918..cb857a72898bbd6f4161a0f4d218394efeab5c7e 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -2044,8 +2044,9 @@ impl OutlinePanel { PanelEntry::Fs(FsEntry::ExternalFile(..)) => None, PanelEntry::Search(SearchEntry { match_range, .. }) => match_range .start + .text_anchor .buffer_id - .or(match_range.end.buffer_id) + .or(match_range.end.text_anchor.buffer_id) .map(|buffer_id| { outline_panel.update(cx, |outline_panel, cx| { outline_panel diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index f5d931737dff9a873fc5d63e5445b2b5d49bab56..4f7022a264db18f96150c369fadb957556e33b75 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -2746,7 +2746,7 @@ impl LocalLspStore { let actions = lsp_store .update(cx, move |this, cx| { let request = GetCodeActions { - range: text::Anchor::MIN..text::Anchor::MAX, + range: text::Anchor::min_max_range_for_buffer(buffer.read(cx).remote_id()), kinds: Some(code_action_kinds), }; let server = LanguageServerToQuery::Other(language_server_id); diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 149d30a5283c13f71477fc6776d5ca7f61f6205d..beebf5a1d133eb75fdd98184ddf7880b9cedc7e0 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -4002,7 +4002,8 @@ impl Project { ) -> Task>> { let snapshot = buffer_handle.read(cx).snapshot(); - let captures = snapshot.debug_variables_query(Anchor::MIN..range.end); + let captures = + snapshot.debug_variables_query(Anchor::min_for_buffer(snapshot.remote_id())..range.end); let row = snapshot .summary_for_anchor::(&range.end) diff --git a/crates/sum_tree/src/tree_map.rs b/crates/sum_tree/src/tree_map.rs index 3e56194dddd9910f819e91c209f6701b55efdd02..e58f7a65dd5d13ca67d4433bd25118ffb55d1169 100644 --- a/crates/sum_tree/src/tree_map.rs +++ b/crates/sum_tree/src/tree_map.rs @@ -72,6 +72,12 @@ impl TreeMap { self.0.insert_or_replace(MapEntry { key, value }, ()); } + pub fn insert_or_replace(&mut self, key: K, value: V) -> Option { + self.0 + .insert_or_replace(MapEntry { key, value }, ()) + .map(|it| it.value) + } + pub fn extend(&mut self, iter: impl IntoIterator) { let edits: Vec<_> 
= iter .into_iter() diff --git a/crates/text/src/anchor.rs b/crates/text/src/anchor.rs index 63a9ff6f1863041594fba7ebea0b3feaba6b8db7..c6d47a1e233b2fdf58fbc73adb622fc801832335 100644 --- a/crates/text/src/anchor.rs +++ b/crates/text/src/anchor.rs @@ -18,11 +18,11 @@ pub struct Anchor { impl Debug for Anchor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if *self == Self::MIN { - return f.write_str("Anchor::MIN"); + if self.is_min() { + return write!(f, "Anchor::min({:?})", self.buffer_id); } - if *self == Self::MAX { - return f.write_str("Anchor::MAX"); + if self.is_max() { + return write!(f, "Anchor::max({:?})", self.buffer_id); } f.debug_struct("Anchor") @@ -49,6 +49,36 @@ impl Anchor { buffer_id: None, }; + pub fn min_for_buffer(buffer_id: BufferId) -> Self { + Self { + timestamp: clock::Lamport::MIN, + offset: usize::MIN, + bias: Bias::Left, + buffer_id: Some(buffer_id), + } + } + + pub fn max_for_buffer(buffer_id: BufferId) -> Self { + Self { + timestamp: clock::Lamport::MAX, + offset: usize::MAX, + bias: Bias::Right, + buffer_id: Some(buffer_id), + } + } + + pub fn min_min_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + let min = Self::min_for_buffer(buffer_id); + min..min + } + pub fn max_max_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + let max = Self::max_for_buffer(buffer_id); + max..max + } + pub fn min_max_range_for_buffer(buffer_id: BufferId) -> std::ops::Range { + Self::min_for_buffer(buffer_id)..Self::max_for_buffer(buffer_id) + } + pub fn cmp(&self, other: &Anchor, buffer: &BufferSnapshot) -> Ordering { let fragment_id_comparison = if self.timestamp == other.timestamp { Ordering::Equal @@ -109,7 +139,7 @@ impl Anchor { /// Returns true when the [`Anchor`] is located inside a visible fragment. 
pub fn is_valid(&self, buffer: &BufferSnapshot) -> bool { - if *self == Anchor::MIN || *self == Anchor::MAX { + if self.is_min() || self.is_max() { true } else if self.buffer_id.is_none_or(|id| id != buffer.remote_id) { false @@ -127,6 +157,18 @@ impl Anchor { item.is_some_and(|fragment| fragment.visible) } } + + pub fn is_min(&self) -> bool { + self.timestamp == clock::Lamport::MIN + && self.offset == usize::MIN + && self.bias == Bias::Left + } + + pub fn is_max(&self) -> bool { + self.timestamp == clock::Lamport::MAX + && self.offset == usize::MAX + && self.bias == Bias::Right + } } pub trait OffsetRangeExt { diff --git a/crates/text/src/text.rs b/crates/text/src/text.rs index 5f87e5441d2bb97863b0086ac273e4d4d8acfdc9..c16c6a7b27e2b1fc4c945007395dbe26f98adcda 100644 --- a/crates/text/src/text.rs +++ b/crates/text/src/text.rs @@ -1652,10 +1652,7 @@ impl Buffer { ) -> impl 'static + Future> + use { let mut futures = Vec::new(); for anchor in anchors { - if !self.version.observed(anchor.timestamp) - && anchor != Anchor::MAX - && anchor != Anchor::MIN - { + if !self.version.observed(anchor.timestamp) && !anchor.is_max() && !anchor.is_min() { let (tx, rx) = oneshot::channel(); self.edit_id_resolvers .entry(anchor.timestamp) @@ -2258,9 +2255,9 @@ impl BufferSnapshot { let mut position = D::zero(()); anchors.map(move |(anchor, payload)| { - if *anchor == Anchor::MIN { + if anchor.is_min() { return (D::zero(()), payload); - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { return (D::from_text_summary(&self.visible_text.summary()), payload); } @@ -2318,9 +2315,9 @@ impl BufferSnapshot { } pub fn offset_for_anchor(&self, anchor: &Anchor) -> usize { - if *anchor == Anchor::MIN { + if anchor.is_min() { 0 - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { self.visible_text.len() } else { debug_assert!(anchor.buffer_id == Some(self.remote_id)); @@ -2393,9 +2390,9 @@ impl BufferSnapshot { } fn try_fragment_id_for_anchor(&self, anchor: &Anchor) -> Option<&Locator> { - if *anchor == Anchor::MIN { + if anchor.is_min() { Some(Locator::min_ref()) - } else if *anchor == Anchor::MAX { + } else if anchor.is_max() { Some(Locator::max_ref()) } else { let anchor_key = InsertionFragmentKey { @@ -2440,9 +2437,9 @@ impl BufferSnapshot { fn anchor_at_offset(&self, offset: usize, bias: Bias) -> Anchor { if bias == Bias::Left && offset == 0 { - Anchor::MIN + Anchor::min_for_buffer(self.remote_id) } else if bias == Bias::Right && offset == self.len() { - Anchor::MAX + Anchor::max_for_buffer(self.remote_id) } else { if cfg!(debug_assertions) { self.visible_text.assert_char_boundary(offset); @@ -2462,8 +2459,8 @@ impl BufferSnapshot { } pub fn can_resolve(&self, anchor: &Anchor) -> bool { - *anchor == Anchor::MIN - || *anchor == Anchor::MAX + anchor.is_min() + || anchor.is_max() || (Some(self.remote_id) == anchor.buffer_id && self.version.observed(anchor.timestamp)) } diff --git a/crates/vim/src/motion.rs b/crates/vim/src/motion.rs index b0faa7bb068135a3feafc507e4f8a6ed97863e8c..dc108b0957d993b2229e8c04fed5923e9de250d4 100644 --- a/crates/vim/src/motion.rs +++ b/crates/vim/src/motion.rs @@ -2268,17 +2268,13 @@ fn go_to_line(map: &DisplaySnapshot, display_point: DisplayPoint, line: usize) - ..language::ToOffset::to_offset(&range.context.end, buffer); if offset >= excerpt_range.start && offset <= excerpt_range.end { let text_anchor = buffer.anchor_after(offset); - let anchor = Anchor::in_buffer(excerpt, buffer.remote_id(), text_anchor); + let anchor = Anchor::in_buffer(excerpt, text_anchor); 
return anchor.to_display_point(map); } else if offset <= excerpt_range.start { - let anchor = Anchor::in_buffer(excerpt, buffer.remote_id(), range.context.start); + let anchor = Anchor::in_buffer(excerpt, range.context.start); return anchor.to_display_point(map); } else { - last_position = Some(Anchor::in_buffer( - excerpt, - buffer.remote_id(), - range.context.end, - )); + last_position = Some(Anchor::in_buffer(excerpt, range.context.end)); } } diff --git a/crates/vim/src/state.rs b/crates/vim/src/state.rs index eba4476ea878932518dc8a3951e04f4c6ea96d29..e96fd3a329e95311eeb73b87b53acbe76939f0cd 100644 --- a/crates/vim/src/state.rs +++ b/crates/vim/src/state.rs @@ -606,7 +606,7 @@ impl MarksState { let text_anchors = anchors.get(name)?; let anchors = text_anchors .iter() - .map(|anchor| Anchor::in_buffer(excerpt_id, buffer_id, *anchor)) + .map(|anchor| Anchor::in_buffer(excerpt_id, *anchor)) .collect(); return Some(Mark::Local(anchors)); } From 552bc027838e6b6bd60a60627878ce1eabbfc990 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 25 Nov 2025 17:26:44 +0100 Subject: [PATCH 0369/1030] git: Bring back auto-commit suggestions (#43470) This got accidentally regressed in https://github.com/zed-industries/zed/pull/42149. Release Notes: - Fixed displaying auto-commit suggestions for single staged entries. --- crates/git_ui/src/git_panel.rs | 218 +++++++++++++++++++++++++++++++-- 1 file changed, 205 insertions(+), 13 deletions(-) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index a6c6113a33b61cd16f007b6d2d818e42ad2a191e..4a5cd56ec90fd95fe94d55edfdeb7e2114fea820 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -2868,20 +2868,19 @@ impl GitPanel { if ops.staged() { self.single_staged_entry = single_staged_entry; } + } else { + self.single_staged_entry = single_staged_entry; } - } else if repo.pending_ops_summary().item_summary.staging_count == 1 { - self.single_staged_entry = repo.pending_ops().find_map(|ops| { - if ops.staging() { - repo.status_for_path(&ops.repo_path) - .map(|status| GitStatusEntry { - repo_path: ops.repo_path.clone(), - status: status.status, - staging: StageStatus::Staged, - }) - } else { - None - } - }); + } else if repo.pending_ops_summary().item_summary.staging_count == 1 + && let Some(ops) = repo.pending_ops().find(|ops| ops.staging()) + { + self.single_staged_entry = + repo.status_for_path(&ops.repo_path) + .map(|status| GitStatusEntry { + repo_path: ops.repo_path.clone(), + status: status.status, + staging: StageStatus::Staged, + }); } } @@ -5942,4 +5941,197 @@ mod tests { "}; assert_eq!(result, expected); } + + #[gpui::test] + async fn test_suggest_commit_message(cx: &mut TestAppContext) { + init_test(cx); + + let fs = FakeFs::new(cx.background_executor.clone()); + fs.insert_tree( + path!("/project"), + json!({ + ".git": {}, + "tracked": "tracked\n", + "untracked": "\n", + }), + ) + .await; + + fs.set_head_and_index_for_repo( + path!("/project/.git").as_ref(), + &[("tracked", "old tracked\n".into())], + ); + + let project = Project::test(fs.clone(), [Path::new(path!("/project"))], cx).await; + let workspace = + cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx)); + let cx = &mut VisualTestContext::from_window(*workspace, cx); + let panel = workspace.update(cx, GitPanel::new).unwrap(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * 
UPDATE_DEBOUNCE); + handle.await; + + let entries = panel.read_with(cx, |panel, _| panel.entries.clone()); + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [] untracked + // + // The commit message should now read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + + let first_status_entry = entries[1].clone(); + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&first_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [x] tracked + // - Untracked + // - [] untracked + // + // The commit message should still read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + + let second_status_entry = entries[3].clone(); + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&second_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [x] tracked + // - Untracked + // - [x] untracked + // + // The commit message should now read: + // "Enter commit message" + // (which means we should see None returned). 
+ let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert!(message.is_none()); + + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&first_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [x] untracked + // + // The commit message should now read: + // "Update untracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Create untracked".to_string())); + + panel.update_in(cx, |panel, window, cx| { + panel.toggle_staged_for_entry(&second_status_entry, window, cx); + }); + + cx.read(|cx| { + project + .read(cx) + .worktrees(cx) + .next() + .unwrap() + .read(cx) + .as_local() + .unwrap() + .scan_complete() + }) + .await; + + cx.executor().run_until_parked(); + + let handle = cx.update_window_entity(&panel, |panel, _, _| { + std::mem::replace(&mut panel.update_visible_entries_task, Task::ready(())) + }); + cx.executor().advance_clock(2 * UPDATE_DEBOUNCE); + handle.await; + + // GitPanel + // - Tracked: + // - [] tracked + // - Untracked + // - [] untracked + // + // The commit message should now read: + // "Update tracked" + let message = panel.update(cx, |panel, cx| panel.suggest_commit_message(cx)); + assert_eq!(message, Some("Update tracked".to_string())); + } } From 2053fea0a7cebdd160c55e02e13cb9624b9f4efa Mon Sep 17 00:00:00 2001 From: "Joseph T. 
Lyons" Date: Tue, 25 Nov 2025 11:28:33 -0500 Subject: [PATCH 0370/1030] Add collaboration redirects (#43471) Redirect: https://zed.dev/docs/collaboration -> https://zed.dev/docs/collaboration/overview https://zed.dev/docs/channels -> https://zed.dev/docs/collaboration/channels Release Notes: - N/A --- docs/book.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/book.toml b/docs/book.toml index 60ddc5ac515cb73f7b0b4f2f8c2c193bdddf228b..2bb57c5c08ea2421aa9b8a2fb47fdc9521d32a39 100644 --- a/docs/book.toml +++ b/docs/book.toml @@ -56,6 +56,10 @@ enable = false "/model-improvement.html" = "/docs/ai/ai-improvement.html" "/ai/temperature.html" = "/docs/ai/agent-settings.html#model-temperature" +# Collaboration +"/channels.html" = "/docs/collaboration/channels.html" +"/collaboration.html" = "/docs/collaboration/overview.html" + # Community "/community/feedback.html" = "/community-links" "/conversations.html" = "/community-links" From 8a992703a740caf440586eccb231575f7aa90727 Mon Sep 17 00:00:00 2001 From: Richard Feldman Date: Tue, 25 Nov 2025 12:15:55 -0500 Subject: [PATCH 0371/1030] Add Gemini 3 support to Copilot (#43096) Closes #43024 Release Notes: - Add support for Gemini 3 to Copilot --- crates/copilot/src/copilot_chat.rs | 20 ++- crates/copilot/src/copilot_responses.rs | 3 +- .../src/provider/copilot_chat.rs | 156 ++++++++++++++++++ 3 files changed, 171 insertions(+), 8 deletions(-) diff --git a/crates/copilot/src/copilot_chat.rs b/crates/copilot/src/copilot_chat.rs index d4051701f72331bf5fc25fcd634002f0206ba529..52a3631791ecaf4e1f7b2bc935be37816f2b25de 100644 --- a/crates/copilot/src/copilot_chat.rs +++ b/crates/copilot/src/copilot_chat.rs @@ -294,6 +294,10 @@ pub enum ChatMessage { content: ChatMessageContent, #[serde(default, skip_serializing_if = "Vec::is_empty")] tool_calls: Vec, + #[serde(default, skip_serializing_if = "Option::is_none")] + reasoning_opaque: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + reasoning_text: Option, }, User { content: ChatMessageContent, @@ -386,6 +390,8 @@ pub struct ResponseDelta { pub role: Option, #[serde(default)] pub tool_calls: Vec, + pub reasoning_opaque: Option, + pub reasoning_text: Option, } #[derive(Deserialize, Debug, Eq, PartialEq)] pub struct ToolCallChunk { @@ -786,13 +792,13 @@ async fn stream_completion( is_user_initiated: bool, ) -> Result>> { let is_vision_request = request.messages.iter().any(|message| match message { - ChatMessage::User { content } - | ChatMessage::Assistant { content, .. } - | ChatMessage::Tool { content, .. } => { - matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. }))) - } - _ => false, - }); + ChatMessage::User { content } + | ChatMessage::Assistant { content, .. } + | ChatMessage::Tool { content, .. } => { + matches!(content, ChatMessageContent::Multipart(parts) if parts.iter().any(|part| matches!(part, ChatMessagePart::Image { .. 
}))) + } + _ => false, + }); let request_initiator = if is_user_initiated { "user" } else { "agent" }; diff --git a/crates/copilot/src/copilot_responses.rs b/crates/copilot/src/copilot_responses.rs index 938577e224bcf4af440c3bd646cd1910ec1fbd13..2da2eb394b5fc5ba88c8dd3007df394a2dbc15bf 100644 --- a/crates/copilot/src/copilot_responses.rs +++ b/crates/copilot/src/copilot_responses.rs @@ -313,7 +313,8 @@ pub async fn stream_response( }; let is_streaming = request.stream; - let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?; + let json = serde_json::to_string(&request)?; + let request = request_builder.body(AsyncBody::from(json))?; let mut response = client.send(request).await?; if !response.status().is_success() { diff --git a/crates/language_models/src/provider/copilot_chat.rs b/crates/language_models/src/provider/copilot_chat.rs index 1d0410c0cfff5f0f757120c9b91432593c8c1053..92ac342a39ff04ae42f5b01b5777a5d16563c37f 100644 --- a/crates/language_models/src/provider/copilot_chat.rs +++ b/crates/language_models/src/provider/copilot_chat.rs @@ -367,12 +367,16 @@ pub fn map_to_language_model_completion_events( struct State { events: Pin>>>, tool_calls_by_index: HashMap, + reasoning_opaque: Option, + reasoning_text: Option, } futures::stream::unfold( State { events, tool_calls_by_index: HashMap::default(), + reasoning_opaque: None, + reasoning_text: None, }, move |mut state| async move { if let Some(event) = state.events.next().await { @@ -403,6 +407,14 @@ pub fn map_to_language_model_completion_events( events.push(Ok(LanguageModelCompletionEvent::Text(content))); } + // Capture reasoning data from the delta (e.g. for Gemini 3) + if let Some(opaque) = delta.reasoning_opaque.clone() { + state.reasoning_opaque = Some(opaque); + } + if let Some(text) = delta.reasoning_text.clone() { + state.reasoning_text = Some(text); + } + for (index, tool_call) in delta.tool_calls.iter().enumerate() { let tool_index = tool_call.index.unwrap_or(index); let entry = state.tool_calls_by_index.entry(tool_index).or_default(); @@ -445,6 +457,32 @@ pub fn map_to_language_model_completion_events( ))); } Some("tool_calls") => { + // Gemini 3 models send reasoning_opaque/reasoning_text that must + // be preserved and sent back in subsequent requests. Emit as + // ReasoningDetails so the agent stores it in the message. 
+ if state.reasoning_opaque.is_some() + || state.reasoning_text.is_some() + { + let mut details = serde_json::Map::new(); + if let Some(opaque) = state.reasoning_opaque.take() { + details.insert( + "reasoning_opaque".to_string(), + serde_json::Value::String(opaque), + ); + } + if let Some(text) = state.reasoning_text.take() { + details.insert( + "reasoning_text".to_string(), + serde_json::Value::String(text), + ); + } + events.push(Ok( + LanguageModelCompletionEvent::ReasoningDetails( + serde_json::Value::Object(details), + ), + )); + } + events.extend(state.tool_calls_by_index.drain().map( |(_, tool_call)| { // The model can output an empty string @@ -807,6 +845,22 @@ fn into_copilot_chat( buffer }; + // Extract reasoning_opaque and reasoning_text from reasoning_details + let (reasoning_opaque, reasoning_text) = + if let Some(details) = &message.reasoning_details { + let opaque = details + .get("reasoning_opaque") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + let text = details + .get("reasoning_text") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + (opaque, text) + } else { + (None, None) + }; + messages.push(ChatMessage::Assistant { content: if text_content.is_empty() { ChatMessageContent::empty() @@ -814,6 +868,8 @@ fn into_copilot_chat( text_content.into() }, tool_calls, + reasoning_opaque, + reasoning_text, }); } Role::System => messages.push(ChatMessage::System { @@ -1317,6 +1373,106 @@ mod tests { other => panic!("expected HttpResponseError, got {:?}", other), } } + + #[test] + fn chat_completions_stream_maps_reasoning_data() { + use copilot::copilot_chat::ResponseEvent; + + let events = vec![ + ResponseEvent { + choices: vec![copilot::copilot_chat::ResponseChoice { + index: Some(0), + finish_reason: None, + delta: Some(copilot::copilot_chat::ResponseDelta { + content: None, + role: Some(copilot::copilot_chat::Role::Assistant), + tool_calls: vec![copilot::copilot_chat::ToolCallChunk { + index: Some(0), + id: Some("call_abc123".to_string()), + function: Some(copilot::copilot_chat::FunctionChunk { + name: Some("list_directory".to_string()), + arguments: Some("{\"path\":\"test\"}".to_string()), + thought_signature: None, + }), + }], + reasoning_opaque: Some("encrypted_reasoning_token_xyz".to_string()), + reasoning_text: Some("Let me check the directory".to_string()), + }), + message: None, + }], + id: "chatcmpl-123".to_string(), + usage: None, + }, + ResponseEvent { + choices: vec![copilot::copilot_chat::ResponseChoice { + index: Some(0), + finish_reason: Some("tool_calls".to_string()), + delta: Some(copilot::copilot_chat::ResponseDelta { + content: None, + role: None, + tool_calls: vec![], + reasoning_opaque: None, + reasoning_text: None, + }), + message: None, + }], + id: "chatcmpl-123".to_string(), + usage: None, + }, + ]; + + let mapped = futures::executor::block_on(async { + map_to_language_model_completion_events( + Box::pin(futures::stream::iter(events.into_iter().map(Ok))), + true, + ) + .collect::>() + .await + }); + + let mut has_reasoning_details = false; + let mut has_tool_use = false; + let mut reasoning_opaque_value: Option = None; + let mut reasoning_text_value: Option = None; + + for event_result in mapped { + match event_result { + Ok(LanguageModelCompletionEvent::ReasoningDetails(details)) => { + has_reasoning_details = true; + reasoning_opaque_value = details + .get("reasoning_opaque") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()); + reasoning_text_value = details + .get("reasoning_text") + .and_then(|v| v.as_str()) + .map(|s| 
s.to_string()); + } + Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) => { + has_tool_use = true; + assert_eq!(tool_use.id.to_string(), "call_abc123"); + assert_eq!(tool_use.name.as_ref(), "list_directory"); + } + _ => {} + } + } + + assert!( + has_reasoning_details, + "Should emit ReasoningDetails event for Gemini 3 reasoning" + ); + assert!(has_tool_use, "Should emit ToolUse event"); + assert_eq!( + reasoning_opaque_value, + Some("encrypted_reasoning_token_xyz".to_string()), + "Should capture reasoning_opaque" + ); + assert_eq!( + reasoning_text_value, + Some("Let me check the directory".to_string()), + "Should capture reasoning_text" + ); + } } struct ConfigurationView { copilot_status: Option, From 1c072017a468d04b500da857bf4245832b0f8bf6 Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 12:29:26 -0500 Subject: [PATCH 0372/1030] git: Make the version_control.{deleted/added} colors more accessible (#43475) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The new colors are easier to tell apart for people that are colorblind cc: @mattermill ## One Dark ### Before Screenshot 2025-11-25 at 12 13 14 PM ### After Screenshot 2025-11-25 at 12 14 16 PM ## One Light ### Before Screenshot 2025-11-25 at 12 15 13 PM ### After Screenshot 2025-11-25 at 12 15 45 PM Release Notes: - N/A --- assets/themes/one/one.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/themes/one/one.json b/assets/themes/one/one.json index 6849cd05dc70752216789ae04e81fad232f7b14b..48db749a4b636963d6db714ddb055c9c15bc5494 100644 --- a/assets/themes/one/one.json +++ b/assets/themes/one/one.json @@ -96,9 +96,9 @@ "terminal.ansi.bright_white": "#fafafaff", "terminal.ansi.dim_white": "#575d65ff", "link_text.hover": "#74ade8ff", - "version_control.added": "#27a657ff", + "version_control.added": "#2EA048ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#e06c76ff", + "version_control.deleted": "#78081Bff", "version_control.conflict_marker.ours": "#a1c1811a", "version_control.conflict_marker.theirs": "#74ade81a", "conflict": "#dec184ff", @@ -497,9 +497,9 @@ "terminal.ansi.bright_white": "#ffffffff", "terminal.ansi.dim_white": "#aaaaaaff", "link_text.hover": "#5c78e2ff", - "version_control.added": "#27a657ff", + "version_control.added": "#2EA048ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#e06c76ff", + "version_control.deleted": "#F85149ff", "conflict": "#a48819ff", "conflict.background": "#faf2e6ff", "conflict.border": "#f4e7d1ff", From 94f9b8585969da841cccb341ad766c1d2d852816 Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Tue, 25 Nov 2025 18:29:45 +0100 Subject: [PATCH 0373/1030] acp: Only pass enabled MCP servers to agent (#43467) Release Notes: - Fix an issue where ACP agents would start MCP servers that were disabled in Zed --- crates/project/src/context_server_store.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/project/src/context_server_store.rs b/crates/project/src/context_server_store.rs index 342a59ab7d5530e8f2268f1c4b72ea44f302f807..59bef36f06502f11d06f76ac7819a4c9ea806176 100644 --- a/crates/project/src/context_server_store.rs +++ b/crates/project/src/context_server_store.rs @@ -199,12 +199,12 @@ impl ContextServerStore { ) } - /// Returns all configured context server ids, regardless of enabled state. 
+ /// Returns all configured context server ids, excluding the ones that are disabled pub fn configured_server_ids(&self) -> Vec { self.context_server_settings - .keys() - .cloned() - .map(ContextServerId) + .iter() + .filter(|(_, settings)| settings.enabled()) + .map(|(id, _)| ContextServerId(id.clone())) .collect() } From 388fda2292ccbe381c18473030461420f06c36cd Mon Sep 17 00:00:00 2001 From: Smit Barmase Date: Tue, 25 Nov 2025 23:27:11 +0530 Subject: [PATCH 0374/1030] editor: Fix package version completion partial accept and improve sorting (#43473) Closes #41723 This PR fixes an issue with accepting partial semver completions by including `.` in the completion query. This makes the editor treat the entire version string as the query, instead of breaking segment at last `.` . This PR also adds a test for sorting semver completions. The actual sorting fix is handled in the `package-version-server` by having it provide `sort_text`. More: https://github.com/zed-industries/package-version-server/pull/10 image Release Notes: - Fixed an issue where accepting a completion for a semver version in package.json would append the suggestion to the existing text instead of replacing it. - Improved the sorting of semver completions in package.json so the latest versions appear at the top. --- crates/editor/src/code_completion_tests.rs | 122 +++++++++++++++++++-- crates/languages/src/json/config.toml | 3 +- 2 files changed, 117 insertions(+), 8 deletions(-) diff --git a/crates/editor/src/code_completion_tests.rs b/crates/editor/src/code_completion_tests.rs index 364b310f367ff195f9aee8693a815be94db0b44d..4602824486ebb88f78ed529abb91ddcc1c34646f 100644 --- a/crates/editor/src/code_completion_tests.rs +++ b/crates/editor/src/code_completion_tests.rs @@ -239,6 +239,89 @@ async fn test_fuzzy_over_sort_positions(cx: &mut TestAppContext) { assert_eq!(matches[2].string, "fetch_code_lens"); } +#[gpui::test] +async fn test_semver_label_sort_by_latest_version(cx: &mut TestAppContext) { + let mut versions = [ + "10.4.112", + "10.4.22", + "10.4.2", + "10.4.20", + "10.4.21", + "10.4.12", + // Pre-release versions + "10.4.22-alpha", + "10.4.22-beta.1", + "10.4.22-rc.1", + // Build metadata versions + "10.4.21+build.123", + "10.4.20+20210327", + ]; + versions.sort_by(|a, b| { + match ( + semver::Version::parse(a).ok(), + semver::Version::parse(b).ok(), + ) { + (Some(a_ver), Some(b_ver)) => b_ver.cmp(&a_ver), + _ => std::cmp::Ordering::Equal, + } + }); + let completions: Vec<_> = versions + .iter() + .enumerate() + .map(|(i, version)| { + // This sort text would come from the LSP + let sort_text = format!("{:08}", i); + CompletionBuilder::new(version, None, &sort_text, None) + }) + .collect(); + + // Case 1: User types just the major and minor version + let matches = + filter_and_sort_matches("10.4.", &completions, SnippetSortOrder::default(), cx).await; + // Versions are ordered by recency (latest first) + let expected_versions = [ + "10.4.112", + "10.4.22", + "10.4.22-rc.1", + "10.4.22-beta.1", + "10.4.22-alpha", + "10.4.21+build.123", + "10.4.21", + "10.4.20+20210327", + "10.4.20", + "10.4.12", + "10.4.2", + ]; + for (match_item, expected) in matches.iter().zip(expected_versions.iter()) { + assert_eq!(match_item.string.as_ref() as &str, *expected); + } + + // Case 2: User types the major, minor, and patch version + let matches = + filter_and_sort_matches("10.4.2", &completions, SnippetSortOrder::default(), cx).await; + let expected_versions = [ + // Exact match comes first + "10.4.2", + // Ordered by recency with 
exact major, minor, and patch versions + "10.4.22", + "10.4.22-rc.1", + "10.4.22-beta.1", + "10.4.22-alpha", + "10.4.21+build.123", + "10.4.21", + "10.4.20+20210327", + "10.4.20", + // Versions with non-exact patch versions are ordered by fuzzy score + // Higher fuzzy score than 112 patch version since "2" appears before "1" + // in "12", making it rank higher than "112" + "10.4.12", + "10.4.112", + ]; + for (match_item, expected) in matches.iter().zip(expected_versions.iter()) { + assert_eq!(match_item.string.as_ref() as &str, *expected); + } +} + async fn test_for_each_prefix( target: &str, completions: &Vec, @@ -259,30 +342,55 @@ struct CompletionBuilder; impl CompletionBuilder { fn constant(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::CONSTANT) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::CONSTANT), + ) } fn function(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::FUNCTION) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::FUNCTION), + ) } fn method(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::METHOD) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::METHOD), + ) } fn variable(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::VARIABLE) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::VARIABLE), + ) } fn snippet(label: &str, filter_text: Option<&str>, sort_text: &str) -> Completion { - Self::new(label, filter_text, sort_text, CompletionItemKind::SNIPPET) + Self::new( + label, + filter_text, + sort_text, + Some(CompletionItemKind::SNIPPET), + ) } fn new( label: &str, filter_text: Option<&str>, sort_text: &str, - kind: CompletionItemKind, + kind: Option, ) -> Completion { Completion { replace_range: Anchor::MIN..Anchor::MAX, @@ -294,7 +402,7 @@ impl CompletionBuilder { server_id: LanguageServerId(0), lsp_completion: Box::new(CompletionItem { label: label.to_string(), - kind: Some(kind), + kind: kind, sort_text: Some(sort_text.to_string()), filter_text: filter_text.map(|text| text.to_string()), ..Default::default() diff --git a/crates/languages/src/json/config.toml b/crates/languages/src/json/config.toml index 1cf815704cd1d0ecd25c25e00d5925c13ff0cf35..8caa46c8a45076557d5f6c897fc1a5ad11ffa6ac 100644 --- a/crates/languages/src/json/config.toml +++ b/crates/languages/src/json/config.toml @@ -11,5 +11,6 @@ brackets = [ tab_size = 2 prettier_parser_name = "json" debuggers = ["JavaScript"] + [overrides.string] -completion_query_characters = [":", " "] +completion_query_characters = [":", " ", "."] From 36708c910a0b4f20a85338d423f9710b0cc27780 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 10:36:45 -0800 Subject: [PATCH 0375/1030] Separate experimental edit prediction jumps feature from the Sweep AI prediction provider (#43481) Release Notes: - N/A --------- Co-authored-by: Ben Kunkle --- crates/zeta/src/provider.rs | 2 +- crates/zeta/src/sweep_ai.rs | 283 ++++++++++++++++++++++- crates/zeta/src/zeta.rs | 320 ++++++-------------------- crates/zeta/src/zeta1.rs | 6 +- crates/zeta2_tools/src/zeta2_tools.rs | 79 ++++--- 5 files changed, 381 insertions(+), 309 deletions(-) diff --git a/crates/zeta/src/provider.rs 
b/crates/zeta/src/provider.rs index a2b3eed1b5efe953ebdf5a2448ca06e7866bea86..76c950714afa808ea04cf5fead89979374f2b99b 100644 --- a/crates/zeta/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -77,7 +77,7 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { ) -> bool { let zeta = self.zeta.read(cx); if zeta.edit_prediction_model == ZetaEditPredictionModel::Sweep { - zeta.sweep_api_token.is_some() + zeta.sweep_ai.api_token.is_some() } else { true } diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index 0e226ab9df26ffc945a2d8e810790d0b00d0f198..49870c7c9e917468e70062cbc234e9774fb3668b 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -1,10 +1,269 @@ -use std::fmt; -use std::{path::Path, sync::Arc}; - +use anyhow::{Context as _, Result}; +use cloud_llm_client::predict_edits_v3::Event; +use futures::AsyncReadExt as _; +use gpui::{ + App, AppContext as _, Entity, Task, + http_client::{self, AsyncBody, Method}, +}; +use language::{Buffer, BufferSnapshot, Point, ToOffset as _, ToPoint as _}; +use lsp::DiagnosticSeverity; +use project::{Project, ProjectPath}; use serde::{Deserialize, Serialize}; +use std::{ + collections::VecDeque, + fmt::{self, Write as _}, + ops::Range, + path::Path, + sync::Arc, + time::Instant, +}; +use util::ResultExt as _; + +use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; + +const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; + +pub struct SweepAi { + pub api_token: Option, + pub debug_info: Arc, +} + +impl SweepAi { + pub fn new(cx: &App) -> Self { + SweepAi { + api_token: std::env::var("SWEEP_AI_TOKEN") + .context("No SWEEP_AI_TOKEN environment variable set") + .log_err(), + debug_info: debug_info(cx), + } + } + + pub fn request_prediction_with_sweep( + &self, + project: &Entity, + active_buffer: &Entity, + snapshot: BufferSnapshot, + position: language::Anchor, + events: Vec>, + recent_paths: &VecDeque, + diagnostic_search_range: Range, + cx: &mut App, + ) -> Task>> { + let debug_info = self.debug_info.clone(); + let Some(api_token) = self.api_token.clone() else { + return Task::ready(Ok(None)); + }; + let full_path: Arc = snapshot + .file() + .map(|file| file.full_path(cx)) + .unwrap_or_else(|| "untitled".into()) + .into(); + + let project_file = project::File::from_dyn(snapshot.file()); + let repo_name = project_file + .map(|file| file.worktree.read(cx).root_name_str()) + .unwrap_or("untitled") + .into(); + let offset = position.to_offset(&snapshot); + + let recent_buffers = recent_paths.iter().cloned(); + let http_client = cx.http_client(); + + let recent_buffer_snapshots = recent_buffers + .filter_map(|project_path| { + let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; + if active_buffer == &buffer { + None + } else { + Some(buffer.read(cx).snapshot()) + } + }) + .take(3) + .collect::>(); + + let cursor_point = position.to_point(&snapshot); + let buffer_snapshotted_at = Instant::now(); + + let result = cx.background_spawn(async move { + let text = snapshot.text(); + + let mut recent_changes = String::new(); + for event in &events { + write_event(event.as_ref(), &mut recent_changes).unwrap(); + } + + let mut file_chunks = recent_buffer_snapshots + .into_iter() + .map(|snapshot| { + let end_point = Point::new(30, 0).min(snapshot.max_point()); + FileChunk { + content: snapshot.text_for_range(Point::zero()..end_point).collect(), + file_path: snapshot + .file() + .map(|f| f.path().as_unix_str()) + .unwrap_or("untitled") + 
.to_string(), + start_line: 0, + end_line: end_point.row as usize, + timestamp: snapshot.file().and_then(|file| { + Some( + file.disk_state() + .mtime()? + .to_seconds_and_nanos_for_persistence()? + .0, + ) + }), + } + }) + .collect::>(); + + let diagnostic_entries = snapshot.diagnostics_in_range(diagnostic_search_range, false); + let mut diagnostic_content = String::new(); + let mut diagnostic_count = 0; + + for entry in diagnostic_entries { + let start_point: Point = entry.range.start; + + let severity = match entry.diagnostic.severity { + DiagnosticSeverity::ERROR => "error", + DiagnosticSeverity::WARNING => "warning", + DiagnosticSeverity::INFORMATION => "info", + DiagnosticSeverity::HINT => "hint", + _ => continue, + }; + + diagnostic_count += 1; + + writeln!( + &mut diagnostic_content, + "{} at line {}: {}", + severity, + start_point.row + 1, + entry.diagnostic.message + )?; + } + + if !diagnostic_content.is_empty() { + file_chunks.push(FileChunk { + file_path: format!("Diagnostics for {}", full_path.display()), + start_line: 0, + end_line: diagnostic_count, + content: diagnostic_content, + timestamp: None, + }); + } + + let request_body = AutocompleteRequest { + debug_info, + repo_name, + file_path: full_path.clone(), + file_contents: text.clone(), + original_file_contents: text, + cursor_position: offset, + recent_changes: recent_changes.clone(), + changes_above_cursor: true, + multiple_suggestions: false, + branch: None, + file_chunks, + retrieval_chunks: vec![], + recent_user_actions: vec![], + // TODO + privacy_mode_enabled: false, + }; + + let mut buf: Vec = Vec::new(); + let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); + serde_json::to_writer(writer, &request_body)?; + let body: AsyncBody = buf.into(); + + let inputs = EditPredictionInputs { + events, + included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { + path: full_path.clone(), + max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), + excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { + start_line: cloud_llm_client::predict_edits_v3::Line(0), + text: request_body.file_contents.into(), + }], + }], + cursor_point: cloud_llm_client::predict_edits_v3::Point { + column: cursor_point.column, + line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), + }, + cursor_path: full_path.clone(), + }; + + let request = http_client::Request::builder() + .uri(SWEEP_API_URL) + .header("Content-Type", "application/json") + .header("Authorization", format!("Bearer {}", api_token)) + .header("Connection", "keep-alive") + .header("Content-Encoding", "br") + .method(Method::POST) + .body(body)?; + + let mut response = http_client.send(request).await?; + + let mut body: Vec = Vec::new(); + response.body_mut().read_to_end(&mut body).await?; + + let response_received_at = Instant::now(); + if !response.status().is_success() { + anyhow::bail!( + "Request failed with status: {:?}\nBody: {}", + response.status(), + String::from_utf8_lossy(&body), + ); + }; + + let response: AutocompleteResponse = serde_json::from_slice(&body)?; + + let old_text = snapshot + .text_for_range(response.start_index..response.end_index) + .collect::(); + let edits = language::text_diff(&old_text, &response.completion) + .into_iter() + .map(|(range, text)| { + ( + snapshot.anchor_after(response.start_index + range.start) + ..snapshot.anchor_before(response.start_index + range.end), + text, + ) + }) + .collect::>(); + + anyhow::Ok(( + response.autocomplete_id, + edits, + snapshot, + 
response_received_at, + inputs, + )) + }); + + let buffer = active_buffer.clone(); + + cx.spawn(async move |cx| { + let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + anyhow::Ok( + EditPrediction::new( + EditPredictionId(id.into()), + &buffer, + &old_snapshot, + edits.into(), + buffer_snapshotted_at, + response_received_at, + inputs, + cx, + ) + .await, + ) + }) + } +} #[derive(Debug, Clone, Serialize)] -pub struct AutocompleteRequest { +struct AutocompleteRequest { pub debug_info: Arc, pub repo_name: String, pub branch: Option, @@ -22,7 +281,7 @@ pub struct AutocompleteRequest { } #[derive(Debug, Clone, Serialize)] -pub struct FileChunk { +struct FileChunk { pub file_path: String, pub start_line: usize, pub end_line: usize, @@ -31,7 +290,7 @@ pub struct FileChunk { } #[derive(Debug, Clone, Serialize)] -pub struct RetrievalChunk { +struct RetrievalChunk { pub file_path: String, pub start_line: usize, pub end_line: usize, @@ -40,7 +299,7 @@ pub struct RetrievalChunk { } #[derive(Debug, Clone, Serialize)] -pub struct UserAction { +struct UserAction { pub action_type: ActionType, pub line_number: usize, pub offset: usize, @@ -51,7 +310,7 @@ pub struct UserAction { #[allow(dead_code)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)] #[serde(rename_all = "SCREAMING_SNAKE_CASE")] -pub enum ActionType { +enum ActionType { CursorMovement, InsertChar, DeleteChar, @@ -60,7 +319,7 @@ pub enum ActionType { } #[derive(Debug, Clone, Deserialize)] -pub struct AutocompleteResponse { +struct AutocompleteResponse { pub autocomplete_id: String, pub start_index: usize, pub end_index: usize, @@ -80,7 +339,7 @@ pub struct AutocompleteResponse { #[allow(dead_code)] #[derive(Debug, Clone, Deserialize)] -pub struct AdditionalCompletion { +struct AdditionalCompletion { pub start_index: usize, pub end_index: usize, pub completion: String, @@ -90,7 +349,7 @@ pub struct AdditionalCompletion { pub finish_reason: Option, } -pub(crate) fn write_event( +fn write_event( event: &cloud_llm_client::predict_edits_v3::Event, f: &mut impl fmt::Write, ) -> fmt::Result { @@ -115,7 +374,7 @@ pub(crate) fn write_event( } } -pub(crate) fn debug_info(cx: &gpui::App) -> Arc { +fn debug_info(cx: &gpui::App) -> Arc { format!( "Zed v{version} ({sha}) - OS: {os} - Zed v{version}", version = release_channel::AppVersion::global(cx), diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 6464ce19ebaf1f95ad58e2954fb68e934600dac4..78169821f0e6cddd51bcd2075d766c6d2e0f2e71 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -30,7 +30,6 @@ use language::{ }; use language::{BufferSnapshot, OffsetRangeExt}; use language_model::{LlmApiToken, RefreshLlmTokenListener}; -use lsp::DiagnosticSeverity; use open_ai::FunctionDefinition; use project::{DisableAiSettings, Project, ProjectPath, WorktreeId}; use release_channel::AppVersion; @@ -42,7 +41,6 @@ use std::collections::{VecDeque, hash_map}; use telemetry_events::EditPredictionRating; use workspace::Workspace; -use std::fmt::Write as _; use std::ops::Range; use std::path::Path; use std::rc::Rc; @@ -80,6 +78,7 @@ use crate::rate_prediction_modal::{ NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, ThumbsUpActivePrediction, }; +use crate::sweep_ai::SweepAi; use crate::zeta1::request_prediction_with_zeta1; pub use provider::ZetaEditPredictionProvider; @@ -171,7 +170,7 @@ impl FeatureFlag for Zeta2FeatureFlag { const NAME: &'static str = "zeta2"; fn enabled_for_staff() -> bool { - false + true } } @@ -192,8 +191,7 @@ 
pub struct Zeta { #[cfg(feature = "eval-support")] eval_cache: Option>, edit_prediction_model: ZetaEditPredictionModel, - sweep_api_token: Option, - sweep_ai_debug_info: Arc, + sweep_ai: SweepAi, data_collection_choice: DataCollectionChoice, rejected_predictions: Vec, reject_predictions_tx: mpsc::UnboundedSender<()>, @@ -202,7 +200,7 @@ pub struct Zeta { rated_predictions: HashSet, } -#[derive(Default, PartialEq, Eq)] +#[derive(Copy, Clone, Default, PartialEq, Eq)] pub enum ZetaEditPredictionModel { #[default] Zeta1, @@ -499,11 +497,8 @@ impl Zeta { #[cfg(feature = "eval-support")] eval_cache: None, edit_prediction_model: ZetaEditPredictionModel::Zeta2, - sweep_api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), + sweep_ai: SweepAi::new(cx), data_collection_choice, - sweep_ai_debug_info: sweep_ai::debug_info(cx), rejected_predictions: Vec::new(), reject_predictions_debounce_task: None, reject_predictions_tx: reject_tx, @@ -517,7 +512,7 @@ impl Zeta { } pub fn has_sweep_api_token(&self) -> bool { - self.sweep_api_token.is_some() + self.sweep_ai.api_token.is_some() } #[cfg(feature = "eval-support")] @@ -643,7 +638,9 @@ impl Zeta { } } project::Event::DiagnosticsUpdated { .. } => { - self.refresh_prediction_from_diagnostics(project, cx); + if cx.has_flag::() { + self.refresh_prediction_from_diagnostics(project, cx); + } } _ => (), } @@ -1183,249 +1180,77 @@ impl Zeta { position: language::Anchor, cx: &mut Context, ) -> Task>> { - match self.edit_prediction_model { - ZetaEditPredictionModel::Zeta1 => { - request_prediction_with_zeta1(self, project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Zeta2 => { - self.request_prediction_with_zeta2(project, active_buffer, position, cx) - } - ZetaEditPredictionModel::Sweep => { - self.request_prediction_with_sweep(project, active_buffer, position, true, cx) - } - } + self.request_prediction_internal( + project.clone(), + active_buffer.clone(), + position, + cx.has_flag::(), + cx, + ) } - fn request_prediction_with_sweep( + fn request_prediction_internal( &mut self, - project: &Entity, - active_buffer: &Entity, + project: Entity, + active_buffer: Entity, position: language::Anchor, allow_jump: bool, cx: &mut Context, ) -> Task>> { - let snapshot = active_buffer.read(cx).snapshot(); - let debug_info = self.sweep_ai_debug_info.clone(); - let Some(api_token) = self.sweep_api_token.clone() else { - return Task::ready(Ok(None)); - }; - let full_path: Arc = snapshot - .file() - .map(|file| file.full_path(cx)) - .unwrap_or_else(|| "untitled".into()) - .into(); - - let project_file = project::File::from_dyn(snapshot.file()); - let repo_name = project_file - .map(|file| file.worktree.read(cx).root_name_str()) - .unwrap_or("untitled") - .into(); - let offset = position.to_offset(&snapshot); + const DIAGNOSTIC_LINES_RANGE: u32 = 20; - let project_state = self.get_or_init_zeta_project(project, cx); - let events = project_state.events(cx); + self.get_or_init_zeta_project(&project, cx); + let zeta_project = self.projects.get(&project.entity_id()).unwrap(); + let events = zeta_project.events(cx); let has_events = !events.is_empty(); - let recent_buffers = project_state.recent_paths.iter().cloned(); - let http_client = cx.http_client(); - - let recent_buffer_snapshots = recent_buffers - .filter_map(|project_path| { - let buffer = project.read(cx).get_open_buffer(&project_path, cx)?; - if active_buffer == &buffer { - None - } else { - Some(buffer.read(cx).snapshot()) - } - }) - .take(3) - 
.collect::>(); - - const DIAGNOSTIC_LINES_RANGE: u32 = 20; + let snapshot = active_buffer.read(cx).snapshot(); let cursor_point = position.to_point(&snapshot); let diagnostic_search_start = cursor_point.row.saturating_sub(DIAGNOSTIC_LINES_RANGE); let diagnostic_search_end = cursor_point.row + DIAGNOSTIC_LINES_RANGE; let diagnostic_search_range = Point::new(diagnostic_search_start, 0)..Point::new(diagnostic_search_end, 0); - let buffer_snapshotted_at = Instant::now(); - - let result = cx.background_spawn({ - let snapshot = snapshot.clone(); - let diagnostic_search_range = diagnostic_search_range.clone(); - async move { - let text = snapshot.text(); - - let mut recent_changes = String::new(); - for event in &events { - sweep_ai::write_event(event.as_ref(), &mut recent_changes).unwrap(); - } - - let mut file_chunks = recent_buffer_snapshots - .into_iter() - .map(|snapshot| { - let end_point = Point::new(30, 0).min(snapshot.max_point()); - sweep_ai::FileChunk { - content: snapshot.text_for_range(Point::zero()..end_point).collect(), - file_path: snapshot - .file() - .map(|f| f.path().as_unix_str()) - .unwrap_or("untitled") - .to_string(), - start_line: 0, - end_line: end_point.row as usize, - timestamp: snapshot.file().and_then(|file| { - Some( - file.disk_state() - .mtime()? - .to_seconds_and_nanos_for_persistence()? - .0, - ) - }), - } - }) - .collect::>(); - - let diagnostic_entries = - snapshot.diagnostics_in_range(diagnostic_search_range, false); - let mut diagnostic_content = String::new(); - let mut diagnostic_count = 0; - - for entry in diagnostic_entries { - let start_point: Point = entry.range.start; - - let severity = match entry.diagnostic.severity { - DiagnosticSeverity::ERROR => "error", - DiagnosticSeverity::WARNING => "warning", - DiagnosticSeverity::INFORMATION => "info", - DiagnosticSeverity::HINT => "hint", - _ => continue, - }; - - diagnostic_count += 1; - - writeln!( - &mut diagnostic_content, - "{} at line {}: {}", - severity, - start_point.row + 1, - entry.diagnostic.message - )?; - } - - if !diagnostic_content.is_empty() { - file_chunks.push(sweep_ai::FileChunk { - file_path: format!("Diagnostics for {}", full_path.display()), - start_line: 0, - end_line: diagnostic_count, - content: diagnostic_content, - timestamp: None, - }); - } - - let request_body = sweep_ai::AutocompleteRequest { - debug_info, - repo_name, - file_path: full_path.clone(), - file_contents: text.clone(), - original_file_contents: text, - cursor_position: offset, - recent_changes: recent_changes.clone(), - changes_above_cursor: true, - multiple_suggestions: false, - branch: None, - file_chunks, - retrieval_chunks: vec![], - recent_user_actions: vec![], - // TODO - privacy_mode_enabled: false, - }; - let mut buf: Vec = Vec::new(); - let writer = brotli::CompressorWriter::new(&mut buf, 4096, 11, 22); - serde_json::to_writer(writer, &request_body)?; - let body: AsyncBody = buf.into(); - - let inputs = EditPredictionInputs { - events, - included_files: vec![cloud_llm_client::predict_edits_v3::IncludedFile { - path: full_path.clone(), - max_row: cloud_llm_client::predict_edits_v3::Line(snapshot.max_point().row), - excerpts: vec![cloud_llm_client::predict_edits_v3::Excerpt { - start_line: cloud_llm_client::predict_edits_v3::Line(0), - text: request_body.file_contents.into(), - }], - }], - cursor_point: cloud_llm_client::predict_edits_v3::Point { - column: cursor_point.column, - line: cloud_llm_client::predict_edits_v3::Line(cursor_point.row), - }, - cursor_path: full_path.clone(), - }; - - const 
SWEEP_API_URL: &str = - "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; - - let request = http_client::Request::builder() - .uri(SWEEP_API_URL) - .header("Content-Type", "application/json") - .header("Authorization", format!("Bearer {}", api_token)) - .header("Connection", "keep-alive") - .header("Content-Encoding", "br") - .method(Method::POST) - .body(body)?; - - let mut response = http_client.send(request).await?; - - let mut body: Vec = Vec::new(); - response.body_mut().read_to_end(&mut body).await?; - - let response_received_at = Instant::now(); - if !response.status().is_success() { - anyhow::bail!( - "Request failed with status: {:?}\nBody: {}", - response.status(), - String::from_utf8_lossy(&body), - ); - }; - - let response: sweep_ai::AutocompleteResponse = serde_json::from_slice(&body)?; - - let old_text = snapshot - .text_for_range(response.start_index..response.end_index) - .collect::(); - let edits = language::text_diff(&old_text, &response.completion) - .into_iter() - .map(|(range, text)| { - ( - snapshot.anchor_after(response.start_index + range.start) - ..snapshot.anchor_before(response.start_index + range.end), - text, - ) - }) - .collect::>(); - - anyhow::Ok(( - response.autocomplete_id, - edits, - snapshot, - response_received_at, - inputs, - )) - } - }); - - let buffer = active_buffer.clone(); - let project = project.clone(); - let active_buffer = active_buffer.clone(); + let task = match self.edit_prediction_model { + ZetaEditPredictionModel::Zeta1 => request_prediction_with_zeta1( + self, + &project, + &active_buffer, + snapshot.clone(), + position, + events, + cx, + ), + ZetaEditPredictionModel::Zeta2 => self.request_prediction_with_zeta2( + &project, + &active_buffer, + snapshot.clone(), + position, + events, + cx, + ), + ZetaEditPredictionModel::Sweep => self.sweep_ai.request_prediction_with_sweep( + &project, + &active_buffer, + snapshot.clone(), + position, + events, + &zeta_project.recent_paths, + diagnostic_search_range.clone(), + cx, + ), + }; cx.spawn(async move |this, cx| { - let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; + let prediction = task + .await? 
+ .filter(|prediction| !prediction.edits.is_empty()); - if edits.is_empty() { + if prediction.is_none() && allow_jump { + let cursor_point = position.to_point(&snapshot); if has_events - && allow_jump && let Some((jump_buffer, jump_position)) = Self::next_diagnostic_location( - active_buffer, + active_buffer.clone(), &snapshot, diagnostic_search_range, cursor_point, @@ -1436,9 +1261,9 @@ impl Zeta { { return this .update(cx, |this, cx| { - this.request_prediction_with_sweep( - &project, - &jump_buffer, + this.request_prediction_internal( + project, + jump_buffer, jump_position, false, cx, @@ -1450,19 +1275,7 @@ impl Zeta { return anyhow::Ok(None); } - anyhow::Ok( - EditPrediction::new( - EditPredictionId(id.into()), - &buffer, - &old_snapshot, - edits.into(), - buffer_snapshotted_at, - response_received_at, - inputs, - cx, - ) - .await, - ) + Ok(prediction) }) } @@ -1549,7 +1362,9 @@ impl Zeta { &mut self, project: &Entity, active_buffer: &Entity, + active_snapshot: BufferSnapshot, position: language::Anchor, + events: Vec>, cx: &mut Context, ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); @@ -1561,7 +1376,6 @@ impl Zeta { .map(|syntax_index| syntax_index.read_with(cx, |index, _cx| index.state().clone())) }); let options = self.options.clone(); - let active_snapshot = active_buffer.read(cx).snapshot(); let buffer_snapshotted_at = Instant::now(); let Some(excerpt_path) = active_snapshot .file() @@ -1579,10 +1393,6 @@ impl Zeta { .collect::>(); let debug_tx = self.debug_tx.clone(); - let events = project_state - .map(|state| state.events(cx)) - .unwrap_or_default(); - let diagnostics = active_snapshot.diagnostic_sets().clone(); let file = active_buffer.read(cx).file(); diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 5a779cabeceac0bcb58340f7bbb98175409916e8..7f80d60d5efcbbd0bd7b9426508c344c063d5597 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -32,19 +32,17 @@ pub(crate) fn request_prediction_with_zeta1( zeta: &mut Zeta, project: &Entity, buffer: &Entity, + snapshot: BufferSnapshot, position: language::Anchor, + events: Vec>, cx: &mut Context, ) -> Task>> { let buffer = buffer.clone(); let buffer_snapshotted_at = Instant::now(); - let snapshot = buffer.read(cx).snapshot(); let client = zeta.client.clone(); let llm_token = zeta.llm_token.clone(); let app_version = AppVersion::global(cx); - let zeta_project = zeta.get_or_init_zeta_project(project, cx); - let events = Arc::new(zeta_project.events(cx)); - let (git_info, can_collect_file) = if let Some(file) = snapshot.file() { let can_collect_file = zeta.can_collect_file(project, file, cx); let git_info = if can_collect_file { diff --git a/crates/zeta2_tools/src/zeta2_tools.rs b/crates/zeta2_tools/src/zeta2_tools.rs index 6a6268f68ad0fa10e2379ac21e07d4fa530dddc1..4e650f2405d63feab010c5c9b73efc75bd576af6 100644 --- a/crates/zeta2_tools/src/zeta2_tools.rs +++ b/crates/zeta2_tools/src/zeta2_tools.rs @@ -42,43 +42,48 @@ actions!( pub fn init(cx: &mut App) { cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(move |workspace, _: &OpenZeta2Inspector, window, cx| { - let project = workspace.project(); - workspace.split_item( - SplitDirection::Right, - Box::new(cx.new(|cx| { - Zeta2Inspector::new( - &project, - workspace.client(), - workspace.user_store(), - window, - cx, - ) - })), - window, - cx, - ); - }); - }) - .detach(); - - cx.observe_new(move |workspace: &mut Workspace, _, _cx| { - workspace.register_action(move |workspace, _: 
&OpenZeta2ContextView, window, cx| { - let project = workspace.project(); - workspace.split_item( - SplitDirection::Right, - Box::new(cx.new(|cx| { - Zeta2ContextView::new( - project.clone(), - workspace.client(), - workspace.user_store(), - window, - cx, - ) - })), - window, - cx, - ); + workspace.register_action_renderer(|div, _, _, cx| { + let has_flag = cx.has_flag::(); + div.when(has_flag, |div| { + div.on_action( + cx.listener(move |workspace, _: &OpenZeta2Inspector, window, cx| { + let project = workspace.project(); + workspace.split_item( + SplitDirection::Right, + Box::new(cx.new(|cx| { + Zeta2Inspector::new( + &project, + workspace.client(), + workspace.user_store(), + window, + cx, + ) + })), + window, + cx, + ) + }), + ) + .on_action(cx.listener( + move |workspace, _: &OpenZeta2ContextView, window, cx| { + let project = workspace.project(); + workspace.split_item( + SplitDirection::Right, + Box::new(cx.new(|cx| { + Zeta2ContextView::new( + project.clone(), + workspace.client(), + workspace.user_store(), + window, + cx, + ) + })), + window, + cx, + ); + }, + )) + }) }); }) .detach(); From fb0fcd86fd4de39107ca034265e3fc01124a111c Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 10:43:46 -0800 Subject: [PATCH 0376/1030] Add missing update of last_prediction_refresh (#43483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes a regression introduced in https://github.com/zed-industries/zed/pull/43284 where edit predictions stopped being throttled at all 😬 Release Notes: - N/A Co-authored-by: Ben Kunkle --- crates/zeta/src/zeta.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 78169821f0e6cddd51bcd2075d766c6d2e0f2e71..8b54576a12f2ff788b6088299c30923b2ce8adda 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -1123,7 +1123,6 @@ impl Zeta { zeta_project.next_pending_prediction_id += 1; let last_request = zeta_project.last_prediction_refresh; - // TODO report cancelled requests like in zeta1 let task = cx.spawn(async move |this, cx| { if let Some((last_entity, last_timestamp)) = last_request && throttle_entity == last_entity @@ -1133,6 +1132,12 @@ impl Zeta { cx.background_executor().timer(timeout).await; } + this.update(cx, |this, cx| { + this.get_or_init_zeta_project(&project, cx) + .last_prediction_refresh = Some((throttle_entity, Instant::now())); + }) + .ok(); + let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); // When a prediction completes, remove it from the pending list, and cancel From 7ecbf8cf60feff43da961ec3e3d99e7570f75454 Mon Sep 17 00:00:00 2001 From: Ben Kunkle Date: Tue, 25 Nov 2025 10:44:04 -0800 Subject: [PATCH 0377/1030] zeta2: Remove expected context from evals (#43430) Closes #ISSUE Release Notes: - N/A *or* Added/Fixed/Improved ... 
--- crates/zeta_cli/src/evaluate.rs | 114 +------------------ crates/zeta_cli/src/example.rs | 189 +------------------------------- crates/zeta_cli/src/main.rs | 2 - crates/zeta_cli/src/predict.rs | 87 ++------------- 4 files changed, 18 insertions(+), 374 deletions(-) diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index a0ebdf998595ccacec2dafecf51b6094e5e401b5..6726dcb3aafdeff7fe41cbbbc49850c1e7465cf4 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeSet, HashMap}, + collections::HashMap, io::{IsTerminal, Write}, sync::Arc, }; @@ -125,21 +125,10 @@ fn write_aggregated_scores( .peekable(); let has_edit_predictions = edit_predictions.peek().is_some(); let aggregated_result = EvaluationResult { - context: Scores::aggregate(successful.iter().map(|r| &r.context)), edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), generated_len: successful.iter().map(|r| r.generated_len).sum::() / successful.len(), - context_lines_found_in_context: successful - .iter() - .map(|r| r.context_lines_found_in_context) - .sum::() - / successful.len(), - context_lines_in_expected_patch: successful - .iter() - .map(|r| r.context_lines_in_expected_patch) - .sum::() - / successful.len(), }; writeln!(w, "\n{}", "-".repeat(80))?; @@ -261,11 +250,8 @@ fn write_eval_result( #[derive(Debug, Default)] pub struct EvaluationResult { pub edit_prediction: Option, - pub context: Scores, pub prompt_len: usize, pub generated_len: usize, - pub context_lines_in_expected_patch: usize, - pub context_lines_found_in_context: usize, } #[derive(Default, Debug)] @@ -363,14 +349,6 @@ impl std::fmt::Display for EvaluationResult { impl EvaluationResult { fn fmt_markdown(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - r#" -### Context Scores -{} -"#, - self.context.to_markdown(), - )?; if let Some(prediction) = &self.edit_prediction { write!( f, @@ -387,34 +365,18 @@ impl EvaluationResult { writeln!(f, "### Scores\n")?; writeln!( f, - " Prompt Generated RetrievedContext PatchContext TP FP FN Precision Recall F1" + " Prompt Generated TP FP FN Precision Recall F1" )?; writeln!( f, - "─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────" - )?; - writeln!( - f, - "Context Retrieval {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", - "", - "", - "", - "", - self.context.true_positives, - self.context.false_positives, - self.context.false_negatives, - self.context.precision() * 100.0, - self.context.recall() * 100.0, - self.context.f1_score() * 100.0 + "───────────────────────────────────────────────────────────────────────────────────────────────" )?; if let Some(edit_prediction) = &self.edit_prediction { writeln!( f, - "Edit Prediction {:<7} {:<9} {:<16} {:<16} {:<6} {:<6} {:<6} {:>10.2} {:>7.2} {:>7.2}", + "Edit Prediction {:<7} {:<9} {:<6} {:<6} {:<6} {:>9.2} {:>8.2} {:>7.2}", self.prompt_len, self.generated_len, - self.context_lines_found_in_context, - self.context_lines_in_expected_patch, edit_prediction.true_positives, edit_prediction.false_positives, edit_prediction.false_negatives, @@ -434,53 +396,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval ..Default::default() }; - let actual_context_lines: HashSet<_> = preds - .excerpts - .iter() - .flat_map(|excerpt| { - excerpt 
- .text - .lines() - .map(|line| format!("{}: {line}", excerpt.path.display())) - }) - .collect(); - - let mut false_positive_lines = actual_context_lines.clone(); - - for entry in &example.expected_context { - let mut best_alternative_score: Option = None; - - for alternative in &entry.alternatives { - let expected: HashSet<_> = alternative - .excerpts - .iter() - .flat_map(|excerpt| { - excerpt - .text - .lines() - .map(|line| format!("{}: {line}", excerpt.path.display())) - }) - .collect(); - - let scores = Scores::new(&expected, &actual_context_lines); - - false_positive_lines.retain(|line| !expected.contains(line)); - - if best_alternative_score - .as_ref() - .is_none_or(|best| scores.recall() > best.recall()) - { - best_alternative_score = Some(scores); - } - } - - let best_alternative = best_alternative_score.unwrap_or_default(); - eval_result.context.false_negatives += best_alternative.false_negatives; - eval_result.context.true_positives += best_alternative.true_positives; - } - - eval_result.context.false_positives = false_positive_lines.len(); - if predict { // todo: alternatives for patches let expected_patch = example @@ -493,25 +408,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) .map(|line| line.to_string()) .collect(); - let expected_context_lines = expected_patch - .iter() - .filter_map(|line| { - if let DiffLine::Context(str) = line { - Some(String::from(*str)) - } else { - None - } - }) - .collect::>(); - let actual_context_lines = preds - .excerpts - .iter() - .flat_map(|excerpt| excerpt.text.lines().map(ToOwned::to_owned)) - .collect::>(); - - let matched = expected_context_lines - .intersection(&actual_context_lines) - .count(); let actual_patch_lines = preds .diff @@ -522,8 +418,6 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .collect(); eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); - eval_result.context_lines_in_expected_patch = expected_context_lines.len(); - eval_result.context_lines_found_in_context = matched; } eval_result diff --git a/crates/zeta_cli/src/example.rs b/crates/zeta_cli/src/example.rs index 7dbe304a88b9ea024adab793fa782fd2f4bdf1c0..a9d4c4f47c5a05d4198b1cffaee51e14a122e88d 100644 --- a/crates/zeta_cli/src/example.rs +++ b/crates/zeta_cli/src/example.rs @@ -14,7 +14,6 @@ use anyhow::{Context as _, Result, anyhow}; use clap::ValueEnum; use cloud_zeta2_prompt::CURSOR_MARKER; use collections::HashMap; -use edit_prediction_context::Line; use futures::{ AsyncWriteExt as _, lock::{Mutex, OwnedMutexGuard}, @@ -53,7 +52,6 @@ pub struct Example { pub cursor_position: String, pub edit_history: String, pub expected_patch: String, - pub expected_context: Vec, } pub type ActualExcerpt = Excerpt; @@ -64,25 +62,6 @@ pub struct Excerpt { pub text: String, } -#[derive(Default, Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedContextEntry { - pub heading: String, - pub alternatives: Vec, -} - -#[derive(Default, Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedExcerptSet { - pub heading: String, - pub excerpts: Vec, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct ExpectedExcerpt { - pub path: PathBuf, - pub text: String, - pub required_lines: Vec, -} - #[derive(ValueEnum, Debug, Clone)] pub enum ExampleFormat { Json, @@ -132,7 +111,6 @@ impl NamedExample { cursor_position: String::new(), edit_history: String::new(), expected_patch: 
String::new(), - expected_context: Vec::new(), }, }; @@ -197,30 +175,10 @@ impl NamedExample { }; } Event::End(TagEnd::Heading(HeadingLevel::H3)) => { - let heading = mem::take(&mut text); - match current_section { - Section::ExpectedExcerpts => { - named.example.expected_context.push(ExpectedContextEntry { - heading, - alternatives: Vec::new(), - }); - } - _ => {} - } + mem::take(&mut text); } Event::End(TagEnd::Heading(HeadingLevel::H4)) => { - let heading = mem::take(&mut text); - match current_section { - Section::ExpectedExcerpts => { - let expected_context = &mut named.example.expected_context; - let last_entry = expected_context.last_mut().unwrap(); - last_entry.alternatives.push(ExpectedExcerptSet { - heading, - excerpts: Vec::new(), - }) - } - _ => {} - } + mem::take(&mut text); } Event::End(TagEnd::Heading(level)) => { anyhow::bail!("Unexpected heading level: {level}"); @@ -253,41 +211,7 @@ impl NamedExample { named.example.cursor_position = mem::take(&mut text); } Section::ExpectedExcerpts => { - let text = mem::take(&mut text); - for excerpt in text.split("\n…\n") { - let (mut text, required_lines) = extract_required_lines(&excerpt); - if !text.ends_with('\n') { - text.push('\n'); - } - - if named.example.expected_context.is_empty() { - named.example.expected_context.push(Default::default()); - } - - let alternatives = &mut named - .example - .expected_context - .last_mut() - .unwrap() - .alternatives; - - if alternatives.is_empty() { - alternatives.push(ExpectedExcerptSet { - heading: String::new(), - excerpts: vec![], - }); - } - - alternatives - .last_mut() - .unwrap() - .excerpts - .push(ExpectedExcerpt { - path: block_info.into(), - text, - required_lines, - }); - } + mem::take(&mut text); } Section::ExpectedPatch => { named.example.expected_patch = mem::take(&mut text); @@ -561,47 +485,6 @@ impl NamedExample { } } -fn extract_required_lines(text: &str) -> (String, Vec) { - const MARKER: &str = "[ZETA]"; - let mut new_text = String::new(); - let mut required_lines = Vec::new(); - let mut skipped_lines = 0_u32; - - for (row, mut line) in text.split('\n').enumerate() { - if let Some(marker_column) = line.find(MARKER) { - let mut strip_column = marker_column; - - while strip_column > 0 { - let prev_char = line[strip_column - 1..].chars().next().unwrap(); - if prev_char.is_whitespace() || ['/', '#'].contains(&prev_char) { - strip_column -= 1; - } else { - break; - } - } - - let metadata = &line[marker_column + MARKER.len()..]; - if metadata.contains("required") { - required_lines.push(Line(row as u32 - skipped_lines)); - } - - if strip_column == 0 { - skipped_lines += 1; - continue; - } - - line = &line[..strip_column]; - } - - new_text.push_str(line); - new_text.push('\n'); - } - - new_text.pop(); - - (new_text, required_lines) -} - async fn run_git(repo_path: &Path, args: &[&str]) -> Result { let output = smol::process::Command::new("git") .current_dir(repo_path) @@ -656,37 +539,6 @@ impl Display for NamedExample { )?; } - if !self.example.expected_context.is_empty() { - write!(f, "\n## {EXPECTED_CONTEXT_HEADING}\n\n")?; - - for entry in &self.example.expected_context { - write!(f, "\n### {}\n\n", entry.heading)?; - - let skip_h4 = - entry.alternatives.len() == 1 && entry.alternatives[0].heading.is_empty(); - - for excerpt_set in &entry.alternatives { - if !skip_h4 { - write!(f, "\n#### {}\n\n", excerpt_set.heading)?; - } - - for excerpt in &excerpt_set.excerpts { - write!( - f, - "`````{}{}\n{}`````\n\n", - excerpt - .path - .extension() - .map(|ext| format!("{} ", 
ext.to_string_lossy())) - .unwrap_or_default(), - excerpt.path.display(), - excerpt.text - )?; - } - } - } - } - Ok(()) } } @@ -707,38 +559,3 @@ pub async fn lock_repo(path: impl AsRef) -> OwnedMutexGuard<()> { .lock_owned() .await } - -#[cfg(test)] -mod tests { - use super::*; - use indoc::indoc; - use pretty_assertions::assert_eq; - - #[test] - fn test_extract_required_lines() { - let input = indoc! {" - zero - one // [ZETA] required - two - // [ZETA] something - three - four # [ZETA] required - five - "}; - - let expected_updated_input = indoc! {" - zero - one - two - three - four - five - "}; - - let expected_required_lines = vec![Line(1), Line(4)]; - - let (updated_input, required_lines) = extract_required_lines(input); - assert_eq!(updated_input, expected_updated_input); - assert_eq!(required_lines, expected_required_lines); - } -} diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index f87563cc34ca7631baf8195e42e4e3473f522659..d13f0710cdc4d16666594d25dc639d337fb6bdfc 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -128,8 +128,6 @@ pub struct PredictArguments { #[derive(Clone, Debug, Args)] pub struct PredictionOptions { - #[arg(long)] - use_expected_context: bool, #[clap(flatten)] zeta2: Zeta2Args, #[clap(long)] diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index a757a5faa0dbae95c4dcab58c76d50450b1d2e9f..8a1a4131fb684a5186b2111f9d922fa34d6972e1 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -1,4 +1,4 @@ -use crate::example::{ActualExcerpt, ExpectedExcerpt, NamedExample}; +use crate::example::{ActualExcerpt, NamedExample}; use crate::headless::ZetaCliAppState; use crate::paths::{CACHE_DIR, LATEST_EXAMPLE_RUN_DIR, RUN_DIR, print_run_data_dir}; use crate::{ @@ -7,16 +7,13 @@ use crate::{ use ::serde::Serialize; use anyhow::{Context, Result, anyhow}; use cloud_zeta2_prompt::{CURSOR_MARKER, write_codeblock}; -use collections::HashMap; use futures::StreamExt as _; use gpui::{AppContext, AsyncApp, Entity}; -use language::{Anchor, Buffer, Point}; use project::Project; use project::buffer_store::BufferStoreEvent; use serde::Deserialize; use std::fs; use std::io::{IsTerminal, Write}; -use std::ops::Range; use std::path::PathBuf; use std::sync::Arc; use std::sync::Mutex; @@ -204,15 +201,12 @@ pub async fn perform_predict( let mut result = result.lock().unwrap(); result.generated_len = response.chars().count(); - if !options.use_expected_context { - result.planning_search_time = Some( - search_queries_generated_at.unwrap() - start_time.unwrap(), - ); - result.running_search_time = Some( - search_queries_executed_at.unwrap() - - search_queries_generated_at.unwrap(), - ); - } + result.planning_search_time = + Some(search_queries_generated_at.unwrap() - start_time.unwrap()); + result.running_search_time = Some( + search_queries_executed_at.unwrap() + - search_queries_generated_at.unwrap(), + ); result.prediction_time = prediction_finished_at - prediction_started_at; result.total_time = prediction_finished_at - start_time.unwrap(); @@ -224,37 +218,10 @@ pub async fn perform_predict( } }); - if options.use_expected_context { - let context_excerpts_tasks = example - .example - .expected_context - .iter() - .flat_map(|section| { - section.alternatives[0].excerpts.iter().map(|excerpt| { - resolve_context_entry(project.clone(), excerpt.clone(), cx.clone()) - }) - }) - .collect::>(); - let context_excerpts_vec = - futures::future::try_join_all(context_excerpts_tasks).await?; - - let mut 
context_excerpts = HashMap::default(); - for (buffer, mut excerpts) in context_excerpts_vec { - context_excerpts - .entry(buffer) - .or_insert(Vec::new()) - .append(&mut excerpts); - } - - zeta.update(cx, |zeta, _cx| { - zeta.set_context(project.clone(), context_excerpts) - })?; - } else { - zeta.update(cx, |zeta, cx| { - zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) - })? - .await?; - } + zeta.update(cx, |zeta, cx| { + zeta.refresh_context(project.clone(), cursor_buffer.clone(), cursor_anchor, cx) + })? + .await?; } let prediction = zeta @@ -274,38 +241,6 @@ pub async fn perform_predict( anyhow::Ok(result) } -async fn resolve_context_entry( - project: Entity, - excerpt: ExpectedExcerpt, - mut cx: AsyncApp, -) -> Result<(Entity, Vec>)> { - let buffer = project - .update(&mut cx, |project, cx| { - let project_path = project.find_project_path(&excerpt.path, cx).unwrap(); - project.open_buffer(project_path, cx) - })? - .await?; - - let ranges = buffer.read_with(&mut cx, |buffer, _| { - let full_text = buffer.text(); - let offset = full_text - .find(&excerpt.text) - .expect("Expected context not found"); - let point = buffer.offset_to_point(offset); - excerpt - .required_lines - .iter() - .map(|line| { - let row = point.row + line.0; - let range = Point::new(row, 0)..Point::new(row + 1, 0); - buffer.anchor_after(range.start)..buffer.anchor_before(range.end) - }) - .collect() - })?; - - Ok((buffer, ranges)) -} - struct RunCache { cache_mode: CacheMode, example_run_dir: PathBuf, From 83f0a3fd1364bcb39fe4a69dd5a417aa6c54f89a Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Tue, 25 Nov 2025 14:00:31 -0500 Subject: [PATCH 0378/1030] Redact sensitive environment variables in LSP Logs: Server Info (#43480) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow-up to: - https://github.com/zed-industries/zed/pull/43436 - https://github.com/zed-industries/zed/pull/42831 The changes in #42831 resulted in a regression where environment variables in the Server Info view were no longer redact. The changes in #43436 were insufficient as I was still seeing sensitive values in Nightly e6fe95b4f2f676c7fc4a5f951ba7c721e7d22e8a (which includes #43436). CC: @SomeoneToIgnore (Hi! 
👋 Thanks for keeping this redaction functionality alive) Release Notes: - N/A --- crates/language_tools/src/lsp_log_view.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/language_tools/src/lsp_log_view.rs b/crates/language_tools/src/lsp_log_view.rs index e7586583704750b0c84832ecb8cb9ba8d5a9b5a1..4cf47cab079617d55aeeb959dcad116919a55609 100644 --- a/crates/language_tools/src/lsp_log_view.rs +++ b/crates/language_tools/src/lsp_log_view.rs @@ -340,11 +340,11 @@ impl LspLogView { * Configuration: {CONFIGURATION}", NAME = info.status.name, ID = info.id, - BINARY = info.status.binary.as_ref().map_or_else( - || "Unknown".to_string(), - |binary| serde_json::to_string_pretty(binary) - .unwrap_or_else(|e| format!("Failed to serialize binary info: {e:#}")) - ), + BINARY = info + .status + .binary + .as_ref() + .map_or_else(|| "Unknown".to_string(), |binary| format!("{:#?}", binary)), WORKSPACE_FOLDERS = info .status .workspace_folders From 1f9d5ef6849e482fc35dda552adc4c417cca1f0a Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Tue, 25 Nov 2025 16:49:16 -0300 Subject: [PATCH 0379/1030] Always display terminal cursor when blinking is disabled (#43487) Fixes an issue where the terminal cursor wouldn't always be displayed in the default `blink: "terminal_controlled"` mode unless the terminal requested cursor blinking. Release Notes: - N/A --- crates/terminal_view/src/terminal_view.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 7c8bda83be864353c35b0317efc8599456dca6e5..7b3e29ac9b0582d081a286539d973fe8f1a453c5 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -649,9 +649,10 @@ impl TerminalView { // When focused, check blinking settings and blink manager state match TerminalSettings::get_global(cx).blinking { TerminalBlink::Off => true, - TerminalBlink::On | TerminalBlink::TerminalControlled => { - self.blink_manager.read(cx).visible() + TerminalBlink::TerminalControlled => { + !self.blinking_terminal_enabled || self.blink_manager.read(cx).visible() } + TerminalBlink::On => self.blink_manager.read(cx).visible(), } } From d49044328636181131c1d3d3822597fbcf111503 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Tue, 25 Nov 2025 16:06:46 -0500 Subject: [PATCH 0380/1030] Refresh collaboration docs (#43489) Most of the features for collab were previously listed in the section that was written for private calls. Most of this PR is moving that content over to the channel documentation and adapting it slightly. Private calls have similar collaboration, so we can just point back to the channels doc in that section and keep it pretty thin / DRY. 
Release Notes: - N/A --- docs/src/SUMMARY.md | 2 +- docs/src/collaboration/channels.md | 113 ++++++++++++++---- .../contacts-and-private-calls.md | 18 +++ docs/src/collaboration/overview.md | 12 +- docs/src/collaboration/private-calls.md | 99 --------------- 5 files changed, 120 insertions(+), 124 deletions(-) create mode 100644 docs/src/collaboration/contacts-and-private-calls.md delete mode 100644 docs/src/collaboration/private-calls.md diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2f8bcd2ce8be00790866025d5de687d32aee7dcf..5974b4f68d8951fa62b6bfaa625db5fdb38899fd 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -36,7 +36,7 @@ - [Code Completions](./completions.md) - [Collaboration](./collaboration/overview.md) - [Channels](./collaboration/channels.md) - - [Private Calls](./collaboration/private-calls.md) + - [Contacts and Private Calls](./collaboration/contacts-and-private-calls.md) - [Git](./git.md) - [Debugger](./debugger.md) - [Diagnostics](./diagnostics.md) diff --git a/docs/src/collaboration/channels.md b/docs/src/collaboration/channels.md index bc723d73dedf16d2f75179f9203cdbf473bebbbb..b20cff2a12ced63cf75c13bfd8b63d25f2c53c50 100644 --- a/docs/src/collaboration/channels.md +++ b/docs/src/collaboration/channels.md @@ -1,50 +1,123 @@ # Channels -## Overview - Channels provide a way to streamline collaborating for software engineers in many ways, but particularly: - Pairing – when working on something together, you both have your own screen, mouse, and keyboard. -- Mentoring – it’s easy to jump in to someone else’s context, and help them get unstuck, without the friction of pushing code up. +- Mentoring – it's easy to jump in to someone else's context, and help them get unstuck, without the friction of pushing code up. - Refactoring – you can have multiple people join in on large refactoring without fear of conflict. - Ambient awareness – you can see what everyone else is working on with no need for status emails or meetings. -## Channels - -To open the collaboration panel hit {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus`. +Each channel corresponds to an ongoing project or work-stream. +You can see who's in a channel as their avatars will show up in the sidebar. +This makes it easy to see what everyone is doing and where to find them if needed. -Each channel corresponds to an ongoing project or work-stream. You can see who’s in a channel as their avatars will show up in the sidebar. This makes it easy to see what everyone is doing and where to find them if needed. +Create a channel by clicking the `+` icon next to the `Channels` text in the collab panel. +Create a subchannel by right clicking an existing channel and selecting `New Subchannel`. -You can create as many channels as you need. As in the example above, you can mix channels for your day job, as well as side-projects in one instance of Zed. +You can mix channels for your day job, as well as side-projects in your collab panel. Joining a channel adds you to a shared room where you can work on projects together. -## Sharing projects +_[Join our channel tree to get an idea of how you can organize yours.](https://zed.dev/community-links)_ + +## Inviting People + +By default, channels you create can only be accessed by you. +You can invite collaborators by right clicking and selecting `Manage members`. + +When you have subchannels nested under others, permissions are inherited. 
+For instance, adding people to the top-level channel in your channel tree will automatically give them access to its subchannels. + +Once you have added someone, they can either join your channel by clicking on it in their Zed sidebar, or you can share the link to the channel so that they can join directly. + +## Voice Chat -After joining a channel, you can `Share` a project with the other people there. This will enable them to edit the code hosted on your machine as though they had it checked out locally. +You can mute/unmute your microphone via the microphone icon in the upper right-hand side of the window. -When you are editing someone else’s project, you still have the full power of the editor at your fingertips, you can jump to definitions, use the AI assistant, and see any diagnostic errors. This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. And, because you have your own config running, it feels like you’re using your own machine. +> Note: When joining a channel, Zed will automatically share your microphone with other users in the call, if your OS allows it. +> If you'd prefer your microphone to be off when joining a channel, you can do so via the [`mute_on_join`](../configuring-zed.md#calls) setting. -See [our collaboration documentation](./private-calls.md) for more details about how this works. +## Sharing Projects -## Notes +After joining a channel, you can share a project over the channel via the `Share` button in the upper right-hand side of the window. +This will allow channel members to edit the code hosted on your machine as though they had it checked out locally. -Each channel has a notes file associated with it to keep track of current status, new ideas, or to collaborate on building out the design for the feature that you’re working on before diving into code. +When you are editing someone else's project, you still have the full power of the editor at your fingertips; you can jump to definitions, use the AI assistant, and see any diagnostic errors. +This is extremely powerful for pairing, as one of you can be implementing the current method while the other is reading and researching the correct solution to the next problem. +And, because you have your own config running, it feels like you're using your own machine. + +We aim to eliminate the distinction between local and remote projects as much as possible. +Collaborators can open, edit, and save files, perform searches, interact with the language server, etc. +Guests have a read-only view of the project, including access to language server info. + +### Unsharing a Project + +You can remove a project from a channel by clicking on the `Unshare` button in the title bar. + +Collaborators that are currently in that project will be disconnected from the project and will not be able to rejoin it unless you share it again. + +## Channel Notes + +Each channel has a Markdown notes file associated with it to keep track of current status, new ideas, or to collaborate on building out the design for the feature that you're working on before diving into code. This is similar to a Google Doc, except powered by Zed's collaborative software and persisted to our servers. -## Inviting people +Open the channel notes by clicking on the document icon to the right of the channel name in the collaboration panel. -By default, channels you create can only be accessed by you. 
You can invite collaborators by right clicking and selecting `Manage members`. +> Note: You can view a channel's notes without joining the channel, if you'd just like to read up on what has been written. -When you have channels nested under each other, permissions are inherited. For instance, in the example above, we only need to add people to the `#zed` channel, and they will automatically gain access to `#core-editor`, `#new-languages`, and `#stability`. +## Following Collaborators -Once you have added someone, they can either join your channel by clicking on it in their Zed sidebar, or you can share the link to the channel so that they can join directly. +To follow a collaborator, click on their avatar in the top left of the title bar. +You can also cycle through collaborators using {#kb workspace::FollowNextCollaborator} or `workspace: follow next collaborator` in the command palette. + +When you join a project, you'll immediately start following the collaborator that invited you. + +When you are in a pane that is following a collaborator, you will: + +- follow their cursor and scroll position +- follow them to other files in the same project +- instantly swap to viewing their screenshare in that pane, if they are sharing their screen and leave the project + +To stop following, simply move your mouse or make an edit via your keyboard. + +### How Following Works + +Following is confined to a particular pane. +When a pane is following a collaborator, it is outlined in their cursor color. + +Avatars of collaborators in the same project as you are in color, and have a cursor color. +Collaborators in other projects are shown in gray. + +This pane-specific behavior allows you to follow someone in one pane while navigating independently in another and can be an effective layout for some collaboration styles. + +### Following a Terminal + +You can follow what a collaborator is doing in their terminal by having them share their screen and following it. + +In the future, we plan to allow you to collaborate in the terminal directly in a shared project. + +## Screen Sharing + +Share your screen with collaborators in the current channel by clicking on the `Share screen` (monitor icon) button in the top right of the title bar. +If you have multiple displays, you can choose which one to share via the chevron to the right of the monitor icon. + +After you've shared your screen, others can click on the `Screen` entry under your name in the collaboration panel to open a tab that always keeps it visible. +If they are following you, Zed will automatically switch between following your cursor in their Zed instance and your screen share, depending on whether you are focused on Zed or another application, like a web browser. + +> Note: Collaborators can see your entire screen when you are screen sharing, so be careful not to share anything you don't want to share. +> Remember to stop screen sharing when you are finished. ## Livestreaming & Guests -A Channel can also be made Public. This allows anyone to join the channel by clicking on the link. +A Channel can also be made Public. +This allows anyone to join the channel by clicking on the link. Guest users in channels can hear and see everything that is happening, and have read only access to projects and channel notes. -If you'd like to invite a guest to participate in a channel for the duration of a call you can do so by right clicking on them in the Collaboration Panel. 
"Allowing Write Access" will allow them to edit any projects shared into the call, and to use their microphone and share their screen if they wish. +If you'd like to invite a guest to participate in a channel for the duration of a call you can do so by right clicking on them in the Collaboration Panel. +"Allowing Write Access" will allow them to edit any projects shared into the call, and to use their microphone and share their screen if they wish. + +## Leaving a Call + +You can leave a channel by clicking on the `Leave call` button in the upper right-hand side of the window. diff --git a/docs/src/collaboration/contacts-and-private-calls.md b/docs/src/collaboration/contacts-and-private-calls.md new file mode 100644 index 0000000000000000000000000000000000000000..e5660a34b643b09272c1a1bb02e026c1a4bf8e03 --- /dev/null +++ b/docs/src/collaboration/contacts-and-private-calls.md @@ -0,0 +1,18 @@ +# Contacts and Private Calls + +Zed allows you to have private calls / collaboration sessions with those in your contacts. +These calls can be one-on-ones or contain any number of users from your contacts. + +## Adding a Contact + +1. In the collaboration panel, click the `+` button next to the `Contacts` section +1. Search for the contact using their GitHub handle + _Note: The contact must be an existing Zed user who has completed the GitHub authentication flow._ +1. Your contact will receive a notification. + Once they accept, you'll both appear in each other's contact list. + +## Private Calls + +Simply click on a contact to start a private call. + +_Aside from a few additional features (channel notes, etc.), collaboration in private calls is largely the same as it is in [channels](./channels.md)._ diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md index 8acbecc372cecee7fb87d40685b3a08eb6e046f6..fae16290fe0d9eedbff8a27a9e510964bb5aba84 100644 --- a/docs/src/collaboration/overview.md +++ b/docs/src/collaboration/overview.md @@ -2,12 +2,16 @@ At Zed, we believe that great things are built by great people working together. We have designed Zed to help individuals work faster and help teams of people work together more effectively. -Zed has two mechanisms for collaborating: -1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. -1. [Private Calls](./private-calls.md): Ad-hoc private collaboration with those in your contacts list. +In Zed, all collaboration happens in the collaboration panel, which can be opened via {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus` from the command palette. +You will need to [sign in](../authentication.md#signing-in) in order to access features within the collaboration panel. + +## Collaboration panel -You will need to [sign in](../authentication.md#signing-in) in order to begin using Zed's collaboration features. +The collaboration panel is broken down into two sections: + +1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. +1. [Contacts](./contacts-and-private-calls.md): Ad-hoc private collaboration with those in your contacts list. 
--- diff --git a/docs/src/collaboration/private-calls.md b/docs/src/collaboration/private-calls.md deleted file mode 100644 index 8ea4790688f055074c5afcf4eb1d6d63ee49d868..0000000000000000000000000000000000000000 --- a/docs/src/collaboration/private-calls.md +++ /dev/null @@ -1,99 +0,0 @@ -# Private Calls - -## Adding a collaborator to a call - -Before you can collaborate, you'll need to add a collaborator to your contacts. To do this: - -1. Open the contacts menu by clicking on the `Show contacts menu` button in the upper right-hand corner of the window or by running `collab: toggle contacts menu` (`cmd-shift-c`). -2. Click the add button to the right of the search box. -3. Search for the contact you want to add using their GitHub handle. Note: the person you are trying to add as a contact must be an existing Zed user. - -### Inviting a collaborator - -You can add an existing Zed user as a contact from the contacts menu, deployed from the `Show contacts menu` button in the upper right-hand corner of the window or by `collab: toggle contacts menu` (`cmd-shift-c`) and then clicking the `Search for new contact` button to the right of the search box. - -![Inviting a collaborator to the current project](https://zed.dev/img/collaboration/add-a-collaborator.png) - -When you invite a collaborator to a project not in a call they will receive a notification to join, and a new call is created. - -![Receiving an invite to join a call](https://zed.dev/img/collaboration/receiving-an-invite.jpg) - -### Inviting non-Zed users - -If someone you want to collaborate with has not yet signed up for Zed, they will need to [download the app](https://zed.dev/download) and sign in for the first time before you can add them. Identity is tied to GitHub accounts, so new users will need to authenticate with GitHub in order to sign into Zed. - -### Voice chat - -When joining a call, Zed will automatically share your microphone with other users in the call, if your OS allows it. This isn't tied to your project. You can disable this for your client via the [`mute_on_join`](../configuring-zed.md#calls) setting. - -## Collaborating on a project - -### Share a project - -When you invite a collaborator to join your project, a new call begins. Your Zed windows will show the call participants in the title bar of the window. - -![A new Zed call with two collaborators](https://zed.dev/img/collaboration/new-call.png) - -Collaborators in the same project as you are in color, and have a cursor color. Collaborators in other projects are shown in gray. Collaborators that have access to the current project will have their own cursor color under their avatar. - -We aim to eliminate the distinction between local and remote projects as much as possible. Collaborators can open, edit, and save files, perform searches, interact with the language server, etc. Guests have a read-only view of the project, including access to language server info. - -#### Unshared Projects - -If a collaborator is currently in a project that is not shared, you will not be able to jump to their project or follow them until they either share the project or return to a project that is shared. - -If you are in a project that isn't shared, others will not be able to join it or see its contents. - -### Follow a collaborator - -To follow a collaborator, click on their avatar in the top right of the window. You can also cycle through collaborators using `workspace: follow next collaborator` (`ctrl-alt-cmd-f`). 
- -When you join a project, you'll immediately start following the collaborator that invited you. - -![Automatically following the person inviting us to a project](https://zed.dev/img/collaboration/joining-a-call.png) - -When you are in a pane that is following a collaborator, you will: - -- follow their cursor and scroll position -- follow them to other files in the same project -- instantly swap to viewing their screen in that pane, if they are sharing their screen and leave the project - -If you move your cursor or make an edit in that pane, you will stop following. - -To start following again, you can click on a collaborator's avatar or cycle through following different participants by pressing `workspace: follow next collaborator` (`ctrl-alt-cmd-f`). - -#### How following works - -Following is confined to a particular pane. When a pane is following a collaborator, it is outlined in their cursor color. - -This pane-specific behavior allows you to follow someone in one pane while navigating independently in another and can be an effective layout for some collaboration styles. - -### Sharing your screen - -Share your screen with collaborators in the current call by clicking on the `Share screen` button in the top right of the window. - -Collaborators will see your screen if they are following you and you start viewing a window outside Zed or a project that is not shared. - -Collaborators can see your entire screen when you are screen sharing, so be careful not to share anything you don't want to share. Remember to stop screen sharing when you are finished. - -Call participants can open a dedicated tab for your screen share by opening the contacts menu in the top right and clicking on the `Screen` entry if you are sharing your screen. - -### Adding a project - -You can add a project to a call by clicking on the `Share` button next to the project name in the title bar. - -### Removing a project - -You can remove a project from a call by clicking on the `Unshare` button next to the project name in the title bar. - -Collaborators that are currently in that project will be disconnected from the project and will not be able to rejoin it unless you share it again. - -### Following a collaborator's terminal - -You can follow what a collaborator is doing in their terminal by having them share their screen and following it. - -In the future, we plan to allow you to collaborate in the terminal directly in a shared project. - -### Leave call - -You can leave a call by opening the contacts menu in the top right and clicking on the `Leave call` button. From 877763b960f5eeb387cfb5513084f99b578c5c87 Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Tue, 25 Nov 2025 17:08:39 -0500 Subject: [PATCH 0381/1030] More tweaks to collaboration docs (#43494) Release Notes: - N/A --- docs/src/collaboration/channels.md | 7 +++---- .../src/collaboration/contacts-and-private-calls.md | 13 ++++++++++--- docs/src/collaboration/overview.md | 5 ++++- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/docs/src/collaboration/channels.md b/docs/src/collaboration/channels.md index b20cff2a12ced63cf75c13bfd8b63d25f2c53c50..ebc2760275c7e3382dfabeac296dfede1b58d268 100644 --- a/docs/src/collaboration/channels.md +++ b/docs/src/collaboration/channels.md @@ -18,7 +18,7 @@ You can mix channels for your day job, as well as side-projects in your collab p Joining a channel adds you to a shared room where you can work on projects together. 
-_[Join our channel tree to get an idea of how you can organize yours.](https://zed.dev/community-links)_ +_Join [our channel tree](https://zed.dev/channel/zed-283) to get an idea of how you can organize yours._ ## Inviting People @@ -93,9 +93,8 @@ This pane-specific behavior allows you to follow someone in one pane while navig ### Following a Terminal -You can follow what a collaborator is doing in their terminal by having them share their screen and following it. - -In the future, we plan to allow you to collaborate in the terminal directly in a shared project. +Following is not currently supported in the terminal in the way it is supported in the editor. +As a workaround, collaborators can share their screen and you can follow that instead. ## Screen Sharing diff --git a/docs/src/collaboration/contacts-and-private-calls.md b/docs/src/collaboration/contacts-and-private-calls.md index e5660a34b643b09272c1a1bb02e026c1a4bf8e03..f011fa2c672c2e6e563e65172705115802262a7e 100644 --- a/docs/src/collaboration/contacts-and-private-calls.md +++ b/docs/src/collaboration/contacts-and-private-calls.md @@ -6,13 +6,20 @@ These calls can be one-on-ones or contain any number of users from your contacts ## Adding a Contact 1. In the collaboration panel, click the `+` button next to the `Contacts` section -1. Search for the contact using their GitHub handle - _Note: The contact must be an existing Zed user who has completed the GitHub authentication flow._ +1. Search for the contact using their GitHub handle.\ + _Note: Your contact must be an existing Zed user who has completed the GitHub authentication sign-in flow._ 1. Your contact will receive a notification. Once they accept, you'll both appear in each other's contact list. ## Private Calls -Simply click on a contact to start a private call. +To start up a private call... + +1. Click the `...` menu next to an online contact's name in the collaboration panel. +1. Click `Call ` + +Once you've begun a private call, you can add other online contacts by clicking on their name in the collaboration panel. + +--- _Aside from a few additional features (channel notes, etc.), collaboration in private calls is largely the same as it is in [channels](./channels.md)._ diff --git a/docs/src/collaboration/overview.md b/docs/src/collaboration/overview.md index fae16290fe0d9eedbff8a27a9e510964bb5aba84..719aa56ee3b62c8562cd03ff8dd29faf25f2df5b 100644 --- a/docs/src/collaboration/overview.md +++ b/docs/src/collaboration/overview.md @@ -4,6 +4,7 @@ At Zed, we believe that great things are built by great people working together. We have designed Zed to help individuals work faster and help teams of people work together more effectively. In Zed, all collaboration happens in the collaboration panel, which can be opened via {#kb collab_panel::ToggleFocus} or `collab panel: toggle focus` from the command palette. + You will need to [sign in](../authentication.md#signing-in) in order to access features within the collaboration panel. ## Collaboration panel @@ -11,7 +12,7 @@ You will need to [sign in](../authentication.md#signing-in) in order to access f The collaboration panel is broken down into two sections: 1. [Channels](./channels.md): Ongoing project rooms where team members can share projects, collaborate on code, and maintain ambient awareness of what everyone is working on. -1. [Contacts](./contacts-and-private-calls.md): Ad-hoc private collaboration with those in your contacts list. +1. 
[Contacts and Private Calls](./contacts-and-private-calls.md): Your contacts list for ad-hoc private collaboration. --- @@ -19,3 +20,5 @@ The collaboration panel is broken down into two sections: > Since sharing a project gives them access to your local file system, you should not share projects with people you do not trust; they could potentially do some nasty things. > > In the future, we will do more to prevent this type of access beyond the shared project and add more control over what collaborators can do, but for now, only collaborate with people you trust. + +See our [Data and Privacy FAQs](https://zed.dev/faq#data-and-privacy) for collaboration. From 53eb35f5b2f6eb21ce9c5f4cb8fd511481bed83c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Nov 2025 14:17:27 -0800 Subject: [PATCH 0382/1030] Add GPT 5.1 to Zed BYOK (#43492) Release Notes: - Added support for OpenAI's GPT 5.1 model to BYOK --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- crates/language_models/src/provider/open_ai.rs | 12 ++++++------ crates/open_ai/src/open_ai.rs | 9 ++++++++- 4 files changed, 17 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2698d882403b159f8ed350c59cc8e98ab467360d..533839bdd306e1d9c1e75e75dd2b26b80257d534 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17313,8 +17313,8 @@ dependencies = [ [[package]] name = "tiktoken-rs" -version = "0.8.0" -source = "git+https://github.com/zed-industries/tiktoken-rs?rev=30c32a4522751699adeda0d5840c71c3b75ae73d#30c32a4522751699adeda0d5840c71c3b75ae73d" +version = "0.9.1" +source = "git+https://github.com/zed-industries/tiktoken-rs?rev=7249f999c5fdf9bf3cc5c288c964454e4dac0c00#7249f999c5fdf9bf3cc5c288c964454e4dac0c00" dependencies = [ "anyhow", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index ab18418939e1b7100684e3c0acec277e7ec75a88..4377b120450c8da185820ebb8c44a334ba8a3778 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -655,7 +655,7 @@ sysinfo = "0.37.0" take-until = "0.2.0" tempfile = "3.20.0" thiserror = "2.0.12" -tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "30c32a4522751699adeda0d5840c71c3b75ae73d" } +tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "7249f999c5fdf9bf3cc5c288c964454e4dac0c00" } time = { version = "0.3", features = [ "macros", "parsing", diff --git a/crates/language_models/src/provider/open_ai.rs b/crates/language_models/src/provider/open_ai.rs index 9d828d188586b92e3f47a1345e070f33af380d48..46cea34e3e01cb0f8ad0f859827881f3ec74cad7 100644 --- a/crates/language_models/src/provider/open_ai.rs +++ b/crates/language_models/src/provider/open_ai.rs @@ -277,6 +277,7 @@ impl LanguageModel for OpenAiLanguageModel { | Model::Five | Model::FiveMini | Model::FiveNano + | Model::FivePointOne | Model::O1 | Model::O3 | Model::O4Mini => true, @@ -644,7 +645,6 @@ pub fn count_open_ai_tokens( ) -> BoxFuture<'static, Result> { cx.background_spawn(async move { let messages = collect_tiktoken_messages(request); - match model { Model::Custom { max_tokens, .. 
} => { let model = if max_tokens >= 100_000 { @@ -672,11 +672,11 @@ pub fn count_open_ai_tokens( | Model::O1 | Model::O3 | Model::O3Mini - | Model::O4Mini => tiktoken_rs::num_tokens_from_messages(model.id(), &messages), - // GPT-5 models don't have tiktoken support yet; fall back on gpt-4o tokenizer - Model::Five | Model::FiveMini | Model::FiveNano => { - tiktoken_rs::num_tokens_from_messages("gpt-4o", &messages) - } + | Model::O4Mini + | Model::Five + | Model::FiveMini + | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages), // GPT-5.1 doesn't have tiktoken support yet; fall back on gpt-4o tokenizer + Model::FivePointOne => tiktoken_rs::num_tokens_from_messages("gpt-5", &messages), } .map(|tokens| tokens as u64) }) diff --git a/crates/open_ai/src/open_ai.rs b/crates/open_ai/src/open_ai.rs index aaeee01c9c74f8592ccfffa01893f9333f120e89..6fdb393c9a13c7ff6a6981f949b4d0c865b9bff8 100644 --- a/crates/open_ai/src/open_ai.rs +++ b/crates/open_ai/src/open_ai.rs @@ -85,7 +85,8 @@ pub enum Model { FiveMini, #[serde(rename = "gpt-5-nano")] FiveNano, - + #[serde(rename = "gpt-5.1")] + FivePointOne, #[serde(rename = "custom")] Custom { name: String, @@ -121,6 +122,7 @@ impl Model { "gpt-5" => Ok(Self::Five), "gpt-5-mini" => Ok(Self::FiveMini), "gpt-5-nano" => Ok(Self::FiveNano), + "gpt-5.1" => Ok(Self::FivePointOne), invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"), } } @@ -142,6 +144,7 @@ impl Model { Self::Five => "gpt-5", Self::FiveMini => "gpt-5-mini", Self::FiveNano => "gpt-5-nano", + Self::FivePointOne => "gpt-5.1", Self::Custom { name, .. } => name, } } @@ -163,6 +166,7 @@ impl Model { Self::Five => "gpt-5", Self::FiveMini => "gpt-5-mini", Self::FiveNano => "gpt-5-nano", + Self::FivePointOne => "gpt-5.1", Self::Custom { name, display_name, .. } => display_name.as_ref().unwrap_or(name), @@ -186,6 +190,7 @@ impl Model { Self::Five => 272_000, Self::FiveMini => 272_000, Self::FiveNano => 272_000, + Self::FivePointOne => 400_000, Self::Custom { max_tokens, .. } => *max_tokens, } } @@ -210,6 +215,7 @@ impl Model { Self::Five => Some(128_000), Self::FiveMini => Some(128_000), Self::FiveNano => Some(128_000), + Self::FivePointOne => Some(128_000), } } @@ -237,6 +243,7 @@ impl Model { | Self::FourPointOneNano | Self::Five | Self::FiveMini + | Self::FivePointOne | Self::FiveNano => true, Self::O1 | Self::O3 | Self::O3Mini | Self::O4Mini | Model::Custom { .. } => false, } From 6548eb74f19493f3a58c55ab4187d29c3c1f2958 Mon Sep 17 00:00:00 2001 From: John Tur Date: Tue, 25 Nov 2025 18:05:59 -0500 Subject: [PATCH 0383/1030] Upgrade `python-environment-tools` (#43496) Fixes https://github.com/zed-industries/zed/issues/42554 Fixes https://github.com/zed-industries/zed/issues/43383 Release Notes: - python: Added support for detecting uv workspaces as toolchains. - windows: Fixed console windows sometimes appearing when opening Python files. 
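For context, a priority list like the one extended in `crates/languages/src/python.rs` (see the diff further down in this patch) is typically consumed by ranking detected environments and picking the best-ranked one. The sketch below is illustrative only — the enum and list are simplified stand-ins, not the actual `pet`/Zed types:

```rust
// Illustrative sketch: pick the preferred Python environment by its position
// in a priority list. Lower index means higher priority; unknown kinds last.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum EnvKind {
    UvWorkspace,
    Uv,
    Poetry,
    Pipenv,
    Venv,
    Conda,
}

const ENV_PRIORITY: &[EnvKind] = &[
    EnvKind::UvWorkspace,
    EnvKind::Uv,
    EnvKind::Poetry,
    EnvKind::Pipenv,
    EnvKind::Venv,
    EnvKind::Conda,
];

fn priority(kind: EnvKind) -> usize {
    ENV_PRIORITY
        .iter()
        .position(|k| *k == kind)
        .unwrap_or(usize::MAX)
}

fn pick_preferred(detected: &[EnvKind]) -> Option<EnvKind> {
    detected.iter().copied().min_by_key(|k| priority(*k))
}

fn main() {
    // With this upgrade, a uv environment outranks a plain venv or conda env.
    let detected = [EnvKind::Conda, EnvKind::Venv, EnvKind::Uv];
    assert_eq!(pick_preferred(&detected), Some(EnvKind::Uv));
}
```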
--- Cargo.lock | 61 +++++++++++++++++++++------------- Cargo.toml | 16 ++++----- crates/languages/src/python.rs | 4 +++ 3 files changed, 49 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 533839bdd306e1d9c1e75e75dd2b26b80257d534..38b7a6939878fad9bfa259ee03189e018ef507c9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11530,7 +11530,7 @@ dependencies = [ [[package]] name = "pet" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "clap", "env_logger 0.10.2", @@ -11555,6 +11555,7 @@ dependencies = [ "pet-python-utils", "pet-reporter", "pet-telemetry", + "pet-uv", "pet-venv", "pet-virtualenv", "pet-virtualenvwrapper", @@ -11567,7 +11568,7 @@ dependencies = [ [[package]] name = "pet-conda" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11586,7 +11587,7 @@ dependencies = [ [[package]] name = "pet-core" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "clap", "lazy_static", @@ -11601,7 +11602,7 @@ dependencies = [ [[package]] name = "pet-env-var-path" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11617,7 +11618,7 @@ dependencies = [ [[package]] name = "pet-fs" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11626,7 +11627,7 @@ dependencies = [ [[package]] name = "pet-global-virtualenvs" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11639,7 +11640,7 @@ dependencies = [ [[package]] name = "pet-homebrew" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11657,7 +11658,7 @@ dependencies = [ [[package]] name = "pet-jsonrpc" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "log", @@ -11670,7 +11671,7 @@ dependencies = [ [[package]] name = "pet-linux-global-python" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11683,7 +11684,7 @@ dependencies = [ [[package]] name = "pet-mac-commandlinetools" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11696,7 +11697,7 @@ dependencies = [ [[package]] name = "pet-mac-python-org" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11709,7 +11710,7 @@ dependencies = [ [[package]] name = "pet-mac-xcode" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11722,7 +11723,7 @@ dependencies = [ [[package]] name = "pet-pipenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11735,7 +11736,7 @@ dependencies = [ [[package]] name = "pet-pixi" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11747,7 +11748,7 @@ dependencies = [ [[package]] name = "pet-poetry" version = "0.1.0" -source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "base64 0.22.1", "lazy_static", @@ -11768,7 +11769,7 @@ dependencies = [ [[package]] name = "pet-pyenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11786,7 +11787,7 @@ dependencies = [ [[package]] name = "pet-python-utils" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11803,7 +11804,7 @@ dependencies = [ [[package]] name = "pet-reporter" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "log", @@ -11817,7 +11818,7 @@ dependencies = [ [[package]] name = "pet-telemetry" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "env_logger 0.10.2", "lazy_static", @@ -11829,10 +11830,22 @@ dependencies = [ "regex", ] +[[package]] +name = "pet-uv" +version = "0.1.0" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" +dependencies = [ + "log", + "pet-core", + "pet-python-utils", + "serde", + "toml 0.9.8", +] + [[package]] name = "pet-venv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11844,7 +11857,7 @@ dependencies = [ [[package]] name = "pet-virtualenv" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11856,7 +11869,7 @@ dependencies = [ [[package]] name = "pet-virtualenvwrapper" version = "0.1.0" -source = 
"git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "log", "msvc_spectre_libs", @@ -11869,7 +11882,7 @@ dependencies = [ [[package]] name = "pet-windows-registry" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", @@ -11887,7 +11900,7 @@ dependencies = [ [[package]] name = "pet-windows-store" version = "0.1.0" -source = "git+https://github.com/microsoft/python-environment-tools.git?rev=e97b9508befa0062929da65a01054d25c4be861c#e97b9508befa0062929da65a01054d25c4be861c" +source = "git+https://github.com/microsoft/python-environment-tools.git?rev=1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da#1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" dependencies = [ "lazy_static", "log", diff --git a/Cargo.toml b/Cargo.toml index 4377b120450c8da185820ebb8c44a334ba8a3778..05ea7bceb818e33f8b550269f00c305ce6d7be0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -583,14 +583,14 @@ partial-json-fixer = "0.5.3" parse_int = "0.9" pciid-parser = "0.8.0" pathdiff = "0.2" -pet = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-conda = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-core = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } -pet-virtualenv = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "e97b9508befa0062929da65a01054d25c4be861c" } +pet = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-conda = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-core = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-fs = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-pixi = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-poetry = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-reporter = { git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } +pet-virtualenv = { 
git = "https://github.com/microsoft/python-environment-tools.git", rev = "1e86914c3ce2f3a08c0cedbcb0615a7f9fa7a5da" } portable-pty = "0.9.0" postage = { version = "0.5", features = ["futures-traits"] } pretty_assertions = { version = "1.3.0", features = ["unstable"] } diff --git a/crates/languages/src/python.rs b/crates/languages/src/python.rs index 03ce559b87bb5f318758735c5903bfc51b7c1267..a451afa6f1f3fcc5e4aa2135611fa37fa2f0f39e 100644 --- a/crates/languages/src/python.rs +++ b/crates/languages/src/python.rs @@ -991,6 +991,8 @@ fn python_env_kind_display(k: &PythonEnvironmentKind) -> &'static str { PythonEnvironmentKind::VirtualEnvWrapper => "virtualenvwrapper", PythonEnvironmentKind::WindowsStore => "global (Windows Store)", PythonEnvironmentKind::WindowsRegistry => "global (Windows Registry)", + PythonEnvironmentKind::Uv => "uv", + PythonEnvironmentKind::UvWorkspace => "uv (Workspace)", } } @@ -998,6 +1000,8 @@ pub(crate) struct PythonToolchainProvider; static ENV_PRIORITY_LIST: &[PythonEnvironmentKind] = &[ // Prioritize non-Conda environments. + PythonEnvironmentKind::UvWorkspace, + PythonEnvironmentKind::Uv, PythonEnvironmentKind::Poetry, PythonEnvironmentKind::Pipenv, PythonEnvironmentKind::VirtualEnvWrapper, From 98e369285bd8c38dba9f50ca77b2e0dcb86811c5 Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Tue, 25 Nov 2025 18:31:52 -0500 Subject: [PATCH 0384/1030] languages: Recognize .clang-format as YAML (#43469) Clang-Format uses uses a YAML config file format. Use YAML language by default for `.clang-format` and `_clang-format` filenames. ([source](https://clang.llvm.org/docs/ClangFormatStyleOptions.html)) Add `#yaml-language-server: $schema` to `.clang-format` example in C language docs. Release Notes: - Added support for identifying. `.clang-format` files as YAML by default --- crates/languages/src/yaml/config.toml | 2 +- docs/src/languages/c.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/yaml/config.toml b/crates/languages/src/yaml/config.toml index e941497bf3f3ea56e7c5188fde3e8708cd36bb8a..8834b3205af810c26fc9a8835f1c2afe7a185d8c 100644 --- a/crates/languages/src/yaml/config.toml +++ b/crates/languages/src/yaml/config.toml @@ -1,6 +1,6 @@ name = "YAML" grammar = "yaml" -path_suffixes = ["yml", "yaml", "pixi.lock"] +path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format"] line_comments = ["# "] autoclose_before = ",]}" brackets = [ diff --git a/docs/src/languages/c.md b/docs/src/languages/c.md index 7f6e0ba6b2ed24dd958c02a7606e2a569d08f8f1..565b0b5acbef78a23722020dcbad9300748dbb16 100644 --- a/docs/src/languages/c.md +++ b/docs/src/languages/c.md @@ -27,9 +27,10 @@ By default clang and gcc will recognize `*.C` and `*.H` (uppercase extensions) a ## Formatting -By default Zed will use the `clangd` language server for formatting C code. The Clangd is the same as the `clang-format` CLI tool. To configure this you can add a `.clang-format` file. For example: +By default Zed will use the `clangd` language server for formatting C code like the `clang-format` CLI tool. To configure this you can add a `.clang-format` file. 
For example: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clang-format-21.x.json --- BasedOnStyle: GNU IndentWidth: 2 From e13e93063ce24a2ede88747c316d7279174878c8 Mon Sep 17 00:00:00 2001 From: Max Brunsfeld Date: Tue, 25 Nov 2025 17:33:10 -0800 Subject: [PATCH 0385/1030] Avoid continuing zeta requests that are cancelled before their throttle (#43505) Release Notes: - N/A --- crates/zeta/src/zeta.rs | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 8b54576a12f2ff788b6088299c30923b2ce8adda..26a2388a96e4a828fc4c7bd6fe5d3dbb57bfc911 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -289,6 +289,7 @@ struct ZetaProject { next_pending_prediction_id: usize, pending_predictions: ArrayVec, last_prediction_refresh: Option<(EntityId, Instant)>, + cancelled_predictions: HashSet, context: Option, Vec>>>, refresh_context_task: Option>>>, refresh_context_debounce_task: Option>>, @@ -601,6 +602,7 @@ impl Zeta { recent_paths: VecDeque::new(), registered_buffers: HashMap::default(), current_prediction: None, + cancelled_predictions: HashSet::default(), pending_predictions: ArrayVec::new(), next_pending_prediction_id: 0, last_prediction_refresh: None, @@ -1132,11 +1134,23 @@ impl Zeta { cx.background_executor().timer(timeout).await; } + // If this task was cancelled before the throttle timeout expired, + // do not perform a request. + let mut is_cancelled = true; this.update(cx, |this, cx| { - this.get_or_init_zeta_project(&project, cx) - .last_prediction_refresh = Some((throttle_entity, Instant::now())); + let project_state = this.get_or_init_zeta_project(&project, cx); + if !project_state + .cancelled_predictions + .remove(&pending_prediction_id) + { + project_state.last_prediction_refresh = Some((throttle_entity, Instant::now())); + is_cancelled = false; + } }) .ok(); + if is_cancelled { + return None; + } let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); @@ -1144,6 +1158,10 @@ impl Zeta { // any pending predictions that were enqueued before it. this.update(cx, |this, cx| { let zeta_project = this.get_or_init_zeta_project(&project, cx); + zeta_project + .cancelled_predictions + .remove(&pending_prediction_id); + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); for (ix, pending_prediction) in pending_predictions.iter().enumerate() { if pending_prediction.id == pending_prediction_id { @@ -1174,6 +1192,9 @@ impl Zeta { id: pending_prediction_id, task, }); + zeta_project + .cancelled_predictions + .insert(pending_prediction.id); self.cancel_pending_prediction(pending_prediction, cx); } } From 88ef5b137fc4cc8e9b194ef5b8a888f10872ce84 Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 22:45:54 -0500 Subject: [PATCH 0386/1030] terminal: Update search match highlights on resize (#43507) The fix for this is emitting a wake-up event to tell the terminal to recalculate its search highlights on resize. Release Notes: - terminal: Fix bug where search match highlights wouldn't update their position when resizing the terminal. 
--- crates/terminal/src/terminal.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 69b6be5f249b811273aed8ecd96ed82493a3596a..48073aee51a376d3700a3f818081f87fd24c5ee1 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -992,6 +992,12 @@ impl Terminal { } term.resize(new_bounds); + // If there are matches we need to emit a wake up event to + // invalidate the matches and recalculate their locations + // in the new terminal layout + if !self.matches.is_empty() { + cx.emit(Event::Wakeup); + } } InternalEvent::Clear => { trace!("Clearing"); From 56a2f9cfcf0c6a3c38f596b58002953763cd890f Mon Sep 17 00:00:00 2001 From: Anthony Eid <56899983+Anthony-Eid@users.noreply.github.com> Date: Tue, 25 Nov 2025 22:58:29 -0500 Subject: [PATCH 0387/1030] Revert "git: Make the version_control.{deleted/added} colors more accessible" (#43512) Reverts zed-industries/zed#43475 The colors ended up being too dark. Zed adds an opacity to the highlights. https://github.com/zed-industries/zed/blob/e13e93063ce24a2ede88747c316d7279174878c8/crates/editor/src/element.rs#L9195-L9200 Reverting to avoid having the colors go out in preview will fix shortly after. --- assets/themes/one/one.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/assets/themes/one/one.json b/assets/themes/one/one.json index 48db749a4b636963d6db714ddb055c9c15bc5494..6849cd05dc70752216789ae04e81fad232f7b14b 100644 --- a/assets/themes/one/one.json +++ b/assets/themes/one/one.json @@ -96,9 +96,9 @@ "terminal.ansi.bright_white": "#fafafaff", "terminal.ansi.dim_white": "#575d65ff", "link_text.hover": "#74ade8ff", - "version_control.added": "#2EA048ff", + "version_control.added": "#27a657ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#78081Bff", + "version_control.deleted": "#e06c76ff", "version_control.conflict_marker.ours": "#a1c1811a", "version_control.conflict_marker.theirs": "#74ade81a", "conflict": "#dec184ff", @@ -497,9 +497,9 @@ "terminal.ansi.bright_white": "#ffffffff", "terminal.ansi.dim_white": "#aaaaaaff", "link_text.hover": "#5c78e2ff", - "version_control.added": "#2EA048ff", + "version_control.added": "#27a657ff", "version_control.modified": "#d3b020ff", - "version_control.deleted": "#F85149ff", + "version_control.deleted": "#e06c76ff", "conflict": "#a48819ff", "conflict.background": "#faf2e6ff", "conflict.border": "#f4e7d1ff", From 3072133e5965e16f956509a600faaef482a10195 Mon Sep 17 00:00:00 2001 From: qystishere Date: Wed, 26 Nov 2025 09:45:50 +0300 Subject: [PATCH 0388/1030] Improve bash detection on Windows (#43455) I have git installed via [scoop](https://scoop.sh). The current implementation finds `git.exe` in scoop's shims folder and then tries to find `bash.exe` relative to it. For example, `git.exe` (shim) is located at: ``` C:\Users\\scoop\shims\git.exe ``` And the code tries to find `bash.exe` at: ``` C:\Users\\scoop\shims\..\bin\bash.exe ``` which doesn't exist. This PR changes the logic to first check if `bash.exe` is available in PATH (using `which::which`), and only falls back to the git-relative path if that fails. 
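A minimal sketch of the lookup order described above — PATH first, then the Git-for-Windows fallback — assuming the `which` crate. This is not the merged code; the actual change below checks scoop's shims directory and the git-relative path directly:

```rust
// Sketch only: prefer a bash.exe already reachable through PATH (which covers
// scoop shims and similar installs), then fall back to the copy bundled with
// Git for Windows, resolved relative to git.exe. Caching is omitted.
use std::path::PathBuf;

fn find_bash() -> Option<PathBuf> {
    // 1. bash.exe on PATH.
    if let Ok(bash) = which::which("bash.exe") {
        return Some(bash);
    }
    // 2. /path/to/git/cmd/git.exe -> /path/to/git/bin/bash.exe
    let git = which::which("git").ok()?;
    let bash = git.parent()?.parent()?.join("bin").join("bash.exe");
    bash.is_file().then_some(bash)
}
```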
--- crates/util/src/shell.rs | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index ba54f7b7784b45613b28067afe2748339e6b6c64..1f91939134b67a745c75afe264ceec0ef5d50f73 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -79,29 +79,42 @@ pub fn get_default_system_shell() -> String { } } -/// Get the default system shell, preferring git-bash on Windows. +/// Get the default system shell, preferring bash on Windows. pub fn get_default_system_shell_preferring_bash() -> String { if cfg!(windows) { - get_windows_git_bash().unwrap_or_else(|| get_windows_system_shell()) + get_windows_bash().unwrap_or_else(|| get_windows_system_shell()) } else { "/bin/sh".to_string() } } -pub fn get_windows_git_bash() -> Option { - static GIT_BASH: LazyLock> = LazyLock::new(|| { +pub fn get_windows_bash() -> Option { + use std::path::PathBuf; + + fn find_bash_in_scoop() -> Option { + let bash_exe = + PathBuf::from(std::env::var_os("USERPROFILE")?).join("scoop\\shims\\bash.exe"); + bash_exe.exists().then_some(bash_exe) + } + + fn find_bash_in_git() -> Option { // /path/to/git/cmd/git.exe/../../bin/bash.exe let git = which::which("git").ok()?; let git_bash = git.parent()?.parent()?.join("bin").join("bash.exe"); - if git_bash.is_file() { - log::info!("Found git-bash at {}", git_bash.display()); - Some(git_bash.to_string_lossy().to_string()) - } else { - None + git_bash.exists().then_some(git_bash) + } + + static BASH: LazyLock> = LazyLock::new(|| { + let bash = find_bash_in_scoop() + .or_else(|| find_bash_in_git()) + .map(|p| p.to_string_lossy().into_owned()); + if let Some(ref path) = bash { + log::info!("Found bash at {}", path); } + bash }); - (*GIT_BASH).clone() + (*BASH).clone() } pub fn get_windows_system_shell() -> String { From 9d8b5077b4fcc1c3ff65ed807de34cc773e59d54 Mon Sep 17 00:00:00 2001 From: Oscar Vargas Torres <1676245+oscarvarto@users.noreply.github.com> Date: Wed, 26 Nov 2025 00:48:06 -0600 Subject: [PATCH 0389/1030] zeta: Avoid logging an error for not having SWEEP_AI_TOKEN (#43504) Closes #43503 Release Notes: - Fixes ERROR No SWEEP_AI_TOKEN environment variable set Co-authored-by: oscarvarto --- crates/zeta/src/sweep_ai.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index 49870c7c9e917468e70062cbc234e9774fb3668b..c88dda2ae2fd11dd37965e58560df9e98528c9d9 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -1,4 +1,4 @@ -use anyhow::{Context as _, Result}; +use anyhow::Result; use cloud_llm_client::predict_edits_v3::Event; use futures::AsyncReadExt as _; use gpui::{ @@ -17,7 +17,6 @@ use std::{ sync::Arc, time::Instant, }; -use util::ResultExt as _; use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; @@ -31,9 +30,7 @@ pub struct SweepAi { impl SweepAi { pub fn new(cx: &App) -> Self { SweepAi { - api_token: std::env::var("SWEEP_AI_TOKEN") - .context("No SWEEP_AI_TOKEN environment variable set") - .log_err(), + api_token: std::env::var("SWEEP_AI_TOKEN").ok(), debug_info: debug_info(cx), } } From 00e93bfa113a3daed6e4a97a7244ad04d58453ee Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 09:00:46 +0100 Subject: [PATCH 0390/1030] shell: Correctly identifiy `powershell` shells on windows (#43526) Release Notes: - Fixed zed only finding pwsh but not powershell on windows --- crates/askpass/src/askpass.rs | 1 + 
crates/gpui/src/platform/windows/platform.rs | 11 +++++---- crates/util/src/shell.rs | 24 +++++++++++++------- 3 files changed, 23 insertions(+), 13 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index 25db3144ccb10b9cac1b8d8555ea9924e193468c..a9047a567fd3b6323fb6edc64be4854f4da0a958 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -250,6 +250,7 @@ impl PasswordProxy { .await .with_context(|| format!("creating askpass script at {askpass_script_path:?}"))?; make_file_executable(&askpass_script_path).await?; + // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( "powershell.exe -ExecutionPolicy Bypass -File {}", diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index b7f13f1fab495b1040d1be8e7b86376c450b5f7e..006099c3828efb11b0981e81635fba0c452c8560 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -389,11 +389,12 @@ impl Platform for WindowsPlatform { #[allow( clippy::disallowed_methods, reason = "We are restarting ourselves, using std command thus is fine" - )] - let restart_process = util::command::new_std_command("powershell.exe") - .arg("-command") - .arg(script) - .spawn(); + )] // todo(shell): There might be no powershell on the system + let restart_process = + util::command::new_std_command(util::shell::get_windows_system_shell()) + .arg("-command") + .arg(script) + .spawn(); match restart_process { Ok(_) => self.quit(), diff --git a/crates/util/src/shell.rs b/crates/util/src/shell.rs index 1f91939134b67a745c75afe264ceec0ef5d50f73..d6cf5e1d380109aa4fcfc4e55a4c469ba1903add 100644 --- a/crates/util/src/shell.rs +++ b/crates/util/src/shell.rs @@ -204,14 +204,22 @@ pub fn get_windows_system_shell() -> String { } static SYSTEM_SHELL: LazyLock = LazyLock::new(|| { - find_pwsh_in_programfiles(false, false) - .or_else(|| find_pwsh_in_programfiles(true, false)) - .or_else(|| find_pwsh_in_msix(false)) - .or_else(|| find_pwsh_in_programfiles(false, true)) - .or_else(|| find_pwsh_in_msix(true)) - .or_else(|| find_pwsh_in_programfiles(true, true)) - .or_else(find_pwsh_in_scoop) - .map(|p| p.to_string_lossy().into_owned()) + let locations = [ + || find_pwsh_in_programfiles(false, false), + || find_pwsh_in_programfiles(true, false), + || find_pwsh_in_msix(false), + || find_pwsh_in_programfiles(false, true), + || find_pwsh_in_msix(true), + || find_pwsh_in_programfiles(true, true), + || find_pwsh_in_scoop(), + || which::which_global("pwsh.exe").ok(), + || which::which_global("powershell.exe").ok(), + ]; + + locations + .into_iter() + .find_map(|f| f()) + .map(|p| p.to_string_lossy().trim().to_owned()) .inspect(|shell| log::info!("Found powershell in: {}", shell)) .unwrap_or_else(|| { log::warn!("Powershell not found, falling back to `cmd`"); From 425d4c73f3fbf4362520d2ea2c1205eeb948b31d Mon Sep 17 00:00:00 2001 From: Bhuminjay Soni Date: Wed, 26 Nov 2025 14:31:20 +0530 Subject: [PATCH 0391/1030] git: Use correct file mode when staging (#41900) Closes #28667 Release Notes: - Fixed git not preserving file mode when committing. Now if an input file is executable it will be preserved when committed with Zed. 
--------- Signed-off-by: 11happy Signed-off-by: 11happy Co-authored-by: Jakub Konka --- Cargo.lock | 10 ++++ crates/fs/Cargo.toml | 1 + crates/fs/src/fake_git_repo.rs | 1 + crates/fs/src/fs.rs | 11 ++++ crates/git/src/repository.rs | 6 +- crates/project/src/git_store.rs | 15 ++++- crates/project/src/project_tests.rs | 85 +++++++++++++++++++++++++++++ 7 files changed, 126 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 38b7a6939878fad9bfa259ee03189e018ef507c9..3c19e1b79bc859e77c99b992f4f5617894da82df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6406,6 +6406,7 @@ dependencies = [ "git", "gpui", "ignore", + "is_executable", "libc", "log", "notify 8.2.0", @@ -8436,6 +8437,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "is_executable" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baabb8b4867b26294d818bf3f651a454b6901431711abb96e296245888d6e8c4" +dependencies = [ + "windows-sys 0.60.2", +] + [[package]] name = "is_terminal_polyfill" version = "1.70.1" diff --git a/crates/fs/Cargo.toml b/crates/fs/Cargo.toml index 15093b3a5b5e18ce0ddca1e9c23350e1ac46d66e..52063eeddcc3aa74adae33f3a78c74ecb6b6f04c 100644 --- a/crates/fs/Cargo.toml +++ b/crates/fs/Cargo.toml @@ -33,6 +33,7 @@ tempfile.workspace = true text.workspace = true time.workspace = true util.workspace = true +is_executable = "1.0.5" [target.'cfg(target_os = "macos")'.dependencies] fsevent.workspace = true diff --git a/crates/fs/src/fake_git_repo.rs b/crates/fs/src/fake_git_repo.rs index c9a41243aa641318026db208d78a64429cfeb1ab..febef94d8cd8c5f10c27dd5c62e8076fb5fb784d 100644 --- a/crates/fs/src/fake_git_repo.rs +++ b/crates/fs/src/fake_git_repo.rs @@ -138,6 +138,7 @@ impl GitRepository for FakeGitRepository { path: RepoPath, content: Option, _env: Arc>, + _is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>> { self.with_state_async(true, move |state| { if let Some(message) = &state.simulated_index_write_error_message { diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 93192ecd2bd2449dafa622a69045be6811a43cf7..5a6e4bdfdba48af25342d4d1ecfafd1d4ce0709b 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -32,6 +32,7 @@ use std::mem::MaybeUninit; use async_tar::Archive; use futures::{AsyncRead, Stream, StreamExt, future::BoxFuture}; use git::repository::{GitRepository, RealGitRepository}; +use is_executable::IsExecutable; use rope::Rope; use serde::{Deserialize, Serialize}; use smol::io::AsyncWriteExt; @@ -208,6 +209,7 @@ pub struct Metadata { pub is_dir: bool, pub len: u64, pub is_fifo: bool, + pub is_executable: bool, } /// Filesystem modification time. The purpose of this newtype is to discourage use of operations @@ -895,6 +897,12 @@ impl Fs for RealFs { #[cfg(unix)] let is_fifo = metadata.file_type().is_fifo(); + let path_buf = path.to_path_buf(); + let is_executable = self + .executor + .spawn(async move { path_buf.is_executable() }) + .await; + Ok(Some(Metadata { inode, mtime: MTime(metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH)), @@ -902,6 +910,7 @@ impl Fs for RealFs { is_symlink, is_dir: metadata.file_type().is_dir(), is_fifo, + is_executable, })) } @@ -2602,6 +2611,7 @@ impl Fs for FakeFs { is_dir: false, is_symlink, is_fifo: false, + is_executable: false, }, FakeFsEntry::Dir { inode, mtime, len, .. @@ -2612,6 +2622,7 @@ impl Fs for FakeFs { is_dir: true, is_symlink, is_fifo: false, + is_executable: false, }, FakeFsEntry::Symlink { .. 
} => unreachable!(), })) diff --git a/crates/git/src/repository.rs b/crates/git/src/repository.rs index 9beb3d838382d9267afdb081211647139f85b75e..03b29eb4a7a28ddc13bdbfb23422f98baa82ae36 100644 --- a/crates/git/src/repository.rs +++ b/crates/git/src/repository.rs @@ -400,6 +400,7 @@ pub trait GitRepository: Send + Sync { path: RepoPath, content: Option, env: Arc>, + is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>>; /// Returns the URL of the remote with the given name. @@ -987,12 +988,15 @@ impl GitRepository for RealGitRepository { path: RepoPath, content: Option, env: Arc>, + is_executable: bool, ) -> BoxFuture<'_, anyhow::Result<()>> { let working_directory = self.working_directory(); let git_binary_path = self.any_git_binary_path.clone(); self.executor .spawn(async move { let working_directory = working_directory?; + let mode = if is_executable { "100755" } else { "100644" }; + if let Some(content) = content { let mut child = new_smol_command(&git_binary_path) .current_dir(&working_directory) @@ -1013,7 +1017,7 @@ impl GitRepository for RealGitRepository { let output = new_smol_command(&git_binary_path) .current_dir(&working_directory) .envs(env.iter()) - .args(["update-index", "--add", "--cacheinfo", "100644", sha]) + .args(["update-index", "--add", "--cacheinfo", mode, sha]) .arg(path.as_unix_str()) .output() .await?; diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index bde9261fa28b8ed0d6c6a79fd02b90177e52a98e..8b83fa48e9b61a7200a001f4d42227b1c2302874 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -301,6 +301,7 @@ impl std::ops::Deref for Repository { #[derive(Clone)] pub enum RepositoryState { Local { + fs: Arc, backend: Arc, environment: Arc>, }, @@ -4288,6 +4289,7 @@ impl Repository { RepositoryState::Local { backend, environment, + .. } => backend.run_hook(hook, environment.clone()).await, RepositoryState::Remote { project_id, client } => { client @@ -4580,6 +4582,7 @@ impl Repository { let id = self.id; let this = cx.weak_entity(); let git_store = self.git_store.clone(); + let abs_path = self.snapshot.repo_path_to_abs_path(&path); self.send_keyed_job( Some(GitJobKey::WriteIndex(vec![path.clone()])), None, @@ -4588,14 +4591,21 @@ impl Repository { "start updating index text for buffer {}", path.as_unix_str() ); + match git_repo { RepositoryState::Local { + fs, backend, environment, .. 
} => { + let executable = match fs.metadata(&abs_path).await { + Ok(Some(meta)) => meta.is_executable, + Ok(None) => false, + Err(_err) => false, + }; backend - .set_index_text(path.clone(), content, environment.clone()) + .set_index_text(path.clone(), content, environment.clone(), executable) .await?; } RepositoryState::Remote { project_id, client } => { @@ -5164,6 +5174,7 @@ impl Repository { cx: &mut Context, ) -> mpsc::UnboundedSender { let (job_tx, mut job_rx) = mpsc::unbounded::(); + let fs_cloned = fs.clone(); cx.spawn(async move |_, cx| { let environment = project_environment @@ -5195,8 +5206,8 @@ impl Repository { backend.clone(), ); } - let state = RepositoryState::Local { + fs: fs_cloned, backend, environment: Arc::new(environment), }; diff --git a/crates/project/src/project_tests.rs b/crates/project/src/project_tests.rs index d42859de5d5491d4a5388d311266e22962889f35..1cbaf950e818956f55cb52eed997c1e3819ced34 100644 --- a/crates/project/src/project_tests.rs +++ b/crates/project/src/project_tests.rs @@ -8174,6 +8174,91 @@ async fn test_single_file_diffs(cx: &mut gpui::TestAppContext) { }); } +// TODO: Should we test this on Windows also? +#[gpui::test] +#[cfg(not(windows))] +async fn test_staging_hunk_preserve_executable_permission(cx: &mut gpui::TestAppContext) { + use std::os::unix::fs::PermissionsExt; + init_test(cx); + cx.executor().allow_parking(); + let committed_contents = "bar\n"; + let file_contents = "baz\n"; + let root = TempTree::new(json!({ + "project": { + "foo": committed_contents + }, + })); + + let work_dir = root.path().join("project"); + let file_path = work_dir.join("foo"); + let repo = git_init(work_dir.as_path()); + let mut perms = std::fs::metadata(&file_path).unwrap().permissions(); + perms.set_mode(0o755); + std::fs::set_permissions(&file_path, perms).unwrap(); + git_add("foo", &repo); + git_commit("Initial commit", &repo); + std::fs::write(&file_path, file_contents).unwrap(); + + let project = Project::test( + Arc::new(RealFs::new(None, cx.executor())), + [root.path()], + cx, + ) + .await; + + let buffer = project + .update(cx, |project, cx| { + project.open_local_buffer(file_path.as_path(), cx) + }) + .await + .unwrap(); + + let snapshot = buffer.read_with(cx, |buffer, _| buffer.snapshot()); + + let uncommitted_diff = project + .update(cx, |project, cx| { + project.open_uncommitted_diff(buffer.clone(), cx) + }) + .await + .unwrap(); + + uncommitted_diff.update(cx, |diff, cx| { + let hunks = diff.hunks(&snapshot, cx).collect::>(); + diff.stage_or_unstage_hunks(true, &hunks, &snapshot, true, cx); + }); + + cx.run_until_parked(); + + let output = smol::process::Command::new("git") + .current_dir(&work_dir) + .args(["diff", "--staged"]) + .output() + .await + .unwrap(); + + let staged_diff = String::from_utf8_lossy(&output.stdout); + + assert!( + !staged_diff.contains("new mode 100644"), + "Staging should not change file mode from 755 to 644.\ngit diff --staged:\n{}", + staged_diff + ); + + let output = smol::process::Command::new("git") + .current_dir(&work_dir) + .args(["ls-files", "-s"]) + .output() + .await + .unwrap(); + let index_contents = String::from_utf8_lossy(&output.stdout); + + assert!( + index_contents.contains("100755"), + "Index should show file as executable (100755).\ngit ls-files -s:\n{}", + index_contents + ); +} + #[gpui::test] async fn test_repository_and_path_for_project_path( background_executor: BackgroundExecutor, From 9150346a43483eacc51a368d6c48257309c6186e Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Wed, 26 Nov 2025 17:03:52 
+0800 Subject: [PATCH 0392/1030] outline_panel: Fix the panel frequent flickering during search (#43530) The outline panel flickers when searching or when the file content changes. This happens because an empty UI appears during the search process, but it only lasts for a few milliseconds, so we can safely ignore it. ## Before https://github.com/user-attachments/assets/9b409827-75ee-4a45-864a-58f0ca43191f ## After https://github.com/user-attachments/assets/b6d48143-1f1a-4811-8754-0a679428eec2 Release Notes: - N/A --- crates/outline_panel/src/outline_panel.rs | 63 +++++++++-------------- 1 file changed, 24 insertions(+), 39 deletions(-) diff --git a/crates/outline_panel/src/outline_panel.rs b/crates/outline_panel/src/outline_panel.rs index cb857a72898bbd6f4161a0f4d218394efeab5c7e..1e649b2eb64fda970f845e9376be3f61944dde85 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -111,8 +111,6 @@ pub struct OutlinePanel { selected_entry: SelectedEntry, active_item: Option, _subscriptions: Vec, - updating_fs_entries: bool, - updating_cached_entries: bool, new_entries_for_fs_update: HashSet, fs_entries_update_task: Task<()>, cached_entries_update_task: Task<()>, @@ -853,8 +851,6 @@ impl OutlinePanel { width: None, active_item: None, pending_serialization: Task::ready(None), - updating_fs_entries: false, - updating_cached_entries: false, new_entries_for_fs_update: HashSet::default(), preserve_selection_on_buffer_fold_toggles: HashSet::default(), pending_default_expansion_depth: None, @@ -2658,7 +2654,6 @@ impl OutlinePanel { let repo_snapshots = self.project.update(cx, |project, cx| { project.git_store().read(cx).repo_snapshots(cx) }); - self.updating_fs_entries = true; self.fs_entries_update_task = cx.spawn_in(window, async move |outline_panel, cx| { if let Some(debounce) = debounce { cx.background_executor().timer(debounce).await; @@ -3016,7 +3011,6 @@ impl OutlinePanel { outline_panel .update_in(cx, |outline_panel, window, cx| { - outline_panel.updating_fs_entries = false; outline_panel.new_entries_for_fs_update.clear(); outline_panel.excerpts = new_excerpts; outline_panel.collapsed_entries = new_collapsed_entries; @@ -3579,7 +3573,6 @@ impl OutlinePanel { let is_singleton = self.is_singleton_active(cx); let query = self.query(cx); - self.updating_cached_entries = true; self.cached_entries_update_task = cx.spawn_in(window, async move |outline_panel, cx| { if let Some(debounce) = debounce { cx.background_executor().timer(debounce).await; @@ -3612,7 +3605,6 @@ impl OutlinePanel { } outline_panel.autoscroll(cx); - outline_panel.updating_cached_entries = false; cx.notify(); }) .ok(); @@ -4542,12 +4534,10 @@ impl OutlinePanel { cx: &mut Context, ) -> impl IntoElement { let contents = if self.cached_entries.is_empty() { - let header = if self.updating_fs_entries || self.updating_cached_entries { - None - } else if query.is_some() { - Some("No matches for query") + let header = if query.is_some() { + "No matches for query" } else { - Some("No outlines available") + "No outlines available" }; v_flex() @@ -4556,33 +4546,28 @@ impl OutlinePanel { .flex_1() .justify_center() .size_full() - .when_some(header, |panel, header| { - panel - .child(h_flex().justify_center().child(Label::new(header))) - .when_some(query.clone(), |panel, query| { - panel.child( - h_flex() - .px_0p5() - .justify_center() - .bg(cx.theme().colors().element_selected.opacity(0.2)) - .child(Label::new(query)), - ) - }) - .child(h_flex().justify_center().child({ - let keystroke = match 
self.position(window, cx) { - DockPosition::Left => { - window.keystroke_text_for(&workspace::ToggleLeftDock) - } - DockPosition::Bottom => { - window.keystroke_text_for(&workspace::ToggleBottomDock) - } - DockPosition::Right => { - window.keystroke_text_for(&workspace::ToggleRightDock) - } - }; - Label::new(format!("Toggle Panel With {keystroke}")).color(Color::Muted) - })) + .child(h_flex().justify_center().child(Label::new(header))) + .when_some(query, |panel, query| { + panel.child( + h_flex() + .px_0p5() + .justify_center() + .bg(cx.theme().colors().element_selected.opacity(0.2)) + .child(Label::new(query)), + ) }) + .child(h_flex().justify_center().child({ + let keystroke = match self.position(window, cx) { + DockPosition::Left => window.keystroke_text_for(&workspace::ToggleLeftDock), + DockPosition::Bottom => { + window.keystroke_text_for(&workspace::ToggleBottomDock) + } + DockPosition::Right => { + window.keystroke_text_for(&workspace::ToggleRightDock) + } + }; + Label::new(format!("Toggle Panel With {keystroke}")).color(Color::Muted) + })) } else { let list_contents = { let items_len = self.cached_entries.len(); From 684a58fc84491d21d5a034ac442f14f8f0bdb6d4 Mon Sep 17 00:00:00 2001 From: ihavecoke Date: Wed, 26 Nov 2025 17:09:26 +0800 Subject: [PATCH 0393/1030] Implement vertical scrolling for extended keymap load error information (#42542) This PR fix an issue where, if an error occurs while loading the keymap file during application startup, an excessively long error message would be truncated and not fully displayed. Before: before After: image Release Notes: - N/A --- crates/workspace/src/notifications.rs | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 6c1156b83396d1266bc46bca67f10f3f57adfec4..75c35cda22d72d659040154a079fe78af78cf414 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -593,9 +593,9 @@ pub mod simple_message_notification { use gpui::{ AnyElement, DismissEvent, EventEmitter, FocusHandle, Focusable, ParentElement, Render, - SharedString, Styled, + ScrollHandle, SharedString, Styled, }; - use ui::prelude::*; + use ui::{WithScrollbar, prelude::*}; use crate::notifications::NotificationFrame; @@ -617,6 +617,7 @@ pub mod simple_message_notification { show_close_button: bool, show_suppress_button: bool, title: Option, + scroll_handle: ScrollHandle, } impl Focusable for MessageNotification { @@ -661,6 +662,7 @@ pub mod simple_message_notification { show_suppress_button: true, title: None, focus_handle: cx.focus_handle(), + scroll_handle: ScrollHandle::new(), } } @@ -777,7 +779,18 @@ pub mod simple_message_notification { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { NotificationFrame::new() .with_title(self.title.clone()) - .with_content((self.build_content)(window, cx)) + .with_content( + div() + .child( + div() + .id("message-notification-content") + .max_h(vh(0.6, window)) + .overflow_y_scroll() + .track_scroll(&self.scroll_handle.clone()) + .child((self.build_content)(window, cx)), + ) + .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + ) .show_close_button(self.show_close_button) .show_suppress_button(self.show_suppress_button) .on_close(cx.listener(|_, suppress, _, cx| { From c2cb76b026f6dc73d8b2b91b3c56be5a78a1e473 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 12:43:30 +0100 Subject: [PATCH 0394/1030] rope: Turn 
`ChunkSlice::slice` panics into error logs (#43538) While logically not really correct, its better than tearing down the application until we figure out the root cause here Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/rope/src/chunk.rs | 139 ++++++++++++++++++++------------------- crates/rope/src/rope.rs | 4 +- 2 files changed, 74 insertions(+), 69 deletions(-) diff --git a/crates/rope/src/chunk.rs b/crates/rope/src/chunk.rs index 7ada5c2052481408bc5af56740f8e35916623f14..95df79d64bb401edf6220ba573be854297226cfe 100644 --- a/crates/rope/src/chunk.rs +++ b/crates/rope/src/chunk.rs @@ -127,39 +127,15 @@ impl Chunk { #[track_caller] #[inline(always)] - pub fn assert_char_boundary(&self, offset: usize) { + pub fn assert_char_boundary(&self, offset: usize) -> bool { if self.is_char_boundary(offset) { - return; + return true; } - panic_char_boundary(self, offset); - - #[cold] - #[inline(never)] - #[track_caller] - fn panic_char_boundary(chunk: &Chunk, offset: usize) { - if offset > chunk.text.len() { - panic!( - "byte index {} is out of bounds of `{:?}` (length: {})", - offset, - chunk.text, - chunk.text.len() - ); - } - // find the character - let char_start = chunk.floor_char_boundary(offset); - // `char_start` must be less than len and a char boundary - let ch = chunk - .text - .get(char_start..) - .unwrap() - .chars() - .next() - .unwrap(); - let char_range = char_start..char_start + ch.len_utf8(); - panic!( - "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", - offset, ch, char_range, - ); + if PANIC { + panic_char_boundary(&self.text, offset); + } else { + log_err_char_boundary(&self.text, offset); + false } } } @@ -230,10 +206,7 @@ impl<'a> ChunkSlice<'a> { } #[inline(always)] - pub fn slice(self, range: Range) -> Self { - let mask = (1 as Bitmap) - .unbounded_shl(range.end as u32) - .wrapping_sub(1); + pub fn slice(self, mut range: Range) -> Self { if range.start == MAX_BASE { Self { chars: 0, @@ -243,8 +216,15 @@ impl<'a> ChunkSlice<'a> { text: "", } } else { - self.assert_char_boundary(range.start); - self.assert_char_boundary(range.end); + if !self.assert_char_boundary::(range.start) { + range.start = self.text.ceil_char_boundary(range.start); + } + if !self.assert_char_boundary::(range.end) { + range.end = self.text.floor_char_boundary(range.end); + } + let mask = (1 as Bitmap) + .unbounded_shl(range.end as u32) + .wrapping_sub(1); Self { chars: (self.chars & mask) >> range.start, chars_utf16: (self.chars_utf16 & mask) >> range.start, @@ -381,38 +361,15 @@ impl<'a> ChunkSlice<'a> { #[track_caller] #[inline(always)] - pub fn assert_char_boundary(&self, offset: usize) { + pub fn assert_char_boundary(&self, offset: usize) -> bool { if self.is_char_boundary(offset) { - return; + return true; } - panic_char_boundary(self, offset); - - #[cold] - #[inline(never)] - fn panic_char_boundary(chunk: &ChunkSlice, offset: usize) { - if offset > chunk.text.len() { - panic!( - "byte index {} is out of bounds of `{:?}` (length: {})", - offset, - chunk.text, - chunk.text.len() - ); - } - // find the character - let char_start = chunk.floor_char_boundary(offset); - // `char_start` must be less than len and a char boundary - let ch = chunk - .text - .get(char_start..) 
- .unwrap() - .chars() - .next() - .unwrap(); - let char_range = char_start..char_start + ch.len_utf8(); - panic!( - "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", - offset, ch, char_range, - ); + if PANIC { + panic_char_boundary(self.text, offset); + } else { + log_err_char_boundary(self.text, offset); + false } } @@ -696,6 +653,54 @@ fn nth_set_bit(v: u128, n: usize) -> usize { } } +#[cold] +#[inline(never)] +#[track_caller] +fn panic_char_boundary(text: &str, offset: usize) -> ! { + if offset > text.len() { + panic!( + "byte index {} is out of bounds of `{:?}` (length: {})", + offset, + text, + text.len() + ); + } + // find the character + let char_start = text.floor_char_boundary(offset); + // `char_start` must be less than len and a char boundary + let ch = text.get(char_start..).unwrap().chars().next().unwrap(); + let char_range = char_start..char_start + ch.len_utf8(); + panic!( + "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", + offset, ch, char_range, + ); +} + +#[cold] +#[inline(never)] +#[track_caller] +fn log_err_char_boundary(text: &str, offset: usize) { + if offset > text.len() { + log::error!( + "byte index {} is out of bounds of `{:?}` (length: {})", + offset, + text, + text.len() + ); + } + // find the character + let char_start = text.floor_char_boundary(offset); + // `char_start` must be less than len and a char boundary + let ch = text.get(char_start..).unwrap().chars().next().unwrap(); + let char_range = char_start..char_start + ch.len_utf8(); + log::error!( + "byte index {} is not a char boundary; it is inside {:?} (bytes {:?})", + offset, + ch, + char_range, + ); +} + #[inline(always)] fn nth_set_bit_u64(v: u64, mut n: u64) -> u64 { let v = v.reverse_bits(); diff --git a/crates/rope/src/rope.rs b/crates/rope/src/rope.rs index 32894fb84469287fb1474efc57d8180bdee13466..8379045be245cadaf79800f1d57ff418cdd24b40 100644 --- a/crates/rope/src/rope.rs +++ b/crates/rope/src/rope.rs @@ -58,7 +58,7 @@ impl Rope { match item { Some(chunk) => { let chunk_offset = offset - start; - chunk.assert_char_boundary(chunk_offset); + chunk.assert_char_boundary::(chunk_offset); } None => { panic!( @@ -716,7 +716,7 @@ impl<'a> Chunks<'a> { }; let chunk_offset = offset - chunks.start(); if let Some(chunk) = chunks.item() { - chunk.assert_char_boundary(chunk_offset); + chunk.assert_char_boundary::(chunk_offset); } Self { chunks, From b9af6645e3824a6bc94a5c2b051fb857c104bbe8 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 13:01:01 +0100 Subject: [PATCH 0395/1030] gpui: Return `None` for non-existing credentials in `read_credentials` on windows (#43540) Release Notes: - N/A *or* Added/Fixed/Improved ... --- crates/gpui/src/platform/windows/platform.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 006099c3828efb11b0981e81635fba0c452c8560..942cb62d2216c8d7cd5ea4cf75c4e4fa4a7d007f 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -642,15 +642,24 @@ impl Platform for WindowsPlatform { .collect_vec(); self.foreground_executor().spawn(async move { let mut credentials: *mut CREDENTIALW = std::ptr::null_mut(); - unsafe { + let result = unsafe { CredReadW( PCWSTR::from_raw(target_name.as_ptr()), CRED_TYPE_GENERIC, None, &mut credentials, - )? + ) }; + if let Err(err) = result { + // ERROR_NOT_FOUND means the credential doesn't exist. 
+ // Return Ok(None) to match macOS and Linux behavior. + if err.code().0 == ERROR_NOT_FOUND.0 as i32 { + return Ok(None); + } + return Err(err.into()); + } + if credentials.is_null() { Ok(None) } else { From 1e6a05d0d87652c708d41e97f9d31af4a9cf6cd0 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 13:17:57 +0100 Subject: [PATCH 0396/1030] askpass: Quote askpass script in askpass helper command (#43542) Closes #40276 Release Notes: - Fixed askpass execution failing on windows sometimes --- crates/askpass/src/askpass.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/crates/askpass/src/askpass.rs b/crates/askpass/src/askpass.rs index a9047a567fd3b6323fb6edc64be4854f4da0a958..8e911f1654572d2bcd9f906c82449e1524d0ce9d 100644 --- a/crates/askpass/src/askpass.rs +++ b/crates/askpass/src/askpass.rs @@ -249,11 +249,15 @@ impl PasswordProxy { fs::write(&askpass_script_path, askpass_script) .await .with_context(|| format!("creating askpass script at {askpass_script_path:?}"))?; - make_file_executable(&askpass_script_path).await?; + make_file_executable(&askpass_script_path) + .await + .with_context(|| { + format!("marking askpass script executable at {askpass_script_path:?}") + })?; // todo(shell): There might be no powershell on the system #[cfg(target_os = "windows")] let askpass_helper = format!( - "powershell.exe -ExecutionPolicy Bypass -File {}", + "powershell.exe -ExecutionPolicy Bypass -File '{}'", askpass_script_path.display() ); From 7c724c0f1049e610c541c2f4f6a8739f91865e02 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 14:11:47 +0100 Subject: [PATCH 0397/1030] editor: Do not show scroll thumb if page fits (#43548) Follow-up to https://github.com/zed-industries/zed/pull/39367 Release Notes: - Fixed a small issue where a scrollbar would sometimes show in the editor although the content fix exactly on screen. 
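The two-line hunk in `element.rs` below converts each `Pixels` value to `f64` before dividing, instead of dividing first and widening the quotient. A standalone illustration of why that ordering matters, with arbitrary numbers rather than Zed code, assuming the pre-conversion division runs at lower (`f32`) precision:

```rust
// Illustrative only: dividing in f32 and widening afterwards keeps roughly 7
// significant digits of the quotient, while widening the operands first lets
// the division itself run at f64 precision.
fn main() {
    let viewport: f32 = 1234.5678; // e.g. viewport extent in pixels
    let glyph: f32 = 17.0;         // e.g. line height in pixels

    let divide_then_widen = f64::from(viewport / glyph);
    let widen_then_divide = f64::from(viewport) / f64::from(glyph);

    println!("f32 quotient, widened: {divide_then_widen:.12}");
    println!("f64 quotient:          {widen_then_divide:.12}");
    // The two typically agree only to about f32 precision, and near-boundary
    // ratios (like a page that fits exactly) are where that small mismatch can
    // change the outcome of a comparison.
}
```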
--- crates/editor/src/element.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index 4ea12f0a21295d97cdcff565c484750e14334223..c85528353fc23ac2da4cca3682e28a30cda37f9c 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -10712,9 +10712,9 @@ impl ScrollbarLayout { show_thumb: bool, axis: ScrollbarAxis, ) -> Self { - let text_units_per_page = f64::from(viewport_size / glyph_space); + let text_units_per_page = viewport_size.to_f64() / glyph_space.to_f64(); let visible_range = scroll_position..scroll_position + text_units_per_page; - let total_text_units = scroll_range / f64::from(glyph_space); + let total_text_units = scroll_range / glyph_space.to_f64(); let thumb_percentage = text_units_per_page / total_text_units; let thumb_size = Pixels::from(ScrollOffset::from(track_length) * thumb_percentage) From c36b12f3b241babfd84390e7dad6e54c945fbdd6 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Wed, 26 Nov 2025 15:32:25 +0100 Subject: [PATCH 0398/1030] settings_ui: Pick a more reasonable minimum window size (#43556) Closes https://github.com/zed-industries/zed/issues/41903 Release Notes: - Fixed settings ui being forced larger than small screens --- crates/settings_ui/src/settings_ui.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 4f29945edb2e212e3638db60213dde082a41baf6..564e78dc57b8b27398d79f861b538a9cc9dbf21c 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -607,7 +607,10 @@ pub fn open_settings_editor( window_background: cx.theme().window_background_appearance(), app_id: Some(app_id.to_owned()), window_decorations: Some(window_decorations), - window_min_size: Some(scaled_bounds), + window_min_size: Some(gpui::Size { + width: px(360.0), + height: px(240.0), + }), window_bounds: Some(WindowBounds::centered(scaled_bounds, cx)), ..Default::default() }, From 51e97d343da40750c5bd43c09396457715b9fc59 Mon Sep 17 00:00:00 2001 From: Peter Tripp Date: Wed, 26 Nov 2025 09:55:23 -0500 Subject: [PATCH 0399/1030] languages: Recognize .clangd as YAML (#43557) Follow-up to: https://github.com/zed-industries/zed/pull/43469 Thanks @WeetHet for [the idea]([WeetHet](https://github.com/WeetHet)). Release Notes: - Added support for identifying. .clangd files as YAML by default --- crates/languages/src/yaml/config.toml | 2 +- docs/src/languages/c.md | 1 + docs/src/languages/cpp.md | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/crates/languages/src/yaml/config.toml b/crates/languages/src/yaml/config.toml index 8834b3205af810c26fc9a8835f1c2afe7a185d8c..51e8e1224a40904e0dfbb0204eb531e6b2664825 100644 --- a/crates/languages/src/yaml/config.toml +++ b/crates/languages/src/yaml/config.toml @@ -1,6 +1,6 @@ name = "YAML" grammar = "yaml" -path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format"] +path_suffixes = ["yml", "yaml", "pixi.lock", "clang-format", "clangd"] line_comments = ["# "] autoclose_before = ",]}" brackets = [ diff --git a/docs/src/languages/c.md b/docs/src/languages/c.md index 565b0b5acbef78a23722020dcbad9300748dbb16..2259ad21a4afa69390ef7ef15bfa4bb96cf44e1e 100644 --- a/docs/src/languages/c.md +++ b/docs/src/languages/c.md @@ -11,6 +11,7 @@ C support is available natively in Zed. Clangd out of the box assumes mixed C++/C projects. 
If you have a C-only project you may wish to instruct clangd to treat all files as C using the `-xc` flag. To do this, create a `.clangd` file in the root of your project with the following: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clangd.json CompileFlags: Add: [-xc] ``` diff --git a/docs/src/languages/cpp.md b/docs/src/languages/cpp.md index 36cdc7a9580d2de41a6eb7063d694d54c7caffa4..c20dd58335caca45a6923cc0527605d6cc4b5564 100644 --- a/docs/src/languages/cpp.md +++ b/docs/src/languages/cpp.md @@ -78,6 +78,7 @@ You can pass any number of arguments to clangd. To see a full set of available o By default Zed will use the `clangd` language server for formatting C++ code. The Clangd is the same as the `clang-format` CLI tool. To configure this you can add a `.clang-format` file. For example: ```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clang-format-21.x.json --- BasedOnStyle: LLVM IndentWidth: 4 @@ -106,7 +107,8 @@ You can trigger formatting via {#kb editor::Format} or the `editor: format` acti In the root of your project, it is generally common to create a `.clangd` file to set extra configuration. -```text +```yaml +# yaml-language-server: $schema=https://json.schemastore.org/clangd.json CompileFlags: Add: - "--include-directory=/path/to/include" From 6a311cad113e261fae455cfa23ab069ab49411a2 Mon Sep 17 00:00:00 2001 From: David Kleingeld Date: Wed, 26 Nov 2025 16:46:17 +0100 Subject: [PATCH 0400/1030] Detail how to add symbols to samply's output (#43472) Release Notes: - N/A --- docs/src/performance.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/src/performance.md b/docs/src/performance.md index 9dff1d7f5ff0961d33169ee5c8761016d8fb7564..a04d7c5c342d4f0dfa506451d4b890bfdfd1013c 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -4,7 +4,13 @@ How to use our internal tools to profile and keep Zed fast. See what the CPU spends the most time on. Strongly recommend you use [samply](https://github.com/mstange/samply). It opens an interactive profile in -the browser. See its README on how to install and run. +the browser (specifically a local instance of [firefox_profiler](https://profiler.firefox.com/)). + +See [samply](https://github.com/mstange/samply)'s README on how to install and run. + +The profile.json does not contain any symbols. Firefox profiler can add the local symbols to the profile for for. To do that hit the upload local profile button in the top right corner. + +image # Task/Async profiling From 8aa53612fd194c787c74447c03b9fdefc31fec98 Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Wed, 26 Nov 2025 16:55:12 +0100 Subject: [PATCH 0401/1030] agent_ui: Add support for deleting thread history (#43370) This PR adds support for deleting your entire thread history. This is inspired by a Zed user from the meetup in Amsterdam, he was missing this feature. 
**Demo** https://github.com/user-attachments/assets/5a195007-1094-4ec6-902a-1b83db5ec508 Release Notes: - AI: Add support for deleting your entire thread history --------- Co-authored-by: Danilo Leal --- crates/agent/src/db.rs | 16 ++++ crates/agent/src/history_store.rs | 9 +++ crates/agent_ui/src/acp/thread_history.rs | 95 ++++++++++++++++++++++- crates/agent_ui/src/agent_panel.rs | 16 ++-- crates/agent_ui/src/agent_ui.rs | 2 + 5 files changed, 127 insertions(+), 11 deletions(-) diff --git a/crates/agent/src/db.rs b/crates/agent/src/db.rs index 6b6312e48176c93fbfb12f97e26c7943c6cbf89a..d5166c5df931b6f7fad63769449aaa9784b5263f 100644 --- a/crates/agent/src/db.rs +++ b/crates/agent/src/db.rs @@ -424,4 +424,20 @@ impl ThreadsDatabase { Ok(()) }) } + + pub fn delete_threads(&self) -> Task> { + let connection = self.connection.clone(); + + self.executor.spawn(async move { + let connection = connection.lock(); + + let mut delete = connection.exec_bound::<()>(indoc! {" + DELETE FROM threads + "})?; + + delete(())?; + + Ok(()) + }) + } } diff --git a/crates/agent/src/history_store.rs b/crates/agent/src/history_store.rs index 3bfbd99677feed5db53d96d2fa96316ac49abce4..efc0e3966d30fbc8bc7857c9da0404ce7dd4201f 100644 --- a/crates/agent/src/history_store.rs +++ b/crates/agent/src/history_store.rs @@ -188,6 +188,15 @@ impl HistoryStore { }) } + pub fn delete_threads(&mut self, cx: &mut Context) -> Task> { + let database_future = ThreadsDatabase::connect(cx); + cx.spawn(async move |this, cx| { + let database = database_future.await.map_err(|err| anyhow!(err))?; + database.delete_threads().await?; + this.update(cx, |this, cx| this.reload(cx)) + }) + } + pub fn delete_text_thread( &mut self, path: Arc, diff --git a/crates/agent_ui/src/acp/thread_history.rs b/crates/agent_ui/src/acp/thread_history.rs index 11718c63475212fbe8b996b2f6edae8b4295c91a..29759093303a684fdfd9ad255d269516ed7a29b9 100644 --- a/crates/agent_ui/src/acp/thread_history.rs +++ b/crates/agent_ui/src/acp/thread_history.rs @@ -1,5 +1,5 @@ use crate::acp::AcpThreadView; -use crate::{AgentPanel, RemoveSelectedThread}; +use crate::{AgentPanel, RemoveHistory, RemoveSelectedThread}; use agent::{HistoryEntry, HistoryStore}; use chrono::{Datelike as _, Local, NaiveDate, TimeDelta}; use editor::{Editor, EditorEvent}; @@ -12,7 +12,7 @@ use std::{fmt::Display, ops::Range}; use text::Bias; use time::{OffsetDateTime, UtcOffset}; use ui::{ - HighlightedLabel, IconButtonShape, ListItem, ListItemSpacing, Tooltip, WithScrollbar, + HighlightedLabel, IconButtonShape, ListItem, ListItemSpacing, Tab, Tooltip, WithScrollbar, prelude::*, }; @@ -25,6 +25,7 @@ pub struct AcpThreadHistory { search_query: SharedString, visible_items: Vec, local_timezone: UtcOffset, + confirming_delete_history: bool, _update_task: Task<()>, _subscriptions: Vec, } @@ -98,6 +99,7 @@ impl AcpThreadHistory { ) .unwrap(), search_query: SharedString::default(), + confirming_delete_history: false, _subscriptions: vec![search_editor_subscription, history_store_subscription], _update_task: Task::ready(()), }; @@ -331,6 +333,24 @@ impl AcpThreadHistory { task.detach_and_log_err(cx); } + fn remove_history(&mut self, _window: &mut Window, cx: &mut Context) { + self.history_store.update(cx, |store, cx| { + store.delete_threads(cx).detach_and_log_err(cx) + }); + self.confirming_delete_history = false; + cx.notify(); + } + + fn prompt_delete_history(&mut self, _window: &mut Window, cx: &mut Context) { + self.confirming_delete_history = true; + cx.notify(); + } + + fn cancel_delete_history(&mut 
self, _window: &mut Window, cx: &mut Context) { + self.confirming_delete_history = false; + cx.notify(); + } + fn render_list_items( &mut self, range: Range, @@ -447,6 +467,8 @@ impl Focusable for AcpThreadHistory { impl Render for AcpThreadHistory { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { + let has_no_history = self.history_store.read(cx).is_empty(cx); + v_flex() .key_context("ThreadHistory") .size_full() @@ -457,9 +479,12 @@ impl Render for AcpThreadHistory { .on_action(cx.listener(Self::select_last)) .on_action(cx.listener(Self::confirm)) .on_action(cx.listener(Self::remove_selected_thread)) + .on_action(cx.listener(|this, _: &RemoveHistory, window, cx| { + this.remove_history(window, cx); + })) .child( h_flex() - .h(px(41.)) // Match the toolbar perfectly + .h(Tab::container_height(cx)) .w_full() .py_1() .px_2() @@ -481,7 +506,7 @@ impl Render for AcpThreadHistory { .overflow_hidden() .flex_grow(); - if self.history_store.read(cx).is_empty(cx) { + if has_no_history { view.justify_center().items_center().child( Label::new("You don't have any past threads yet.") .size(LabelSize::Small) @@ -512,6 +537,68 @@ impl Render for AcpThreadHistory { ) } }) + .when(!has_no_history, |this| { + this.child( + h_flex() + .p_2() + .border_t_1() + .border_color(cx.theme().colors().border_variant) + .when(!self.confirming_delete_history, |this| { + this.child( + Button::new("delete_history", "Delete All History") + .full_width() + .style(ButtonStyle::Outlined) + .label_size(LabelSize::Small) + .on_click(cx.listener(|this, _, window, cx| { + this.prompt_delete_history(window, cx); + })), + ) + }) + .when(self.confirming_delete_history, |this| { + this.w_full() + .gap_2() + .flex_wrap() + .justify_between() + .child( + h_flex() + .flex_wrap() + .gap_1() + .child( + Label::new("Delete all threads?") + .size(LabelSize::Small), + ) + .child( + Label::new("You won't be able to recover them later.") + .size(LabelSize::Small) + .color(Color::Muted), + ), + ) + .child( + h_flex() + .gap_1() + .child( + Button::new("cancel_delete", "Cancel") + .label_size(LabelSize::Small) + .on_click(cx.listener(|this, _, window, cx| { + this.cancel_delete_history(window, cx); + })), + ) + .child( + Button::new("confirm_delete", "Delete") + .style(ButtonStyle::Tinted(ui::TintColor::Error)) + .color(Color::Error) + .label_size(LabelSize::Small) + .on_click(cx.listener(|_, _, window, cx| { + window.dispatch_action( + Box::new(RemoveHistory), + cx, + ); + })), + ), + ) + }), + ) + }) } } diff --git a/crates/agent_ui/src/agent_panel.rs b/crates/agent_ui/src/agent_panel.rs index 22eb11e24a8fd706c80aa65c3dcf5d8ae3876ddc..aa152018b180047815cc461d80e48dba0996b3cd 100644 --- a/crates/agent_ui/src/agent_panel.rs +++ b/crates/agent_ui/src/agent_panel.rs @@ -20,10 +20,9 @@ use zed_actions::agent::{OpenClaudeCodeOnboardingModal, ReauthenticateAgent}; use crate::ManageProfiles; use crate::ui::{AcpOnboardingModal, ClaudeCodeOnboardingModal}; use crate::{ - AddContextServer, AgentDiffPane, DeleteRecentlyOpenThread, Follow, InlineAssistant, - NewTextThread, NewThread, OpenActiveThreadAsMarkdown, OpenAgentDiff, OpenHistory, - ResetTrialEndUpsell, ResetTrialUpsell, ToggleNavigationMenu, ToggleNewThreadMenu, - ToggleOptionsMenu, + AddContextServer, AgentDiffPane, Follow, InlineAssistant, NewTextThread, NewThread, + OpenActiveThreadAsMarkdown, OpenAgentDiff, OpenHistory, ResetTrialEndUpsell, ResetTrialUpsell, + ToggleNavigationMenu, ToggleNewThreadMenu, ToggleOptionsMenu, acp::AcpThreadView, 
agent_configuration::{AgentConfiguration, AssistantConfigurationEvent}, slash_command::SlashCommandCompletionProvider, @@ -614,11 +613,14 @@ impl AgentPanel { if let Some(panel) = panel.upgrade() { menu = Self::populate_recently_opened_menu_section(menu, panel, cx); } - menu.action("View All", Box::new(OpenHistory)) - .end_slot_action(DeleteRecentlyOpenThread.boxed_clone()) + + menu = menu + .action("View All", Box::new(OpenHistory)) .fixed_width(px(320.).into()) .keep_open_on_confirm(false) - .key_context("NavigationMenu") + .key_context("NavigationMenu"); + + menu }); weak_panel .update(cx, |panel, cx| { diff --git a/crates/agent_ui/src/agent_ui.rs b/crates/agent_ui/src/agent_ui.rs index ae4cb70d4af419184519afb53ab62849b8a0eab8..5f5682b7dcc90d2b779744ba353380987a5907a1 100644 --- a/crates/agent_ui/src/agent_ui.rs +++ b/crates/agent_ui/src/agent_ui.rs @@ -69,6 +69,8 @@ actions!( CycleModeSelector, /// Expands the message editor to full size. ExpandMessageEditor, + /// Removes all thread history. + RemoveHistory, /// Opens the conversation history view. OpenHistory, /// Adds a context server to the configuration. From 6fbbc899049f8116685b731a3778830d709044eb Mon Sep 17 00:00:00 2001 From: "Joseph T. Lyons" Date: Wed, 26 Nov 2025 10:59:13 -0500 Subject: [PATCH 0402/1030] Bump Zed to v0.216 (#43564) Release Notes: - N/A --- Cargo.lock | 2 +- crates/zed/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c19e1b79bc859e77c99b992f4f5617894da82df..f10c2e1d13210d67d16d584637c0fb7b71d61eec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -21192,7 +21192,7 @@ dependencies = [ [[package]] name = "zed" -version = "0.215.0" +version = "0.216.0" dependencies = [ "acp_tools", "activity_indicator", diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 470f1ea28a3663838080b7e7bf98f58215a0a8fc..9e6a6a0fbd10a7695270f2651418d9e2cdc31b4c 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -2,7 +2,7 @@ description = "The fast, collaborative code editor." 
edition.workspace = true name = "zed" -version = "0.215.0" +version = "0.216.0" publish.workspace = true license = "GPL-3.0-or-later" authors = ["Zed Team "] From 0713ddcabc1f691cf89be7d564e4a2afdfeeac17 Mon Sep 17 00:00:00 2001 From: Mayank Verma Date: Wed, 26 Nov 2025 21:36:02 +0530 Subject: [PATCH 0403/1030] editor: Fix vertical scroll margin not accounting for file header height (#43521) Closes #43178 Release Notes: - Fixed vertical scroll margin not accounting for file header height Here's the before/after: With `{ "vertical_scroll_margin": 0 }` in `~/.config/zed/settings.json` https://github.com/user-attachments/assets/418c6d7f-de0f-4da6-a038-69927b1b8b88 --- crates/editor/src/editor_tests.rs | 84 +++++++++++++++++++++++++++++ crates/editor/src/scroll/actions.rs | 10 +++- 2 files changed, 92 insertions(+), 2 deletions(-) diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index f68b15b6b258a5ab730a13af9d7ecc62763321ea..09c9083f29a57addbdd5ca01b162f4abc023d0d7 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -28198,3 +28198,87 @@ async fn test_multibuffer_selections_with_folding(cx: &mut TestAppContext) { 3 "}); } + +#[gpui::test] +async fn test_multibuffer_scroll_cursor_top_margin(cx: &mut TestAppContext) { + init_test(cx, |_| {}); + + let (editor, cx) = cx.add_window_view(|window, cx| { + let multi_buffer = MultiBuffer::build_multi( + [ + ("1\n2\n3\n", vec![Point::row_range(0..3)]), + ("1\n2\n3\n4\n5\n6\n7\n8\n9\n", vec![Point::row_range(0..9)]), + ], + cx, + ); + Editor::new(EditorMode::full(), multi_buffer, None, window, cx) + }); + + let mut cx = EditorTestContext::for_editor_in(editor.clone(), cx).await; + + cx.assert_excerpts_with_selections(indoc! {" + [EXCERPT] + ˇ1 + 2 + 3 + [EXCERPT] + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + "}); + + cx.update_editor(|editor, window, cx| { + editor.change_selections(None.into(), window, cx, |s| { + s.select_ranges([MultiBufferOffset(19)..MultiBufferOffset(19)]); + }); + }); + + cx.assert_excerpts_with_selections(indoc! 
{" + [EXCERPT] + 1 + 2 + 3 + [EXCERPT] + 1 + 2 + 3 + 4 + 5 + 6 + ˇ7 + 8 + 9 + "}); + + cx.update_editor(|editor, _window, cx| { + editor.set_vertical_scroll_margin(0, cx); + }); + + cx.update_editor(|editor, window, cx| { + assert_eq!(editor.vertical_scroll_margin(), 0); + editor.scroll_cursor_top(&ScrollCursorTop, window, cx); + assert_eq!( + editor.snapshot(window, cx).scroll_position(), + gpui::Point::new(0., 12.0) + ); + }); + + cx.update_editor(|editor, _window, cx| { + editor.set_vertical_scroll_margin(3, cx); + }); + + cx.update_editor(|editor, window, cx| { + assert_eq!(editor.vertical_scroll_margin(), 3); + editor.scroll_cursor_top(&ScrollCursorTop, window, cx); + assert_eq!( + editor.snapshot(window, cx).scroll_position(), + gpui::Point::new(0., 9.0) + ); + }); +} diff --git a/crates/editor/src/scroll/actions.rs b/crates/editor/src/scroll/actions.rs index 3b2ed55df724485ee72e6afbc02c7111817869fb..5a1c849b2438fe987b24481b824375e188468916 100644 --- a/crates/editor/src/scroll/actions.rs +++ b/crates/editor/src/scroll/actions.rs @@ -71,14 +71,20 @@ impl Editor { window: &mut Window, cx: &mut Context, ) { + let display_snapshot = self.display_snapshot(cx); let scroll_margin_rows = self.vertical_scroll_margin() as u32; let new_screen_top = self .selections - .newest_display(&self.display_snapshot(cx)) + .newest_display(&display_snapshot) .head() .row() .0; - let new_screen_top = new_screen_top.saturating_sub(scroll_margin_rows); + let header_offset = display_snapshot + .buffer_snapshot() + .show_headers() + .then(|| display_snapshot.buffer_header_height()) + .unwrap_or(0); + let new_screen_top = new_screen_top.saturating_sub(scroll_margin_rows + header_offset); self.set_scroll_top_row(DisplayRow(new_screen_top), window, cx); } From 5403e74bbd9688301ba44a953115a890196c27f6 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 17:51:50 +0100 Subject: [PATCH 0404/1030] Add callable workflow to bump the version of an extension (#43566) This adds an intial workflow file that can be pulled in to create a bump commit for an extension version in an extension repository. Release Notes: - N/A --- .github/workflows/extension_bump.yml | 136 +++++++++++ tooling/xtask/src/tasks/workflows.rs | 2 + .../xtask/src/tasks/workflows/cherry_pick.rs | 22 +- .../xtask/src/tasks/workflows/compare_perf.rs | 18 +- .../src/tasks/workflows/extension_bump.rs | 217 ++++++++++++++++++ .../src/tasks/workflows/extension_tests.rs | 4 +- .../src/tasks/workflows/run_agent_evals.rs | 10 +- tooling/xtask/src/tasks/workflows/steps.rs | 12 +- tooling/xtask/src/tasks/workflows/vars.rs | 97 +++++++- 9 files changed, 485 insertions(+), 33 deletions(-) create mode 100644 .github/workflows/extension_bump.yml create mode 100644 tooling/xtask/src/tasks/workflows/extension_bump.rs diff --git a/.github/workflows/extension_bump.yml b/.github/workflows/extension_bump.yml new file mode 100644 index 0000000000000000000000000000000000000000..5933ab7fbb2fab753cbda729c82026102e395539 --- /dev/null +++ b/.github/workflows/extension_bump.yml @@ -0,0 +1,136 @@ +# Generated from xtask::workflows::extension_bump +# Rebuild with `cargo xtask workflows`. 
+name: extension_bump +env: + CARGO_TERM_COLOR: always + RUST_BACKTRACE: '1' + CARGO_INCREMENTAL: '0' + ZED_EXTENSION_CLI_SHA: 7cfce605704d41ca247e3f84804bf323f6c6caaf +on: + workflow_call: + inputs: + bump-type: + description: bump-type + type: string + default: patch + secrets: + app-id: + description: The app ID used to create the PR + required: true + app-secret: + description: The app secret for the corresponding app ID + required: true +jobs: + check_extension: + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - id: cache-zed-extension-cli + name: extension_tests::cache_zed_extension_cli + uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830 + with: + path: zed-extension + key: zed-extension-${{ env.ZED_EXTENSION_CLI_SHA }} + - name: extension_tests::download_zed_extension_cli + if: steps.cache-zed-extension-cli.outputs.cache-hit != 'true' + run: | + wget --quiet "https://zed-extension-cli.nyc3.digitaloceanspaces.com/$ZED_EXTENSION_CLI_SHA/x86_64-unknown-linux-gnu/zed-extension" + chmod +x zed-extension + shell: bash -euxo pipefail {0} + - name: extension_tests::check + run: | + mkdir -p /tmp/ext-scratch + mkdir -p /tmp/ext-output + ./zed-extension --source-dir . --scratch-dir /tmp/ext-scratch --output-dir /tmp/ext-output + shell: bash -euxo pipefail {0} + timeout-minutes: 1 + check_bump_needed: + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') + runs-on: namespace-profile-2x4-ubuntu-2404 + steps: + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + fetch-depth: 10 + - id: compare-versions-check + name: extension_bump::compare_versions + run: |+ + CURRENT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + git checkout "$(git log -1 --format=%H)"~1 + + PREV_COMMIT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + [[ "$CURRENT_VERSION" == "$PREV_COMMIT_VERSION" ]] && \ + echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ + echo "needs_bump=false" >> "$GITHUB_OUTPUT" + + shell: bash -euxo pipefail {0} + outputs: + needs_bump: ${{ steps.compare-versions-check.outputs.needs_bump }} + timeout-minutes: 1 + bump_extension_version: + needs: + - check_extension + - check_bump_needed + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && needs.check_bump_needed.outputs.needs_bump == 'true' + runs-on: namespace-profile-8x16-ubuntu-2204 + steps: + - id: generate-token + name: extension_bump::generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.app-id }} + private-key: ${{ secrets.app-secret }} + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: extension_bump::install_bump_2_version + run: pip install bump2version + shell: bash -euxo pipefail {0} + - id: bump-version + name: extension_bump::bump_version + run: | + OLD_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" + + cat < .bumpversion.cfg + [bumpversion] + current_version = "$OLD_VERSION" + + [bumpversion:file:Cargo.toml] + + [bumpversion:file:extension.toml] + + EOF + + bump2version --verbose ${{ inputs.bump-type }} + NEW_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < 
extension.toml)" + cargo update --workspace + + rm .bumpversion.cfg + + echo "old_version=${OLD_VERSION}" >> "$GITHUB_OUTPUT" + echo "new_version=${NEW_VERSION}" >> "$GITHUB_OUTPUT" + shell: bash -euxo pipefail {0} + - name: extension_bump::create_pull_request + uses: peter-evans/create-pull-request@v7 + with: + title: Bump version to ${{ steps.bump-version.outputs.new_version }} + body: This PR bumps the version of this extension to v${{ steps.bump-version.outputs.new_version }} + commit-message: Bump version to v${{ steps.bump-version.outputs.new_version }} + branch: bump-from-${{ steps.bump-version.outputs.old_version }} + committer: zed-zippy[bot] <234243425+zed-zippy[bot]@users.noreply.github.com> + base: main + delete-branch: true + token: ${{ steps.generate-token.outputs.token }} + sign-commits: true + timeout-minutes: 1 +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} + cancel-in-progress: true diff --git a/tooling/xtask/src/tasks/workflows.rs b/tooling/xtask/src/tasks/workflows.rs index c18eca52be8cf7fa369f46427c58b1d6b70e8bd0..334bc71e46fc5d7d99a9fab238dc60b874a1093d 100644 --- a/tooling/xtask/src/tasks/workflows.rs +++ b/tooling/xtask/src/tasks/workflows.rs @@ -7,6 +7,7 @@ mod after_release; mod cherry_pick; mod compare_perf; mod danger; +mod extension_bump; mod extension_tests; mod nix_build; mod release_nightly; @@ -44,6 +45,7 @@ pub fn run_workflows(_: GenerateWorkflowArgs) -> Result<()> { ("run_agent_evals.yml", run_agent_evals::run_agent_evals()), ("after_release.yml", after_release::after_release()), ("extension_tests.yml", extension_tests::extension_tests()), + ("extension_bump.yml", extension_bump::extension_bump()), ]; fs::create_dir_all(dir) .with_context(|| format!("Failed to create directory: {}", dir.display()))?; diff --git a/tooling/xtask/src/tasks/workflows/cherry_pick.rs b/tooling/xtask/src/tasks/workflows/cherry_pick.rs index 6181d79e042365b87ce2d6ad00a75580c71344a2..105bf74c4194a46ad4ca62991fae3a945eea150d 100644 --- a/tooling/xtask/src/tasks/workflows/cherry_pick.rs +++ b/tooling/xtask/src/tasks/workflows/cherry_pick.rs @@ -3,14 +3,14 @@ use gh_workflow::*; use crate::tasks::workflows::{ runners, steps::{self, NamedJob, named}, - vars::{self, Input, StepOutput}, + vars::{self, StepOutput, WorkflowInput}, }; pub fn cherry_pick() -> Workflow { - let branch = Input::string("branch", None); - let commit = Input::string("commit", None); - let channel = Input::string("channel", None); - let pr_number = Input::string("pr_number", None); + let branch = WorkflowInput::string("branch", None); + let commit = WorkflowInput::string("commit", None); + let channel = WorkflowInput::string("channel", None); + let pr_number = WorkflowInput::string("pr_number", None); let cherry_pick = run_cherry_pick(&branch, &commit, &channel); named::workflow() .run_name(format!("cherry_pick to {channel} #{pr_number}")) @@ -24,7 +24,11 @@ pub fn cherry_pick() -> Workflow { .add_job(cherry_pick.name, cherry_pick.job) } -fn run_cherry_pick(branch: &Input, commit: &Input, channel: &Input) -> NamedJob { +fn run_cherry_pick( + branch: &WorkflowInput, + commit: &WorkflowInput, + channel: &WorkflowInput, +) -> NamedJob { fn authenticate_as_zippy() -> (Step, StepOutput) { let step = named::uses( "actions", @@ -39,9 +43,9 @@ fn run_cherry_pick(branch: &Input, commit: &Input, channel: &Input) -> NamedJob } fn cherry_pick( - branch: &Input, - commit: &Input, - channel: &Input, + branch: &WorkflowInput, + commit: 
&WorkflowInput, + channel: &WorkflowInput, token: &StepOutput, ) -> Step { named::bash(&format!("./script/cherry-pick {branch} {commit} {channel}")) diff --git a/tooling/xtask/src/tasks/workflows/compare_perf.rs b/tooling/xtask/src/tasks/workflows/compare_perf.rs index db9f21de15fe159c369ad603e3ab0ff93e1cc7f9..1d111acc4f8a4dc47edea6f45c0b93c845b7cda2 100644 --- a/tooling/xtask/src/tasks/workflows/compare_perf.rs +++ b/tooling/xtask/src/tasks/workflows/compare_perf.rs @@ -5,13 +5,13 @@ use crate::tasks::workflows::steps::FluentBuilder; use crate::tasks::workflows::{ runners, steps::{self, NamedJob, named}, - vars::Input, + vars::WorkflowInput, }; pub fn compare_perf() -> Workflow { - let head = Input::string("head", None); - let base = Input::string("base", None); - let crate_name = Input::string("crate_name", Some("".to_owned())); + let head = WorkflowInput::string("head", None); + let base = WorkflowInput::string("base", None); + let crate_name = WorkflowInput::string("crate_name", Some("".to_owned())); let run_perf = run_perf(&base, &head, &crate_name); named::workflow() .on(Event::default().workflow_dispatch( @@ -23,8 +23,12 @@ pub fn compare_perf() -> Workflow { .add_job(run_perf.name, run_perf.job) } -pub fn run_perf(base: &Input, head: &Input, crate_name: &Input) -> NamedJob { - fn cargo_perf_test(ref_name: &Input, crate_name: &Input) -> Step { +pub fn run_perf( + base: &WorkflowInput, + head: &WorkflowInput, + crate_name: &WorkflowInput, +) -> NamedJob { + fn cargo_perf_test(ref_name: &WorkflowInput, crate_name: &WorkflowInput) -> Step { named::bash(&format!( " if [ -n \"{crate_name}\" ]; then @@ -39,7 +43,7 @@ pub fn run_perf(base: &Input, head: &Input, crate_name: &Input) -> NamedJob { named::uses("taiki-e", "install-action", "hyperfine") } - fn compare_runs(head: &Input, base: &Input) -> Step { + fn compare_runs(head: &WorkflowInput, base: &WorkflowInput) -> Step { named::bash(&format!( "cargo perf-compare --save=results.md {base} {head}" )) diff --git a/tooling/xtask/src/tasks/workflows/extension_bump.rs b/tooling/xtask/src/tasks/workflows/extension_bump.rs new file mode 100644 index 0000000000000000000000000000000000000000..66de1f86aa998269abc24f1de375dbe1800acc31 --- /dev/null +++ b/tooling/xtask/src/tasks/workflows/extension_bump.rs @@ -0,0 +1,217 @@ +use gh_workflow::*; +use indoc::indoc; + +use crate::tasks::workflows::{ + extension_tests::{self}, + runners, + steps::{self, CommonJobConditions, DEFAULT_REPOSITORY_OWNER_GUARD, NamedJob, named}, + vars::{ + JobOutput, StepOutput, WorkflowInput, WorkflowSecret, one_workflow_per_non_main_branch, + }, +}; + +const BUMPVERSION_CONFIG: &str = indoc! {r#" + [bumpversion] + current_version = "$OLD_VERSION" + + [bumpversion:file:Cargo.toml] + + [bumpversion:file:extension.toml] + "# +}; + +const VERSION_CHECK: &str = r#"sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml"#; + +// This is used by various extensions repos in the zed-extensions org to bump extension versions. 
+pub(crate) fn extension_bump() -> Workflow { + let bump_type = WorkflowInput::string("bump-type", Some("patch".to_owned())); + + let app_id = WorkflowSecret::new("app-id", "The app ID used to create the PR"); + let app_secret = + WorkflowSecret::new("app-secret", "The app secret for the corresponding app ID"); + + let test_extension = extension_tests::check_extension(); + let (check_bump_needed, needs_bump) = check_bump_needed(); + let bump_version = bump_extension_version( + &[&test_extension, &check_bump_needed], + &bump_type, + needs_bump.as_job_output(&check_bump_needed), + &app_id, + &app_secret, + ); + + named::workflow() + .add_event( + Event::default().workflow_call( + WorkflowCall::default() + .add_input(bump_type.name, bump_type.call_input()) + .secrets([ + (app_id.name.to_owned(), app_id.secret_configuration()), + ( + app_secret.name.to_owned(), + app_secret.secret_configuration(), + ), + ]), + ), + ) + .concurrency(one_workflow_per_non_main_branch()) + .add_env(("CARGO_TERM_COLOR", "always")) + .add_env(("RUST_BACKTRACE", 1)) + .add_env(("CARGO_INCREMENTAL", 0)) + .add_env(( + "ZED_EXTENSION_CLI_SHA", + extension_tests::ZED_EXTENSION_CLI_SHA, + )) + .add_job(test_extension.name, test_extension.job) + .add_job(check_bump_needed.name, check_bump_needed.job) + .add_job(bump_version.name, bump_version.job) +} + +fn check_bump_needed() -> (NamedJob, StepOutput) { + let (compare_versions, version_changed) = compare_versions(); + + let job = Job::default() + .with_repository_owner_guard() + .outputs([(version_changed.name.to_owned(), version_changed.to_string())]) + .runs_on(runners::LINUX_SMALL) + .timeout_minutes(1u32) + .add_step(steps::checkout_repo().add_with(("fetch-depth", 10))) + .add_step(compare_versions); + + (named::job(job), version_changed) +} + +/// Compares the current and previous commit and checks whether versions changed inbetween. +fn compare_versions() -> (Step, StepOutput) { + let check_needs_bump = named::bash(format!( + indoc! 
{ + r#" + CURRENT_VERSION="$({})" + + git checkout "$(git log -1 --format=%H)"~1 + + PREV_COMMIT_VERSION="$({})" + + [[ "$CURRENT_VERSION" == "$PREV_COMMIT_VERSION" ]] && \ + echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ + echo "needs_bump=false" >> "$GITHUB_OUTPUT" + + "# + }, + VERSION_CHECK, VERSION_CHECK + )) + .id("compare-versions-check"); + + let needs_bump = StepOutput::new(&check_needs_bump, "needs_bump"); + + (check_needs_bump, needs_bump) +} + +fn bump_extension_version( + dependencies: &[&NamedJob], + bump_type: &WorkflowInput, + needs_bump: JobOutput, + app_id: &WorkflowSecret, + app_secret: &WorkflowSecret, +) -> NamedJob { + let (generate_token, generated_token) = generate_token(app_id, app_secret); + let (bump_version, old_version, new_version) = bump_version(bump_type); + + let job = steps::dependant_job(dependencies) + .cond(Expression::new(format!( + "{DEFAULT_REPOSITORY_OWNER_GUARD} && {} == 'true'", + needs_bump.expr(), + ))) + .runs_on(runners::LINUX_LARGE) + .timeout_minutes(1u32) + .add_step(generate_token) + .add_step(steps::checkout_repo()) + .add_step(install_bump_2_version()) + .add_step(bump_version) + .add_step(create_pull_request( + old_version, + new_version, + generated_token, + )); + + named::job(job) +} + +fn generate_token(app_id: &WorkflowSecret, app_secret: &WorkflowSecret) -> (Step, StepOutput) { + let step = named::uses("actions", "create-github-app-token", "v2") + .id("generate-token") + .add_with( + Input::default() + .add("app-id", app_id.to_string()) + .add("private-key", app_secret.to_string()), + ); + + let generated_token = StepOutput::new(&step, "token"); + + (step, generated_token) +} + +fn install_bump_2_version() -> Step { + named::run(runners::Platform::Linux, "pip install bump2version") +} + +fn bump_version(bump_type: &WorkflowInput) -> (Step, StepOutput, StepOutput) { + let step = named::bash(format!( + indoc! 
{r#" + OLD_VERSION="$({})" + + cat < .bumpversion.cfg + {} + EOF + + bump2version --verbose {} + NEW_VERSION="$({})" + cargo update --workspace + + rm .bumpversion.cfg + + echo "old_version=${{OLD_VERSION}}" >> "$GITHUB_OUTPUT" + echo "new_version=${{NEW_VERSION}}" >> "$GITHUB_OUTPUT" + "# + }, + VERSION_CHECK, BUMPVERSION_CONFIG, bump_type, VERSION_CHECK + )) + .id("bump-version"); + + let old_version = StepOutput::new(&step, "old_version"); + let new_version = StepOutput::new(&step, "new_version"); + (step, old_version, new_version) +} + +fn create_pull_request( + old_version: StepOutput, + new_version: StepOutput, + generated_token: StepOutput, +) -> Step { + let formatted_version = format!("v{}", new_version); + + named::uses("peter-evans", "create-pull-request", "v7").with( + Input::default() + .add("title", format!("Bump version to {}", new_version)) + .add( + "body", + format!( + "This PR bumps the version of this extension to {}", + formatted_version + ), + ) + .add( + "commit-message", + format!("Bump version to {}", formatted_version), + ) + .add("branch", format!("bump-from-{}", old_version)) + .add( + "committer", + "zed-zippy[bot] <234243425+zed-zippy[bot]@users.noreply.github.com>", + ) + .add("base", "main") + .add("delete-branch", true) + .add("token", generated_token.to_string()) + .add("sign-commits", true), + ) +} diff --git a/tooling/xtask/src/tasks/workflows/extension_tests.rs b/tooling/xtask/src/tasks/workflows/extension_tests.rs index 4ee094fd37608c2427037effac3a6afa182014ba..8ea1435292372e33d5f98d1b3a5d5db0582a6a46 100644 --- a/tooling/xtask/src/tasks/workflows/extension_tests.rs +++ b/tooling/xtask/src/tasks/workflows/extension_tests.rs @@ -9,7 +9,7 @@ use crate::tasks::workflows::{ }; const RUN_TESTS_INPUT: &str = "run_tests"; -const ZED_EXTENSION_CLI_SHA: &str = "7cfce605704d41ca247e3f84804bf323f6c6caaf"; +pub(crate) const ZED_EXTENSION_CLI_SHA: &str = "7cfce605704d41ca247e3f84804bf323f6c6caaf"; // This is used by various extensions repos in the zed-extensions org to run automated tests. 
pub(crate) fn extension_tests() -> Workflow { @@ -77,7 +77,7 @@ fn check_rust() -> NamedJob { named::job(job) } -fn check_extension() -> NamedJob { +pub(crate) fn check_extension() -> NamedJob { let (cache_download, cache_hit) = cache_zed_extension_cli(); let job = Job::default() .with_repository_owner_guard() diff --git a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs index ec9b70a2db9049b62676b43d614818374e0930a1..220d3872f72326f42845622b5e3c61f4819f4550 100644 --- a/tooling/xtask/src/tasks/workflows/run_agent_evals.rs +++ b/tooling/xtask/src/tasks/workflows/run_agent_evals.rs @@ -3,12 +3,12 @@ use gh_workflow::{Event, Expression, Job, Run, Schedule, Step, Use, Workflow, Wo use crate::tasks::workflows::{ runners::{self, Platform}, steps::{self, FluentBuilder as _, NamedJob, named, setup_cargo_config}, - vars::{self, Input}, + vars::{self, WorkflowInput}, }; pub(crate) fn run_agent_evals() -> Workflow { let agent_evals = agent_evals(); - let model_name = Input::string("model_name", None); + let model_name = WorkflowInput::string("model_name", None); named::workflow() .on(Event::default().workflow_dispatch( @@ -29,8 +29,8 @@ pub(crate) fn run_agent_evals() -> Workflow { } pub(crate) fn run_unit_evals() -> Workflow { - let model_name = Input::string("model_name", None); - let commit_sha = Input::string("commit_sha", None); + let model_name = WorkflowInput::string("model_name", None); + let commit_sha = WorkflowInput::string("commit_sha", None); let unit_evals = named::job(unit_evals(Some(&commit_sha))); @@ -117,7 +117,7 @@ fn cron_unit_evals() -> NamedJob { named::job(unit_evals(None).add_step(send_failure_to_slack())) } -fn unit_evals(commit: Option<&Input>) -> Job { +fn unit_evals(commit: Option<&WorkflowInput>) -> Job { let script_step = add_api_keys(steps::script("./script/run-unit-evals")); Job::default() diff --git a/tooling/xtask/src/tasks/workflows/steps.rs b/tooling/xtask/src/tasks/workflows/steps.rs index 910b344cb7319e4f58911b3025632e560553716a..e20dafe18a660a0067708cc1e9d15d59572e5f53 100644 --- a/tooling/xtask/src/tasks/workflows/steps.rs +++ b/tooling/xtask/src/tasks/workflows/steps.rs @@ -142,9 +142,13 @@ pub struct NamedJob { // } // } +pub(crate) const DEFAULT_REPOSITORY_OWNER_GUARD: &str = + "(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions')"; + pub fn repository_owner_guard_expression(trigger_always: bool) -> Expression { Expression::new(format!( - "(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions'){}", + "{}{}", + DEFAULT_REPOSITORY_OWNER_GUARD, trigger_always.then_some(" && always()").unwrap_or_default() )) } @@ -248,8 +252,10 @@ pub mod named { /// Returns a bash-script step with the same name as the enclosing function. /// (You shouldn't inline this function into the workflow definition, you must /// wrap it in a new function.) - pub fn bash(script: &str) -> Step { - Step::new(function_name(1)).run(script).shell(BASH_SHELL) + pub fn bash(script: impl AsRef) -> Step { + Step::new(function_name(1)) + .run(script.as_ref()) + .shell(BASH_SHELL) } /// Returns a pwsh-script step with the same name as the enclosing function. 
diff --git a/tooling/xtask/src/tasks/workflows/vars.rs b/tooling/xtask/src/tasks/workflows/vars.rs index bbb98af757dd9b794ae7c57d6ddb4f1d3d10019d..8dae64a1ea10ca891d23cabb989f5073ddd1755d 100644 --- a/tooling/xtask/src/tasks/workflows/vars.rs +++ b/tooling/xtask/src/tasks/workflows/vars.rs @@ -1,6 +1,9 @@ use std::cell::RefCell; -use gh_workflow::{Concurrency, Env, Expression, Step, WorkflowDispatchInput}; +use gh_workflow::{ + Concurrency, Env, Expression, Step, WorkflowCallInput, WorkflowCallSecret, + WorkflowDispatchInput, +}; use crate::tasks::workflows::{runners::Platform, steps::NamedJob}; @@ -132,7 +135,7 @@ impl PathCondition { } pub(crate) struct StepOutput { - name: &'static str, + pub name: &'static str, step_id: String, } @@ -151,6 +154,13 @@ impl StepOutput { pub fn expr(&self) -> String { format!("steps.{}.outputs.{}", self.step_id, self.name) } + + pub fn as_job_output(self, job: &NamedJob) -> JobOutput { + JobOutput { + job_name: job.name.clone(), + name: self.name, + } + } } impl serde::Serialize for StepOutput { @@ -164,17 +174,43 @@ impl serde::Serialize for StepOutput { impl std::fmt::Display for StepOutput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "${{{{ steps.{}.outputs.{} }}}}", self.step_id, self.name) + write!(f, "${{{{ {} }}}}", self.expr()) + } +} + +pub(crate) struct JobOutput { + job_name: String, + name: &'static str, +} + +impl JobOutput { + pub fn expr(&self) -> String { + format!("needs.{}.outputs.{}", self.job_name, self.name) + } +} + +impl serde::Serialize for JobOutput { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +impl std::fmt::Display for JobOutput { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "${{{{ {} }}}}", self.expr()) } } -pub struct Input { +pub struct WorkflowInput { pub input_type: &'static str, pub name: &'static str, pub default: Option<String>, } -impl Input { +impl WorkflowInput { pub fn string(name: &'static str, default: Option<String>) -> Self { Self { input_type: "string", @@ -191,15 +227,62 @@ impl Input { default: self.default.clone(), } } + + pub fn call_input(&self) -> WorkflowCallInput { + WorkflowCallInput { + description: self.name.to_owned(), + required: self.default.is_none(), + input_type: self.input_type.to_owned(), + default: self.default.clone(), + } + } } -impl std::fmt::Display for Input { +impl std::fmt::Display for WorkflowInput { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "${{{{ inputs.{} }}}}", self.name) } } -impl serde::Serialize for Input { +impl serde::Serialize for WorkflowInput { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} + +pub(crate) struct WorkflowSecret { + pub name: &'static str, + description: String, + required: bool, +} + +impl WorkflowSecret { + pub fn new(name: &'static str, description: impl ToString) -> Self { + Self { + name, + description: description.to_string(), + required: true, + } + } + + pub fn secret_configuration(&self) -> WorkflowCallSecret { + WorkflowCallSecret { + description: self.description.clone(), + required: self.required, + } + } +} + +impl std::fmt::Display for WorkflowSecret { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "${{{{ secrets.{} }}}}", self.name) + } +} + +impl serde::Serialize for WorkflowSecret { + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: serde::Serializer, From 57e1bb810632c0858ea2e8fd6833c6ac261bdde1 Mon Sep 17 00:00:00 2001 From: Marshall Bowers Date: Wed, 26 Nov 2025 11:53:05 -0500 Subject: [PATCH 0405/1030] collab: Add `zed-zippy[bot]` to the `GET /contributor` endpoint (#43568) This PR adds the `zed-zippy[bot]` user to the `GET /contributor` endpoint so that it passes the CLA check. Release Notes: - N/A --- crates/collab/src/api/contributors.rs | 40 +++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/crates/collab/src/api/contributors.rs b/crates/collab/src/api/contributors.rs index 8cfef0ad7e717614e23c3cf9d04852c976f1f55f..574667c723dce62b905e3d2a0b34de1ca4c88c8e 100644 --- a/crates/collab/src/api/contributors.rs +++ b/crates/collab/src/api/contributors.rs @@ -64,6 +64,16 @@ async fn check_is_contributor( })); } + if ZedZippyBot::is_zed_zippy_bot(&params) { + return Ok(Json(CheckIsContributorResponse { + signed_at: Some( + ZedZippyBot::created_at() + .and_utc() + .to_rfc3339_opts(SecondsFormat::Millis, true), + ), + })); + } + Ok(Json(CheckIsContributorResponse { signed_at: app .db @@ -103,6 +113,36 @@ impl RenovateBot { } } +/// The Zed Zippy bot GitHub user (`zed-zippy[bot]`). +/// +/// https://api.github.com/users/zed-zippy[bot] +struct ZedZippyBot; + +impl ZedZippyBot { + const LOGIN: &'static str = "zed-zippy[bot]"; + const USER_ID: i32 = 234243425; + + /// Returns the `created_at` timestamp for the Zed Zippy bot user. + fn created_at() -> &'static NaiveDateTime { + static CREATED_AT: OnceLock<NaiveDateTime> = OnceLock::new(); + CREATED_AT.get_or_init(|| { + chrono::DateTime::parse_from_rfc3339("2025-09-24T17:00:11Z") + .expect("failed to parse 'created_at' for 'zed-zippy[bot]'") + .naive_utc() + }) + } + + /// Returns whether the given contributor selector corresponds to the Zed Zippy bot user. + fn is_zed_zippy_bot(contributor: &ContributorSelector) -> bool { + match contributor { + ContributorSelector::GitHubLogin { github_login } => github_login == Self::LOGIN, + ContributorSelector::GitHubUserId { github_user_id } => { + github_user_id == &Self::USER_ID + } + } + } +} + #[derive(Debug, Deserialize)] struct AddContributorBody { github_user_id: i32, From 757c043171b95013fa5ffa5c29b2a9fae0b7fb90 Mon Sep 17 00:00:00 2001 From: Cole Miller Date: Wed, 26 Nov 2025 11:56:34 -0500 Subject: [PATCH 0406/1030] Fix git features not working when a Windows host collaborates with a unix guest (#43515) We were using `std::path::Path::strip_prefix` to determine which repository an absolute path belongs to, which doesn't work when the paths are Windows-style but the code is running on unix. Replace it with a platform-agnostic implementation of `strip_prefix`.
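For illustration, here is a minimal standalone sketch of the separator-aware approach (the helper name and the edge-case handling are simplified assumptions for this example; the actual implementation is the `PathStyle::strip_prefix` added to `crates/util/src/paths.rs` in this patch):

```rust
use std::path::Path;

/// Hypothetical helper, named only for this example: strip `parent` from
/// `child` using an explicit separator instead of the host platform's rules.
fn strip_prefix_with_separator<'a>(child: &'a str, parent: &str, sep: char) -> Option<&'a str> {
    // Treat a trailing separator on the parent as optional.
    let parent = parent.strip_suffix(sep).unwrap_or(parent);
    let rest = child.strip_prefix(parent)?;
    if rest.is_empty() {
        // `child` and `parent` are the same path.
        Some("")
    } else {
        // Only accept the match if it ends on a path-component boundary.
        rest.strip_prefix(sep)
    }
}

fn main() {
    let repo = "C:\\work\\zed";
    let file = "C:\\work\\zed\\crates\\util\\src\\paths.rs";

    // On a unix host, backslash is not a separator, so the whole Windows-style
    // path is a single component and std's strip_prefix returns an error.
    println!("std strip_prefix: {:?}", Path::new(file).strip_prefix(repo));

    // The separator-aware version behaves the same on any host.
    println!(
        "separator-aware:  {:?}",
        strip_prefix_with_separator(file, repo, '\\')
    );
}
```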
Release Notes: - Fixed git features not working when a Windows host collaborates with a unix guest --- crates/agent_ui/src/acp/message_editor.rs | 2 +- crates/agent_ui/src/acp/thread_view.rs | 2 +- crates/file_finder/src/file_finder.rs | 4 +- crates/file_finder/src/file_finder_tests.rs | 2 +- crates/file_finder/src/open_path_prompt.rs | 12 +- crates/fuzzy/src/paths.rs | 2 +- crates/git_ui/src/git_panel.rs | 7 +- crates/project/src/git_store.rs | 6 +- crates/project/src/project.rs | 2 +- crates/project_panel/src/project_panel.rs | 2 +- crates/settings_ui/src/settings_ui.rs | 2 +- .../src/toolchain_selector.rs | 2 +- crates/util/src/paths.rs | 134 +++++++++++++++++- crates/vim/src/command.rs | 4 +- crates/worktree/src/worktree.rs | 6 +- 15 files changed, 159 insertions(+), 30 deletions(-) diff --git a/crates/agent_ui/src/acp/message_editor.rs b/crates/agent_ui/src/acp/message_editor.rs index 169220a3614bf2d74d24a9638f87b9613a556bd6..facb86f3b87e746d35d8b91f27550e351b10e8b6 100644 --- a/crates/agent_ui/src/acp/message_editor.rs +++ b/crates/agent_ui/src/acp/message_editor.rs @@ -1423,7 +1423,7 @@ mod tests { rel_path("b/eight.txt"), ]; - let slash = PathStyle::local().separator(); + let slash = PathStyle::local().primary_separator(); let mut opened_editors = Vec::new(); for path in paths { diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 1c9e3f83e383658051f7799a7e3096f532addbe1..45b15e6e9e3eaa03fc69912eab3e778335b714d4 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -3989,7 +3989,7 @@ impl AcpThreadView { let file = buffer.read(cx).file()?; let path = file.path(); let path_style = file.path_style(cx); - let separator = file.path_style(cx).separator(); + let separator = file.path_style(cx).primary_separator(); let file_path = path.parent().and_then(|parent| { if parent.is_empty() { diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index 6f64dc20d0b97f1b12fb627c72209df555e6f1a7..050d7a45a1b46e94a195f88e49fd6795ce37f09f 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -1060,7 +1060,7 @@ impl FileFinderDelegate { ( filename.to_string(), Vec::new(), - prefix.display(path_style).to_string() + path_style.separator(), + prefix.display(path_style).to_string() + path_style.primary_separator(), Vec::new(), ) } else { @@ -1071,7 +1071,7 @@ impl FileFinderDelegate { .map_or(String::new(), |f| f.to_string_lossy().into_owned()), Vec::new(), entry_path.absolute.parent().map_or(String::new(), |path| { - path.to_string_lossy().into_owned() + path_style.separator() + path.to_string_lossy().into_owned() + path_style.primary_separator() }), Vec::new(), ) diff --git a/crates/file_finder/src/file_finder_tests.rs b/crates/file_finder/src/file_finder_tests.rs index d6971da15fde8406ac4d00fb613906c91e25d8d4..aeb9d794c2b4bc014bd332ed03dc8e5c3dda709b 100644 --- a/crates/file_finder/src/file_finder_tests.rs +++ b/crates/file_finder/src/file_finder_tests.rs @@ -1598,7 +1598,7 @@ async fn test_history_match_positions(cx: &mut gpui::TestAppContext) { assert_eq!(file_label.highlight_indices(), &[0, 1, 2]); assert_eq!( path_label.text(), - format!("test{}", PathStyle::local().separator()) + format!("test{}", PathStyle::local().primary_separator()) ); assert_eq!(path_label.highlight_indices(), &[] as &[usize]); }); diff --git a/crates/file_finder/src/open_path_prompt.rs b/crates/file_finder/src/open_path_prompt.rs index 
53bad3b34880d69aba169df965db71f69b2296eb..2ae0c47776acb5c58b7d0919aa7522fb64d923d0 100644 --- a/crates/file_finder/src/open_path_prompt.rs +++ b/crates/file_finder/src/open_path_prompt.rs @@ -559,7 +559,7 @@ impl PickerDelegate for OpenPathDelegate { parent_path, candidate.path.string, if candidate.is_dir { - path_style.separator() + path_style.primary_separator() } else { "" } @@ -569,7 +569,7 @@ impl PickerDelegate for OpenPathDelegate { parent_path, candidate.path.string, if candidate.is_dir { - path_style.separator() + path_style.primary_separator() } else { "" } @@ -826,7 +826,13 @@ impl PickerDelegate for OpenPathDelegate { } fn placeholder_text(&self, _window: &mut Window, _cx: &mut App) -> Arc { - Arc::from(format!("[directory{}]filename.ext", self.path_style.separator()).as_str()) + Arc::from( + format!( + "[directory{}]filename.ext", + self.path_style.primary_separator() + ) + .as_str(), + ) } fn separators_after_indices(&self) -> Vec { diff --git a/crates/fuzzy/src/paths.rs b/crates/fuzzy/src/paths.rs index b35f0c1ce6cec73995838eb82bf782d00f0129af..cce0e082840c4cd05d6e2b21eac0073d3eb7700f 100644 --- a/crates/fuzzy/src/paths.rs +++ b/crates/fuzzy/src/paths.rs @@ -107,7 +107,7 @@ pub fn match_fixed_path_set( .display(path_style) .chars() .collect::>(); - path_prefix_chars.extend(path_style.separator().chars()); + path_prefix_chars.extend(path_style.primary_separator().chars()); let lowercase_pfx = path_prefix_chars .iter() .map(|c| c.to_ascii_lowercase()) diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 4a5cd56ec90fd95fe94d55edfdeb7e2114fea820..1f66d194477c64fef207e63d4c87ad4d76675f65 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -4351,8 +4351,11 @@ impl GitPanel { .when(strikethrough, Label::strikethrough), ), (true, false) => this.child( - self.entry_label(format!("{dir}{}", path_style.separator()), path_color) - .when(strikethrough, Label::strikethrough), + self.entry_label( + format!("{dir}{}", path_style.primary_separator()), + path_color, + ) + .when(strikethrough, Label::strikethrough), ), _ => this, } diff --git a/crates/project/src/git_store.rs b/crates/project/src/git_store.rs index 8b83fa48e9b61a7200a001f4d42227b1c2302874..e7a69c0e81464ac74d02bc8a552089ddcd7db039 100644 --- a/crates/project/src/git_store.rs +++ b/crates/project/src/git_store.rs @@ -3222,10 +3222,8 @@ impl RepositorySnapshot { abs_path: &Path, path_style: PathStyle, ) -> Option { - abs_path - .strip_prefix(&work_directory_abs_path) - .ok() - .and_then(|path| RepoPath::from_std_path(path, path_style).ok()) + let rel_path = path_style.strip_prefix(abs_path, work_directory_abs_path)?; + Some(RepoPath::from_rel_path(&rel_path)) } pub fn had_conflict_on_last_merge_head_change(&self, repo_path: &RepoPath) -> bool { diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index beebf5a1d133eb75fdd98184ddf7880b9cedc7e0..afc854bceb59f88a496b6fcb99e840184277c894 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -927,7 +927,7 @@ impl DirectoryLister { .map(|worktree| worktree.read(cx).abs_path().to_string_lossy().into_owned()) .or_else(|| std::env::home_dir().map(|dir| dir.to_string_lossy().into_owned())) .map(|mut s| { - s.push_str(path_style.separator()); + s.push_str(path_style.primary_separator()); s }) .unwrap_or_else(|| { diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index 
6a7036fce81eee5810dfbc41f57119efd22cfdca..cde0b89bb9115476744ed606f16174039db62cf6 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -4837,7 +4837,7 @@ impl ProjectPanel { .collect::>(); let active_index = folded_ancestors.active_index(); let components_len = components.len(); - let delimiter = SharedString::new(path_style.separator()); + let delimiter = SharedString::new(path_style.primary_separator()); for (index, component) in components.iter().enumerate() { if index != 0 { let delimiter_target_index = index - 1; diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 564e78dc57b8b27398d79f861b538a9cc9dbf21c..499d6b04653b06c41ef4e302cfd4b4e77efc95c9 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -2192,7 +2192,7 @@ impl SettingsWindow { format!( "{}{}{}", directory_name, - path_style.separator(), + path_style.primary_separator(), path.display(path_style) ) } diff --git a/crates/toolchain_selector/src/toolchain_selector.rs b/crates/toolchain_selector/src/toolchain_selector.rs index c017483a32325d13e85a5db34566a3b0bf6e15a5..96f692694dcf6b1adaa6494a4c1cbf6905c57c7c 100644 --- a/crates/toolchain_selector/src/toolchain_selector.rs +++ b/crates/toolchain_selector/src/toolchain_selector.rs @@ -876,7 +876,7 @@ impl ToolchainSelectorDelegate { .strip_prefix(&worktree_root) .ok() .and_then(|suffix| suffix.to_str()) - .map(|suffix| format!(".{}{suffix}", path_style.separator()).into()) + .map(|suffix| format!(".{}{suffix}", path_style.primary_separator()).into()) .unwrap_or(path) } } diff --git a/crates/util/src/paths.rs b/crates/util/src/paths.rs index 74929c6c831bcdb035756483ddbf9b2bc9ad444c..0834039e0e59ff4149614ad863bd7a07b4a2efd7 100644 --- a/crates/util/src/paths.rs +++ b/crates/util/src/paths.rs @@ -3,6 +3,7 @@ use globset::{Glob, GlobSet, GlobSetBuilder}; use itertools::Itertools; use regex::Regex; use serde::{Deserialize, Serialize}; +use std::borrow::Cow; use std::cmp::Ordering; use std::error::Error; use std::fmt::{Display, Formatter}; @@ -331,13 +332,20 @@ impl PathStyle { } #[inline] - pub fn separator(&self) -> &'static str { + pub fn primary_separator(&self) -> &'static str { match self { PathStyle::Posix => "/", PathStyle::Windows => "\\", } } + pub fn separators(&self) -> &'static [&'static str] { + match self { + PathStyle::Posix => &["/"], + PathStyle::Windows => &["\\", "/"], + } + } + pub fn is_windows(&self) -> bool { *self == PathStyle::Windows } @@ -353,25 +361,54 @@ impl PathStyle { } else { Some(format!( "{left}{}{right}", - if left.ends_with(self.separator()) { + if left.ends_with(self.primary_separator()) { "" } else { - self.separator() + self.primary_separator() } )) } } pub fn split(self, path_like: &str) -> (Option<&str>, &str) { - let Some(pos) = path_like.rfind(self.separator()) else { + let Some(pos) = path_like.rfind(self.primary_separator()) else { return (None, path_like); }; - let filename_start = pos + self.separator().len(); + let filename_start = pos + self.primary_separator().len(); ( Some(&path_like[..filename_start]), &path_like[filename_start..], ) } + + pub fn strip_prefix<'a>( + &self, + child: &'a Path, + parent: &'a Path, + ) -> Option> { + let parent = parent.to_str()?; + if parent.is_empty() { + return RelPath::new(child, *self).ok(); + } + let parent = self + .separators() + .iter() + .find_map(|sep| parent.strip_suffix(sep)) + .unwrap_or(parent); + let child = child.to_str()?; + let stripped = 
child.strip_prefix(parent)?; + if let Some(relative) = self + .separators() + .iter() + .find_map(|sep| stripped.strip_prefix(sep)) + { + RelPath::new(relative.as_ref(), *self).ok() + } else if stripped.is_empty() { + Some(Cow::Borrowed(RelPath::empty())) + } else { + None + } + } } #[derive(Debug, Clone)] @@ -788,7 +825,7 @@ impl PathMatcher { fn check_with_end_separator(&self, path: &Path) -> bool { let path_str = path.to_string_lossy(); - let separator = self.path_style.separator(); + let separator = self.path_style.primary_separator(); if path_str.ends_with(separator) { false } else { @@ -1311,6 +1348,8 @@ impl WslPath { #[cfg(test)] mod tests { + use crate::rel_path::rel_path; + use super::*; use util_macros::perf; @@ -2480,6 +2519,89 @@ mod tests { assert_eq!(strip_path_suffix(base, suffix), None); } + #[test] + fn test_strip_prefix() { + let expected = [ + ( + PathStyle::Posix, + "/a/b/c", + "/a/b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Posix, + "/a/b/c", + "/a/b/", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Posix, + "/a/b/c", + "/", + Some(rel_path("a/b/c").into_arc()), + ), + (PathStyle::Posix, "/a/b/c", "", None), + (PathStyle::Posix, "/a/b//c", "/a/b/", None), + (PathStyle::Posix, "/a/bc", "/a/b", None), + ( + PathStyle::Posix, + "/a/b/c", + "/a/b/c", + Some(rel_path("").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\a\\b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\a\\b\\", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b\\c", + "C:\\", + Some(rel_path("a/b/c").into_arc()), + ), + (PathStyle::Windows, "C:\\a\\b\\c", "", None), + (PathStyle::Windows, "C:\\a\\b\\\\c", "C:\\a\\b\\", None), + (PathStyle::Windows, "C:\\a\\bc", "C:\\a\\b", None), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b\\", + Some(rel_path("c").into_arc()), + ), + ( + PathStyle::Windows, + "C:\\a\\b/c", + "C:\\a\\b/", + Some(rel_path("c").into_arc()), + ), + ]; + let actual = expected.clone().map(|(style, child, parent, _)| { + ( + style, + child, + parent, + style + .strip_prefix(child.as_ref(), parent.as_ref()) + .map(|rel_path| rel_path.into_arc()), + ) + }); + pretty_assertions::assert_eq!(actual, expected); + } + #[cfg(target_os = "windows")] #[test] fn test_wsl_path() { diff --git a/crates/vim/src/command.rs b/crates/vim/src/command.rs index 70d0e93c5db5999878f2bb79c7fc42f16e6861a1..5bf0fca041cf274f38c84031e35903c9e339cc24 100644 --- a/crates/vim/src/command.rs +++ b/crates/vim/src/command.rs @@ -965,7 +965,7 @@ impl VimCommand { } }; - let rel_path = if args.ends_with(PathStyle::local().separator()) { + let rel_path = if args.ends_with(PathStyle::local().primary_separator()) { rel_path } else { rel_path @@ -998,7 +998,7 @@ impl VimCommand { .display(PathStyle::local()) .to_string(); if dir.is_dir { - path_string.push_str(PathStyle::local().separator()); + path_string.push_str(PathStyle::local().primary_separator()); } path_string }) diff --git a/crates/worktree/src/worktree.rs b/crates/worktree/src/worktree.rs index 1e8c1648dca98b267146211a9b36fb78f743fb82..a62f1b3cd1305a4e396a9fb0dd6b2f3212a321b6 100644 --- a/crates/worktree/src/worktree.rs +++ b/crates/worktree/src/worktree.rs @@ -999,7 +999,7 @@ impl Worktree { }; if worktree_relative_path.components().next().is_some() { - full_path_string.push_str(self.path_style.separator()); + 
full_path_string.push_str(self.path_style.primary_separator()); full_path_string.push_str(&worktree_relative_path.display(self.path_style)); } @@ -2108,8 +2108,8 @@ impl Snapshot { if path.file_name().is_some() { let mut abs_path = self.abs_path.to_string(); for component in path.components() { - if !abs_path.ends_with(self.path_style.separator()) { - abs_path.push_str(self.path_style.separator()); + if !abs_path.ends_with(self.path_style.primary_separator()) { + abs_path.push_str(self.path_style.primary_separator()); } abs_path.push_str(component); } From 1a23115773ac3466444256992f661ee31cbaace2 Mon Sep 17 00:00:00 2001 From: Jason Lee Date: Thu, 27 Nov 2025 01:03:42 +0800 Subject: [PATCH 0407/1030] gpui: Unify `track_scroll` method to receive a reference type (#43518) Release Notes: - N/A This PR changes the `track_scroll` method to receive a reference type, like the existing [Div#track_scroll](https://docs.rs/gpui/latest/gpui/trait.StatefulInteractiveElement.html#method.track_scroll) and [Div#track_focus](https://docs.rs/gpui/latest/gpui/trait.InteractiveElement.html#method.track_focus) methods. ```diff - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) ``` https://github.com/zed-industries/zed/blob/56a2f9cfcf0c6a3c38f596b58002953763cd890f/crates/gpui/src/elements/div.rs#L1088-L1093 https://github.com/zed-industries/zed/blob/56a2f9cfcf0c6a3c38f596b58002953763cd890f/crates/gpui/src/elements/div.rs#L613-L620 --- crates/acp_tools/src/acp_tools.rs | 2 +- crates/agent_ui/src/acp/thread_history.rs | 8 ++------ crates/agent_ui/src/acp/thread_view.rs | 2 +- crates/agent_ui/src/agent_configuration.rs | 2 +- .../src/agent_configuration/add_llm_provider_modal.rs | 2 +- .../configure_context_server_modal.rs | 5 ++--- .../configure_context_server_tools_modal.rs | 2 +- .../debugger_ui/src/session/running/breakpoint_list.rs | 4 ++-- crates/debugger_ui/src/session/running/memory_view.rs | 4 ++-- crates/debugger_ui/src/session/running/module_list.rs | 4 ++-- .../src/session/running/stack_frame_list.rs | 2 +- .../debugger_ui/src/session/running/variable_list.rs | 6 +++--- crates/editor/src/code_context_menus.rs | 6 +++--- crates/editor/src/hover_popover.rs | 4 ++-- crates/editor/src/signature_help.rs | 2 +- crates/extensions_ui/src/extensions_ui.rs | 4 ++-- crates/git_ui/src/git_panel.rs | 4 ++-- crates/gpui/examples/data_table.rs | 2 +- crates/gpui/src/elements/uniform_list.rs | 6 +++--- crates/language_tools/src/syntax_tree_view.rs | 4 ++-- crates/markdown/src/markdown.rs | 2 +- crates/markdown_preview/src/markdown_preview_view.rs | 2 +- crates/miniprofiler_ui/src/miniprofiler_ui.rs | 4 ++-- crates/onboarding/src/onboarding.rs | 2 +- crates/outline_panel/src/outline_panel.rs | 4 ++-- crates/picker/src/picker.rs | 6 +++--- crates/project_panel/src/project_panel.rs | 4 ++-- crates/recent_projects/src/remote_servers.rs | 2 +- crates/settings_ui/src/settings_ui.rs | 8 ++++---- crates/terminal_view/src/terminal_view.rs | 2 +- crates/ui/src/components/data_table.rs | 4 ++-- crates/ui/src/components/scrollbar.rs | 10 +++++----- crates/ui/src/components/tab_bar.rs | 4 ++-- crates/zed/src/zed/component_preview.rs | 2 +- 34 files changed, 63 insertions(+), 68 deletions(-) diff --git a/crates/acp_tools/src/acp_tools.rs b/crates/acp_tools/src/acp_tools.rs index 7615784676c7d9ff1782a6e9537e608cb927154d..0905effce38d1bfd4fa18e1d00169d6c7ef6c2d7 100644 ---
a/crates/acp_tools/src/acp_tools.rs +++ b/crates/acp_tools/src/acp_tools.rs @@ -528,7 +528,7 @@ impl Render for AcpTools { .with_sizing_behavior(gpui::ListSizingBehavior::Auto) .size_full(), ) - .vertical_scrollbar_for(connection.list_state.clone(), window, cx) + .vertical_scrollbar_for(&connection.list_state, window, cx) .into_any() } } diff --git a/crates/agent_ui/src/acp/thread_history.rs b/crates/agent_ui/src/acp/thread_history.rs index 29759093303a684fdfd9ad255d269516ed7a29b9..e5c83d48f1fd4633591441ad88076e66d3eb1e62 100644 --- a/crates/agent_ui/src/acp/thread_history.rs +++ b/crates/agent_ui/src/acp/thread_history.rs @@ -527,14 +527,10 @@ impl Render for AcpThreadHistory { ) .p_1() .pr_4() - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .flex_grow(), ) - .vertical_scrollbar_for( - self.scroll_handle.clone(), - window, - cx, - ) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) } }) .when(!has_no_history, |this| { diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index 45b15e6e9e3eaa03fc69912eab3e778335b714d4..a2929ad23ba8558b61abbf1d25ffe3843a918c2e 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -5896,7 +5896,7 @@ impl Render for AcpThreadView { .flex_grow() .into_any(), ) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) .into_any() } else { this.child(self.render_recent_history(cx)).into_any() diff --git a/crates/agent_ui/src/agent_configuration.rs b/crates/agent_ui/src/agent_configuration.rs index ef6b90ad89e2e038e96d8864d4c2ce0ecf333d6e..f831329e2cde40dbb9d4b9e882d6bc942f383422 100644 --- a/crates/agent_ui/src/agent_configuration.rs +++ b/crates/agent_ui/src/agent_configuration.rs @@ -1209,7 +1209,7 @@ impl Render for AgentConfiguration { .child(self.render_context_servers_section(window, cx)) .child(self.render_provider_configuration_section(cx)), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) } } diff --git a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs index 3427dab0d22c6900a3078f1dcb4cc7e892cce7db..02269511bb9a4d9b95fe27b66e3ca0a9e5c498c5 100644 --- a/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs +++ b/crates/agent_ui/src/agent_configuration/add_llm_provider_modal.rs @@ -516,7 +516,7 @@ impl Render for AddLlmProviderModal { .child( div() .size_full() - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) .child( v_flex() .id("modal_content") diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs index a93df3839d98d95e2f91833078dbe96bc3fb8889..85f527ff5a1262aa36657316d86999ac617fb09d 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_modal.rs @@ -821,7 +821,6 @@ impl ConfigureContextServerModal { impl Render for ConfigureContextServerModal { fn render(&mut self, window: &mut Window, cx: &mut Context) -> impl IntoElement { - let scroll_handle = self.scroll_handle.clone(); div() .elevation_3(cx) .w(rems(34.)) @@ -849,7 +848,7 @@ impl Render for ConfigureContextServerModal { .id("modal-content") 
.max_h(vh(0.7, window)) .overflow_y_scroll() - .track_scroll(&scroll_handle) + .track_scroll(&self.scroll_handle) .child(self.render_modal_description(window, cx)) .child(self.render_modal_content(cx)) .child(match &self.state { @@ -862,7 +861,7 @@ impl Render for ConfigureContextServerModal { } }), ) - .vertical_scrollbar_for(scroll_handle, window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ), ) .footer(self.render_modal_footer(cx)), diff --git a/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs b/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs index 3fe0b8d1b1400b4362192261995ed5b6bd1cb662..3573c8b67ee81ef9cd1decacefb52017dabdb178 100644 --- a/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs +++ b/crates/agent_ui/src/agent_configuration/configure_context_server_tools_modal.rs @@ -138,7 +138,7 @@ impl ConfigureContextServerToolsModal { items })), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) .into_any_element() } } diff --git a/crates/debugger_ui/src/session/running/breakpoint_list.rs b/crates/debugger_ui/src/session/running/breakpoint_list.rs index ca50f67c9236d19a9f04f327091eb383ab72e122..2c7e2074678290356b7669228dcf29008f1cc36b 100644 --- a/crates/debugger_ui/src/session/running/breakpoint_list.rs +++ b/crates/debugger_ui/src/session/running/breakpoint_list.rs @@ -575,7 +575,7 @@ impl BreakpointList { ) .with_horizontal_sizing_behavior(gpui::ListHorizontalSizingBehavior::Unconstrained) .with_width_from_item(self.max_width_index) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .flex_1() } @@ -776,7 +776,7 @@ impl Render for BreakpointList { .child(self.render_list(cx)) .custom_scrollbars( ui::Scrollbars::new(ScrollAxes::Both) - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along(ScrollAxes::Both, cx.theme().colors().panel_background) .tracked_entity(cx.entity_id()), window, diff --git a/crates/debugger_ui/src/session/running/memory_view.rs b/crates/debugger_ui/src/session/running/memory_view.rs index 8670beb0f5f93f68a6052b868a866e22b82c92fd..55a8e8429eb23cd0bfcaa7d592d16797c061d2ae 100644 --- a/crates/debugger_ui/src/session/running/memory_view.rs +++ b/crates/debugger_ui/src/session/running/memory_view.rs @@ -229,7 +229,7 @@ impl MemoryView { rows }, ) - .track_scroll(view_state.scroll_handle) + .track_scroll(&view_state.scroll_handle) .with_horizontal_sizing_behavior(ListHorizontalSizingBehavior::Unconstrained) .on_scroll_wheel(cx.listener(|this, evt: &ScrollWheelEvent, window, _| { let mut view_state = this.view_state(); @@ -921,7 +921,7 @@ impl Render for MemoryView { })) .custom_scrollbars( ui::Scrollbars::new(ui::ScrollAxes::Both) - .tracked_scroll_handle(self.view_state_handle.clone()) + .tracked_scroll_handle(&self.view_state_handle) .with_track_along( ui::ScrollAxes::Both, cx.theme().colors().panel_background, diff --git a/crates/debugger_ui/src/session/running/module_list.rs b/crates/debugger_ui/src/session/running/module_list.rs index 545d8392745c636b805cfc1e0743170635ef8abe..19f407eb23f8acf0aa665f5119ecfd2156eb685f 100644 --- a/crates/debugger_ui/src/session/running/module_list.rs +++ b/crates/debugger_ui/src/session/running/module_list.rs @@ -253,7 +253,7 @@ impl ModuleList { range.map(|ix| this.render_entry(ix, cx)).collect() }), ) - .track_scroll(self.scroll_handle.clone()) + 
.track_scroll(&self.scroll_handle) .size_full() } } @@ -279,6 +279,6 @@ impl Render for ModuleList { .size_full() .p_1() .child(self.render_list(window, cx)) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.scroll_handle, window, cx) } } diff --git a/crates/debugger_ui/src/session/running/stack_frame_list.rs b/crates/debugger_ui/src/session/running/stack_frame_list.rs index a8fabd327a3de630ff884899fe7af1167932618c..96a910af4dd0ac901c6802c139ddd5b8b3d728bc 100644 --- a/crates/debugger_ui/src/session/running/stack_frame_list.rs +++ b/crates/debugger_ui/src/session/running/stack_frame_list.rs @@ -913,7 +913,7 @@ impl Render for StackFrameList { ) }) .child(self.render_list(window, cx)) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) } } diff --git a/crates/debugger_ui/src/session/running/variable_list.rs b/crates/debugger_ui/src/session/running/variable_list.rs index 7d736aace58ab1b27ccab5690cf24d4cff9a47f6..1b455b59d7d12712a3d4adc713a6ed15e8166c6e 100644 --- a/crates/debugger_ui/src/session/running/variable_list.rs +++ b/crates/debugger_ui/src/session/running/variable_list.rs @@ -1557,7 +1557,7 @@ impl Render for VariableList { this.render_entries(range, window, cx) }), ) - .track_scroll(self.list_handle.clone()) + .track_scroll(&self.list_handle) .with_width_from_item(self.max_width_index) .with_sizing_behavior(gpui::ListSizingBehavior::Auto) .with_horizontal_sizing_behavior(gpui::ListHorizontalSizingBehavior::Unconstrained) @@ -1574,10 +1574,10 @@ impl Render for VariableList { ) .with_priority(1) })) - // .vertical_scrollbar_for(self.list_handle.clone(), window, cx) + // .vertical_scrollbar_for(&self.list_handle, window, cx) .custom_scrollbars( ui::Scrollbars::new(ScrollAxes::Both) - .tracked_scroll_handle(self.list_handle.clone()) + .tracked_scroll_handle(&self.list_handle) .with_track_along(ScrollAxes::Both, cx.theme().colors().panel_background) .tracked_entity(cx.entity_id()), window, diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index 9a2b8c385689f284fc42e49a5c7451b3774fe018..6a07d39210773476b5f88764c5a21f292da48676 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -933,7 +933,7 @@ impl CompletionsMenu { ) .occlude() .max_h(max_height_in_lines as f32 * window.line_height()) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .with_sizing_behavior(ListSizingBehavior::Infer) .map(|this| { if self.display_options.dynamic_width { @@ -948,7 +948,7 @@ impl CompletionsMenu { div().child(list).custom_scrollbars( Scrollbars::for_settings::() .show_along(ScrollAxes::Vertical) - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ), @@ -1599,7 +1599,7 @@ impl CodeActionsMenu { ) .occlude() .max_h(max_height_in_lines as f32 * window.line_height()) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .with_width_from_item( self.actions .iter() diff --git a/crates/editor/src/hover_popover.rs b/crates/editor/src/hover_popover.rs index 5f831341bab2a4e37410a1e3e168bcf72bba93a8..0b9a25d3ee0fcb1cb67497bf51fe41ed73a3692e 100644 --- a/crates/editor/src/hover_popover.rs +++ b/crates/editor/src/hover_popover.rs @@ -914,7 +914,7 @@ impl InfoPopover { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()), + 
.tracked_scroll_handle(&self.scroll_handle), window, cx, ) @@ -1012,7 +1012,7 @@ impl DiagnosticPopover { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ), diff --git a/crates/editor/src/signature_help.rs b/crates/editor/src/signature_help.rs index b394364e01cbd647a0e17afc0ddc13afdb12ced3..2554db2450103709275b3f7946076fd891326d84 100644 --- a/crates/editor/src/signature_help.rs +++ b/crates/editor/src/signature_help.rs @@ -391,7 +391,7 @@ impl SignatureHelpPopover { ) }), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx); + .vertical_scrollbar_for(&self.scroll_handle, window, cx); let controls = if self.signatures.len() > 1 { let prev_button = IconButton::new("signature_help_prev", IconName::ChevronUp) diff --git a/crates/extensions_ui/src/extensions_ui.rs b/crates/extensions_ui/src/extensions_ui.rs index e35c90b6104b44bd6dbf3fe86aeaf84f122c04ca..e6d30527e0d7672255bf8f61cfd56fe06b409920 100644 --- a/crates/extensions_ui/src/extensions_ui.rs +++ b/crates/extensions_ui/src/extensions_ui.rs @@ -1704,12 +1704,12 @@ impl Render for ExtensionsPage { if count == 0 { this.child(self.render_empty_state(cx)).into_any_element() } else { - let scroll_handle = self.list.clone(); + let scroll_handle = &self.list; this.child( uniform_list("entries", count, cx.processor(Self::render_extensions)) .flex_grow() .pb_4() - .track_scroll(scroll_handle.clone()), + .track_scroll(scroll_handle), ) .vertical_scrollbar_for(scroll_handle, window, cx) .into_any_element() diff --git a/crates/git_ui/src/git_panel.rs b/crates/git_ui/src/git_panel.rs index 1f66d194477c64fef207e63d4c87ad4d76675f65..cf6babb401b6f407506595f3dd95592e98c18286 100644 --- a/crates/git_ui/src/git_panel.rs +++ b/crates/git_ui/src/git_panel.rs @@ -3939,7 +3939,7 @@ impl GitPanel { ListHorizontalSizingBehavior::Unconstrained, ) .with_width_from_item(self.max_width_item_index) - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .on_mouse_down( MouseButton::Right, @@ -3949,7 +3949,7 @@ impl GitPanel { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/gpui/examples/data_table.rs b/crates/gpui/examples/data_table.rs index 56c9625ed3039b872cf4fcc70e84719ce903e268..dd1a443a9dfaa28a5079a034b8214ce1bbf01da8 100644 --- a/crates/gpui/examples/data_table.rs +++ b/crates/gpui/examples/data_table.rs @@ -438,7 +438,7 @@ impl Render for DataTable { }), ) .size_full() - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .child(self.render_scrollbar(window, cx)), ), diff --git a/crates/gpui/src/elements/uniform_list.rs b/crates/gpui/src/elements/uniform_list.rs index 72843ea6330aaa24d9e1d6bf34d024cdeb54ad4a..1e38b0e7ac9abcf891201b7db61b819abe00ef1e 100644 --- a/crates/gpui/src/elements/uniform_list.rs +++ b/crates/gpui/src/elements/uniform_list.rs @@ -668,9 +668,9 @@ impl UniformList { } /// Track and render scroll state of this list with reference to the given scroll handle. 
- pub fn track_scroll(mut self, handle: UniformListScrollHandle) -> Self { + pub fn track_scroll(mut self, handle: &UniformListScrollHandle) -> Self { self.interactivity.tracked_scroll_handle = Some(handle.0.borrow().base_handle.clone()); - self.scroll_handle = Some(handle); + self.scroll_handle = Some(handle.clone()); self } @@ -780,7 +780,7 @@ mod test { .collect() }), ) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .h(px(200.0)), ) } diff --git a/crates/language_tools/src/syntax_tree_view.rs b/crates/language_tools/src/syntax_tree_view.rs index 885f6bed327c765019ae166e21eab112f884e7dd..3ac007c134657ff33259f961f170d5a7d732a22c 100644 --- a/crates/language_tools/src/syntax_tree_view.rs +++ b/crates/language_tools/src/syntax_tree_view.rs @@ -507,11 +507,11 @@ impl Render for SyntaxTreeView { }), ) .size_full() - .track_scroll(self.list_scroll_handle.clone()) + .track_scroll(&self.list_scroll_handle) .text_bg(cx.theme().colors().background) .into_any_element(), ) - .vertical_scrollbar_for(self.list_scroll_handle.clone(), window, cx) + .vertical_scrollbar_for(&self.list_scroll_handle, window, cx) .into_any_element() } else { let inner_content = v_flex() diff --git a/crates/markdown/src/markdown.rs b/crates/markdown/src/markdown.rs index 1de6d494ffbf445ca8ee3df9d1e83b5575f8224e..dd0d726734173591cb9ed9f8cc965d06aaee7e89 100644 --- a/crates/markdown/src/markdown.rs +++ b/crates/markdown/src/markdown.rs @@ -889,7 +889,7 @@ impl Element for MarkdownElement { { let scrollbars = Scrollbars::new(ScrollAxes::Horizontal) .id(("markdown-code-block-scrollbar", range.start)) - .tracked_scroll_handle(scroll_handle.clone()) + .tracked_scroll_handle(scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().editor_background, diff --git a/crates/markdown_preview/src/markdown_preview_view.rs b/crates/markdown_preview/src/markdown_preview_view.rs index c4d3c033df6395235603837bf0944eeb59d3dfbc..4126a31379fa74a750a7d111ac71dc180a3bb0ff 100644 --- a/crates/markdown_preview/src/markdown_preview_view.rs +++ b/crates/markdown_preview/src/markdown_preview_view.rs @@ -611,6 +611,6 @@ impl Render for MarkdownPreviewView { .size_full(), ) })) - .vertical_scrollbar_for(self.list_state.clone(), window, cx) + .vertical_scrollbar_for(&self.list_state, window, cx) } } diff --git a/crates/miniprofiler_ui/src/miniprofiler_ui.rs b/crates/miniprofiler_ui/src/miniprofiler_ui.rs index 93ccfc559c6eedc5e1be1c3ca68355aeba878a76..ea59b43cc1dbc2cb1b8476b00d8fa7d07636afec 100644 --- a/crates/miniprofiler_ui/src/miniprofiler_ui.rs +++ b/crates/miniprofiler_ui/src/miniprofiler_ui.rs @@ -400,10 +400,10 @@ impl Render for ProfilerWindow { this.autoscroll = false; cx.notify(); })) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .size_full(), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) }) } diff --git a/crates/onboarding/src/onboarding.rs b/crates/onboarding/src/onboarding.rs index 404af2c74f9524aa1d52db39de2354bbe4564240..94581e142339cde9d4f1f01a3fb361ae810c1efa 100644 --- a/crates/onboarding/src/onboarding.rs +++ b/crates/onboarding/src/onboarding.rs @@ -350,7 +350,7 @@ impl Render for Onboarding { .child(self.render_page(cx)) .track_scroll(&self.scroll_handle), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) } } diff --git a/crates/outline_panel/src/outline_panel.rs 
b/crates/outline_panel/src/outline_panel.rs index 1e649b2eb64fda970f845e9376be3f61944dde85..6e78b8a1e1f573d9870d42c6a5e99c8574e6979a 100644 --- a/crates/outline_panel/src/outline_panel.rs +++ b/crates/outline_panel/src/outline_panel.rs @@ -4639,7 +4639,7 @@ impl OutlinePanel { .with_sizing_behavior(ListSizingBehavior::Infer) .with_horizontal_sizing_behavior(ListHorizontalSizingBehavior::Unconstrained) .with_width_from_item(self.max_width_item_index) - .track_scroll(self.scroll_handle.clone()) + .track_scroll(&self.scroll_handle) .when(show_indent_guides, |list| { list.with_decoration( ui::indent_guides(px(indent_size), IndentGuideColors::panel(cx)) @@ -4692,7 +4692,7 @@ impl OutlinePanel { .child(list_contents.size_full().flex_shrink()) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle.clone()) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/picker/src/picker.rs b/crates/picker/src/picker.rs index 4e7dba59ad39399b9edab30f553bdc17545540dd..8fb4941b716efa8186937ec7b49bcc3cfb26d44b 100644 --- a/crates/picker/src/picker.rs +++ b/crates/picker/src/picker.rs @@ -780,7 +780,7 @@ impl Picker { }) .flex_grow() .py_1() - .track_scroll(scroll_handle.clone()) + .track_scroll(&scroll_handle) .into_any_element(), ElementContainer::List(state) => list( state.clone(), @@ -866,12 +866,12 @@ impl Render for Picker { this.map(|this| match &self.element_container { ElementContainer::List(state) => this.custom_scrollbars( - base_scrollbar_config.tracked_scroll_handle(state.clone()), + base_scrollbar_config.tracked_scroll_handle(state), window, cx, ), ElementContainer::UniformList(state) => this.custom_scrollbars( - base_scrollbar_config.tracked_scroll_handle(state.clone()), + base_scrollbar_config.tracked_scroll_handle(state), window, cx, ), diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index cde0b89bb9115476744ed606f16174039db62cf6..e9af8bbe3fff1f5ff7d910b6aa16e05090351777 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -5765,7 +5765,7 @@ impl Render for ProjectPanel { ListHorizontalSizingBehavior::Unconstrained, ) .with_width_from_item(self.state.max_width_item_index) - .track_scroll(self.scroll_handle.clone()), + .track_scroll(&self.scroll_handle), ) .child( div() @@ -5908,7 +5908,7 @@ impl Render for ProjectPanel { ) .custom_scrollbars( Scrollbars::for_settings::() - .tracked_scroll_handle(self.scroll_handle.clone()) + .tracked_scroll_handle(&self.scroll_handle) .with_track_along( ScrollAxes::Horizontal, cx.theme().colors().panel_background, diff --git a/crates/recent_projects/src/remote_servers.rs b/crates/recent_projects/src/remote_servers.rs index 76b0b230dc16b5d5e594379bb94b30ca66b9b317..6dff231b30ddde741f69ba9d4e0366517d8e2751 100644 --- a/crates/recent_projects/src/remote_servers.rs +++ b/crates/recent_projects/src/remote_servers.rs @@ -2160,7 +2160,7 @@ impl RemoteServerProjects { ) .size_full(), ) - .vertical_scrollbar_for(state.scroll_handle, window, cx), + .vertical_scrollbar_for(&state.scroll_handle, window, cx), ), ) .into_any_element() diff --git a/crates/settings_ui/src/settings_ui.rs b/crates/settings_ui/src/settings_ui.rs index 499d6b04653b06c41ef4e302cfd4b4e77efc95c9..2726e1cbd6da7b568d3412791826bc1f7b826397 100644 --- a/crates/settings_ui/src/settings_ui.rs +++ b/crates/settings_ui/src/settings_ui.rs @@ -2455,9 +2455,9 @@ impl 
SettingsWindow { }), ) .size_full() - .track_scroll(self.navbar_scroll_handle.clone()), + .track_scroll(&self.navbar_scroll_handle), ) - .vertical_scrollbar_for(self.navbar_scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.navbar_scroll_handle, window, cx), ) .child( h_flex() @@ -3012,10 +3012,10 @@ impl SettingsWindow { window.focus_prev(); })) .when(sub_page_stack().is_empty(), |this| { - this.vertical_scrollbar_for(self.list_state.clone(), window, cx) + this.vertical_scrollbar_for(&self.list_state, window, cx) }) .when(!sub_page_stack().is_empty(), |this| { - this.vertical_scrollbar_for(self.sub_page_scroll_handle.clone(), window, cx) + this.vertical_scrollbar_for(&self.sub_page_scroll_handle, window, cx) }) .track_focus(&self.content_focus_handle.focus_handle(cx)) .pt_6() diff --git a/crates/terminal_view/src/terminal_view.rs b/crates/terminal_view/src/terminal_view.rs index 7b3e29ac9b0582d081a286539d973fe8f1a453c5..64336886a4b430f780db1126b8d677e51cff066b 100644 --- a/crates/terminal_view/src/terminal_view.rs +++ b/crates/terminal_view/src/terminal_view.rs @@ -1118,7 +1118,7 @@ impl Render for TerminalView { ScrollAxes::Vertical, cx.theme().colors().editor_background, ) - .tracked_scroll_handle(self.scroll_handle.clone()), + .tracked_scroll_handle(&self.scroll_handle), window, cx, ) diff --git a/crates/ui/src/components/data_table.rs b/crates/ui/src/components/data_table.rs index a505281cf3fa9868a19a04c168d0b1b5c71a4f85..f7cce2b85ffa3aeb9f97634c6c0fa65c46f4a8e7 100644 --- a/crates/ui/src/components/data_table.rs +++ b/crates/ui/src/components/data_table.rs @@ -872,7 +872,7 @@ impl RenderOnce for Table { interaction_state.as_ref(), |this, state| { this.track_scroll( - state.read_with(cx, |s, _| s.scroll_handle.clone()), + &state.read_with(cx, |s, _| s.scroll_handle.clone()), ) }, ), @@ -906,7 +906,7 @@ impl RenderOnce for Table { .unwrap_or_else(|| Scrollbars::new(super::ScrollAxes::Both)); content .custom_scrollbars( - scrollbars.tracked_scroll_handle(state.read(cx).scroll_handle.clone()), + scrollbars.tracked_scroll_handle(&state.read(cx).scroll_handle), window, cx, ) diff --git a/crates/ui/src/components/scrollbar.rs b/crates/ui/src/components/scrollbar.rs index d3d33a296bbd65edb24371d8f5f1e6462e77e3fe..391d480fb313d078bb20ab790ecbb61d7425257a 100644 --- a/crates/ui/src/components/scrollbar.rs +++ b/crates/ui/src/components/scrollbar.rs @@ -150,9 +150,9 @@ pub trait WithScrollbar: Sized { // } #[track_caller] - fn vertical_scrollbar_for( + fn vertical_scrollbar_for( self, - scroll_handle: ScrollHandle, + scroll_handle: &ScrollHandle, window: &mut Window, cx: &mut App, ) -> Self::Output { @@ -441,7 +441,7 @@ impl Scrollbars { pub fn tracked_scroll_handle( self, - tracked_scroll_handle: TrackedHandle, + tracked_scroll_handle: &TrackedHandle, ) -> Scrollbars { let Self { id, @@ -454,7 +454,7 @@ impl Scrollbars { } = self; Scrollbars { - scrollable_handle: Handle::Tracked(tracked_scroll_handle), + scrollable_handle: Handle::Tracked(tracked_scroll_handle.clone()), id, tracked_entity: tracked_entity_id, visibility, @@ -968,7 +968,7 @@ impl ScrollableHandle for ScrollHandle { } } -pub trait ScrollableHandle: 'static + Any + Sized { +pub trait ScrollableHandle: 'static + Any + Sized + Clone { fn max_offset(&self) -> Size; fn set_offset(&self, point: Point); fn offset(&self) -> Point; diff --git a/crates/ui/src/components/tab_bar.rs b/crates/ui/src/components/tab_bar.rs index 3c467c06ce2654c5886c30e42dfb7276fdb7d289..5d41466e3caadf6697b3c1681a405dafa2fb3101 100644 --- 
a/crates/ui/src/components/tab_bar.rs +++ b/crates/ui/src/components/tab_bar.rs @@ -24,8 +24,8 @@ impl TabBar { } } - pub fn track_scroll(mut self, scroll_handle: ScrollHandle) -> Self { - self.scroll_handle = Some(scroll_handle); + pub fn track_scroll(mut self, scroll_handle: &ScrollHandle) -> Self { + self.scroll_handle = Some(scroll_handle.clone()); self } diff --git a/crates/zed/src/zed/component_preview.rs b/crates/zed/src/zed/component_preview.rs index 18279d8ee88821d44166fb5aedebca2e51ae9491..c231836aaa9219cab2ed913db70ad1704606dfd1 100644 --- a/crates/zed/src/zed/component_preview.rs +++ b/crates/zed/src/zed/component_preview.rs @@ -627,7 +627,7 @@ impl Render for ComponentPreview { .collect() }), ) - .track_scroll(self.nav_scroll_handle.clone()) + .track_scroll(&self.nav_scroll_handle) .p_2p5() .w(px(231.)) // Matches perfectly with the size of the "Component Preview" tab, if that's the first one in the pane .h_full() From 6b92c1a47bbfd867c916ad82668caa62ab31aeee Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 26 Nov 2025 18:21:22 +0100 Subject: [PATCH 0408/1030] workspace: Fix broken main build after #43518 (#43570) *cough* merge queue *cough* Release Notes: - N/A --- crates/workspace/src/notifications.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs index 75c35cda22d72d659040154a079fe78af78cf414..cfdc730b4db5be8e2f4a317dcf7e12072af40a88 100644 --- a/crates/workspace/src/notifications.rs +++ b/crates/workspace/src/notifications.rs @@ -789,7 +789,7 @@ pub mod simple_message_notification { .track_scroll(&self.scroll_handle.clone()) .child((self.build_content)(window, cx)), ) - .vertical_scrollbar_for(self.scroll_handle.clone(), window, cx), + .vertical_scrollbar_for(&self.scroll_handle, window, cx), ) .show_close_button(self.show_close_button) .show_suppress_button(self.show_suppress_button) From 233b97644190a7bb721fd105068bb7f4aafaeeab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Raz=20Guzm=C3=A1n=20Macedo?= Date: Wed, 26 Nov 2025 11:57:27 -0600 Subject: [PATCH 0409/1030] Add WSL Linux choice and settings.json prompt for GitHub issue template (#43479) Release Notes: - N/A --------- Co-authored-by: Kunall Banerjee --- .github/ISSUE_TEMPLATE/1.bug-report.yml | 29 +++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug-report.yml b/.github/ISSUE_TEMPLATE/1.bug-report.yml index 543c22117c2aa889b91fddd9eddd905c09dd0644..1fbb81af8e5e6bd8ebc8582c3528f5b88929f041 100644 --- a/.github/ISSUE_TEMPLATE/1.bug-report.yml +++ b/.github/ISSUE_TEMPLATE/1.bug-report.yml @@ -46,6 +46,22 @@ body: validations: required: false + - type: textarea + attributes: + label: If applicable, attach your relevant Zed settings to this issue + description: | + Open the command palette in Zed, then type “zed: open settings file” and copy/paste any relevant (e.g., LSP-specific) settings. + value: | +
<details><summary>settings.json</summary> + + + ```json + + ``` + + </details>
+ validations: + required: false - type: textarea attributes: label: If applicable, provide details about your model provider @@ -68,3 +84,16 @@ body: Architecture: aarch64 validations: required: true + - type: dropdown + attributes: + label: If you are using WSL on Windows, what flavor of Linux are you using? + multiple: false + options: + - Arch Linux + - Ubuntu + - Fedora + - Mint + - Pop!_OS + - NixOS + - Other + default: 0 From 61a414df77a49278b6c734b486d7cda419cf781e Mon Sep 17 00:00:00 2001 From: Dino Date: Wed, 26 Nov 2025 19:34:03 +0000 Subject: [PATCH 0410/1030] Fix language server renaming when parent directory does not exist (#43499) Update the `fs::RenameOptions` used by `project::lsp_store::LocalLspStore.deserialize_workspace_edit` in order to always set `create_parents` to `true`. Doing this ensures that we'll always create the folders for the new file path provided by the language server instead of failing to handle the request in case the parent - Introduce `create_parents` field to `fs::RenameOptions` - Update `fs::RealFs.rename` to ensure that the `create_parents` option is respected Closes #41820 Release Notes: - Fixed a bug where using language server's file renaming actions could fail if the parent directory of the new file did not exist --- .../assistant_text_thread/src/text_thread.rs | 1 + crates/fs/src/fs.rs | 73 +++++++++++++++++++ crates/project/src/agent_server_store.rs | 1 + crates/project/src/lsp_store.rs | 28 ++++--- crates/worktree/src/worktree_tests.rs | 2 + 5 files changed, 94 insertions(+), 11 deletions(-) diff --git a/crates/assistant_text_thread/src/text_thread.rs b/crates/assistant_text_thread/src/text_thread.rs index 613c9b862e8a0b055465a73fe34c541ecb18d4a1..7f24c8f665f8d34aed199562dce1131797f13c9d 100644 --- a/crates/assistant_text_thread/src/text_thread.rs +++ b/crates/assistant_text_thread/src/text_thread.rs @@ -2933,6 +2933,7 @@ impl TextThread { RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: false, }, ) .await?; diff --git a/crates/fs/src/fs.rs b/crates/fs/src/fs.rs index 5a6e4bdfdba48af25342d4d1ecfafd1d4ce0709b..5be94ab6302b0a950b91e32dc43da374f0c62f29 100644 --- a/crates/fs/src/fs.rs +++ b/crates/fs/src/fs.rs @@ -193,6 +193,8 @@ pub struct CopyOptions { pub struct RenameOptions { pub overwrite: bool, pub ignore_if_exists: bool, + /// Whether to create parent directories if they do not exist. 
+ pub create_parents: bool, } #[derive(Copy, Clone, Default)] @@ -579,6 +581,12 @@ impl Fs for RealFs { } } + if options.create_parents { + if let Some(parent) = target.parent() { + self.create_dir(parent).await?; + } + } + smol::fs::rename(source, target).await?; Ok(()) } @@ -2357,6 +2365,12 @@ impl Fs for FakeFs { let old_path = normalize_path(old_path); let new_path = normalize_path(new_path); + if options.create_parents { + if let Some(parent) = new_path.parent() { + self.create_dir(parent).await?; + } + } + let mut state = self.state.lock(); let moved_entry = state.write_path(&old_path, |e| { if let btree_map::Entry::Occupied(e) = e { @@ -3396,4 +3410,63 @@ mod tests { let content = std::fs::read_to_string(&file_to_be_replaced).unwrap(); assert_eq!(content, "Hello"); } + + #[gpui::test] + async fn test_rename(executor: BackgroundExecutor) { + let fs = FakeFs::new(executor.clone()); + fs.insert_tree( + path!("/root"), + json!({ + "src": { + "file_a.txt": "content a", + "file_b.txt": "content b" + } + }), + ) + .await; + + fs.rename( + Path::new(path!("/root/src/file_a.txt")), + Path::new(path!("/root/src/new/renamed_a.txt")), + RenameOptions { + create_parents: true, + ..Default::default() + }, + ) + .await + .unwrap(); + + // Assert that the `file_a.txt` file was being renamed and moved to a + // different directory that did not exist before. + assert_eq!( + fs.files(), + vec![ + PathBuf::from(path!("/root/src/file_b.txt")), + PathBuf::from(path!("/root/src/new/renamed_a.txt")), + ] + ); + + let result = fs + .rename( + Path::new(path!("/root/src/file_b.txt")), + Path::new(path!("/root/src/old/renamed_b.txt")), + RenameOptions { + create_parents: false, + ..Default::default() + }, + ) + .await; + + // Assert that the `file_b.txt` file was not renamed nor moved, as + // `create_parents` was set to `false`. + // different directory that did not exist before. 
+ assert!(result.is_err()); + assert_eq!( + fs.files(), + vec![ + PathBuf::from(path!("/root/src/file_b.txt")), + PathBuf::from(path!("/root/src/new/renamed_a.txt")), + ] + ); + } } diff --git a/crates/project/src/agent_server_store.rs b/crates/project/src/agent_server_store.rs index d6bd83531eda515e6c2841c65d51619da82e9ae4..ef12e222009a59430a3396cae7971ac7593e82c3 100644 --- a/crates/project/src/agent_server_store.rs +++ b/crates/project/src/agent_server_store.rs @@ -1089,6 +1089,7 @@ async fn download_latest_version( RenameOptions { ignore_if_exists: true, overwrite: true, + create_parents: false, }, ) .await?; diff --git a/crates/project/src/lsp_store.rs b/crates/project/src/lsp_store.rs index 4f7022a264db18f96150c369fadb957556e33b75..a69d2553692277c7e10b203bf9edf075553d546c 100644 --- a/crates/project/src/lsp_store.rs +++ b/crates/project/src/lsp_store.rs @@ -3021,17 +3021,23 @@ impl LocalLspStore { .new_uri .to_file_path() .map_err(|()| anyhow!("can't convert URI to path"))?; - fs.rename( - &source_abs_path, - &target_abs_path, - op.options - .map(|options| fs::RenameOptions { - overwrite: options.overwrite.unwrap_or(false), - ignore_if_exists: options.ignore_if_exists.unwrap_or(false), - }) - .unwrap_or_default(), - ) - .await?; + + let options = fs::RenameOptions { + overwrite: op + .options + .as_ref() + .and_then(|options| options.overwrite) + .unwrap_or(false), + ignore_if_exists: op + .options + .as_ref() + .and_then(|options| options.ignore_if_exists) + .unwrap_or(false), + create_parents: true, + }; + + fs.rename(&source_abs_path, &target_abs_path, options) + .await?; } lsp::DocumentChangeOperation::Op(lsp::ResourceOp::Delete(op)) => { diff --git a/crates/worktree/src/worktree_tests.rs b/crates/worktree/src/worktree_tests.rs index e8d98b3508bd14f7ea8baaf1b985b42293eb078d..50e2c6acae0013a75e346ba754f9c9f861196b58 100644 --- a/crates/worktree/src/worktree_tests.rs +++ b/crates/worktree/src/worktree_tests.rs @@ -379,6 +379,7 @@ async fn test_renaming_case_only(cx: &mut TestAppContext) { fs::RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: false, }, ) .await @@ -1986,6 +1987,7 @@ async fn randomly_mutate_fs( fs::RenameOptions { overwrite: true, ignore_if_exists: true, + create_parents: false, }, ) .await From f89e5308e38c2c8e62f07dc0d641e61079569767 Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 26 Nov 2025 17:15:05 -0300 Subject: [PATCH 0411/1030] edit prediction: Report early-rejected predictions and fix cancel bug (#43585) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Many prediction requests end up being rejected early without ever being set as the current prediction. Before this change, those cases weren’t reported as rejections because the `request_prediction_with_*` functions simply returned `Ok(None)`. With this update, whenever we get a successful response from the provider, we will return at least the `id`, allowing it to be properly reported. The request now also includes a “reject reason,” since the different variants carry distinct implications for prediction quality. All of these scenarios are now covered by tests. While adding them, I also found and fixed a bug where some cancelled predictions were incorrectly being set as the current one. 
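Conceptually, the change boils down to the shape sketched below (a simplified, self-contained sketch; the real `EditPredictionResult`, `EditPrediction`, and `EditPredictionRejectReason` live in `crates/zeta/src/prediction.rs` and `crates/cloud_llm_client` and carry buffer snapshots, anchors, and gpui state): every provider response now yields a result whose `prediction` field is either a usable prediction or a reject reason, so even early rejections keep their request `id` and can be reported.

```rust
// Simplified sketch of the new result shape; field types are stand-ins.
#[derive(Debug, Clone, Copy)]
enum EditPredictionRejectReason {
    Canceled,
    Empty,
    InterpolatedEmpty,
    Replaced,
    CurrentPreferred,
    Discarded,
}

struct EditPrediction {
    text: String, // stand-in for the real edit list / snapshot fields
}

struct EditPredictionResult {
    id: String,
    prediction: Result<EditPrediction, EditPredictionRejectReason>,
}

fn handle(result: EditPredictionResult) {
    match result.prediction {
        // A usable prediction becomes the current one.
        Ok(prediction) => println!("set current prediction {}: {}", result.id, prediction.text),
        // Early rejections still carry the id, so they can be reported with a reason.
        Err(reason) => println!("report rejection of {} ({:?})", result.id, reason),
    }
}

fn main() {
    handle(EditPredictionResult {
        id: "req-1".into(),
        prediction: Err(EditPredictionRejectReason::Empty),
    });
}
```

In the real code, the `Err` arm is what feeds `Zeta::reject_prediction`, which batches rejections and flushes them to the `/predict_edits/reject` endpoint, as exercised by the new tests below.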
Release Notes: - N/A --------- Co-authored-by: MrSubidubi --- .../cloud_llm_client/src/cloud_llm_client.rs | 21 +- crates/zeta/src/prediction.rs | 89 +- crates/zeta/src/provider.rs | 13 +- crates/zeta/src/sweep_ai.rs | 10 +- crates/zeta/src/zeta.rs | 868 +++++++++++++++--- crates/zeta/src/zeta1.rs | 13 +- crates/zeta/src/zeta_tests.rs | 2 +- crates/zeta_cli/src/predict.rs | 5 +- 8 files changed, 842 insertions(+), 179 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index 241e760887cdf0c4455f6769c79a813de0626028..15b5a4eda4f8473f48cc66d255598cc6c1d09f08 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -200,12 +200,31 @@ pub struct RejectEditPredictionsBody { pub rejections: Vec, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct EditPredictionRejection { pub request_id: String, + #[serde(default)] + pub reason: EditPredictionRejectReason, pub was_shown: bool, } +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize, PartialEq)] +pub enum EditPredictionRejectReason { + /// New requests were triggered before this one completed + Canceled, + /// No edits returned + Empty, + /// Edits returned, but none remained after interpolation + InterpolatedEmpty, + /// The new prediction was preferred over the current one + Replaced, + /// The current prediction was preferred over the new one + CurrentPreferred, + /// The current prediction was discarded + #[default] + Discarded, +} + #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] pub enum CompletionMode { diff --git a/crates/zeta/src/prediction.rs b/crates/zeta/src/prediction.rs index 0125e739f335fc133cbff84dcd8b4c4bac3e6e7b..fd3241730030fe8bdd95e2cae9ee87b406ade735 100644 --- a/crates/zeta/src/prediction.rs +++ b/crates/zeta/src/prediction.rs @@ -5,6 +5,7 @@ use std::{ time::{Duration, Instant}, }; +use cloud_llm_client::EditPredictionRejectReason; use gpui::{AsyncApp, Entity, SharedString}; use language::{Anchor, Buffer, BufferSnapshot, EditPreview, OffsetRangeExt, TextBufferSnapshot}; use serde::Serialize; @@ -24,28 +25,13 @@ impl std::fmt::Display for EditPredictionId { } } -#[derive(Clone)] -pub struct EditPrediction { +/// A prediction response that was returned from the provider, whether it was ultimately valid or not. +pub struct EditPredictionResult { pub id: EditPredictionId, - pub edits: Arc<[(Range, Arc)]>, - pub snapshot: BufferSnapshot, - pub edit_preview: EditPreview, - // We keep a reference to the buffer so that we do not need to reload it from disk when applying the prediction. 
- pub buffer: Entity, - pub buffer_snapshotted_at: Instant, - pub response_received_at: Instant, - pub inputs: EditPredictionInputs, -} - -#[derive(Debug, Clone, Serialize)] -pub struct EditPredictionInputs { - pub events: Vec>, - pub included_files: Vec, - pub cursor_point: cloud_llm_client::predict_edits_v3::Point, - pub cursor_path: Arc, + pub prediction: Result, } -impl EditPrediction { +impl EditPredictionResult { pub async fn new( id: EditPredictionId, edited_buffer: &Entity, @@ -55,8 +41,15 @@ impl EditPrediction { response_received_at: Instant, inputs: EditPredictionInputs, cx: &mut AsyncApp, - ) -> Option { - let (edits, snapshot, edit_preview_task) = edited_buffer + ) -> Self { + if edits.is_empty() { + return Self { + id, + prediction: Err(EditPredictionRejectReason::Empty), + }; + } + + let Some((edits, snapshot, edit_preview_task)) = edited_buffer .read_with(cx, |buffer, cx| { let new_snapshot = buffer.snapshot(); let edits: Arc<[_]> = @@ -64,22 +57,54 @@ impl EditPrediction { Some((edits.clone(), new_snapshot, buffer.preview_edits(edits, cx))) }) - .ok()??; + .ok() + .flatten() + else { + return Self { + id, + prediction: Err(EditPredictionRejectReason::InterpolatedEmpty), + }; + }; let edit_preview = edit_preview_task.await; - Some(EditPrediction { - id, - edits, - snapshot, - edit_preview, - inputs, - buffer: edited_buffer.clone(), - buffer_snapshotted_at, - response_received_at, - }) + Self { + id: id.clone(), + prediction: Ok(EditPrediction { + id, + edits, + snapshot, + edit_preview, + inputs, + buffer: edited_buffer.clone(), + buffer_snapshotted_at, + response_received_at, + }), + } } +} +#[derive(Clone)] +pub struct EditPrediction { + pub id: EditPredictionId, + pub edits: Arc<[(Range, Arc)]>, + pub snapshot: BufferSnapshot, + pub edit_preview: EditPreview, + pub buffer: Entity, + pub buffer_snapshotted_at: Instant, + pub response_received_at: Instant, + pub inputs: EditPredictionInputs, +} + +#[derive(Debug, Clone, Serialize)] +pub struct EditPredictionInputs { + pub events: Vec>, + pub included_files: Vec, + pub cursor_point: cloud_llm_client::predict_edits_v3::Point, + pub cursor_path: Arc, +} + +impl EditPrediction { pub fn interpolate( &self, new_snapshot: &TextBufferSnapshot, diff --git a/crates/zeta/src/provider.rs b/crates/zeta/src/provider.rs index 76c950714afa808ea04cf5fead89979374f2b99b..b91df0963386543fbd1e8645e5893a35fe202cc5 100644 --- a/crates/zeta/src/provider.rs +++ b/crates/zeta/src/provider.rs @@ -1,6 +1,7 @@ use std::{cmp, sync::Arc, time::Duration}; use client::{Client, UserStore}; +use cloud_llm_client::EditPredictionRejectReason; use edit_prediction::{DataCollectionState, Direction, EditPredictionProvider}; use gpui::{App, Entity, prelude::*}; use language::ToPoint as _; @@ -132,7 +133,11 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { fn discard(&mut self, cx: &mut Context) { self.zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&self.project, cx); + zeta.reject_current_prediction( + EditPredictionRejectReason::Discarded, + &self.project, + cx, + ); }); } @@ -169,7 +174,11 @@ impl EditPredictionProvider for ZetaEditPredictionProvider { let Some(edits) = prediction.interpolate(&snapshot) else { self.zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&self.project, cx); + zeta.reject_current_prediction( + EditPredictionRejectReason::InterpolatedEmpty, + &self.project, + cx, + ); }); return None; }; diff --git a/crates/zeta/src/sweep_ai.rs b/crates/zeta/src/sweep_ai.rs index 
c88dda2ae2fd11dd37965e58560df9e98528c9d9..f40e9711f231523174a2d2edbd9fe1adb14ad498 100644 --- a/crates/zeta/src/sweep_ai.rs +++ b/crates/zeta/src/sweep_ai.rs @@ -18,7 +18,7 @@ use std::{ time::Instant, }; -use crate::{EditPrediction, EditPredictionId, EditPredictionInputs}; +use crate::{EditPredictionId, EditPredictionInputs, prediction::EditPredictionResult}; const SWEEP_API_URL: &str = "https://autocomplete.sweep.dev/backend/next_edit_autocomplete"; @@ -45,7 +45,7 @@ impl SweepAi { recent_paths: &VecDeque, diagnostic_search_range: Range, cx: &mut App, - ) -> Task>> { + ) -> Task>> { let debug_info = self.debug_info.clone(); let Some(api_token) = self.api_token.clone() else { return Task::ready(Ok(None)); @@ -242,8 +242,8 @@ impl SweepAi { cx.spawn(async move |cx| { let (id, edits, old_snapshot, response_received_at, inputs) = result.await?; - anyhow::Ok( - EditPrediction::new( + anyhow::Ok(Some( + EditPredictionResult::new( EditPredictionId(id.into()), &buffer, &old_snapshot, @@ -254,7 +254,7 @@ impl SweepAi { cx, ) .await, - ) + )) }) } } diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 26a2388a96e4a828fc4c7bd6fe5d3dbb57bfc911..5cf0191e2f8180ea7bcfbef07c046372d2ee22c9 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -3,9 +3,9 @@ use arrayvec::ArrayVec; use client::{Client, EditPredictionUsage, UserStore}; use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature}; use cloud_llm_client::{ - AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejection, - MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, MINIMUM_REQUIRED_VERSION_HEADER_NAME, - RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, + AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejectReason, + EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, + MINIMUM_REQUIRED_VERSION_HEADER_NAME, RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; @@ -74,6 +74,7 @@ use crate::onboarding_modal::ZedPredictModal; pub use crate::prediction::EditPrediction; pub use crate::prediction::EditPredictionId; pub use crate::prediction::EditPredictionInputs; +use crate::prediction::EditPredictionResult; use crate::rate_prediction_modal::{ NextEdit, PreviousEdit, RatePredictionsModal, ThumbsDownActivePrediction, ThumbsUpActivePrediction, @@ -310,6 +311,31 @@ impl ZetaProject { ) .collect() } + + fn cancel_pending_prediction( + &mut self, + pending_prediction: PendingPrediction, + cx: &mut Context, + ) { + self.cancelled_predictions.insert(pending_prediction.id); + + cx.spawn(async move |this, cx| { + let Some(prediction_id) = pending_prediction.task.await else { + return; + }; + + this.update(cx, |this, cx| { + this.reject_prediction( + prediction_id, + EditPredictionRejectReason::Canceled, + false, + cx, + ); + }) + .ok(); + }) + .detach() + } } #[derive(Debug, Clone)] @@ -373,6 +399,7 @@ impl PredictionRequestedBy { } } +#[derive(Debug)] struct PendingPrediction { id: usize, task: Task>, @@ -385,6 +412,18 @@ enum BufferEditPrediction<'a> { Jump { prediction: &'a EditPrediction }, } +#[cfg(test)] +impl std::ops::Deref for BufferEditPrediction<'_> { + type Target = EditPrediction; + + fn deref(&self) -> &Self::Target { + match self { + BufferEditPrediction::Local { prediction } => prediction, + BufferEditPrediction::Jump { prediction } => prediction, + } + } +} + struct RegisteredBuffer { 
snapshot: BufferSnapshot, _subscriptions: [gpui::Subscription; 2], @@ -467,7 +506,7 @@ impl Zeta { let (reject_tx, mut reject_rx) = mpsc::unbounded(); cx.spawn(async move |this, cx| { while let Some(()) = reject_rx.next().await { - this.update(cx, |this, cx| this.reject_edit_predictions(cx))? + this.update(cx, |this, cx| this.flush_rejected_predictions(cx))? .await .log_err(); } @@ -818,7 +857,7 @@ impl Zeta { }; let request_id = prediction.prediction.id.to_string(); for pending_prediction in mem::take(&mut project_state.pending_predictions) { - self.cancel_pending_prediction(pending_prediction, cx); + project_state.cancel_pending_prediction(pending_prediction, cx); } let client = self.client.clone(); @@ -856,7 +895,7 @@ impl Zeta { .detach_and_log_err(cx); } - fn reject_edit_predictions(&mut self, cx: &mut Context) -> Task> { + fn flush_rejected_predictions(&mut self, cx: &mut Context) -> Task> { match self.edit_prediction_model { ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {} ZetaEditPredictionModel::Sweep => return Task::ready(anyhow::Ok(())), @@ -904,11 +943,16 @@ impl Zeta { }) } - fn discard_current_prediction(&mut self, project: &Entity, cx: &mut Context) { + fn reject_current_prediction( + &mut self, + reason: EditPredictionRejectReason, + project: &Entity, + cx: &mut Context, + ) { if let Some(project_state) = self.projects.get_mut(&project.entity_id()) { project_state.pending_predictions.clear(); if let Some(prediction) = project_state.current_prediction.take() { - self.discard_prediction(prediction.prediction.id, prediction.was_shown, cx); + self.reject_prediction(prediction.prediction.id, reason, prediction.was_shown, cx); } }; } @@ -929,14 +973,16 @@ impl Zeta { } } - fn discard_prediction( + fn reject_prediction( &mut self, prediction_id: EditPredictionId, + reason: EditPredictionRejectReason, was_shown: bool, cx: &mut Context, ) { self.rejected_predictions.push(EditPredictionRejection { request_id: prediction_id.to_string(), + reason, was_shown, }); @@ -944,34 +990,16 @@ impl Zeta { self.rejected_predictions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST; let reject_tx = self.reject_predictions_tx.clone(); self.reject_predictions_debounce_task = Some(cx.spawn(async move |_this, cx| { - const DISCARD_COMPLETIONS_DEBOUNCE: Duration = Duration::from_secs(15); + const REJECT_REQUEST_DEBOUNCE: Duration = Duration::from_secs(15); if !reached_request_limit { cx.background_executor() - .timer(DISCARD_COMPLETIONS_DEBOUNCE) + .timer(REJECT_REQUEST_DEBOUNCE) .await; } reject_tx.unbounded_send(()).log_err(); })); } - fn cancel_pending_prediction( - &self, - pending_prediction: PendingPrediction, - cx: &mut Context, - ) { - cx.spawn(async move |this, cx| { - let Some(prediction_id) = pending_prediction.task.await else { - return; - }; - - this.update(cx, |this, cx| { - this.discard_prediction(prediction_id, false, cx); - }) - .ok(); - }) - .detach() - } - fn is_refreshing(&self, project: &Entity) -> bool { self.projects .get(&project.entity_id()) @@ -995,38 +1023,15 @@ impl Zeta { return Task::ready(anyhow::Ok(None)); }; - let project = project.clone(); - cx.spawn(async move |cx| { - if let Some(prediction) = request_task.await? 
{ - let id = prediction.id.clone(); - this.update(cx, |this, cx| { - let project_state = this - .projects - .get_mut(&project.entity_id()) - .context("Project not found")?; - - let new_prediction = CurrentEditPrediction { - requested_by: PredictionRequestedBy::Buffer(buffer.entity_id()), - prediction: prediction, - was_shown: false, - }; - - if project_state - .current_prediction - .as_ref() - .is_none_or(|old_prediction| { - new_prediction.should_replace_prediction(&old_prediction, cx) - }) - { - project_state.current_prediction = Some(new_prediction); - cx.notify(); - } - anyhow::Ok(()) - })??; - Ok(Some(id)) - } else { - Ok(None) - } + cx.spawn(async move |_cx| { + request_task.await.map(|prediction_result| { + prediction_result.map(|prediction_result| { + ( + prediction_result, + PredictionRequestedBy::Buffer(buffer.entity_id()), + ) + }) + }) }) }) } @@ -1076,7 +1081,7 @@ impl Zeta { return anyhow::Ok(None); }; - let Some(prediction) = this + let Some(prediction_result) = this .update(cx, |this, cx| { this.request_prediction(&project, &jump_buffer, jump_position, cx) })? @@ -1085,21 +1090,23 @@ impl Zeta { return anyhow::Ok(None); }; - let id = prediction.id.clone(); this.update(cx, |this, cx| { - if let Some(zeta_project) = this.projects.get_mut(&project.entity_id()) { - zeta_project.current_prediction.get_or_insert_with(|| { - cx.notify(); - CurrentEditPrediction { - requested_by: PredictionRequestedBy::DiagnosticsUpdate, - prediction, - was_shown: false, + Some(( + if this + .get_or_init_zeta_project(&project, cx) + .current_prediction + .is_none() + { + prediction_result + } else { + EditPredictionResult { + id: prediction_result.id, + prediction: Err(EditPredictionRejectReason::CurrentPreferred), } - }); - } - })?; - - anyhow::Ok(Some(id)) + }, + PredictionRequestedBy::DiagnosticsUpdate, + )) + }) }) }); } @@ -1117,7 +1124,8 @@ impl Zeta { do_refresh: impl FnOnce( WeakEntity, &mut AsyncApp, - ) -> Task>> + ) + -> Task>> + 'static, ) { let zeta_project = self.get_or_init_zeta_project(&project, cx); @@ -1152,22 +1160,77 @@ impl Zeta { return None; } - let edit_prediction_id = do_refresh(this.clone(), cx).await.log_err().flatten(); + let new_prediction_result = do_refresh(this.clone(), cx).await.log_err().flatten(); + let new_prediction_id = new_prediction_result + .as_ref() + .map(|(prediction, _)| prediction.id.clone()); // When a prediction completes, remove it from the pending list, and cancel // any pending predictions that were enqueued before it. 
this.update(cx, |this, cx| { let zeta_project = this.get_or_init_zeta_project(&project, cx); - zeta_project + + let is_cancelled = zeta_project .cancelled_predictions .remove(&pending_prediction_id); + let new_current_prediction = if !is_cancelled + && let Some((prediction_result, requested_by)) = new_prediction_result + { + match prediction_result.prediction { + Ok(prediction) => { + let new_prediction = CurrentEditPrediction { + requested_by, + prediction, + was_shown: false, + }; + + if let Some(current_prediction) = + zeta_project.current_prediction.as_ref() + { + if new_prediction.should_replace_prediction(¤t_prediction, cx) + { + this.reject_current_prediction( + EditPredictionRejectReason::Replaced, + &project, + cx, + ); + + Some(new_prediction) + } else { + this.reject_prediction( + new_prediction.prediction.id, + EditPredictionRejectReason::CurrentPreferred, + false, + cx, + ); + None + } + } else { + Some(new_prediction) + } + } + Err(reject_reason) => { + this.reject_prediction(prediction_result.id, reject_reason, false, cx); + None + } + } + } else { + None + }; + + let zeta_project = this.get_or_init_zeta_project(&project, cx); + + if let Some(new_prediction) = new_current_prediction { + zeta_project.current_prediction = Some(new_prediction); + } + let mut pending_predictions = mem::take(&mut zeta_project.pending_predictions); for (ix, pending_prediction) in pending_predictions.iter().enumerate() { if pending_prediction.id == pending_prediction_id { pending_predictions.remove(ix); for pending_prediction in pending_predictions.drain(0..ix) { - this.cancel_pending_prediction(pending_prediction, cx) + zeta_project.cancel_pending_prediction(pending_prediction, cx) } break; } @@ -1178,7 +1241,7 @@ impl Zeta { }) .ok(); - edit_prediction_id + new_prediction_id }); if zeta_project.pending_predictions.len() <= 1 { @@ -1192,10 +1255,7 @@ impl Zeta { id: pending_prediction_id, task, }); - zeta_project - .cancelled_predictions - .insert(pending_prediction.id); - self.cancel_pending_prediction(pending_prediction, cx); + zeta_project.cancel_pending_prediction(pending_prediction, cx); } } @@ -1205,7 +1265,7 @@ impl Zeta { active_buffer: &Entity, position: language::Anchor, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { self.request_prediction_internal( project.clone(), active_buffer.clone(), @@ -1222,7 +1282,7 @@ impl Zeta { position: language::Anchor, allow_jump: bool, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { const DIAGNOSTIC_LINES_RANGE: u32 = 20; self.get_or_init_zeta_project(&project, cx); @@ -1268,9 +1328,7 @@ impl Zeta { }; cx.spawn(async move |this, cx| { - let prediction = task - .await? 
- .filter(|prediction| !prediction.edits.is_empty()); + let prediction = task.await?; if prediction.is_none() && allow_jump { let cursor_point = position.to_point(&snapshot); @@ -1392,7 +1450,7 @@ impl Zeta { position: language::Anchor, events: Vec>, cx: &mut Context, - ) -> Task>> { + ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); let index_state = project_state.and_then(|state| { @@ -1689,7 +1747,7 @@ impl Zeta { let (res, usage) = response?; let request_id = EditPredictionId(res.id.clone().into()); let Some(mut output_text) = text_from_response(res) else { - return Ok((None, usage)); + return Ok((Some((request_id, None)), usage)); }; if output_text.contains(CURSOR_MARKER) { @@ -1747,11 +1805,13 @@ impl Zeta { anyhow::Ok(( Some(( request_id, - inputs, - edited_buffer, - edited_buffer_snapshot.clone(), - edits, - received_response_at, + Some(( + inputs, + edited_buffer, + edited_buffer_snapshot.clone(), + edits, + received_response_at, + )), )), usage, )) @@ -1760,30 +1820,40 @@ impl Zeta { cx.spawn({ async move |this, cx| { + let Some((id, prediction)) = + Self::handle_api_response(&this, request_task.await, cx)? + else { + return Ok(None); + }; + let Some(( - id, inputs, edited_buffer, edited_buffer_snapshot, edits, received_response_at, - )) = Self::handle_api_response(&this, request_task.await, cx)? + )) = prediction else { - return Ok(None); + return Ok(Some(EditPredictionResult { + id, + prediction: Err(EditPredictionRejectReason::Empty), + })); }; // TODO telemetry: duration, etc - Ok(EditPrediction::new( - id, - &edited_buffer, - &edited_buffer_snapshot, - edits.into(), - buffer_snapshotted_at, - received_response_at, - inputs, - cx, - ) - .await) + Ok(Some( + EditPredictionResult::new( + id, + &edited_buffer, + &edited_buffer_snapshot, + edits.into(), + buffer_snapshotted_at, + received_response_at, + inputs, + cx, + ) + .await, + )) } }) } @@ -2806,6 +2876,9 @@ mod tests { use client::UserStore; use clock::FakeSystemClock; + use cloud_llm_client::{ + EditPredictionRejectReason, EditPredictionRejection, RejectEditPredictionsBody, + }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use futures::{ AsyncReadExt, StreamExt, @@ -2830,7 +2903,7 @@ mod tests { #[gpui::test] async fn test_current_state(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -2861,7 +2934,7 @@ mod tests { zeta.update(cx, |zeta, cx| { zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(model_response(indoc! 
{r" @@ -2888,7 +2961,7 @@ mod tests { let refresh_task = zeta.update(cx, |zeta, cx| { zeta.refresh_context(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(open_ai::Response { id: Uuid::new_v4().to_string(), @@ -2929,14 +3002,14 @@ mod tests { refresh_task.await.unwrap(); zeta.update(cx, |zeta, cx| { - zeta.discard_current_prediction(&project, cx); + zeta.reject_current_prediction(EditPredictionRejectReason::Discarded, &project, cx); }); // Prediction for another file zeta.update(cx, |zeta, cx| { zeta.refresh_prediction_from_buffer(project.clone(), buffer1.clone(), position, cx) }); - let (_request, respond_tx) = req_rx.next().await.unwrap(); + let (_request, respond_tx) = requests.predict.next().await.unwrap(); respond_tx .send(model_response(indoc! {r#" --- a/root/2.txt @@ -2977,7 +3050,7 @@ mod tests { #[gpui::test] async fn test_simple_request(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -3002,7 +3075,7 @@ mod tests { zeta.request_prediction(&project, &buffer, position, cx) }); - let (_, respond_tx) = req_rx.next().await.unwrap(); + let (_, respond_tx) = requests.predict.next().await.unwrap(); // TODO Put back when we have a structured request again // assert_eq!( @@ -3029,7 +3102,7 @@ mod tests { "})) .unwrap(); - let prediction = prediction_task.await.unwrap().unwrap(); + let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap(); assert_eq!(prediction.edits.len(), 1); assert_eq!( @@ -3041,7 +3114,7 @@ mod tests { #[gpui::test] async fn test_request_events(cx: &mut TestAppContext) { - let (zeta, mut req_rx) = init_test(cx); + let (zeta, mut requests) = init_test(cx); let fs = FakeFs::new(cx.executor()); fs.insert_tree( "/root", @@ -3075,7 +3148,7 @@ mod tests { zeta.request_prediction(&project, &buffer, position, cx) }); - let (request, respond_tx) = req_rx.next().await.unwrap(); + let (request, respond_tx) = requests.predict.next().await.unwrap(); let prompt = prompt_from_request(&request); assert!( @@ -3103,7 +3176,7 @@ mod tests { "#})) .unwrap(); - let prediction = prediction_task.await.unwrap().unwrap(); + let prediction = prediction_task.await.unwrap().unwrap().prediction.unwrap(); assert_eq!(prediction.edits.len(), 1); assert_eq!( @@ -3113,6 +3186,522 @@ mod tests { assert_eq!(prediction.edits[0].1.as_ref(), " are you?"); } + #[gpui::test] + async fn test_empty_prediction(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + const NO_OP_DIFF: &str = indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! 
+ -How + +How + Bye + "}; + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let response = model_response(NO_OP_DIFF); + let id = response.id.clone(); + respond_tx.send(response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .is_none() + ); + }); + + // prediction is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: id, + reason: EditPredictionRejectReason::Empty, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_interpolated_empty(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + + buffer.update(cx, |buffer, cx| { + buffer.set_text("Hello!\nHow are you?\nBye", cx); + }); + + let response = model_response(SIMPLE_DIFF); + let id = response.id.clone(); + respond_tx.send(response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .is_none() + ); + }); + + // prediction is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: id, + reason: EditPredictionRejectReason::InterpolatedEmpty, + was_shown: false + }] + ); + } + + const SIMPLE_DIFF: &str = indoc! { r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are you? 
+ Bye + "}; + + #[gpui::test] + async fn test_replace_current(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_tx.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // a second request is triggered + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let second_response = model_response(SIMPLE_DIFF); + let second_id = second_response.id.clone(); + respond_tx.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // second replaces first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + // first is reported as replaced + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Replaced, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_current_preferred(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_tx.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // a second request is triggered + zeta.update(cx, |zeta, cx| { + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_tx) = requests.predict.next().await.unwrap(); + // worse than current prediction + let second_response = model_response(indoc! 
{ r" + --- a/root/foo.md + +++ b/root/foo.md + @@ ... @@ + Hello! + -How + +How are + Bye + "}); + let second_id = second_response.id.clone(); + respond_tx.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // first is preferred over second + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + // second is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: second_id, + reason: EditPredictionRejectReason::CurrentPreferred, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_cancel_earlier_pending_requests(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + // start two refresh tasks + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + let (_, respond_first) = requests.predict.next().await.unwrap(); + let (_, respond_second) = requests.predict.next().await.unwrap(); + + // wait for throttle + cx.run_until_parked(); + + // second responds first + let second_response = model_response(SIMPLE_DIFF); + let second_id = second_response.id.clone(); + respond_second.send(second_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is second + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_first.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is still second, since first was cancelled + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + second_id + ); + }); + + // first is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + cx.run_until_parked(); + + assert_eq!( + &reject_request.rejections, + &[EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Canceled, + was_shown: false + }] + ); + } + + #[gpui::test] + async fn test_cancel_second_on_third_request(cx: &mut TestAppContext) { + let (zeta, mut requests) = init_test(cx); + let fs = FakeFs::new(cx.executor()); + fs.insert_tree( + "/root", + json!({ + "foo.md": "Hello!\nHow\nBye\n" + }), + ) + .await; + let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await; + + let buffer = project + .update(cx, |project, cx| { + let path = project.find_project_path(path!("root/foo.md"), cx).unwrap(); + project.open_buffer(path, cx) + }) + .await + .unwrap(); + let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot()); + let position = 
snapshot.anchor_before(language::Point::new(1, 3)); + + zeta.update(cx, |zeta, cx| { + // start two refresh tasks + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + }); + + // wait for throttle, so requests are sent + cx.run_until_parked(); + + let (_, respond_first) = requests.predict.next().await.unwrap(); + let (_, respond_second) = requests.predict.next().await.unwrap(); + + zeta.update(cx, |zeta, cx| { + // start a third request + zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx); + + // 2 are pending, so 2nd is cancelled + assert_eq!( + zeta.get_or_init_zeta_project(&project, cx) + .cancelled_predictions + .iter() + .copied() + .collect::>(), + [1] + ); + }); + + // wait for throttle + cx.run_until_parked(); + + let (_, respond_third) = requests.predict.next().await.unwrap(); + + let first_response = model_response(SIMPLE_DIFF); + let first_id = first_response.id.clone(); + respond_first.send(first_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + let cancelled_response = model_response(SIMPLE_DIFF); + let cancelled_id = cancelled_response.id.clone(); + respond_second.send(cancelled_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // current prediction is still first, since second was cancelled + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + first_id + ); + }); + + let third_response = model_response(SIMPLE_DIFF); + let third_response_id = third_response.id.clone(); + respond_third.send(third_response).unwrap(); + + cx.run_until_parked(); + + zeta.read_with(cx, |zeta, cx| { + // third completes and replaces first + assert_eq!( + zeta.current_prediction_for_buffer(&buffer, &project, cx) + .unwrap() + .id + .0, + third_response_id + ); + }); + + // second is reported as rejected + let (reject_request, _) = requests.reject.next().await.unwrap(); + + cx.run_until_parked(); + + assert_eq!( + &reject_request.rejections, + &[ + EditPredictionRejection { + request_id: cancelled_id, + reason: EditPredictionRejectReason::Canceled, + was_shown: false + }, + EditPredictionRejection { + request_id: first_id, + reason: EditPredictionRejectReason::Replaced, + was_shown: false + } + ] + ); + } + // Skipped until we start including diagnostics in prompt // #[gpui::test] // async fn test_request_diagnostics(cx: &mut TestAppContext) { @@ -3242,24 +3831,26 @@ mod tests { content } - fn init_test( - cx: &mut TestAppContext, - ) -> ( - Entity, - mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, - ) { + struct RequestChannels { + predict: mpsc::UnboundedReceiver<(open_ai::Request, oneshot::Sender)>, + reject: mpsc::UnboundedReceiver<(RejectEditPredictionsBody, oneshot::Sender<()>)>, + } + + fn init_test(cx: &mut TestAppContext) -> (Entity, RequestChannels) { cx.update(move |cx| { let settings_store = SettingsStore::test(cx); cx.set_global(settings_store); zlog::init_test(); - let (req_tx, req_rx) = mpsc::unbounded(); + let (predict_req_tx, predict_req_rx) = mpsc::unbounded(); + let (reject_req_tx, reject_req_rx) = mpsc::unbounded(); let http_client = FakeHttpClient::create({ move |req| { let uri = req.uri().path().to_string(); let mut body = req.into_body(); - let 
req_tx = req_tx.clone(); + let predict_req_tx = predict_req_tx.clone(); + let reject_req_tx = reject_req_tx.clone(); async move { let resp = match uri.as_str() { "/client/llm_tokens" => serde_json::to_string(&json!({ @@ -3272,7 +3863,16 @@ mod tests { let req = serde_json::from_slice(&buf).unwrap(); let (res_tx, res_rx) = oneshot::channel(); - req_tx.unbounded_send((req, res_tx)).unwrap(); + predict_req_tx.unbounded_send((req, res_tx)).unwrap(); + serde_json::to_string(&res_rx.await?).unwrap() + } + "/predict_edits/reject" => { + let mut buf = Vec::new(); + body.read_to_end(&mut buf).await.ok(); + let req = serde_json::from_slice(&buf).unwrap(); + + let (res_tx, res_rx) = oneshot::channel(); + reject_req_tx.unbounded_send((req, res_tx)).unwrap(); serde_json::to_string(&res_rx.await?).unwrap() } _ => { @@ -3293,7 +3893,13 @@ mod tests { let user_store = cx.new(|cx| UserStore::new(client.clone(), cx)); let zeta = Zeta::global(&client, &user_store, cx); - (zeta, req_rx) + ( + zeta, + RequestChannels { + predict: predict_req_rx, + reject: reject_req_rx, + }, + ) }) } } diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 7f80d60d5efcbbd0bd7b9426508c344c063d5597..96d175d5eb11c2c8be40779cf77bfb743d39dff6 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -4,7 +4,7 @@ use std::{fmt::Write, ops::Range, path::Path, sync::Arc, time::Instant}; use crate::{ EditPredictionId, ZedUpdateRequiredError, Zeta, - prediction::{EditPrediction, EditPredictionInputs}, + prediction::{EditPredictionInputs, EditPredictionResult}, }; use anyhow::{Context as _, Result}; use cloud_llm_client::{ @@ -36,7 +36,7 @@ pub(crate) fn request_prediction_with_zeta1( position: language::Anchor, events: Vec>, cx: &mut Context, -) -> Task>> { +) -> Task>> { let buffer = buffer.clone(); let buffer_snapshotted_at = Instant::now(); let client = zeta.client.clone(); @@ -216,7 +216,7 @@ pub(crate) fn request_prediction_with_zeta1( ); } - edit_prediction + edit_prediction.map(Some) }) } @@ -229,7 +229,7 @@ fn process_completion_response( buffer_snapshotted_at: Instant, received_response_at: Instant, cx: &AsyncApp, -) -> Task>> { +) -> Task> { let snapshot = snapshot.clone(); let request_id = prediction_response.request_id; let output_excerpt = prediction_response.output_excerpt; @@ -246,8 +246,9 @@ fn process_completion_response( .await? 
.into(); - Ok(EditPrediction::new( - EditPredictionId(request_id.into()), + let id = EditPredictionId(request_id.into()); + Ok(EditPredictionResult::new( + id, &buffer, &snapshot, edits, diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs index eb12f81af25d72b5e7003187ab0a9536622c9a74..9b7abb216f5e8e7a9c8bd14a33c2f6ecd9f16174 100644 --- a/crates/zeta/src/zeta_tests.rs +++ b/crates/zeta/src/zeta_tests.rs @@ -538,7 +538,7 @@ async fn run_edit_prediction( let prediction_task = zeta.update(cx, |zeta, cx| { zeta.request_prediction(&project, buffer, cursor, cx) }); - prediction_task.await.unwrap().unwrap() + prediction_task.await.unwrap().unwrap().prediction.unwrap() } async fn make_test_zeta( diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index 8a1a4131fb684a5186b2111f9d922fa34d6972e1..c2d68a471fa5de7765c1042473fc8118a3fc9415 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -235,7 +235,10 @@ pub async fn perform_predict( let mut result = Arc::into_inner(result).unwrap().into_inner().unwrap(); result.diff = prediction - .and_then(|prediction| prediction.edit_preview.as_unified_diff(&prediction.edits)) + .and_then(|prediction| { + let prediction = prediction.prediction.ok()?; + prediction.edit_preview.as_unified_diff(&prediction.edits) + }) .unwrap_or_default(); anyhow::Ok(result) From 36a3b41f53182c3b528d027de7270d3ea204d0ee Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Wed, 26 Nov 2025 17:34:29 -0300 Subject: [PATCH 0412/1030] edit prediction: Request trigger (#43588) Adds a `trigger` field to the zeta1/zeta2 prediction requests so that we can distinguish between editor, diagnostic, and zeta-cli requests. Release Notes: - N/A --- .../cloud_llm_client/src/cloud_llm_client.rs | 11 ++++++ .../cloud_llm_client/src/predict_edits_v3.rs | 4 ++- crates/zeta/src/zeta.rs | 35 ++++++++++++++++--- crates/zeta/src/zeta1.rs | 7 +++- crates/zeta/src/zeta_tests.rs | 2 +- crates/zeta_cli/src/main.rs | 1 + crates/zeta_cli/src/predict.rs | 8 ++++- 7 files changed, 59 insertions(+), 9 deletions(-) diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs index 15b5a4eda4f8473f48cc66d255598cc6c1d09f08..35916bd6801485c8c2bfde9330a47da19025f2c3 100644 --- a/crates/cloud_llm_client/src/cloud_llm_client.rs +++ b/crates/cloud_llm_client/src/cloud_llm_client.rs @@ -169,6 +169,17 @@ pub struct PredictEditsBody { /// Info about the git repository state, only present when can_collect_data is true. #[serde(skip_serializing_if = "Option::is_none", default)] pub git_info: Option, + /// The trigger for this request. 
+ #[serde(default)] + pub trigger: PredictEditsRequestTrigger, +} + +#[derive(Default, Debug, Clone, Copy, Serialize, Deserialize)] +pub enum PredictEditsRequestTrigger { + Diagnostics, + Cli, + #[default] + Other, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/crates/cloud_llm_client/src/predict_edits_v3.rs b/crates/cloud_llm_client/src/predict_edits_v3.rs index 47e5e71589c806f71725ee4f218ca4a86bee62d0..de8d69dc14870c5583679753c9a75a477e0cc759 100644 --- a/crates/cloud_llm_client/src/predict_edits_v3.rs +++ b/crates/cloud_llm_client/src/predict_edits_v3.rs @@ -9,7 +9,7 @@ use std::{ use strum::EnumIter; use uuid::Uuid; -use crate::PredictEditsGitInfo; +use crate::{PredictEditsGitInfo, PredictEditsRequestTrigger}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct PlanContextRetrievalRequest { @@ -53,6 +53,8 @@ pub struct PredictEditsRequest { pub prompt_max_bytes: Option, #[serde(default)] pub prompt_format: PromptFormat, + #[serde(default)] + pub trigger: PredictEditsRequestTrigger, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/crates/zeta/src/zeta.rs b/crates/zeta/src/zeta.rs index 5cf0191e2f8180ea7bcfbef07c046372d2ee22c9..8fda34133343e465b1b56835b116770b856cfe36 100644 --- a/crates/zeta/src/zeta.rs +++ b/crates/zeta/src/zeta.rs @@ -5,7 +5,8 @@ use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature}; use cloud_llm_client::{ AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejectReason, EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST, - MINIMUM_REQUIRED_VERSION_HEADER_NAME, RejectEditPredictionsBody, ZED_VERSION_HEADER_NAME, + MINIMUM_REQUIRED_VERSION_HEADER_NAME, PredictEditsRequestTrigger, RejectEditPredictionsBody, + ZED_VERSION_HEADER_NAME, }; use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery}; use cloud_zeta2_prompt::{CURSOR_MARKER, DEFAULT_MAX_PROMPT_BYTES}; @@ -1016,7 +1017,13 @@ impl Zeta { self.queue_prediction_refresh(project.clone(), buffer.entity_id(), cx, move |this, cx| { let Some(request_task) = this .update(cx, |this, cx| { - this.request_prediction(&project, &buffer, position, cx) + this.request_prediction( + &project, + &buffer, + position, + PredictEditsRequestTrigger::Other, + cx, + ) }) .log_err() else { @@ -1083,7 +1090,13 @@ impl Zeta { let Some(prediction_result) = this .update(cx, |this, cx| { - this.request_prediction(&project, &jump_buffer, jump_position, cx) + this.request_prediction( + &project, + &jump_buffer, + jump_position, + PredictEditsRequestTrigger::Diagnostics, + cx, + ) })? .await? 
else { @@ -1264,12 +1277,14 @@ impl Zeta { project: &Entity, active_buffer: &Entity, position: language::Anchor, + trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { self.request_prediction_internal( project.clone(), active_buffer.clone(), position, + trigger, cx.has_flag::(), cx, ) @@ -1280,6 +1295,7 @@ impl Zeta { project: Entity, active_buffer: Entity, position: language::Anchor, + trigger: PredictEditsRequestTrigger, allow_jump: bool, cx: &mut Context, ) -> Task>> { @@ -1305,6 +1321,7 @@ impl Zeta { snapshot.clone(), position, events, + trigger, cx, ), ZetaEditPredictionModel::Zeta2 => self.request_prediction_with_zeta2( @@ -1313,6 +1330,7 @@ impl Zeta { snapshot.clone(), position, events, + trigger, cx, ), ZetaEditPredictionModel::Sweep => self.sweep_ai.request_prediction_with_sweep( @@ -1349,6 +1367,7 @@ impl Zeta { project, jump_buffer, jump_position, + trigger, false, cx, ) @@ -1449,6 +1468,7 @@ impl Zeta { active_snapshot: BufferSnapshot, position: language::Anchor, events: Vec>, + trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { let project_state = self.projects.get(&project.entity_id()); @@ -1621,6 +1641,7 @@ impl Zeta { signatures: vec![], excerpt_parent: None, git_info: None, + trigger, } } ContextMode::Syntax(context_options) => { @@ -1647,6 +1668,7 @@ impl Zeta { index_state.as_deref(), Some(options.max_prompt_bytes), options.prompt_format, + trigger, ) } }; @@ -2416,6 +2438,7 @@ impl Zeta { index_state.as_deref(), Some(options.max_prompt_bytes), options.prompt_format, + PredictEditsRequestTrigger::Other, ) }) }) @@ -2574,6 +2597,7 @@ fn make_syntax_context_cloud_request( index_state: Option<&SyntaxIndexState>, prompt_max_bytes: Option, prompt_format: PromptFormat, + trigger: PredictEditsRequestTrigger, ) -> predict_edits_v3::PredictEditsRequest { let mut signatures = Vec::new(); let mut declaration_to_signature_index = HashMap::default(); @@ -2653,6 +2677,7 @@ fn make_syntax_context_cloud_request( debug_info, prompt_max_bytes, prompt_format, + trigger, } } @@ -3072,7 +3097,7 @@ mod tests { let position = snapshot.anchor_before(language::Point::new(1, 3)); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) + zeta.request_prediction(&project, &buffer, position, Default::default(), cx) }); let (_, respond_tx) = requests.predict.next().await.unwrap(); @@ -3145,7 +3170,7 @@ mod tests { let position = snapshot.anchor_before(language::Point::new(1, 3)); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, &buffer, position, cx) + zeta.request_prediction(&project, &buffer, position, Default::default(), cx) }); let (request, respond_tx) = requests.predict.next().await.unwrap(); diff --git a/crates/zeta/src/zeta1.rs b/crates/zeta/src/zeta1.rs index 96d175d5eb11c2c8be40779cf77bfb743d39dff6..0be5fad301242c51c4ad58c60a6d2fcb3441ea08 100644 --- a/crates/zeta/src/zeta1.rs +++ b/crates/zeta/src/zeta1.rs @@ -8,7 +8,8 @@ use crate::{ }; use anyhow::{Context as _, Result}; use cloud_llm_client::{ - PredictEditsBody, PredictEditsGitInfo, PredictEditsResponse, predict_edits_v3::Event, + PredictEditsBody, PredictEditsGitInfo, PredictEditsRequestTrigger, PredictEditsResponse, + predict_edits_v3::Event, }; use gpui::{App, AppContext as _, AsyncApp, Context, Entity, SharedString, Task}; use input_excerpt::excerpt_for_cursor_position; @@ -35,6 +36,7 @@ pub(crate) fn request_prediction_with_zeta1( snapshot: BufferSnapshot, position: language::Anchor, events: Vec>, + 
trigger: PredictEditsRequestTrigger, cx: &mut Context, ) -> Task>> { let buffer = buffer.clone(); @@ -70,6 +72,7 @@ pub(crate) fn request_prediction_with_zeta1( &snapshot, cursor_point, prompt_for_events, + trigger, cx, ); @@ -402,6 +405,7 @@ pub fn gather_context( snapshot: &BufferSnapshot, cursor_point: language::Point, prompt_for_events: impl FnOnce() -> (String, usize) + Send + 'static, + trigger: PredictEditsRequestTrigger, cx: &App, ) -> Task> { cx.background_spawn({ @@ -425,6 +429,7 @@ pub fn gather_context( git_info: None, outline: None, speculated_output: None, + trigger, }; Ok(GatherContextOutput { diff --git a/crates/zeta/src/zeta_tests.rs b/crates/zeta/src/zeta_tests.rs index 9b7abb216f5e8e7a9c8bd14a33c2f6ecd9f16174..3549cda36d575a989f5bc4bd5bb8bea6810d3180 100644 --- a/crates/zeta/src/zeta_tests.rs +++ b/crates/zeta/src/zeta_tests.rs @@ -536,7 +536,7 @@ async fn run_edit_prediction( zeta.update(cx, |zeta, cx| zeta.register_buffer(buffer, &project, cx)); cx.background_executor.run_until_parked(); let prediction_task = zeta.update(cx, |zeta, cx| { - zeta.request_prediction(&project, buffer, cursor, cx) + zeta.request_prediction(&project, buffer, cursor, Default::default(), cx) }); prediction_task.await.unwrap().unwrap().prediction.unwrap() } diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index d13f0710cdc4d16666594d25dc639d337fb6bdfc..2d5a23e31f463455871494d123a4988b41b5bd66 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -454,6 +454,7 @@ async fn zeta1_context( &snapshot, clipped_cursor, prompt_for_events, + cloud_llm_client::PredictEditsRequestTrigger::Cli, cx, ) })? diff --git a/crates/zeta_cli/src/predict.rs b/crates/zeta_cli/src/predict.rs index c2d68a471fa5de7765c1042473fc8118a3fc9415..99fe65cfa3221a1deb18e767e8faa8e1a1fca0ac 100644 --- a/crates/zeta_cli/src/predict.rs +++ b/crates/zeta_cli/src/predict.rs @@ -226,7 +226,13 @@ pub async fn perform_predict( let prediction = zeta .update(cx, |zeta, cx| { - zeta.request_prediction(&project, &cursor_buffer, cursor_anchor, cx) + zeta.request_prediction( + &project, + &cursor_buffer, + cursor_anchor, + cloud_llm_client::PredictEditsRequestTrigger::Cli, + cx, + ) })? .await?; From ae649c66ed45cbb6336a4df35bf234671f088b2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ole=20J=C3=B8rgen=20Br=C3=B8nner?= Date: Wed, 26 Nov 2025 22:16:50 +0100 Subject: [PATCH 0413/1030] Make key repeat rate on Wayland more precise (2) (#43589) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CLOSES #39042 This is a reopening of #34985+. _Original descrioption_: In Wayland, the client implement key repeat themself. In Zed this is ultimately handled by the gpui crate by inserting a timer source into the event loop which repeat itself if the key is still held down [1]. But it seems the processing of the repeated key event happen synchronously inside the timer source handler, meaning the effective rate become slightly lower (since the repeated timer is scheduled using the 1/rate as delay). I measured the event processing time on my laptop and it's typically around 3ms, but sometimes spiking at 10ms. At low key repeat rates this is probably not _very_ noticeable. I see the default in Zed is set to a (measly) 16/s, but I assume most systems will use something closer to 25, which is a 40ms delay. So ~3ms is around 7.5% of the delay. At higher rate the discrepancy become worse of course. 
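As a rough sketch of that arithmetic (the 25/s rate and the ~3ms handler time are the assumed numbers from above, not anything measured in Zed's code):

```rust
fn main() {
    // When the next repeat is scheduled 1/rate *after* the handler finishes,
    // the handler's processing time is silently added to every period.
    let rate = 25.0_f64; // configured repeats per second (assumption)
    let processing_ms = 3.0_f64; // typical handler time quoted above (assumption)
    let intended_period_ms = 1000.0 / rate;
    let effective_period_ms = intended_period_ms + processing_ms;
    let effective_rate = 1000.0 / effective_period_ms;
    println!(
        "intended {rate:.1}/s, effective {effective_rate:.1}/s ({:.1}% slower)",
        (1.0 - effective_rate / rate) * 100.0
    );
}
```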
I can visible notice the spikes, and doing some crude stopwatch measurements using gedit as a reference I can reproduce around 5-10% slower rates in Zed. IMO this is significant enough to warrant improving, especially since some people can get quite used the repeat rate and might feel something being "off" in Zed. ~~The suggested fix simply subtract the processing time from the next delay timer.~~ [1] https://github.com/olejorgenb/zed/blob/32df726f3b7fa83e7399f6629c59e0a3f3fff125/crates/gpui/src/platform/linux/wayland/client.rs#L1355 Release Notes: - Improved Wayland (Linux) key repeat rate precision --- crates/gpui/src/platform/linux/wayland/client.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/crates/gpui/src/platform/linux/wayland/client.rs b/crates/gpui/src/platform/linux/wayland/client.rs index 9a9ec213edd27d9ab7ac2e1437f408ac7d78f08e..a2324648fbb332e75af7df74923806797d93a05a 100644 --- a/crates/gpui/src/platform/linux/wayland/client.rs +++ b/crates/gpui/src/platform/linux/wayland/client.rs @@ -1419,6 +1419,7 @@ impl Dispatch for WaylandClientStatePtr { state.repeat.current_keycode = Some(keycode); let rate = state.repeat.characters_per_second; + let repeat_interval = Duration::from_secs(1) / rate; let id = state.repeat.current_id; state .loop_handle @@ -1428,7 +1429,7 @@ impl Dispatch for WaylandClientStatePtr { is_held: true, prefer_character_input: false, }); - move |_event, _metadata, this| { + move |event_timestamp, _metadata, this| { let mut client = this.get_client(); let mut state = client.borrow_mut(); let is_repeating = id == state.repeat.current_id @@ -1445,7 +1446,8 @@ impl Dispatch for WaylandClientStatePtr { drop(state); focused_window.handle_input(input.clone()); - TimeoutAction::ToDuration(Duration::from_secs(1) / rate) + // If the new scheduled time is in the past the event will repeat as soon as possible + TimeoutAction::ToInstant(event_timestamp + repeat_interval) } }) .unwrap(); From c366627642fdf962d57e63c65f16d62d306beeb3 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Wed, 26 Nov 2025 22:42:03 +0100 Subject: [PATCH 0414/1030] auto-update: Fix auto-update loop with non-nightly channels (#43595) There are 3 factors: 1. The Preview channel endpoint does not propagate versions with build identifier (which we oh-so-conveniently store in pre-release field of semver). 2. Preview build, once fetched, sees it's version *with* build identifier (as that's baked into the binary). 3. Auto update logic treats versions with pre-release version as less than versions without pre-release version. This in turn makes any Preview client see itself as versioned like 0.214.4-123-asdf1234455311, whereas the latest version on the endpoint is 0.214.4. Thus, the endpoint version is always more recent than the client version, causing an update loop. The fix is to ignore build identifier when comparing versions of non-nightly channels. This should still let us introduce changes to auto-update behavior in minor releases in the future. Closes #43584 Release Notes: - (Preview only): Fixed an update loop with latest Preview update. 
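For illustration, here is a minimal sketch of the ordering rule behind factor 3, using the `semver` crate directly (the version strings are made up):

```rust
use semver::{BuildMetadata, Prerelease, Version};

fn main() {
    // A pre-release always orders before the same version without one, so the
    // locally baked-in version never "catches up" to the endpoint's version.
    let installed = Version::parse("0.214.4-123-asdf1234455311").unwrap();
    let fetched = Version::parse("0.214.4").unwrap();
    assert!(fetched > installed); // endless "newer version available"

    // Dropping the pre-release/build fields before comparing breaks the loop.
    let mut normalized = installed.clone();
    normalized.pre = Prerelease::EMPTY;
    normalized.build = BuildMetadata::EMPTY;
    assert!(!(fetched > normalized));
    println!("up to date");
}
```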
--- crates/auto_update/src/auto_update.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 599afcf62d610cfc57a1216f46b1910a88e99bea..1f4d05630653b0dd8038eab4279ae597ec6d2fbe 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -717,9 +717,12 @@ impl AutoUpdater { } fn check_if_fetched_version_is_newer_non_nightly( - installed_version: Version, + mut installed_version: Version, fetched_version: Version, ) -> Result> { + // For non-nightly releases, ignore build and pre-release fields as they're not provided by our endpoints right now. + installed_version.build = semver::BuildMetadata::EMPTY; + installed_version.pre = semver::Prerelease::EMPTY; let should_download = fetched_version > installed_version; let newer_version = should_download.then(|| VersionCheckType::Semantic(fetched_version)); Ok(newer_version) From 958f1098b77ca11a010fae38bd9d5cbfd8a384d5 Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Wed, 26 Nov 2025 23:51:11 +0100 Subject: [PATCH 0415/1030] editor: Consider experimental theme overrides for colorized bracket invalidation (#43602) Release Notes: - Fixed a small issue where bracket colors would not be immediately updated if `experimental_theme_overrides.accents` was changed. --- crates/editor/src/editor.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index cd7a872f8c129c3b67b544ed2ba78d7fde104b48..7cdd587db48de1f03bf54949c3bfbe7870a07073 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -21565,12 +21565,22 @@ impl Editor { return Vec::new(); } - theme::ThemeSettings::get_global(cx) + let theme_settings = theme::ThemeSettings::get_global(cx); + + theme_settings .theme_overrides .get(cx.theme().name.as_ref()) .map(|theme_style| &theme_style.accents) .into_iter() .flatten() + .chain( + theme_settings + .experimental_theme_overrides + .as_ref() + .map(|overrides| &overrides.accents) + .into_iter() + .flatten(), + ) .flat_map(|accent| accent.0.clone()) .collect() } From 54309f4a4823ea1db344e3e48a28fd2687e310bb Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 27 Nov 2025 01:22:13 +0200 Subject: [PATCH 0416/1030] Account for greedy tree-sitter bracket matches (#43607) Current approach is to colorize brackets based on their depth, which was broken for markdown: image Markdown grammar, for bracket queries https://github.com/zed-industries/zed/blob/00e93bfa113a3daed6e4a97a7244ad04d58453ee/crates/languages/src/markdown/brackets.scm#L1-L8 and markdown document `[LLM-powered features](./ai/overview.md), [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys)`, matches first bracket (offset 0) with two different ones: * `[LLM-powered features]` * `[LLM-powered features](./ai/overview.md), [bring and configure your own API keys]` which mix and add different color markers. 
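A standalone sketch of the deduplication idea, simplified to byte offsets (Zed's real implementation operates on the syntax-map matches shown in the diff below, and the exact offsets here are only illustrative):

```rust
use std::collections::HashMap;
use std::ops::Range;

/// For each opening bracket, keep only the close range that yields the
/// shortest overall pair.
fn shortest_pairs(
    matches: &[(Range<usize>, Range<usize>)],
) -> HashMap<Range<usize>, Range<usize>> {
    let mut best: HashMap<Range<usize>, Range<usize>> = HashMap::new();
    for (open, close) in matches {
        best.entry(open.clone())
            .and_modify(|existing| {
                if close.end - open.start < existing.end - open.start {
                    *existing = close.clone();
                }
            })
            .or_insert_with(|| close.clone());
    }
    best
}

fn main() {
    // Both matches share the opening `[` at offset 0; only the shorter
    // `[LLM-powered features]` pair should end up colorized.
    let matches = vec![(0..1, 21..22), (0..1, 80..81)];
    println!("{:?}", shortest_pairs(&matches));
}
```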
Now, in case multiple pairs exist for the same first bracket, Zed will only colorize the shortest one: image Release Notes: - Fixed bracket colorization mixing colors in markdown files --- crates/editor/src/bracket_colorization.rs | 27 +++++++- crates/language/src/buffer.rs | 77 ++++++++++++++++------- 2 files changed, 81 insertions(+), 23 deletions(-) diff --git a/crates/editor/src/bracket_colorization.rs b/crates/editor/src/bracket_colorization.rs index 902ec2b7702b945bb482e4e4700cf37b36ae907b..65d8c139e99437e37d0c18551dd01475ac824bfd 100644 --- a/crates/editor/src/bracket_colorization.rs +++ b/crates/editor/src/bracket_colorization.rs @@ -161,7 +161,7 @@ mod tests { use gpui::{AppContext as _, UpdateGlobal as _}; use indoc::indoc; use itertools::Itertools; - use language::Capability; + use language::{Capability, markdown_lang}; use languages::rust_lang; use multi_buffer::{ExcerptRange, MultiBuffer}; use pretty_assertions::assert_eq; @@ -261,6 +261,31 @@ where ); } + #[gpui::test] + async fn test_markdown_bracket_colorization(cx: &mut gpui::TestAppContext) { + init_test(cx, |language_settings| { + language_settings.defaults.colorize_brackets = Some(true); + }); + let mut cx = EditorLspTestContext::new( + Arc::into_inner(markdown_lang()).unwrap(), + lsp::ServerCapabilities::default(), + cx, + ) + .await; + + cx.set_state(indoc! {r#"ˇ[LLM-powered features](./ai/overview.md), [bring and configure your own API keys](./ai/llm-providers.md#use-your-own-keys)"#}); + cx.executor().advance_clock(Duration::from_millis(100)); + cx.executor().run_until_parked(); + + assert_eq!( + r#"«1[LLM-powered features]1»«1(./ai/overview.md)1», «1[bring and configure your own API keys]1»«1(./ai/llm-providers.md#use-your-own-keys)1» +1 hsla(207.80, 16.20%, 69.19%, 1.00) +"#, + &bracket_colors_markup(&mut cx), + "All markdown brackets should be colored based on their depth" + ); + } + #[gpui::test] async fn test_bracket_colorization_when_editing(cx: &mut gpui::TestAppContext) { init_test(cx, |language_settings| { diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index c599a4751b60f150e31b7ddf6e32a6234a510c74..746992fa7e59650e5af59887630f8c2ee1b39450 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -45,12 +45,12 @@ use std::{ borrow::Cow, cell::Cell, cmp::{self, Ordering, Reverse}, - collections::{BTreeMap, BTreeSet}, + collections::{BTreeMap, BTreeSet, hash_map}, future::Future, iter::{self, Iterator, Peekable}, mem, num::NonZeroU32, - ops::{Deref, Not, Range}, + ops::{Deref, Range}, path::PathBuf, rc, sync::{Arc, LazyLock}, @@ -4236,6 +4236,7 @@ impl BufferSnapshot { let mut new_bracket_matches = HashMap::default(); let mut all_bracket_matches = HashMap::default(); + let mut bracket_matches_to_color = HashMap::default(); for chunk in tree_sitter_data .chunks @@ -4265,7 +4266,7 @@ impl BufferSnapshot { .collect::>(); let chunk_range = chunk_range.clone(); - let new_matches = iter::from_fn(move || { + let tree_sitter_matches = iter::from_fn(|| { while let Some(mat) = matches.peek() { let mut open = None; let mut close = None; @@ -4291,32 +4292,64 @@ impl BufferSnapshot { continue; } + if !pattern.rainbow_exclude { + // Certain tree-sitter grammars may return more bracket pairs than needed: + // see `test_markdown_bracket_colorization` for a set-up that returns pairs with the same start bracket and different end one. + // Pick the pair with the shortest range in case of ambiguity. 
+ match bracket_matches_to_color.entry(open_range.clone()) { + hash_map::Entry::Vacant(v) => { + v.insert(close_range.clone()); + } + hash_map::Entry::Occupied(mut o) => { + let previous_close_range = o.get(); + let previous_length = + previous_close_range.end - open_range.start; + let new_length = close_range.end - open_range.start; + if new_length < previous_length { + o.insert(close_range.clone()); + } + } + } + } return Some((open_range, close_range, pattern, depth)); } None }) .sorted_by_key(|(open_range, _, _, _)| open_range.start) - .map(|(open_range, close_range, pattern, syntax_layer_depth)| { - while let Some(&last_bracket_end) = bracket_pairs_ends.last() { - if last_bracket_end <= open_range.start { - bracket_pairs_ends.pop(); - } else { - break; - } - } + .collect::>(); - let bracket_depth = bracket_pairs_ends.len(); - bracket_pairs_ends.push(close_range.end); + let new_matches = tree_sitter_matches + .into_iter() + .map(|(open_range, close_range, pattern, syntax_layer_depth)| { + let participates_in_coloring = + bracket_matches_to_color.get(&open_range).is_some_and( + |close_range_to_color| close_range_to_color == &close_range, + ); + let color_index = if participates_in_coloring { + while let Some(&last_bracket_end) = bracket_pairs_ends.last() { + if last_bracket_end <= open_range.start { + bracket_pairs_ends.pop(); + } else { + break; + } + } - BracketMatch { - open_range, - close_range, - syntax_layer_depth, - newline_only: pattern.newline_only, - color_index: pattern.rainbow_exclude.not().then_some(bracket_depth), - } - }) - .collect::>(); + let bracket_depth = bracket_pairs_ends.len(); + bracket_pairs_ends.push(close_range.end); + Some(bracket_depth) + } else { + None + }; + + BracketMatch { + open_range, + close_range, + syntax_layer_depth, + newline_only: pattern.newline_only, + color_index, + } + }) + .collect::>(); new_bracket_matches.insert(chunk.id, new_matches.clone()); new_matches From 8c355b5eeeee1cad71a3f584dba95c1ba7f62a7f Mon Sep 17 00:00:00 2001 From: Finn Evers Date: Thu, 27 Nov 2025 00:39:18 +0100 Subject: [PATCH 0417/1030] Improve `extension_bump` workflow (#43612) This extends the extension CI workflow to create a tag once the version is bumped on main. 
Release Notes: - N/A --- .github/workflows/extension_bump.yml | 33 +++++++- .../src/tasks/workflows/extension_bump.rs | 84 ++++++++++++++++--- 2 files changed, 104 insertions(+), 13 deletions(-) diff --git a/.github/workflows/extension_bump.yml b/.github/workflows/extension_bump.yml index 5933ab7fbb2fab753cbda729c82026102e395539..779116ec5b2ea4c766212ea2b61993eb82693992 100644 --- a/.github/workflows/extension_bump.yml +++ b/.github/workflows/extension_bump.yml @@ -59,7 +59,7 @@ jobs: fetch-depth: 10 - id: compare-versions-check name: extension_bump::compare_versions - run: |+ + run: | CURRENT_VERSION="$(sed -n 's/version = \"\(.*\)\"/\1/p' < extension.toml)" git checkout "$(git log -1 --format=%H)"~1 @@ -70,9 +70,11 @@ jobs: echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ echo "needs_bump=false" >> "$GITHUB_OUTPUT" + echo "current_version=${CURRENT_VERSION}" >> "$GITHUB_OUTPUT" shell: bash -euxo pipefail {0} outputs: needs_bump: ${{ steps.compare-versions-check.outputs.needs_bump }} + current_version: ${{ steps.compare-versions-check.outputs.current_version }} timeout-minutes: 1 bump_extension_version: needs: @@ -131,6 +133,35 @@ jobs: token: ${{ steps.generate-token.outputs.token }} sign-commits: true timeout-minutes: 1 + create_version_label: + needs: + - check_extension + - check_bump_needed + if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && needs.check_bump_needed.outputs.needs_bump == 'false' + runs-on: namespace-profile-8x16-ubuntu-2204 + steps: + - id: generate-token + name: extension_bump::generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.app-id }} + private-key: ${{ secrets.app-secret }} + - name: steps::checkout_repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 + with: + clean: false + - name: extension_bump::create_version_tag + uses: actions/github-script@v7 + with: + script: |- + github.rest.git.createRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: 'refs/tags/v${{ needs.check_bump_needed.outputs.current_version }}', + sha: context.sha + }) + github-token: ${{ steps.generate-token.outputs.token }} + timeout-minutes: 1 concurrency: group: ${{ github.workflow }}-${{ github.ref_name }}-${{ github.ref_name == 'main' && github.sha || 'anysha' }} cancel-in-progress: true diff --git a/tooling/xtask/src/tasks/workflows/extension_bump.rs b/tooling/xtask/src/tasks/workflows/extension_bump.rs index 66de1f86aa998269abc24f1de375dbe1800acc31..85e7dbeceed0d05f60f04dbf055553f71228ce54 100644 --- a/tooling/xtask/src/tasks/workflows/extension_bump.rs +++ b/tooling/xtask/src/tasks/workflows/extension_bump.rs @@ -31,11 +31,19 @@ pub(crate) fn extension_bump() -> Workflow { WorkflowSecret::new("app-secret", "The app secret for the corresponding app ID"); let test_extension = extension_tests::check_extension(); - let (check_bump_needed, needs_bump) = check_bump_needed(); - let bump_version = bump_extension_version( - &[&test_extension, &check_bump_needed], - &bump_type, - needs_bump.as_job_output(&check_bump_needed), + let (check_bump_needed, needs_bump, current_version) = check_bump_needed(); + + let needs_bump = needs_bump.as_job_output(&check_bump_needed); + let current_version = current_version.as_job_output(&check_bump_needed); + + let dependencies = [&test_extension, &check_bump_needed]; + + let bump_version = + bump_extension_version(&dependencies, &bump_type, &needs_bump, &app_id, &app_secret); + let create_label = create_version_label( + &dependencies, + 
&needs_bump, + ¤t_version, &app_id, &app_secret, ); @@ -65,24 +73,74 @@ pub(crate) fn extension_bump() -> Workflow { .add_job(test_extension.name, test_extension.job) .add_job(check_bump_needed.name, check_bump_needed.job) .add_job(bump_version.name, bump_version.job) + .add_job(create_label.name, create_label.job) } -fn check_bump_needed() -> (NamedJob, StepOutput) { - let (compare_versions, version_changed) = compare_versions(); +fn check_bump_needed() -> (NamedJob, StepOutput, StepOutput) { + let (compare_versions, version_changed, current_version) = compare_versions(); let job = Job::default() .with_repository_owner_guard() - .outputs([(version_changed.name.to_owned(), version_changed.to_string())]) + .outputs([ + (version_changed.name.to_owned(), version_changed.to_string()), + ( + current_version.name.to_string(), + current_version.to_string(), + ), + ]) .runs_on(runners::LINUX_SMALL) .timeout_minutes(1u32) .add_step(steps::checkout_repo().add_with(("fetch-depth", 10))) .add_step(compare_versions); - (named::job(job), version_changed) + (named::job(job), version_changed, current_version) +} + +fn create_version_label( + dependencies: &[&NamedJob], + needs_bump: &JobOutput, + current_version: &JobOutput, + app_id: &WorkflowSecret, + app_secret: &WorkflowSecret, +) -> NamedJob { + let (generate_token, generated_token) = generate_token(app_id, app_secret); + let job = steps::dependant_job(dependencies) + .cond(Expression::new(format!( + "{DEFAULT_REPOSITORY_OWNER_GUARD} && {} == 'false'", + needs_bump.expr(), + ))) + .runs_on(runners::LINUX_LARGE) + .timeout_minutes(1u32) + .add_step(generate_token) + .add_step(steps::checkout_repo()) + .add_step(create_version_tag(current_version, generated_token)); + + named::job(job) +} + +fn create_version_tag(current_version: &JobOutput, generated_token: StepOutput) -> Step { + named::uses("actions", "github-script", "v7").with( + Input::default() + .add( + "script", + format!( + indoc! {r#" + github.rest.git.createRef({{ + owner: context.repo.owner, + repo: context.repo.repo, + ref: 'refs/tags/v{}', + sha: context.sha + }})"# + }, + current_version + ), + ) + .add("github-token", generated_token.to_string()), + ) } /// Compares the current and previous commit and checks whether versions changed inbetween. -fn compare_versions() -> (Step, StepOutput) { +fn compare_versions() -> (Step, StepOutput, StepOutput) { let check_needs_bump = named::bash(format!( indoc! 
{ r#" @@ -96,6 +154,7 @@ fn compare_versions() -> (Step, StepOutput) { echo "needs_bump=true" >> "$GITHUB_OUTPUT" || \ echo "needs_bump=false" >> "$GITHUB_OUTPUT" + echo "current_version=${{CURRENT_VERSION}}" >> "$GITHUB_OUTPUT" "# }, VERSION_CHECK, VERSION_CHECK @@ -103,14 +162,15 @@ fn compare_versions() -> (Step, StepOutput) { .id("compare-versions-check"); let needs_bump = StepOutput::new(&check_needs_bump, "needs_bump"); + let current_version = StepOutput::new(&check_needs_bump, "current_version"); - (check_needs_bump, needs_bump) + (check_needs_bump, needs_bump, current_version) } fn bump_extension_version( dependencies: &[&NamedJob], bump_type: &WorkflowInput, - needs_bump: JobOutput, + needs_bump: &JobOutput, app_id: &WorkflowSecret, app_secret: &WorkflowSecret, ) -> NamedJob { From 82b768258f966c157ed960d19ae2d4bdce81779f Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 00:55:11 +0100 Subject: [PATCH 0418/1030] remote: Do not include prerelease and build meta in asset queries (#43611) Closes #43580 Release Notes: - (Preview only) Fixed failures to fetch remoting server (needed to run remoting). --- crates/auto_update/src/auto_update.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 1f4d05630653b0dd8038eab4279ae597ec6d2fbe..06e87c53dba2acfffb03cdc75cbbaa7cc2f44a6a 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -510,7 +510,9 @@ impl AutoUpdater { (None, None, None) }; - let version = if let Some(version) = version { + let version = if let Some(mut version) = version { + version.pre = semver::Prerelease::EMPTY; + version.build = semver::BuildMetadata::EMPTY; version.to_string() } else { "latest".to_string() From 91400e74897898eeb5026d5bea5b35666c773c96 Mon Sep 17 00:00:00 2001 From: Kirill Bulatov Date: Thu, 27 Nov 2025 10:37:34 +0200 Subject: [PATCH 0419/1030] Do not color html tags and other "long" "bracket pairs" (#43644) Closes https://github.com/zed-industries/zed/issues/43621 Follow-up of https://github.com/zed-industries/zed/pull/43172 Release Notes: - (Preview only) Fixed html tags incorrectly colorized --- crates/language/src/buffer.rs | 12 ++++++-- crates/language/src/language.rs | 49 ++++++++------------------------- 2 files changed, 20 insertions(+), 41 deletions(-) diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 746992fa7e59650e5af59887630f8c2ee1b39450..66967f9a3357e13485b8228b06874804a8768fac 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -4292,7 +4292,13 @@ impl BufferSnapshot { continue; } - if !pattern.rainbow_exclude { + if !pattern.rainbow_exclude + // Also, certain languages have "brackets" that are not brackets, e.g. tags. and such + // bracket will match the entire tag with all text inside. + // For now, avoid highlighting any pair that has more than single char in each bracket. + // We need to colorize `` bracket pairs, so cannot make this check stricter. + && (open_range.len() == 1 || close_range.len() == 1) + { // Certain tree-sitter grammars may return more bracket pairs than needed: // see `test_markdown_bracket_colorization` for a set-up that returns pairs with the same start bracket and different end one. // Pick the pair with the shortest range in case of ambiguity. 
@@ -4321,11 +4327,11 @@ impl BufferSnapshot { let new_matches = tree_sitter_matches .into_iter() .map(|(open_range, close_range, pattern, syntax_layer_depth)| { - let participates_in_coloring = + let participates_in_colorizing = bracket_matches_to_color.get(&open_range).is_some_and( |close_range_to_color| close_range_to_color == &close_range, ); - let color_index = if participates_in_coloring { + let color_index = if participates_in_colorizing { while let Some(&last_bracket_end) = bracket_pairs_ends.last() { if last_bracket_end <= open_range.start { bracket_pairs_ends.pop(); diff --git a/crates/language/src/language.rs b/crates/language/src/language.rs index 03e563e145c3bd1cde63e62fa8a09a4fb0228f0f..c0a280767395dd83e8a36e554294d7d044b64e2e 100644 --- a/crates/language/src/language.rs +++ b/crates/language/src/language.rs @@ -2643,42 +2643,15 @@ pub fn rust_lang() -> Arc { outline: Some(Cow::from(include_str!( "../../languages/src/rust/outline.scm" ))), - indents: Some(Cow::from( - r#" -[ - ((where_clause) _ @end) - (field_expression) - (call_expression) - (assignment_expression) - (let_declaration) - (let_chain) - (await_expression) -] @indent - -(_ "[" "]" @end) @indent -(_ "<" ">" @end) @indent -(_ "{" "}" @end) @indent -(_ "(" ")" @end) @indent"#, - )), - brackets: Some(Cow::from( - r#" -("(" @open ")" @close) -("[" @open "]" @close) -("{" @open "}" @close) -("<" @open ">" @close) -(closure_parameters "|" @open "|" @close) -(("\"" @open "\"" @close) (#set! rainbow.exclude)) -(("'" @open "'" @close) (#set! rainbow.exclude))"#, - )), - text_objects: Some(Cow::from( - r#" -(function_item - body: (_ - "{" - (_)* @function.inside - "}" )) @function.around - "#, - )), + indents: Some(Cow::from(include_str!( + "../../languages/src/rust/indents.scm" + ))), + brackets: Some(Cow::from(include_str!( + "../../languages/src/rust/brackets.scm" + ))), + text_objects: Some(Cow::from(include_str!( + "../../languages/src/rust/textobjects.scm" + ))), ..LanguageQueries::default() }) .expect("Could not parse queries"); @@ -2697,7 +2670,7 @@ pub fn markdown_lang() -> Arc { path_suffixes: vec!["md".into()], ..Default::default() }, - ..Default::default() + ..LanguageConfig::default() }, Some(tree_sitter_md::LANGUAGE.into()), ) @@ -2708,7 +2681,7 @@ pub fn markdown_lang() -> Arc { injections: Some(Cow::from(include_str!( "../../languages/src/markdown/injections.scm" ))), - ..Default::default() + ..LanguageQueries::default() }) .expect("Could not parse markdown queries"); Arc::new(language) From 99d7b2fa1de8c9d5b6d33db0d4c90ba1571e2f8e Mon Sep 17 00:00:00 2001 From: Oleksiy Syvokon Date: Thu, 27 Nov 2025 11:10:35 +0200 Subject: [PATCH 0420/1030] zeta2: Compute diff-aware chrF metric (#43485) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zeta evals now include a character n-gram metric adapted for multi-edit diffs (“delta chrF”). It works as follows: 1. Reconstruct the original, golden (expected), and actual texts from unified diffs. - "original": the text before any edits - "golden": the text after applying the expected edits - "actual": the text after applying the actual edits 2. Compute n-gram count deltas between original→golden and original→actual. - n-grams are computed as in chrF (max n=6, whitespace ignored). 3. Compare these deltas to assess how well the actual edits match the expected edits. - As in standard chrF, classify n-grams as true positives, false positives, and false negatives, and report the F-beta score with beta=2. 
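For reference, step 3 combines precision and recall with the usual F-beta formula, where beta=2 weights recall twice as much as precision; a tiny worked example with made-up counts:

```rust
fn main() {
    // Made-up n-gram counts for a single n-gram order.
    let (tp, fp, fn_) = (6.0_f64, 2.0_f64, 4.0_f64);
    let precision = tp / (tp + fp); // 0.75
    let recall = tp / (tp + fn_); // 0.60
    let beta = 2.0_f64;
    let f_beta = (1.0 + beta * beta) * precision * recall / (beta * beta * precision + recall);
    // F2 = 0.625 here (F1 would be ~0.667): low recall hurts more with beta = 2.
    println!("P={precision:.2} R={recall:.2} F2={f_beta:.3}");
}
```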
Release Notes: - N/A --- crates/language/src/buffer.rs | 4 +- crates/zeta_cli/src/evaluate.rs | 203 +++++++---------- crates/zeta_cli/src/main.rs | 1 + crates/zeta_cli/src/metrics.rs | 380 ++++++++++++++++++++++++++++++++ 4 files changed, 464 insertions(+), 124 deletions(-) create mode 100644 crates/zeta_cli/src/metrics.rs diff --git a/crates/language/src/buffer.rs b/crates/language/src/buffer.rs index 66967f9a3357e13485b8228b06874804a8768fac..7d713d515b2ae9584bc922d08d5811155f83d3a8 100644 --- a/crates/language/src/buffer.rs +++ b/crates/language/src/buffer.rs @@ -758,8 +758,8 @@ impl EditPreview { .to_point(&self.applied_edits_snapshot); let start = Point::new(start.row.saturating_sub(3), 0); - let old_end = Point::new(old_end.row + 3, 0).min(self.old_snapshot.max_point()); - let new_end = Point::new(new_end.row + 3, 0).min(self.applied_edits_snapshot.max_point()); + let old_end = Point::new(old_end.row + 4, 0).min(self.old_snapshot.max_point()); + let new_end = Point::new(new_end.row + 4, 0).min(self.applied_edits_snapshot.max_point()); Some(unified_diff( &self diff --git a/crates/zeta_cli/src/evaluate.rs b/crates/zeta_cli/src/evaluate.rs index 6726dcb3aafdeff7fe41cbbbc49850c1e7465cf4..043844768557ad46f61d5fd0d809e1e85c62574f 100644 --- a/crates/zeta_cli/src/evaluate.rs +++ b/crates/zeta_cli/src/evaluate.rs @@ -1,3 +1,4 @@ +use crate::metrics::{self, Scores}; use std::{ collections::HashMap, io::{IsTerminal, Write}, @@ -5,7 +6,6 @@ use std::{ }; use anyhow::Result; -use collections::HashSet; use gpui::{AsyncApp, Entity}; use project::Project; use util::ResultExt as _; @@ -119,13 +119,14 @@ fn write_aggregated_scores( } if successful.len() > 1 { - let mut edit_predictions = successful + let edit_scores = successful .iter() - .filter_map(|r| r.edit_prediction.as_ref()) - .peekable(); - let has_edit_predictions = edit_predictions.peek().is_some(); + .filter_map(|r| r.edit_scores.clone()) + .collect::>(); + let has_edit_predictions = edit_scores.len() > 0; let aggregated_result = EvaluationResult { - edit_prediction: has_edit_predictions.then(|| Scores::aggregate(edit_predictions)), + context_scores: Scores::aggregate(successful.iter().map(|r| &r.context_scores)), + edit_scores: has_edit_predictions.then(|| EditScores::aggregate(&edit_scores)), prompt_len: successful.iter().map(|r| r.prompt_len).sum::() / successful.len(), generated_len: successful.iter().map(|r| r.generated_len).sum::() / successful.len(), @@ -247,94 +248,27 @@ fn write_eval_result( anyhow::Ok(()) } -#[derive(Debug, Default)] -pub struct EvaluationResult { - pub edit_prediction: Option, - pub prompt_len: usize, - pub generated_len: usize, -} - -#[derive(Default, Debug)] -pub struct Scores { - pub true_positives: usize, - pub false_positives: usize, - pub false_negatives: usize, +#[derive(Debug, Default, Clone)] +pub struct EditScores { + pub line_match: Scores, + pub chr_f: f64, } -impl Scores { - pub fn new(expected: &HashSet, actual: &HashSet) -> Scores { - let true_positives = expected.intersection(actual).count(); - let false_positives = actual.difference(expected).count(); - let false_negatives = expected.difference(actual).count(); - - Scores { - true_positives, - false_positives, - false_negatives, - } - } - - pub fn to_markdown(&self) -> String { - format!( - " -Precision : {:.4} -Recall : {:.4} -F1 Score : {:.4} -True Positives : {} -False Positives : {} -False Negatives : {}", - self.precision(), - self.recall(), - self.f1_score(), - self.true_positives, - self.false_positives, - self.false_negatives - ) - } - - 
pub fn aggregate<'a>(scores: impl Iterator) -> Scores { - let mut true_positives = 0; - let mut false_positives = 0; - let mut false_negatives = 0; - - for score in scores { - true_positives += score.true_positives; - false_positives += score.false_positives; - false_negatives += score.false_negatives; - } - - Scores { - true_positives, - false_positives, - false_negatives, - } - } +impl EditScores { + pub fn aggregate(scores: &[EditScores]) -> EditScores { + let line_match = Scores::aggregate(scores.iter().map(|s| &s.line_match)); + let chr_f = scores.iter().map(|s| s.chr_f).sum::() / scores.len() as f64; - pub fn precision(&self) -> f64 { - if self.true_positives + self.false_positives == 0 { - 0.0 - } else { - self.true_positives as f64 / (self.true_positives + self.false_positives) as f64 - } - } - - pub fn recall(&self) -> f64 { - if self.true_positives + self.false_negatives == 0 { - 0.0 - } else { - self.true_positives as f64 / (self.true_positives + self.false_negatives) as f64 - } + EditScores { line_match, chr_f } } +} - pub fn f1_score(&self) -> f64 { - let recall = self.recall(); - let precision = self.precision(); - if precision + recall == 0.0 { - 0.0 - } else { - 2.0 * precision * recall / (precision + recall) - } - } +#[derive(Debug, Default)] +pub struct EvaluationResult { + pub edit_scores: Option, + pub context_scores: Scores, + pub prompt_len: usize, + pub generated_len: usize, } impl std::fmt::Display for EvaluationResult { @@ -349,40 +283,74 @@ impl std::fmt::Display for EvaluationResult { impl EvaluationResult { fn fmt_markdown(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - if let Some(prediction) = &self.edit_prediction { + write!( + f, + r#" +### Context Scores +{} +"#, + self.context_scores.to_markdown(), + )?; + if let Some(scores) = &self.edit_scores { write!( f, r#" ### Edit Prediction Scores {}"#, - prediction.to_markdown() + scores.line_match.to_markdown() )?; } Ok(()) } fn fmt_table(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "### Scores\n")?; + writeln!(f, "#### Prompt Statistics")?; + writeln!(f, "─────────────────────────")?; + writeln!(f, "Prompt_len Generated_len")?; + writeln!(f, "─────────────────────────")?; + writeln!(f, "{:<11} {:<14}", self.prompt_len, self.generated_len,)?; + writeln!(f)?; + writeln!(f)?; + writeln!(f, "#### Performance Scores")?; writeln!( f, - " Prompt Generated TP FP FN Precision Recall F1" + "──────────────────────────────────────────────────────────────────" )?; writeln!( f, - "───────────────────────────────────────────────────────────────────────────────────────────────" + " TP FP FN Precision Recall F1" )?; - if let Some(edit_prediction) = &self.edit_prediction { + writeln!( + f, + "──────────────────────────────────────────────────────────────────" + )?; + writeln!( + f, + "Context Retrieval {:<6} {:<6} {:<6} {:>8.2} {:>7.2} {:>6.2}", + self.context_scores.true_positives, + self.context_scores.false_positives, + self.context_scores.false_negatives, + self.context_scores.precision() * 100.0, + self.context_scores.recall() * 100.0, + self.context_scores.f1_score() * 100.0 + )?; + if let Some(edit_scores) = &self.edit_scores { + let line_match = &edit_scores.line_match; + writeln!(f, "Edit Prediction")?; writeln!( f, - "Edit Prediction {:<7} {:<9} {:<6} {:<6} {:<6} {:>9.2} {:>8.2} {:>7.2}", - self.prompt_len, - self.generated_len, - edit_prediction.true_positives, - edit_prediction.false_positives, - edit_prediction.false_negatives, - edit_prediction.precision() * 100.0, - 
edit_prediction.recall() * 100.0, - edit_prediction.f1_score() * 100.0 + " ├─ exact lines {:<6} {:<6} {:<6} {:>8.2} {:>7.2} {:>6.2}", + line_match.true_positives, + line_match.false_positives, + line_match.false_negatives, + line_match.precision() * 100.0, + line_match.recall() * 100.0, + line_match.f1_score() * 100.0 + )?; + writeln!( + f, + " └─ diff chrF {:<6} {:<6} {:<6} {:>8} {:>8} {:>6.2}", + "-", "-", "-", "-", "-", edit_scores.chr_f )?; } Ok(()) @@ -403,21 +371,12 @@ fn evaluate(example: &Example, preds: &PredictionDetails, predict: bool) -> Eval .lines() .map(DiffLine::parse) .collect::>(); - let expected_patch_lines = expected_patch - .iter() - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); + let actual_patch = preds.diff.lines().map(DiffLine::parse).collect::>(); - let actual_patch_lines = preds - .diff - .lines() - .map(DiffLine::parse) - .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) - .map(|line| line.to_string()) - .collect(); + let line_match = metrics::line_match_score(&expected_patch, &actual_patch); + let chr_f = metrics::delta_chr_f(&expected_patch, &actual_patch); - eval_result.edit_prediction = Some(Scores::new(&expected_patch_lines, &actual_patch_lines)); + eval_result.edit_scores = Some(EditScores { line_match, chr_f }); } eval_result @@ -500,12 +459,12 @@ fn write_bucketed_analysis( diff: execution_data.diff.clone(), is_correct: { evaluation_result - .edit_prediction + .edit_scores .as_ref() - .map_or(false, |edit_prediction| { - edit_prediction.false_positives == 0 - && edit_prediction.false_negatives == 0 - && edit_prediction.true_positives > 0 + .map_or(false, |edit_scores| { + edit_scores.line_match.false_positives == 0 + && edit_scores.line_match.false_negatives == 0 + && edit_scores.line_match.true_positives > 0 }) }, execution_indices: vec![execution_data.execution_id.clone()], diff --git a/crates/zeta_cli/src/main.rs b/crates/zeta_cli/src/main.rs index 2d5a23e31f463455871494d123a4988b41b5bd66..d72a0f5cf7cf00166a2bbaa60c1700d1007fc8af 100644 --- a/crates/zeta_cli/src/main.rs +++ b/crates/zeta_cli/src/main.rs @@ -1,6 +1,7 @@ mod evaluate; mod example; mod headless; +mod metrics; mod paths; mod predict; mod source_location; diff --git a/crates/zeta_cli/src/metrics.rs b/crates/zeta_cli/src/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..dd08459678eef6d04a6b656d19a4572d51a5b5c1 --- /dev/null +++ b/crates/zeta_cli/src/metrics.rs @@ -0,0 +1,380 @@ +use collections::{HashMap, HashSet}; +use zeta::udiff::DiffLine; + +type Counts = HashMap; +type CountsDelta = HashMap; + +#[derive(Default, Debug, Clone)] +pub struct Scores { + pub true_positives: usize, + pub false_positives: usize, + pub false_negatives: usize, +} + +impl Scores { + pub fn from_sets(expected: &HashSet, actual: &HashSet) -> Scores { + let true_positives = expected.intersection(actual).count(); + let false_positives = actual.difference(expected).count(); + let false_negatives = expected.difference(actual).count(); + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn from_counts(expected: &Counts, actual: &Counts) -> Scores { + let mut true_positives = 0; + let mut false_positives = 0; + let mut false_negatives = 0; + + for (ngram, &expected_count) in expected { + let actual_count = *actual.get(ngram).unwrap_or(&0); + if actual_count > expected_count { + false_positives += actual_count - expected_count; + } else { + 
false_negatives += expected_count - actual_count; + } + true_positives += expected_count.min(actual_count); + } + + for (ngram, &actual_count) in actual { + if !expected.contains_key(ngram) { + false_positives += actual_count; + } + } + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn to_markdown(&self) -> String { + format!( + " +Precision : {:.4} +Recall : {:.4} +F1 Score : {:.4} +True Positives : {} +False Positives : {} +False Negatives : {}", + self.precision(), + self.recall(), + self.f1_score(), + self.true_positives, + self.false_positives, + self.false_negatives + ) + } + + pub fn aggregate<'a>(scores: impl Iterator) -> Scores { + let mut true_positives = 0; + let mut false_positives = 0; + let mut false_negatives = 0; + + for score in scores { + true_positives += score.true_positives; + false_positives += score.false_positives; + false_negatives += score.false_negatives; + } + + Scores { + true_positives, + false_positives, + false_negatives, + } + } + + pub fn precision(&self) -> f64 { + if self.true_positives + self.false_positives == 0 { + 0.0 + } else { + self.true_positives as f64 / (self.true_positives + self.false_positives) as f64 + } + } + + pub fn recall(&self) -> f64 { + if self.true_positives + self.false_negatives == 0 { + 0.0 + } else { + self.true_positives as f64 / (self.true_positives + self.false_negatives) as f64 + } + } + + pub fn f1_score(&self) -> f64 { + let recall = self.recall(); + let precision = self.precision(); + if precision + recall == 0.0 { + 0.0 + } else { + 2.0 * precision * recall / (precision + recall) + } + } +} + +pub fn line_match_score(expected_patch: &[DiffLine], actual_patch: &[DiffLine]) -> Scores { + let expected_change_lines = expected_patch + .iter() + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + let actual_change_lines = actual_patch + .iter() + .filter(|line| matches!(line, DiffLine::Addition(_) | DiffLine::Deletion(_))) + .map(|line| line.to_string()) + .collect(); + + Scores::from_sets(&expected_change_lines, &actual_change_lines) +} + +enum ChrfWhitespace { + #[allow(unused)] + Unchanged, + Ignore, +} + +const CHR_F_CHAR_ORDER: usize = 6; +const CHR_F_BETA: f64 = 2.0; +const CHR_F_WHITESPACE: ChrfWhitespace = ChrfWhitespace::Ignore; + +/// Computes a delta-chrF score that compares two sets of edits. +/// +/// This metric works by: +/// 1. Reconstructing original, golden (expected result), and actual texts from diffs +/// 2. Computing n-gram count differences (deltas) between original→golden and original→actual +/// 3. 
Comparing these deltas to measure how well actual edits match expected edits +pub fn delta_chr_f(expected: &[DiffLine], actual: &[DiffLine]) -> f64 { + // Reconstruct texts from diffs + let mut original_text = String::new(); // state of the text before any edits + let mut golden_text = String::new(); // text after applying golden edits + let mut actual_text = String::new(); // text after applying actual edits + + for line in expected { + match line { + DiffLine::Context(s) => { + original_text.push_str(s); + golden_text.push_str(s); + } + DiffLine::Deletion(s) => { + original_text.push_str(s); + } + DiffLine::Addition(s) => { + golden_text.push_str(s); + } + _ => {} + } + } + + for line in actual { + match line { + DiffLine::Context(s) | DiffLine::Addition(s) => { + actual_text.push_str(s); + } + _ => {} + } + } + + // Edge case + if original_text == golden_text && golden_text == actual_text { + return 100.0; + } + + // Compute the metric + let original_ngrams = chr_f_ngram_counts(&original_text); + let golden_ngrams = chr_f_ngram_counts(&golden_text); + let actual_ngrams = chr_f_ngram_counts(&actual_text); + + let mut total_precision = 0.0; + let mut total_recall = 0.0; + + for order in 0..CHR_F_CHAR_ORDER { + let expected_delta = compute_ngram_delta(&golden_ngrams[order], &original_ngrams[order]); + let actual_delta = compute_ngram_delta(&actual_ngrams[order], &original_ngrams[order]); + + if expected_delta.is_empty() && actual_delta.is_empty() { + total_precision += 1.0; + total_recall += 1.0; + continue; + } + + let expected_counts = ngram_delta_to_counts(&expected_delta); + let actual_counts = ngram_delta_to_counts(&actual_delta); + + let score = Scores::from_counts(&expected_counts, &actual_counts); + total_precision += score.precision(); + total_recall += score.recall(); + } + + let prec = total_precision / CHR_F_CHAR_ORDER as f64; + let recall = total_recall / CHR_F_CHAR_ORDER as f64; + let f_score = if prec + recall == 0.0 { + 0.0 + } else { + (1.0 + CHR_F_BETA * CHR_F_BETA) * prec * recall / (CHR_F_BETA * CHR_F_BETA * prec + recall) + }; + + f_score * 100.0 +} + +fn chr_f_ngram_counts(text: &str) -> Vec { + // Ignore whitespace. The original chrF implementation skips all + // whitespace. We should consider compressing multiple consecutive + // spaces into one -- this may reflect our task more closely. + let text = match CHR_F_WHITESPACE { + ChrfWhitespace::Unchanged => text.to_string(), + ChrfWhitespace::Ignore => text + .chars() + .filter(|c| !c.is_whitespace()) + .collect::(), + }; + + (1..=CHR_F_CHAR_ORDER) + .map(|order| count_ngrams(&text, order)) + .collect() +} + +fn compute_ngram_delta(after: &Counts, before: &Counts) -> CountsDelta { + let mut delta = CountsDelta::default(); + + for (ngram, &before_count) in before { + let after_count = *after.get(ngram).unwrap_or(&0); + delta.insert(ngram.clone(), after_count as isize - before_count as isize); + } + + for (ngram, &after_count) in after { + if !before.contains_key(ngram) { + delta.insert(ngram.clone(), after_count as isize); + } + } + + delta +} + +/// Convert negative counts to special deletion tokens. +/// For example, if expected delta is {"foo": -1} and actual delta is {"bar": -1}, +/// we convert it to {"¬foo": +1} and {"¬bar": +1}. This way _not_ deleting "foo" +/// will result in a false negative, and mistakenly deleting "bar" will result in a false positive. 
+fn ngram_delta_to_counts(delta: &CountsDelta) -> Counts { + let mut counts = Counts::default(); + + for (ngram, &delta) in delta { + if delta > 0 { + counts.insert(ngram.clone(), delta as usize); + } else { + counts.insert(format!("¬{ngram}"), delta.unsigned_abs()); + } + } + + counts +} + +fn count_ngrams(text: &str, n: usize) -> Counts { + let chars: Vec = text.chars().collect(); + let mut counts = Counts::default(); + + for window in chars.windows(n) { + let ngram: String = window.iter().collect(); + *counts.entry(ngram).or_insert(0) += 1; + } + + counts +} + +#[cfg(test)] +mod test { + use super::*; + use zeta::udiff::DiffLine; + + #[test] + fn test_delta_chr_f_perfect_match() { + let diff = vec![ + DiffLine::Context("fn main() {"), + DiffLine::Deletion(" println!(\"Hello\");"), + DiffLine::Addition(" println!(\"Hello, World!\");"), + DiffLine::Context("}"), + ]; + + let score = delta_chr_f(&diff, &diff); + assert!((score - 100.0).abs() < 1e-2); + } + + #[test] + fn test_delta_chr_f_wrong_edit() { + // When the edit is wrong + let expected = vec![ + DiffLine::Context("one "), + DiffLine::Deletion("two "), + DiffLine::Context("three"), + ]; + + let actual = vec![ + DiffLine::Context("one "), + DiffLine::Context("two "), + DiffLine::Deletion("three"), + DiffLine::Addition("four"), + ]; + + // Then the score should be low + let score = delta_chr_f(&expected, &actual); + assert!(score > 20.0 && score < 40.0); + } + + #[test] + fn test_delta_chr_f_partial_match() { + let expected = vec![ + DiffLine::Deletion("let x = 42;"), + DiffLine::Addition("let x = 100;"), + ]; + + let actual = vec![ + DiffLine::Deletion("let x = 42;"), + DiffLine::Addition("let x = 99;"), + ]; + + // We got the edit location right, but the replacement text is wrong. + // Deleted ngrams will match, bringing the score somewhere in the middle. + let score = delta_chr_f(&expected, &actual); + assert!(score > 40.0 && score < 60.0); + } + + #[test] + fn test_delta_chr_f_missed_edit() { + // When predictions makes no changes + let expected = vec![ + DiffLine::Context("prefix "), + DiffLine::Deletion("old"), + DiffLine::Addition("new"), + DiffLine::Context(" suffix"), + ]; + + let actual = vec![ + DiffLine::Context("prefix "), + DiffLine::Context("old"), + DiffLine::Context(" suffix"), + ]; + + // Then the score should be low (all expected changes are false negatives) + let score = delta_chr_f(&expected, &actual); + assert!(score < 20.0); + } + + #[test] + fn test_delta_chr_f_extra_edit() { + // When adding unexpected content + let expected = vec![DiffLine::Context("hello"), DiffLine::Context("world")]; + + let actual = vec![ + DiffLine::Context("hello"), + DiffLine::Addition("extra"), + DiffLine::Context("world"), + ]; + + // Then the score should be low (all actual changes are false positives) + let score = delta_chr_f(&expected, &actual); + assert!(score < 20.0); + } +} From 8e04706c4d2b5ef454e1c1ffb9d5aac72118b55d Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 10:29:27 +0100 Subject: [PATCH 0421/1030] editor: Fix panic in wrap_map (#43650) Fixes ZED-3P9 We only clamped the end which for a completely wrong input could cause us to construct a reversed range which will end up underflowing later on. Release Notes: - N/A *or* Added/Fixed/Improved ... 
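A minimal illustration of the failure mode, with plain `usize` offsets standing in for tab points (not Zed's actual types):

```rust
fn main() {
    let max_point = 5_usize; // end of the buffer
    let (start, end) = (10_usize, 12_usize); // bogus input past the end

    // Clamping only the end produces a reversed range; taking its length
    // (end - start) would underflow and panic in debug builds.
    let end_only = start..end.min(max_point); // 10..5
    // Clamping both sides keeps the range well-formed.
    let both = start.min(max_point)..end.min(max_point); // 5..5
    println!("{end_only:?} vs {both:?}");
}
```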
--- crates/editor/src/display_map/wrap_map.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crates/editor/src/display_map/wrap_map.rs b/crates/editor/src/display_map/wrap_map.rs index 6d9704b5f93c0ce48d413babdd59997b02f093e6..c33bf141b94866744c607e3410f051ae8b6e1fce 100644 --- a/crates/editor/src/display_map/wrap_map.rs +++ b/crates/editor/src/display_map/wrap_map.rs @@ -622,9 +622,10 @@ impl WrapSnapshot { if transforms.item().is_some_and(|t| t.is_isomorphic()) { input_start.0 += output_start.0 - transforms.start().0.0; } - let input_end = self - .to_tab_point(output_end) - .min(self.tab_snapshot.max_point()); + let input_end = self.to_tab_point(output_end); + let max_point = self.tab_snapshot.max_point(); + let input_start = input_start.min(max_point); + let input_end = input_end.min(max_point); WrapChunks { input_chunks: self.tab_snapshot.chunks( input_start..input_end, @@ -921,10 +922,10 @@ impl WrapChunks<'_> { if self.transforms.item().is_some_and(|t| t.is_isomorphic()) { input_start.0 += output_start.0 - self.transforms.start().0.0; } - let input_end = self - .snapshot - .to_tab_point(output_end) - .min(self.snapshot.tab_snapshot.max_point()); + let input_end = self.snapshot.to_tab_point(output_end); + let max_point = self.snapshot.tab_snapshot.max_point(); + let input_start = input_start.min(max_point); + let input_end = input_end.min(max_point); self.input_chunks.seek(input_start..input_end); self.input_chunk = Chunk::default(); self.output_position = output_start; From ab96155d6a968e73f1368a5af45888e70796aab2 Mon Sep 17 00:00:00 2001 From: Dino Date: Thu, 27 Nov 2025 11:00:38 +0000 Subject: [PATCH 0422/1030] buffer_search: Fix replace buttons not working if search bar is not focused (#43569) Update the way that both `search::buffer_search::BufferSearchBar.replace_next` and `search::buffer_search::BufferSearchBar.replace_all` are registered as listeners, so that we don't require the replacement editor to be focused in order for these listeners to be active, only requiring the replacement mode to be active in the buffer search bar. This means that, even if the user is focused on the buffer editor, if the "Replace Next Match" or "Replace All Matches" buttons are clicked, the replacement will be performed. 
Closes #42471 Release Notes: - Fixed issue with buffer search bar where the replacement buttons ("Replace Next Match" & "Replace All Matches") wouldn't work if search bar was not focused --- crates/search/src/buffer_search.rs | 52 +++++++++++++++++++++++++++--- crates/search/src/search_bar.rs | 2 +- 2 files changed, 49 insertions(+), 5 deletions(-) diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index 0b45455faea1c6cd4474ac630d725ee57e1021f4..d17efa635074f7898ab3ea829f3418e2ddd09934 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -432,10 +432,8 @@ impl Render for BufferSearchBar { })) .when(replacement, |this| { this.on_action(cx.listener(Self::toggle_replace)) - .when(in_replace, |this| { - this.on_action(cx.listener(Self::replace_next)) - .on_action(cx.listener(Self::replace_all)) - }) + .on_action(cx.listener(Self::replace_next)) + .on_action(cx.listener(Self::replace_all)) }) .when(case, |this| { this.on_action(cx.listener(Self::toggle_case_sensitive)) @@ -2549,6 +2547,52 @@ mod tests { ); } + #[gpui::test] + async fn test_replace_focus(cx: &mut TestAppContext) { + let (editor, search_bar, cx) = init_test(cx); + + editor.update_in(cx, |editor, window, cx| { + editor.set_text("What a bad day!", window, cx) + }); + + search_bar + .update_in(cx, |search_bar, window, cx| { + search_bar.search("bad", None, true, window, cx) + }) + .await + .unwrap(); + + // Calling `toggle_replace` in the search bar ensures that the "Replace + // *" buttons are rendered, so we can then simulate clicking the + // buttons. + search_bar.update_in(cx, |search_bar, window, cx| { + search_bar.toggle_replace(&ToggleReplace, window, cx) + }); + + search_bar.update_in(cx, |search_bar, window, cx| { + search_bar.replacement_editor.update(cx, |editor, cx| { + editor.set_text("great", window, cx); + }); + }); + + // Focus on the editor instead of the search bar, as we want to ensure + // that pressing the "Replace Next Match" button will work, even if the + // search bar is not focused. + cx.focus(&editor); + + // We'll not simulate clicking the "Replace Next Match " button, asserting that + // the replacement was done. + let button_bounds = cx + .debug_bounds("ICON-ReplaceNext") + .expect("'Replace Next Match' button should be visible"); + cx.simulate_click(button_bounds.center(), gpui::Modifiers::none()); + + assert_eq!( + editor.read_with(cx, |editor, cx| editor.text(cx)), + "What a great day!" 
+ ); + } + struct ReplacementTestParams<'a> { editor: &'a Entity, search_bar: &'a Entity, diff --git a/crates/search/src/search_bar.rs b/crates/search/src/search_bar.rs index 61fa46ed9770fbaf49b43979d366655c1b658fc3..13b4df9574aa6b2568dd6db25c6b63551d9b6d03 100644 --- a/crates/search/src/search_bar.rs +++ b/crates/search/src/search_bar.rs @@ -29,7 +29,7 @@ pub(super) fn render_action_button( if !focus_handle.is_focused(window) { window.focus(&focus_handle); } - window.dispatch_action(action.boxed_clone(), cx) + window.dispatch_action(action.boxed_clone(), cx); } }) .tooltip(move |_window, cx| Tooltip::for_action_in(tooltip, action, &focus_handle, cx)) From bbdbfe3430e3c833459defa65fcabf73991bf219 Mon Sep 17 00:00:00 2001 From: Lena <241371603+zelenenka@users.noreply.github.com> Date: Thu, 27 Nov 2025 12:15:57 +0100 Subject: [PATCH 0423/1030] Auto-label new bugs/crashes with 'needs triage' (#43658) Release Notes: - N/A --- .github/ISSUE_TEMPLATE/1.bug-report.yml | 1 + .github/ISSUE_TEMPLATE/2.crash-report.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/1.bug-report.yml b/.github/ISSUE_TEMPLATE/1.bug-report.yml index 1fbb81af8e5e6bd8ebc8582c3528f5b88929f041..3fcf81113093be06f9477a0c89af979444fd016d 100644 --- a/.github/ISSUE_TEMPLATE/1.bug-report.yml +++ b/.github/ISSUE_TEMPLATE/1.bug-report.yml @@ -1,6 +1,7 @@ name: Report an issue description: Report an issue with Zed. type: Bug +labels: "state:needs triage" body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/2.crash-report.yml b/.github/ISSUE_TEMPLATE/2.crash-report.yml index 47cedcc3100854060c2cbde2147b754d89afef51..1613e59a4b120fe2cf8a2d79bfea2104cb95d855 100644 --- a/.github/ISSUE_TEMPLATE/2.crash-report.yml +++ b/.github/ISSUE_TEMPLATE/2.crash-report.yml @@ -1,6 +1,7 @@ name: Report a crash description: Zed is crashing or freezing or hanging. type: Crash +labels: "state:needs triage" body: - type: textarea attributes: From 007d648f5e9a3cc2df5964435b89d854955c077e Mon Sep 17 00:00:00 2001 From: Ben Brandt Date: Thu, 27 Nov 2025 14:01:41 +0100 Subject: [PATCH 0424/1030] acp: Add a timeout when initializing an ACP agent so the user isn't waiting forever (#43663) Sometimes we are unable to receive messages at all from an agent. This puts on upper bound on the `initialize` call so we can at least give a message to the user that something is wrong here. 30s might feel like too long, but I wanted to avoid some false positives in case there was something an agent needed to do at startup. This will still communicate to the user at some point that something is wrong, rather than leave them waiting forever with no signal that something is going wrong. Release Notes: - agent: Show an error message to the user if we are unable to initialize an ACP agent in a reasonable amount of time. 
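The mechanism is just racing two futures and taking whichever resolves first; a standalone sketch with `smol` (durations shortened so it finishes quickly, and the fake handshake stands in for the real `initialize` call):

```rust
use std::time::Duration;

fn main() {
    let outcome: Result<&str, String> = smol::block_on(smol::future::or(
        async {
            // Stand-in for the agent's `initialize` handshake.
            smol::Timer::after(Duration::from_millis(100)).await;
            Ok("connected")
        },
        async {
            // Deadline branch: if this wins the race, surface an error instead.
            smol::Timer::after(Duration::from_secs(3)).await;
            Err("agent is unable to initialize after 3 seconds".to_string())
        },
    ));
    println!("{outcome:?}");
}
```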
--- crates/agent_ui/src/acp/thread_view.rs | 62 +++++++++++++++++++++++++- 1 file changed, 61 insertions(+), 1 deletion(-) diff --git a/crates/agent_ui/src/acp/thread_view.rs b/crates/agent_ui/src/acp/thread_view.rs index a2929ad23ba8558b61abbf1d25ffe3843a918c2e..ab662889abebb53d8ce3ef51f6cf8802b3b8eb46 100644 --- a/crates/agent_ui/src/acp/thread_view.rs +++ b/crates/agent_ui/src/acp/thread_view.rs @@ -498,7 +498,17 @@ impl AcpThreadView { Some(new_version_available_tx), ); - let connect_task = agent.connect(root_dir.as_deref(), delegate, cx); + let agent_name = agent.name(); + let timeout = cx.background_executor().timer(Duration::from_secs(30)); + let connect_task = smol::future::or( + agent.connect(root_dir.as_deref(), delegate, cx), + async move { + timeout.await; + Err(anyhow::Error::new(LoadError::Other( + format!("{agent_name} is unable to initialize after 30 seconds.").into(), + ))) + }, + ); let load_task = cx.spawn_in(window, async move |this, cx| { let connection = match connect_task.await { Ok((connection, login)) => { @@ -7358,4 +7368,54 @@ pub(crate) mod tests { assert_eq!(text, expected_txt); }) } + + #[gpui::test] + async fn test_initialize_timeout(cx: &mut TestAppContext) { + init_test(cx); + + struct InfiniteInitialize; + + impl AgentServer for InfiniteInitialize { + fn telemetry_id(&self) -> &'static str { + "test" + } + + fn logo(&self) -> ui::IconName { + ui::IconName::Ai + } + + fn name(&self) -> SharedString { + "Test".into() + } + + fn connect( + &self, + _root_dir: Option<&Path>, + _delegate: AgentServerDelegate, + cx: &mut App, + ) -> Task, Option)>> + { + cx.spawn(async |_| futures::future::pending().await) + } + + fn into_any(self: Rc) -> Rc { + self + } + } + + let (thread_view, cx) = setup_thread_view(InfiniteInitialize, cx).await; + + cx.executor().advance_clock(Duration::from_secs(31)); + cx.run_until_parked(); + + let error = thread_view.read_with(cx, |thread_view, _| match &thread_view.thread_state { + ThreadState::LoadError(err) => err.clone(), + _ => panic!("Incorrect thread state"), + }); + + match error { + LoadError::Other(str) => assert!(str.contains("initialize")), + _ => panic!("Unexpected load error"), + } + } } From 02fbafcda603bc3b6d63e2a0f778650b44f02436 Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 14:46:43 +0100 Subject: [PATCH 0425/1030] release_version: Do not use prerelease field (#43669) - **release_channel: Do not use prerelease channel for build id** Prerelease channel specifiers always compare as less than to non-prerelease, which led to 2 auto-update bugs fixed in https://github.com/zed-industries/zed/pull/43595 and https://github.com/zed-industries/zed/pull/43611. 
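The ordering rule behind those bugs is easy to confirm with the `semver` crate this code already uses (the version numbers below are made-up examples):

```rust
// SemVer precedence: any prerelease identifier sorts *below* the bare version,
// so a build that carries its channel in the prerelease slot looks "older"
// than the same version without one.
use semver::Version;

fn main() {
    let release = Version::parse("0.165.0").unwrap();
    let prerelease = Version::parse("0.165.0-nightly.12345").unwrap();

    assert!(prerelease < release);
    println!("{prerelease} < {release}: {}", prerelease < release);
}
```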
We'll use a dot-delimited build specifiers in form: release-channel.build_number.sha1 instead - **auto_update: Do not display full build metadata in update notification** Release Notes: - N/A --- Cargo.lock | 1 + crates/auto_update_ui/Cargo.toml | 1 + crates/auto_update_ui/src/auto_update_ui.rs | 4 +++- crates/release_channel/src/lib.rs | 12 ++++++++++-- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f10c2e1d13210d67d16d584637c0fb7b71d61eec..8d4baa2e5221c23ff57a227a94dae4ae3859ec83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1380,6 +1380,7 @@ dependencies = [ "http_client", "markdown_preview", "release_channel", + "semver", "serde", "serde_json", "smol", diff --git a/crates/auto_update_ui/Cargo.toml b/crates/auto_update_ui/Cargo.toml index 0e31f94f5ee268cdc3274dea747bd0b05d9c80eb..2b1421e35dcbcf6fac40cd0e97a3dc839da58d9e 100644 --- a/crates/auto_update_ui/Cargo.toml +++ b/crates/auto_update_ui/Cargo.toml @@ -20,6 +20,7 @@ gpui.workspace = true http_client.workspace = true markdown_preview.workspace = true release_channel.workspace = true +semver.workspace = true serde.workspace = true serde_json.workspace = true smol.workspace = true diff --git a/crates/auto_update_ui/src/auto_update_ui.rs b/crates/auto_update_ui/src/auto_update_ui.rs index aeaa6ae93e635a6cab1487400fb58bd7be1bc6e1..6c32ee3b6c9b9c4974a287ff0e9a988472cecf3b 100644 --- a/crates/auto_update_ui/src/auto_update_ui.rs +++ b/crates/auto_update_ui/src/auto_update_ui.rs @@ -148,7 +148,9 @@ pub fn notify_if_app_was_updated(cx: &mut App) { let should_show_notification = should_show_notification.await?; if should_show_notification { cx.update(|cx| { - let version = updater.read(cx).current_version(); + let mut version = updater.read(cx).current_version(); + version.build = semver::BuildMetadata::EMPTY; + version.pre = semver::Prerelease::EMPTY; let app_name = ReleaseChannel::global(cx).display_name(); show_app_notification( NotificationId::unique::(), diff --git a/crates/release_channel/src/lib.rs b/crates/release_channel/src/lib.rs index e84bf91c1db5e891abae0aeb67089cc40b1ec009..65201ccc46caccdf4912b69fa296d468dfdea95d 100644 --- a/crates/release_channel/src/lib.rs +++ b/crates/release_channel/src/lib.rs @@ -90,11 +90,19 @@ impl AppVersion { } else { pkg_version.parse().expect("invalid version in Cargo.toml") }; + let mut pre = String::from(RELEASE_CHANNEL.dev_name()); + if let Some(build_id) = build_id { - version.pre = semver::Prerelease::new(&build_id).expect("Invalid build identifier"); + pre.push('.'); + pre.push_str(&build_id); } + if let Some(sha) = commit_sha { - version.build = semver::BuildMetadata::new(&sha.0).expect("Invalid build metadata"); + pre.push('.'); + pre.push_str(&sha.0); + } + if let Ok(build) = semver::BuildMetadata::new(&pre) { + version.build = build; } version From c2281779af56bd52c829ccd31aae4eb82b682ebc Mon Sep 17 00:00:00 2001 From: Piotr Osiewicz <24362066+osiewicz@users.noreply.github.com> Date: Thu, 27 Nov 2025 15:08:43 +0100 Subject: [PATCH 0426/1030] auto_update: Tentatively prevent auto-update loop on Nightly again (#43670) Release Notes: - N/A --- crates/auto_update/src/auto_update.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 06e87c53dba2acfffb03cdc75cbbaa7cc2f44a6a..0c122717d7a377e5aa5e8d23cab4de435bd67e33 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -639,10 +639,11 @@ impl 
AutoUpdater { if let AutoUpdateStatus::Updated { version, .. } = status { match version { VersionCheckType::Sha(cached_version) => { - let should_download = parsed_fetched_version - .as_ref() - .ok() - .is_none_or(|version| version.build.as_str() != cached_version.full()); + let should_download = + parsed_fetched_version.as_ref().ok().is_none_or(|version| { + version.build.as_str().rsplit('.').next() + != Some(&cached_version.full()) + }); let newer_version = should_download .then(|| VersionCheckType::Sha(AppCommitSha::new(fetched_version))); return Ok(newer_version); @@ -662,10 +663,9 @@ impl AutoUpdater { .ok() .flatten() .map(|sha| { - parsed_fetched_version - .as_ref() - .ok() - .is_none_or(|version| version.build.as_str() != sha) + parsed_fetched_version.as_ref().ok().is_none_or(|version| { + version.build.as_str().rsplit('.').next() != Some(&sha) + }) }) .unwrap_or(true); let newer_version = should_download From 5f0212de5fb76c91c4b8991f1bc8ed1464a2e527 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 27 Nov 2025 13:10:55 -0300 Subject: [PATCH 0427/1030] Better promote edit prediction when signed out (#43665) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introducing this little popover here that's aimed at better communicating what Zed's built-in edit prediction feature is and how much people can get of it for free by purely just signing in. Screenshot 2025-11-27 at 9  50@2x Release Notes: - N/A --- Cargo.lock | 1 + crates/client/src/zed_urls.rs | 8 + crates/edit_prediction_button/Cargo.toml | 1 + .../src/edit_prediction_button.rs | 145 +++++++++++++++++- 4 files changed, 149 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4baa2e5221c23ff57a227a94dae4ae3859ec83..082f420f11f12968dd5c6bec46c3fab4f2b37a7f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5308,6 +5308,7 @@ dependencies = [ "telemetry", "theme", "ui", + "util", "workspace", "zed_actions", "zeta", diff --git a/crates/client/src/zed_urls.rs b/crates/client/src/zed_urls.rs index 957d6c68f773db025b4ee604666f5b3d8101148b..2fe47251695446b54d6766c9a52bbd2da366d34e 100644 --- a/crates/client/src/zed_urls.rs +++ b/crates/client/src/zed_urls.rs @@ -59,3 +59,11 @@ pub fn agent_server_docs(cx: &App) -> String { server_url = server_url(cx) ) } + +/// Returns the URL to Zed's edit prediction documentation. 
+pub fn edit_prediction_docs(cx: &App) -> String { + format!( + "{server_url}/docs/ai/edit-prediction", + server_url = server_url(cx) + ) +} diff --git a/crates/edit_prediction_button/Cargo.toml b/crates/edit_prediction_button/Cargo.toml index 9062aca3c56f527385aecb000ebcd625f588eb9a..b7ec07e1e2b24d1d1b851913195afdbf58376da5 100644 --- a/crates/edit_prediction_button/Cargo.toml +++ b/crates/edit_prediction_button/Cargo.toml @@ -32,6 +32,7 @@ settings.workspace = true supermaven.workspace = true telemetry.workspace = true ui.workspace = true +util.workspace = true workspace.workspace = true zed_actions.workspace = true zeta.workspace = true diff --git a/crates/edit_prediction_button/src/edit_prediction_button.rs b/crates/edit_prediction_button/src/edit_prediction_button.rs index 254caa698aa05214f73a749e540233952db4978b..ba371b53aebe8c8f2db01501e01391125341a457 100644 --- a/crates/edit_prediction_button/src/edit_prediction_button.rs +++ b/crates/edit_prediction_button/src/edit_prediction_button.rs @@ -11,7 +11,7 @@ use fs::Fs; use gpui::{ Action, Animation, AnimationExt, App, AsyncWindowContext, Corner, Entity, FocusHandle, Focusable, IntoElement, ParentElement, Render, Subscription, WeakEntity, actions, div, - pulsating_between, + ease_in_out, pulsating_between, }; use indoc::indoc; use language::{ @@ -34,6 +34,7 @@ use ui::{ Clickable, ContextMenu, ContextMenuEntry, DocumentationEdge, DocumentationSide, IconButton, IconButtonShape, Indicator, PopoverMenu, PopoverMenuHandle, ProgressBar, Tooltip, prelude::*, }; +use util::ResultExt as _; use workspace::{ StatusItemView, Toast, Workspace, create_and_open_local_file, item::ItemHandle, notifications::NotificationId, @@ -322,7 +323,7 @@ impl Render for EditPredictionButton { let tooltip_meta = if self.user_store.read(cx).current_user().is_some() { "Choose a Plan" } else { - "Sign In" + "Sign In To Use" }; return div().child( @@ -357,6 +358,7 @@ impl Render for EditPredictionButton { } let show_editor_predictions = self.editor_show_predictions; + let user = self.user_store.read(cx).current_user(); let icon_button = IconButton::new("zed-predict-pending-button", zeta_icon) .shape(IconButtonShape::Square) @@ -372,10 +374,18 @@ impl Render for EditPredictionButton { }, ) .when(!self.popover_menu_handle.is_deployed(), |element| { + let user = user.clone(); element.tooltip(move |_window, cx| { if enabled { if show_editor_predictions { Tooltip::for_action("Edit Prediction", &ToggleMenu, cx) + } else if user.is_none() { + Tooltip::with_meta( + "Edit Prediction", + Some(&ToggleMenu), + "Sign In To Use", + cx, + ) } else { Tooltip::with_meta( "Edit Prediction", @@ -398,11 +408,25 @@ impl Render for EditPredictionButton { let this = cx.weak_entity(); let mut popover_menu = PopoverMenu::new("zeta") - .menu(move |window, cx| { - this.update(cx, |this, cx| { - this.build_zeta_context_menu(provider, window, cx) + .when(user.is_some(), |popover_menu| { + let this = this.clone(); + + popover_menu.menu(move |window, cx| { + this.update(cx, |this, cx| { + this.build_zeta_context_menu(provider, window, cx) + }) + .ok() + }) + }) + .when(user.is_none(), |popover_menu| { + let this = this.clone(); + + popover_menu.menu(move |window, cx| { + this.update(cx, |this, cx| { + this.build_zeta_upsell_context_menu(window, cx) + }) + .ok() }) - .ok() }) .anchor(Corner::BottomRight) .with_handle(self.popover_menu_handle.clone()); @@ -1045,6 +1069,55 @@ impl EditPredictionButton { }) } + fn build_zeta_upsell_context_menu( + &self, + window: &mut Window, + cx: &mut Context, + ) 
-> Entity { + ContextMenu::build(window, cx, |mut menu, _window, cx| { + menu = menu + .custom_row(move |_window, cx| { + let description = indoc! { + "Sign in for 2,000 worth of accepted suggestions at every keystroke, \ + powered by Zeta, our open-source, open-data model." + }; + + v_flex() + .max_w_64() + .h(rems_from_px(148.)) + .child(render_zeta_tab_animation(cx)) + .child(Label::new("Edit Prediction")) + .child( + Label::new(description) + .color(Color::Muted) + .size(LabelSize::Small), + ) + .into_any_element() + }) + .separator() + .entry("Sign In & Start Using", None, |window, cx| { + let client = Client::global(cx); + window + .spawn(cx, async move |cx| { + client + .sign_in_with_optional_connect(true, &cx) + .await + .log_err(); + }) + .detach(); + }) + .link( + "Learn More", + OpenBrowser { + url: zed_urls::edit_prediction_docs(cx), + } + .boxed_clone(), + ); + + menu + }) + } + pub fn update_enabled(&mut self, editor: Entity, cx: &mut Context) { let editor = editor.read(cx); let snapshot = editor.buffer().read(cx).snapshot(cx); @@ -1248,6 +1321,66 @@ fn toggle_edit_prediction_mode(fs: Arc, mode: EditPredictionsMode, cx: & } } +fn render_zeta_tab_animation(cx: &App) -> impl IntoElement { + let tab = |n: u64, inverted: bool| { + let text_color = cx.theme().colors().text; + + h_flex().child( + h_flex() + .text_size(TextSize::XSmall.rems(cx)) + .text_color(text_color) + .child("tab") + .with_animation( + ElementId::Integer(n), + Animation::new(Duration::from_secs(4)).repeat(), + move |tab, delta| { + let n_f32 = n as f32; + + let delta = if inverted { + (delta - 0.15 * (5.0 - n_f32)) / 0.7 + } else { + (delta - 0.15 * n_f32) / 0.7 + }; + + let delta = 1.0 - (0.5 - delta).abs() * 2.; + let delta = ease_in_out(delta.clamp(0., 1.)); + let delta = 0.1 + 0.5 * delta; + + tab.text_color(text_color.opacity(delta)) + }, + ), + ) + }; + + let tab_sequence = |inverted: bool| { + h_flex() + .gap_1() + .child(tab(0, inverted)) + .child(tab(1, inverted)) + .child(tab(2, inverted)) + .child(tab(3, inverted)) + .child(tab(4, inverted)) + }; + + h_flex() + .my_1p5() + .p_4() + .justify_center() + .gap_2() + .rounded_xs() + .border_1() + .border_dashed() + .border_color(cx.theme().colors().border) + .bg(gpui::pattern_slash( + cx.theme().colors().border.opacity(0.5), + 1., + 8., + )) + .child(tab_sequence(true)) + .child(Icon::new(IconName::ZedPredict)) + .child(tab_sequence(false)) +} + fn copilot_settings_url(enterprise_uri: Option<&str>) -> String { match enterprise_uri { Some(uri) => { From aa899f6d785826446f105c18f94e863f8c2e2b94 Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 18:14:42 +0100 Subject: [PATCH 0428/1030] gpui: Give windows message loop processing chances when we overload the main thread with tasks (#43678) This reduces hangs on windows when we have many tasks queued up on the main thread that yield a lot. 
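The fix below gives queued gpui tasks a small time budget (10 ms in the patch) before handing control back to the Windows message loop. A platform-neutral sketch of that budgeting idea, using plain std types instead of gpui's dispatcher and omitting the Win32 requeue/PeekMessage details:

```rust
// Drain queued tasks only until a time budget is spent, then return to the
// platform message loop so input/paint events are not starved.
use std::sync::mpsc::{Receiver, channel};
use std::time::{Duration, Instant};

type Task = Box<dyn FnOnce() + Send>;

fn run_foreground_tasks(receiver: &Receiver<Task>, budget: Duration) {
    let start = Instant::now();
    while start.elapsed() < budget {
        match receiver.try_recv() {
            Ok(task) => task(),  // run one queued task
            Err(_) => return,    // queue empty (or closed): nothing left this pass
        }
    }
    // Budget exhausted: leftover tasks run on the next pass through the loop.
}

fn main() {
    let (tx, rx) = channel::<Task>();
    for i in 0..5 {
        tx.send(Box::new(move || println!("task {i}"))).unwrap();
    }
    run_foreground_tasks(&rx, Duration::from_millis(10));
}
```

Tasks that do not fit in the budget simply wait for the next pass, which keeps input and paint messages flowing.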
Release Notes: - Reduced hangs on windows in some situations --- .../gpui/src/platform/windows/dispatcher.rs | 2 +- crates/gpui/src/platform/windows/platform.rs | 23 +++++++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/crates/gpui/src/platform/windows/dispatcher.rs b/crates/gpui/src/platform/windows/dispatcher.rs index f543202e77c75acfd007d632b2023317dcba43b6..dd53c86f5ed687c9b22a08779f262392f44a66ce 100644 --- a/crates/gpui/src/platform/windows/dispatcher.rs +++ b/crates/gpui/src/platform/windows/dispatcher.rs @@ -25,7 +25,7 @@ pub(crate) struct WindowsDispatcher { pub(crate) wake_posted: AtomicBool, main_sender: Sender, main_thread_id: ThreadId, - platform_window_handle: SafeHwnd, + pub(crate) platform_window_handle: SafeHwnd, validation_number: usize, } diff --git a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 942cb62d2216c8d7cd5ea4cf75c4e4fa4a7d007f..548b08eca8f254825bc5a42bf74089c0f230c4ac 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -813,9 +813,28 @@ impl WindowsPlatformInner { #[inline] fn run_foreground_task(&self) -> Option { + const MAIN_TASK_TIMEOUT: u128 = 10; + + let start = std::time::Instant::now(); loop { - for runnable in self.main_receiver.drain() { - WindowsDispatcher::execute_runnable(runnable); + loop { + if start.elapsed().as_millis() >= MAIN_TASK_TIMEOUT { + // requeue main thread dispatch and bail, allowing more system messages to be processed + unsafe { + PostMessageW( + Some(self.dispatcher.platform_window_handle.as_raw()), + WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, + WPARAM(self.validation_number), + LPARAM(0), + ) + .log_err(); + } + return Some(0); + } + match self.main_receiver.try_recv() { + Err(_) => break, + Ok(runnable) => WindowsDispatcher::execute_runnable(runnable), + } } // Someone could enqueue a Runnable here. The flag is still true, so they will not PostMessage. From d82be979630a662677d09f5f765c3acf40aef3fd Mon Sep 17 00:00:00 2001 From: Bennet Bo Fenner Date: Thu, 27 Nov 2025 18:47:12 +0100 Subject: [PATCH 0429/1030] acp: Support using @mentions after typing slash command (#43681) Release Notes: - acp: Allow using @mentions after typing slash command --- crates/agent_ui/src/completion_provider.rs | 62 ++++++++++++++++------ 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/crates/agent_ui/src/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs index 61ce313cb0c0c6ed91a08aa07544e766de5c581a..ec5d9c045acfe856409715770cb4c55f7a3e0e92 100644 --- a/crates/agent_ui/src/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -861,7 +861,7 @@ impl CompletionProvider for PromptCompletio let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); let line = lines.next()?; - ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) + PromptCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) }); let Some(state) = state else { return Task::ready(Ok(Vec::new())); @@ -880,7 +880,7 @@ impl CompletionProvider for PromptCompletio let editor = self.editor.clone(); let mention_set = self.mention_set.downgrade(); match state { - ContextCompletion::SlashCommand(SlashCommandCompletion { + PromptCompletion::SlashCommand(SlashCommandCompletion { command, argument, .. 
}) => { let search_task = self.search_slash_commands(command.unwrap_or_default(), cx); @@ -943,7 +943,7 @@ impl CompletionProvider for PromptCompletio }]) }) } - ContextCompletion::Mention(MentionCompletion { mode, argument, .. }) => { + PromptCompletion::Mention(MentionCompletion { mode, argument, .. }) => { let query = argument.unwrap_or_default(); let search_task = self.search_mentions(mode, query, Arc::::default(), cx); @@ -1085,12 +1085,12 @@ impl CompletionProvider for PromptCompletio let offset_to_line = buffer.point_to_offset(line_start); let mut lines = buffer.text_for_range(line_start..position).lines(); if let Some(line) = lines.next() { - ContextCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) + PromptCompletion::try_parse(line, offset_to_line, &self.source.supported_modes(cx)) .filter(|completion| { // Right now we don't support completing arguments of slash commands let is_slash_command_with_argument = matches!( completion, - ContextCompletion::SlashCommand(SlashCommandCompletion { + PromptCompletion::SlashCommand(SlashCommandCompletion { argument: Some(_), .. }) @@ -1160,12 +1160,13 @@ fn confirm_completion_callback( }) } -enum ContextCompletion { +#[derive(Debug, PartialEq)] +enum PromptCompletion { SlashCommand(SlashCommandCompletion), Mention(MentionCompletion), } -impl ContextCompletion { +impl PromptCompletion { fn source_range(&self) -> Range { match self { Self::SlashCommand(completion) => completion.source_range.clone(), @@ -1178,15 +1179,14 @@ impl ContextCompletion { offset_to_line: usize, supported_modes: &[PromptContextType], ) -> Option { - if let Some(command) = SlashCommandCompletion::try_parse(line, offset_to_line) { - Some(Self::SlashCommand(command)) - } else if let Some(mention) = - MentionCompletion::try_parse(line, offset_to_line, supported_modes) - { - Some(Self::Mention(mention)) - } else { - None + if line.contains('@') { + if let Some(mention) = + MentionCompletion::try_parse(line, offset_to_line, supported_modes) + { + return Some(Self::Mention(mention)); + } } + SlashCommandCompletion::try_parse(line, offset_to_line).map(Self::SlashCommand) } } @@ -1653,6 +1653,38 @@ fn selection_ranges( mod tests { use super::*; + #[test] + fn test_prompt_completion_parse() { + let supported_modes = vec![PromptContextType::File, PromptContextType::Symbol]; + + assert_eq!( + PromptCompletion::try_parse("/", 0, &supported_modes), + Some(PromptCompletion::SlashCommand(SlashCommandCompletion { + source_range: 0..1, + command: None, + argument: None, + })) + ); + + assert_eq!( + PromptCompletion::try_parse("@", 0, &supported_modes), + Some(PromptCompletion::Mention(MentionCompletion { + source_range: 0..1, + mode: None, + argument: None, + })) + ); + + assert_eq!( + PromptCompletion::try_parse("/test @file", 0, &supported_modes), + Some(PromptCompletion::Mention(MentionCompletion { + source_range: 6..11, + mode: Some(PromptContextType::File), + argument: None, + })) + ); + } + #[test] fn test_slash_command_completion_parse() { assert_eq!( From 64633bade4cac6351417a2147effa5c4ace9c9ba Mon Sep 17 00:00:00 2001 From: Lukas Wirth Date: Thu, 27 Nov 2025 19:51:19 +0100 Subject: [PATCH 0430/1030] gpui(windows): Prioritize system messages when exceeding main thread task budget (#43682) Release Notes: - Improved responsiveness on windows when there is a lot of tasks running in the foreground --- crates/gpui/src/platform/windows/platform.rs | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git 
a/crates/gpui/src/platform/windows/platform.rs b/crates/gpui/src/platform/windows/platform.rs index 548b08eca8f254825bc5a42bf74089c0f230c4ac..14b9511581a55a7654c8a48fc8f383c560c1a0ee 100644 --- a/crates/gpui/src/platform/windows/platform.rs +++ b/crates/gpui/src/platform/windows/platform.rs @@ -819,17 +819,16 @@ impl WindowsPlatformInner { loop { loop { if start.elapsed().as_millis() >= MAIN_TASK_TIMEOUT { - // requeue main thread dispatch and bail, allowing more system messages to be processed - unsafe { - PostMessageW( - Some(self.dispatcher.platform_window_handle.as_raw()), - WM_GPUI_TASK_DISPATCHED_ON_MAIN_THREAD, - WPARAM(self.validation_number), - LPARAM(0), - ) - .log_err(); + // we spent our budget on gpui tasks, we likely have a lot of work queued so drain system events first + // before returning to main thread task work + let mut msg = MSG::default(); + let peek_msg_type = PM_REMOVE | PM_QS_INPUT | PM_QS_PAINT; + while unsafe { PeekMessageW(&mut msg, None, 0, 0, peek_msg_type) }.as_bool() { + if translate_accelerator(&msg).is_none() { + _ = unsafe { TranslateMessage(&msg) }; + unsafe { DispatchMessageW(&msg) }; + } } - return Some(0); } match self.main_receiver.try_recv() { Err(_) => break, From d97d4f3949bedaf08e02c30bae9b0f39e6f4353b Mon Sep 17 00:00:00 2001 From: Xiaobo Liu Date: Fri, 28 Nov 2025 03:19:54 +0800 Subject: [PATCH 0431/1030] ui: Remove early return after painting existing menu (#43631) Release Notes: - Fixed right-click context menu UX issues Signed-off-by: Xiaobo Liu --- crates/ui/src/components/right_click_menu.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/crates/ui/src/components/right_click_menu.rs b/crates/ui/src/components/right_click_menu.rs index 761189671b935bf1f3d9e3f7d4d547528cf20196..dff423073710121bb0bc0fafdb8ab3108b746bde 100644 --- a/crates/ui/src/components/right_click_menu.rs +++ b/crates/ui/src/components/right_click_menu.rs @@ -223,7 +223,6 @@ impl Element for RightClickMenu { if let Some(mut menu) = request_layout.menu_element.take() { menu.paint(window, cx); - return; } let Some(builder) = this.menu_builder.take() else { From fa070c50e57e7de06b45336578f0a6f36d9c0ec8 Mon Sep 17 00:00:00 2001 From: Xiaobo Liu Date: Fri, 28 Nov 2025 03:33:48 +0800 Subject: [PATCH 0432/1030] editor: Show "Toggle Excerpt Unfold" tooltip when buffer is fold (#43626) Release Notes: - N/A --------- Signed-off-by: Xiaobo Liu Co-authored-by: Danilo Leal --- crates/editor/src/element.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/editor/src/element.rs b/crates/editor/src/element.rs index c85528353fc23ac2da4cca3682e28a30cda37f9c..dab04e8626385ada879485acecb93dddbcb4b940 100644 --- a/crates/editor/src/element.rs +++ b/crates/editor/src/element.rs @@ -3969,9 +3969,14 @@ impl EditorElement { .children(toggle_chevron_icon) .tooltip({ let focus_handle = focus_handle.clone(); + let is_folded_for_tooltip = is_folded; move |_window, cx| { Tooltip::with_meta_in( - "Toggle Excerpt Fold", + if is_folded_for_tooltip { + "Unfold Excerpt" + } else { + "Fold Excerpt" + }, Some(&ToggleFold), format!( "{} to toggle all", From 45285ee3452463bd08e780481256ba8180ab9c96 Mon Sep 17 00:00:00 2001 From: Danilo Leal <67129314+danilo-leal@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:38:52 -0300 Subject: [PATCH 0433/1030] settings ui: Fix debugger step granularity setting (#43686) Closes https://github.com/zed-industries/zed/issues/43549 Release Notes: - N/A --- crates/settings_ui/src/page_data.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/crates/settings_ui/src/page_data.rs b/crates/settings_ui/src/page_data.rs index 7cc5705ced9bc2267834044eff5e5def78182bc4..e06a98da49a3b0b60f704debe862ddf2bd4dd97e 100644 --- a/crates/settings_ui/src/page_data.rs +++ b/crates/settings_ui/src/page_data.rs @@ -4442,7 +4442,7 @@ pub(crate) fn settings_data(cx: &App) -> Vec { title: "Stepping Granularity", description: "Determines the stepping granularity for debug operations.", field: Box::new(SettingField { - json_path: Some("agent.default_height"), + json_path: Some("debugger.stepping_granularity"), pick: |settings_content| { settings_content .debugger From f10afd105921ce0449833eac28e7d351188277a1 Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Fri, 28 Nov 2025 04:02:03 +0800 Subject: [PATCH 0434/1030] component_preview: Pin the filter input at the top (#43636) When I scroll the page after selecting a component, the filter input disappears. It would be more helpful if it stayed fixed at the top. ## Before https://github.com/user-attachments/assets/e26031f1-d6e7-4ae2-a148-88f4c548a5cf ## After https://github.com/user-attachments/assets/f120355c-74d3-4724-9ffc-71fb7e3a4586 Release Notes: - N/A --- crates/zed/src/zed/component_preview.rs | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/crates/zed/src/zed/component_preview.rs b/crates/zed/src/zed/component_preview.rs index c231836aaa9219cab2ed913db70ad1704606dfd1..14a46d8882d1d3d371c50e9886062a124917a48d 100644 --- a/crates/zed/src/zed/component_preview.rs +++ b/crates/zed/src/zed/component_preview.rs @@ -653,10 +653,8 @@ impl Render for ComponentPreview { ) .child( v_flex() - .id("content-area") .flex_1() .size_full() - .overflow_y_scroll() .child( div() .p_2() @@ -665,14 +663,18 @@ impl Render for ComponentPreview { .border_color(cx.theme().colors().border) .child(self.filter_editor.clone()), ) - .child(match active_page { - PreviewPage::AllComponents => { - self.render_all_components(cx).into_any_element() - } - PreviewPage::Component(id) => self - .render_component_page(&id, window, cx) - .into_any_element(), - }), + .child( + div().id("content-area").flex_1().overflow_y_scroll().child( + match active_page { + PreviewPage::AllComponents => { + self.render_all_components(cx).into_any_element() + } + PreviewPage::Component(id) => self + .render_component_page(&id, window, cx) + .into_any_element(), + }, + ), + ), ) } } From 28dde14a33ee7b2a9858dfbb41b350e90118e4f3 Mon Sep 17 00:00:00 2001 From: Floyd Wang Date: Fri, 28 Nov 2025 04:09:19 +0800 Subject: [PATCH 0435/1030] ui: Fix custom size preview example of vector component (#43633) In the vector component custom size preview example, the image will overflow the content box. 
| Before | After | | - | - | | Before | After | Release Notes: - N/A --------- Co-authored-by: Danilo Leal --- crates/ui/src/components/image.rs | 33 +++++++++++++++++-------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/crates/ui/src/components/image.rs b/crates/ui/src/components/image.rs index 8a14cffd3b2de2e184fd87a9212775c470e3118d..3e8cbd8fff7ba484b1b1eb9fd50da55998d7eae9 100644 --- a/crates/ui/src/components/image.rs +++ b/crates/ui/src/components/image.rs @@ -115,6 +115,8 @@ impl Component for Vector { } fn preview(_window: &mut Window, _cx: &mut App) -> Option { + let size = rems_from_px(60.); + Some( v_flex() .gap_6() @@ -124,11 +126,18 @@ impl Component for Vector { vec![ single_example( "Default", - Vector::square(VectorName::ZedLogo, rems(8.)).into_any_element(), + Vector::square(VectorName::ZedLogo, size).into_any_element(), ), single_example( "Custom Size", - Vector::new(VectorName::ZedLogo, rems(12.), rems(6.)) + h_flex() + .h(rems_from_px(120.)) + .justify_center() + .child(Vector::new( + VectorName::ZedLogo, + rems_from_px(120.), + rems_from_px(200.), + )) .into_any_element(), ), ], @@ -138,13 +147,13 @@ impl Component for Vector { vec![ single_example( "Accent Color", - Vector::square(VectorName::ZedLogo, rems(8.)) + Vector::square(VectorName::ZedLogo, size) .color(Color::Accent) .into_any_element(), ), single_example( "Error Color", - Vector::square(VectorName::ZedLogo, rems(8.)) + Vector::square(VectorName::ZedLogo, size) .color(Color::Error) .into_any_element(), ), @@ -152,17 +161,11 @@ impl Component for Vector { ), example_group_with_title( "Different Vectors", - vec![ - single_example( - "Zed Logo", - Vector::square(VectorName::ZedLogo, rems(8.)).into_any_element(), - ), - single_example( - "Zed X Copilot", - Vector::square(VectorName::ZedXCopilot, rems(8.)) - .into_any_element(), - ), - ], + vec![single_example( + "Zed X Copilot", + Vector::square(VectorName::ZedXCopilot, rems_from_px(100.)) + .into_any_element(), + )], ), ]) .into_any_element(), From 20b584398e6151f0950f18a2579b4ee0b4012c96 Mon Sep 17 00:00:00 2001 From: Remco Smits Date: Thu, 27 Nov 2025 21:12:03 +0100 Subject: [PATCH 0436/1030] checkbox: Fix showing cursor pointer for edge of the checkbox when it's disabled (#43577) This PR fixes that you saw the cursor pointer for disabled checkbox. This is only for the edge of the checkbox component, because we didn't check for the disabled case. 
**Before** https://github.com/user-attachments/assets/cfebad01-533b-4515-b8d9-4bcb839eaec4 **After** https://github.com/user-attachments/assets/969600de-081b-42df-a288-ca3db5758d12 Release Notes: - N/A --- crates/ui/src/components/toggle.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/crates/ui/src/components/toggle.rs b/crates/ui/src/components/toggle.rs index a41dce6c61de1cabdbccee1478afe143feee4987..1e637f24194b86f0c06aef806975df635f45a6cd 100644 --- a/crates/ui/src/components/toggle.rs +++ b/crates/ui/src/components/toggle.rs @@ -247,7 +247,13 @@ impl RenderOnce for Checkbox { h_flex() .id(self.id) - .cursor_pointer() + .map(|this| { + if self.disabled { + this.cursor_not_allowed() + } else { + this.cursor_pointer() + } + }) .gap(DynamicSpacing::Base06.rems(cx)) .child(checkbox) .when_some( From 518ea716eeef2fd639f6349e84aad94b0e71dafb Mon Sep 17 00:00:00 2001 From: Scott Churchley Date: Thu, 27 Nov 2025 15:26:00 -0500 Subject: [PATCH 0437/1030] agent_ui: Truncate file path in context completions (#42682) Closes https://github.com/zed-industries/zed/issues/38753 Followed some prior art here: [a similar calculation ](https://github.com/zed-industries/zed/blob/e80b490ac00bfb5c206d3eb3f5c2dec10e742678/crates/file_finder/src/file_finder.rs#L1105) in `file_finder.rs` Release Notes: - improved visibility of long path names in context completions by truncating on the left when space is insufficient to render the full path Before: Screenshot of overflowing file paths After: Screenshot of truncated file paths --- crates/agent_ui/src/completion_provider.rs | 55 ++++++++++++++++++++-- crates/editor/src/code_context_menus.rs | 45 ++++++++++-------- 2 files changed, 75 insertions(+), 25 deletions(-) diff --git a/crates/agent_ui/src/completion_provider.rs b/crates/agent_ui/src/completion_provider.rs index ec5d9c045acfe856409715770cb4c55f7a3e0e92..2e3cf0d551fc649e61ae26e47fa53301def2aacc 100644 --- a/crates/agent_ui/src/completion_provider.rs +++ b/crates/agent_ui/src/completion_provider.rs @@ -7,7 +7,9 @@ use std::sync::atomic::AtomicBool; use acp_thread::MentionUri; use agent::{HistoryEntry, HistoryStore}; use anyhow::Result; -use editor::{CompletionProvider, Editor, ExcerptId}; +use editor::{ + CompletionProvider, Editor, ExcerptId, code_context_menus::COMPLETION_MENU_MAX_WIDTH, +}; use fuzzy::{PathMatch, StringMatch, StringMatchCandidate}; use gpui::{App, Entity, Task, WeakEntity}; use language::{Buffer, CodeLabel, CodeLabelBuilder, HighlightId}; @@ -25,6 +27,7 @@ use ui::prelude::*; use util::ResultExt as _; use util::paths::PathStyle; use util::rel_path::RelPath; +use util::truncate_and_remove_front; use workspace::Workspace; use crate::AgentPanel; @@ -336,14 +339,20 @@ impl PromptCompletionProvider { mention_set: WeakEntity, workspace: Entity, project: Entity, + label_max_chars: usize, cx: &mut App, ) -> Option { let path_style = project.read(cx).path_style(cx); let (file_name, directory) = extract_file_name_and_directory(&project_path.path, path_prefix, path_style); - let label = - build_code_label_for_path(&file_name, directory.as_ref().map(|s| s.as_ref()), None, cx); + let label = build_code_label_for_path( + &file_name, + directory.as_ref().map(|s| s.as_ref()), + None, + label_max_chars, + cx, + ); let abs_path = project.read(cx).absolute_path(&project_path, cx)?; @@ -392,6 +401,7 @@ impl PromptCompletionProvider { editor: WeakEntity, mention_set: WeakEntity, workspace: Entity, + label_max_chars: usize, cx: &mut App, ) -> Option { let project = 
workspace.read(cx).project().clone(); @@ -414,6 +424,7 @@ impl PromptCompletionProvider { &symbol.name, Some(&file_name), Some(symbol.range.start.0.row + 1), + label_max_chars, cx, ); @@ -852,7 +863,7 @@ impl CompletionProvider for PromptCompletio buffer: &Entity, buffer_position: Anchor, _trigger: CompletionContext, - _window: &mut Window, + window: &mut Window, cx: &mut Context, ) -> Task>> { let state = buffer.update(cx, |buffer, cx| { @@ -948,6 +959,31 @@ impl CompletionProvider for PromptCompletio let search_task = self.search_mentions(mode, query, Arc::::default(), cx); + // Calculate maximum characters available for the full label (file_name + space + directory) + // based on maximum menu width after accounting for padding, spacing, and icon width + let label_max_chars = { + // Base06 left padding + Base06 gap + Base06 right padding + icon width + let used_pixels = DynamicSpacing::Base06.px(cx) * 3.0 + + IconSize::XSmall.rems() * window.rem_size(); + + let style = window.text_style(); + let font_id = window.text_system().resolve_font(&style.font()); + let font_size = TextSize::Small.rems(cx).to_pixels(window.rem_size()); + + // Fallback em_width of 10px matches file_finder.rs fallback for TextSize::Small + let em_width = cx + .text_system() + .em_width(font_id, font_size) + .unwrap_or(px(10.0)); + + // Calculate available pixels for text (file_name + directory) + // Using max width since dynamic_width allows the menu to expand up to this + let available_pixels = COMPLETION_MENU_MAX_WIDTH - used_pixels; + + // Convert to character count (total available for file_name + directory) + (f32::from(available_pixels) / f32::from(em_width)) as usize + }; + cx.spawn(async move |_, cx| { let matches = search_task.await; @@ -984,6 +1020,7 @@ impl CompletionProvider for PromptCompletio mention_set.clone(), workspace.clone(), project.clone(), + label_max_chars, cx, ) } @@ -996,6 +1033,7 @@ impl CompletionProvider for PromptCompletio editor.clone(), mention_set.clone(), workspace.clone(), + label_max_chars, cx, ) } @@ -1595,6 +1633,7 @@ fn build_code_label_for_path( file: &str, directory: Option<&str>, line_number: Option, + label_max_chars: usize, cx: &App, ) -> CodeLabel { let variable_highlight_id = cx @@ -1608,7 +1647,13 @@ fn build_code_label_for_path( label.push_str(" ", None); if let Some(directory) = directory { - label.push_str(directory, variable_highlight_id); + let file_name_chars = file.chars().count(); + // Account for: file_name + space (ellipsis is handled by truncate_and_remove_front) + let directory_max_chars = label_max_chars + .saturating_sub(file_name_chars) + .saturating_sub(1); + let truncated_directory = truncate_and_remove_front(directory, directory_max_chars.max(5)); + label.push_str(&truncated_directory, variable_highlight_id); } if let Some(line_number) = line_number { label.push_str(&format!(" L{}", line_number), variable_highlight_id); diff --git a/crates/editor/src/code_context_menus.rs b/crates/editor/src/code_context_menus.rs index 6a07d39210773476b5f88764c5a21f292da48676..efecb70b59746b51db713b4a13153033d0dd3b10 100644 --- a/crates/editor/src/code_context_menus.rs +++ b/crates/editor/src/code_context_menus.rs @@ -49,6 +49,8 @@ pub const MENU_GAP: Pixels = px(4.); pub const MENU_ASIDE_X_PADDING: Pixels = px(16.); pub const MENU_ASIDE_MIN_WIDTH: Pixels = px(260.); pub const MENU_ASIDE_MAX_WIDTH: Pixels = px(500.); +pub const COMPLETION_MENU_MIN_WIDTH: Pixels = px(280.); +pub const COMPLETION_MENU_MAX_WIDTH: Pixels = px(540.); // Constants for the markdown cache. 
The purpose of this cache is to reduce flickering due to // documentation not yet being parsed. @@ -907,26 +909,29 @@ impl CompletionsMenu { }) }); - div().min_w(px(280.)).max_w(px(540.)).child( - ListItem::new(mat.candidate_id) - .inset(true) - .toggle_state(item_ix == selected_item) - .on_click(cx.listener(move |editor, _event, window, cx| { - cx.stop_propagation(); - if let Some(task) = editor.confirm_completion( - &ConfirmCompletion { - item_ix: Some(item_ix), - }, - window, - cx, - ) { - task.detach_and_log_err(cx) - } - })) - .start_slot::(start_slot) - .child(h_flex().overflow_hidden().child(completion_label)) - .end_slot::
@@ -378,23 +398,6 @@ {{/if}} {{/if}} [template markup stripped; the remaining hunk fragments reference {{> header}} and {{#if search_enabled}}]