1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 bookmark_store::SerializedBookmark,
25 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
26 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
27};
28
29use language::{LanguageName, Toolchain, ToolchainScope};
30use remote::{
31 DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
32 SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
33};
34use serde::{Deserialize, Serialize};
35use sqlez::{
36 bindable::{Bind, Column, StaticColumnCount},
37 statement::Statement,
38 thread_safe_connection::ThreadSafeConnection,
39};
40
41use ui::{App, SharedString, px};
42use util::{ResultExt, maybe, rel_path::RelPath};
43use uuid::Uuid;
44
45use crate::{
46 WorkspaceId,
47 path_list::{PathList, SerializedPathList},
48 persistence::model::RemoteConnectionKind,
49};
50
51use model::{
52 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
53 SerializedPaneGroup, SerializedWorkspace,
54};
55
56use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
57
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept below the hard limit to leave headroom for any fixed parameters in a query.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
62
63fn parse_timestamp(text: &str) -> DateTime<Utc> {
64 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
65 .map(|naive| naive.and_utc())
66 .unwrap_or_else(|_| Utc::now())
67}
68
69fn contains_wsl_path(paths: &PathList) -> bool {
70 cfg!(windows)
71 && paths
72 .paths()
73 .iter()
74 .any(|path| util::paths::WslPath::from_path(path).is_some())
75}
76
77#[derive(Copy, Clone, Debug, PartialEq)]
78pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
79impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
80impl sqlez::bindable::Bind for SerializedAxis {
81 fn bind(
82 &self,
83 statement: &sqlez::statement::Statement,
84 start_index: i32,
85 ) -> anyhow::Result<i32> {
86 match self.0 {
87 gpui::Axis::Horizontal => "Horizontal",
88 gpui::Axis::Vertical => "Vertical",
89 }
90 .bind(statement, start_index)
91 }
92}
93
94impl sqlez::bindable::Column for SerializedAxis {
95 fn column(
96 statement: &mut sqlez::statement::Statement,
97 start_index: i32,
98 ) -> anyhow::Result<(Self, i32)> {
99 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
100 Ok((
101 match axis_text.as_str() {
102 "Horizontal" => Self(Axis::Horizontal),
103 "Vertical" => Self(Axis::Vertical),
104 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
105 },
106 next_index,
107 ))
108 })
109 }
110}
111
/// Wrapper around [`WindowBounds`] that can be stored in / loaded from SQLite.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);

impl StaticColumnCount for SerializedWindowBounds {
    fn column_count() -> usize {
        // One TEXT column for the window state plus four INTEGER columns
        // for x, y, width, and height.
        5
    }
}
120
121impl Bind for SerializedWindowBounds {
122 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
123 match self.0 {
124 WindowBounds::Windowed(bounds) => {
125 let next_index = statement.bind(&"Windowed", start_index)?;
126 statement.bind(
127 &(
128 SerializedPixels(bounds.origin.x),
129 SerializedPixels(bounds.origin.y),
130 SerializedPixels(bounds.size.width),
131 SerializedPixels(bounds.size.height),
132 ),
133 next_index,
134 )
135 }
136 WindowBounds::Maximized(bounds) => {
137 let next_index = statement.bind(&"Maximized", start_index)?;
138 statement.bind(
139 &(
140 SerializedPixels(bounds.origin.x),
141 SerializedPixels(bounds.origin.y),
142 SerializedPixels(bounds.size.width),
143 SerializedPixels(bounds.size.height),
144 ),
145 next_index,
146 )
147 }
148 WindowBounds::Fullscreen(bounds) => {
149 let next_index = statement.bind(&"FullScreen", start_index)?;
150 statement.bind(
151 &(
152 SerializedPixels(bounds.origin.x),
153 SerializedPixels(bounds.origin.y),
154 SerializedPixels(bounds.size.width),
155 SerializedPixels(bounds.size.height),
156 ),
157 next_index,
158 )
159 }
160 }
161 }
162}
163
164impl Column for SerializedWindowBounds {
165 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
166 let (window_state, next_index) = String::column(statement, start_index)?;
167 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
168 Column::column(statement, next_index)?;
169 let bounds = Bounds {
170 origin: point(px(x as f32), px(y as f32)),
171 size: size(px(width as f32), px(height as f32)),
172 };
173
174 let status = match window_state.as_str() {
175 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
176 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
177 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
178 _ => bail!("Window State did not have a valid string"),
179 };
180
181 Ok((status, next_index + 4))
182 }
183}
184
// KVP key under which the last-used window bounds are stored as JSON.
const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
186
187pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
188 let json_str = kvp
189 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
190 .log_err()
191 .flatten()?;
192
193 let (display_uuid, persisted) =
194 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
195 Some((display_uuid, persisted.into()))
196}
197
198pub async fn write_default_window_bounds(
199 kvp: &KeyValueStore,
200 bounds: WindowBounds,
201 display_uuid: Uuid,
202) -> anyhow::Result<()> {
203 let persisted = WindowBoundsJson::from(bounds);
204 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
205 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
206 .await?;
207 Ok(())
208}
209
/// JSON-serializable mirror of [`WindowBounds`]; pixel values are rounded to
/// whole integers when converting into this form for persistence.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
231
232impl From<WindowBounds> for WindowBoundsJson {
233 fn from(b: WindowBounds) -> Self {
234 match b {
235 WindowBounds::Windowed(bounds) => {
236 let origin = bounds.origin;
237 let size = bounds.size;
238 WindowBoundsJson::Windowed {
239 x: f32::from(origin.x).round() as i32,
240 y: f32::from(origin.y).round() as i32,
241 width: f32::from(size.width).round() as i32,
242 height: f32::from(size.height).round() as i32,
243 }
244 }
245 WindowBounds::Maximized(bounds) => {
246 let origin = bounds.origin;
247 let size = bounds.size;
248 WindowBoundsJson::Maximized {
249 x: f32::from(origin.x).round() as i32,
250 y: f32::from(origin.y).round() as i32,
251 width: f32::from(size.width).round() as i32,
252 height: f32::from(size.height).round() as i32,
253 }
254 }
255 WindowBounds::Fullscreen(bounds) => {
256 let origin = bounds.origin;
257 let size = bounds.size;
258 WindowBoundsJson::Fullscreen {
259 x: f32::from(origin.x).round() as i32,
260 y: f32::from(origin.y).round() as i32,
261 width: f32::from(size.width).round() as i32,
262 height: f32::from(size.height).round() as i32,
263 }
264 }
265 }
266 }
267}
268
269impl From<WindowBoundsJson> for WindowBounds {
270 fn from(n: WindowBoundsJson) -> Self {
271 match n {
272 WindowBoundsJson::Windowed {
273 x,
274 y,
275 width,
276 height,
277 } => WindowBounds::Windowed(Bounds {
278 origin: point(px(x as f32), px(y as f32)),
279 size: size(px(width as f32), px(height as f32)),
280 }),
281 WindowBoundsJson::Maximized {
282 x,
283 y,
284 width,
285 height,
286 } => WindowBounds::Maximized(Bounds {
287 origin: point(px(x as f32), px(y as f32)),
288 size: size(px(width as f32), px(height as f32)),
289 }),
290 WindowBoundsJson::Fullscreen {
291 x,
292 y,
293 width,
294 height,
295 } => WindowBounds::Fullscreen(Bounds {
296 origin: point(px(x as f32), px(y as f32)),
297 size: size(px(width as f32), px(height as f32)),
298 }),
299 }
300 }
301}
302
303fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
304 let kvp = KeyValueStore::global(cx);
305 kvp.scoped("multi_workspace_state")
306 .read(&window_id.as_u64().to_string())
307 .log_err()
308 .flatten()
309 .and_then(|json| serde_json::from_str(&json).ok())
310 .unwrap_or_default()
311}
312
313pub async fn write_multi_workspace_state(
314 kvp: &KeyValueStore,
315 window_id: WindowId,
316 state: model::MultiWorkspaceState,
317) {
318 if let Ok(json_str) = serde_json::to_string(&state) {
319 kvp.scoped("multi_workspace_state")
320 .write(window_id.as_u64().to_string(), json_str)
321 .await
322 .log_err();
323 }
324}
325
326pub fn read_serialized_multi_workspaces(
327 session_workspaces: Vec<model::SessionWorkspace>,
328 cx: &App,
329) -> Vec<model::SerializedMultiWorkspace> {
330 let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
331 let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();
332
333 for session_workspace in session_workspaces {
334 match session_workspace.window_id {
335 Some(window_id) => {
336 let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
337 window_groups.push(Vec::new());
338 window_groups.len() - 1
339 });
340 window_groups[group_index].push(session_workspace);
341 }
342 None => {
343 window_groups.push(vec![session_workspace]);
344 }
345 }
346 }
347
348 window_groups
349 .into_iter()
350 .filter_map(|group| {
351 let window_id = group.first().and_then(|sw| sw.window_id);
352 let state = window_id
353 .map(|wid| read_multi_workspace_state(wid, cx))
354 .unwrap_or_default();
355 let active_workspace = state
356 .active_workspace_id
357 .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
358 .or(Some(0))
359 .and_then(|index| group.into_iter().nth(index))?;
360 Some(model::SerializedMultiWorkspace {
361 active_workspace,
362 state,
363 })
364 })
365 .collect()
366}
367
// KVP key under which the default dock layout is stored as JSON.
const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
369
370pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
371 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
372
373 serde_json::from_str::<DockStructure>(&json_str).ok()
374}
375
376pub async fn write_default_dock_state(
377 kvp: &KeyValueStore,
378 docks: DockStructure,
379) -> anyhow::Result<()> {
380 let json_str = serde_json::to_string(&docks)?;
381 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
382 .await?;
383 Ok(())
384}
385
/// A serialized bookmark: a single row within a file.
#[derive(Debug)]
pub struct Bookmark {
    pub row: u32,
}

impl sqlez::bindable::StaticColumnCount for Bookmark {
    fn column_count() -> usize {
        // row
        1
    }
}

impl sqlez::bindable::Bind for Bookmark {
    /// Binds the bookmark's row as a single INTEGER column at `start_index`.
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        statement.bind(&self.row, start_index)
    }
}

impl Column for Bookmark {
    /// Reads the row column written by `Bind` and returns the bookmark along
    /// with the index of the next unread column.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let row = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read bookmark at index {start_index}"))?
            as u32;

        Ok((Bookmark { row }, start_index + 1))
    }
}
418
/// A serialized debugger breakpoint within a file.
#[derive(Debug)]
pub struct Breakpoint {
    pub position: u32,                   // row the breakpoint is placed on
    pub message: Option<Arc<str>>,       // log message, if any
    pub condition: Option<Arc<str>>,     // condition expression, if any
    pub hit_condition: Option<Arc<str>>, // hit-count condition, if any
    pub state: BreakpointState,
}

/// Wrapper for DB type of a breakpoint
struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);

impl From<BreakpointState> for BreakpointStateWrapper<'static> {
    fn from(kind: BreakpointState) -> Self {
        BreakpointStateWrapper(Cow::Owned(kind))
    }
}

impl StaticColumnCount for BreakpointStateWrapper<'_> {
    fn column_count() -> usize {
        // state discriminant
        1
    }
}

impl Bind for BreakpointStateWrapper<'_> {
    // Stored as the integer discriminant produced by `BreakpointState::to_int`.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        statement.bind(&self.0.to_int(), start_index)
    }
}

impl Column for BreakpointStateWrapper<'_> {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let state = statement.column_int(start_index)?;

        // Discriminants must stay in sync with `BreakpointState::to_int`
        // used by the `Bind` impl above.
        match state {
            0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
            1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
            _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
        }
    }
}
460
impl sqlez::bindable::StaticColumnCount for Breakpoint {
    fn column_count() -> usize {
        // Position, log message, condition message, and hit condition message
        4 + BreakpointStateWrapper::column_count()
    }
}

impl sqlez::bindable::Bind for Breakpoint {
    /// Binds position, message, condition, hit condition, and state as
    /// consecutive columns starting at `start_index`.
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        let next_index = statement.bind(&self.position, start_index)?;
        let next_index = statement.bind(&self.message, next_index)?;
        let next_index = statement.bind(&self.condition, next_index)?;
        let next_index = statement.bind(&self.hit_condition, next_index)?;
        statement.bind(
            &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
            next_index,
        )
    }
}

impl Column for Breakpoint {
    /// Reads the columns written by `Bind`, in the same order.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let position = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
            as u32;
        let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
        let (condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;

        Ok((
            Breakpoint {
                position,
                message: message.map(Arc::from),
                condition: condition.map(Arc::from),
                hit_condition: hit_condition.map(Arc::from),
                state: state.0.into_owned(),
            },
            next_index,
        ))
    }
}
508
/// A pixel value persisted as an INTEGER column.
#[derive(Clone, Debug, PartialEq)]
struct SerializedPixels(gpui::Pixels);
impl sqlez::bindable::StaticColumnCount for SerializedPixels {}

impl sqlez::bindable::Bind for SerializedPixels {
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        // Pixels -> u32 -> i32 before binding as a SQLite INTEGER.
        let this: i32 = u32::from(self.0) as _;
        this.bind(statement, start_index)
    }
}
523
/// Connection to the workspace database, which persists window layout, docks,
/// panes, items, breakpoints, bookmarks, toolchains, and remote connections.
pub struct WorkspaceDb(ThreadSafeConnection);

impl Domain for WorkspaceDb {
    const NAME: &str = stringify!(WorkspaceDb);

    // Append-only migration list. Shipped entries should not be edited (see
    // `should_allow_migration_change` for the one sanctioned exception);
    // add new migrations at the end instead.
    const MIGRATIONS: &[&str] = &[
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Add window placement columns
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        // Add remote (dev server) project support
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        // Rename remote projects to dev server projects
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        // Track the session a workspace belongs to, for session restoration
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        // Persist how many tabs are pinned in each pane
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        // Add SSH project support
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        // Persist per-worktree toolchain selections
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        // Persist debugger breakpoints per workspace
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains with relative_worktree_path in the primary key
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Consolidate per-project SSH rows into shared ssh_connections, and
        // deduplicate workspaces by (connection, paths)
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into remote_connections (SSH, WSL, ...)
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        // Persist user-selected toolchains separately from detected ones
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        sql!(
            DROP TABLE ssh_connections;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        // Track which worktrees the user has marked as trusted
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Re-key toolchains by worktree root path instead of worktree id
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
        // Persist editor bookmarks per workspace
        sql!(
            CREATE TABLE bookmarks (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                row INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
1044
// Defines the process-wide static connection for `WorkspaceDb`
// (see `db::static_connection!`); the empty list declares no parent domains.
db::static_connection!(WorkspaceDb, []);
1046
1047impl WorkspaceDb {
    /// Returns a serialized workspace for the given worktree_roots. If no
    /// workspace for the passed roots is stored, returns none.
    ///
    /// NOTE(review): an empty `worktree_roots` does NOT fall back to the most
    /// recent workspace — `workspace_for_roots_internal` returns `None` early
    /// for empty local path lists; empty workspaces are restored through
    /// `workspace_for_id` instead.
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        // Local variant: no remote-connection filter applied.
        self.workspace_for_roots_internal(worktree_roots, None)
    }
1057
    /// Like `workspace_for_roots`, but only matches workspaces stored for the
    /// given remote connection.
    pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_project_id: RemoteConnectionId,
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
    }
1065
    /// Shared lookup behind the local and remote variants above: matches a
    /// workspace row by its serialized path list and (optionally) its remote
    /// connection, then hydrates a full `SerializedWorkspace` from the
    /// related tables.
    pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> Option<SerializedWorkspace> {
        // paths are sorted before db interactions to ensure that the order of the paths
        // doesn't affect the workspace selection for existing workspaces
        let root_paths = PathList::new(worktree_roots);

        // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
        // They should only be restored via workspace_for_id during session restoration.
        if root_paths.is_empty() && remote_connection_id.is_none() {
            return None;
        }

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        //
        // NOTE(review): the tuple below has fewer fields than the SELECT has
        // columns — window_state..window_height decode into
        // `SerializedWindowBounds` and the dock columns into `DockStructure`,
        // presumably via multi-column `Column` impls; confirm against their
        // definitions.
        let (
            workspace_id,
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
        ): (
            WorkspaceId,
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    workspace_id,
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id
                FROM workspaces
                WHERE
                    // IS (not =) so a NULL remote_connection_id matches a NULL
                    // bind for local workspaces.
                    paths IS ? AND
                    remote_connection_id IS ?
                LIMIT 1
            })
            .and_then(|mut prepared_statement| {
                (prepared_statement)((
                    root_paths.serialize().paths,
                    remote_connection_id.map(|id| id.0 as i32),
                ))
            })
            .context("No workspaces found")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // Failure to resolve the remote connection row degrades the location
        // to Local below rather than failing the whole lookup.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            // Only a missing pane tree aborts (returns None); bookmarks,
            // breakpoints and toolchains degrade to empty maps on error.
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            bookmarks: self.bookmarks(workspace_id),
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1175
    /// Returns the workspace with the given ID, loading all associated data.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        // NOTE(review): the tuple has fewer fields than the SELECT has columns
        // because window bounds and the dock columns decode through
        // multi-column `Column` impls — confirm against their definitions.
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // A stored connection id marks a remote workspace; if the connection
        // row cannot be resolved, the location degrades to Local below.
        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            // Only a missing pane tree aborts (returns None); bookmarks and
            // breakpoints degrade to empty maps on error.
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            bookmarks: self.bookmarks(workspace_id),
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1266
1267 fn bookmarks(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SerializedBookmark>> {
1268 let bookmarks: Result<Vec<(PathBuf, Bookmark)>> = self
1269 .select_bound(sql! {
1270 SELECT path, row
1271 FROM bookmarks
1272 WHERE workspace_id = ?
1273 ORDER BY path, row
1274 })
1275 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1276
1277 match bookmarks {
1278 Ok(bookmarks) => {
1279 if bookmarks.is_empty() {
1280 log::debug!("Bookmarks are empty after querying database for them");
1281 }
1282
1283 let mut map: BTreeMap<_, Vec<_>> = BTreeMap::default();
1284
1285 for (path, bookmark) in bookmarks {
1286 let path: Arc<Path> = path.into();
1287 map.entry(path.clone())
1288 .or_default()
1289 .push(SerializedBookmark(bookmark.row))
1290 }
1291
1292 map
1293 }
1294 Err(e) => {
1295 log::error!("Failed to load bookmarks: {}", e);
1296 BTreeMap::default()
1297 }
1298 }
1299 }
1300
1301 fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
1302 let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
1303 .select_bound(sql! {
1304 SELECT path, breakpoint_location, log_message, condition, hit_condition, state
1305 FROM breakpoints
1306 WHERE workspace_id = ?
1307 })
1308 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1309
1310 match breakpoints {
1311 Ok(bp) => {
1312 if bp.is_empty() {
1313 log::debug!("Breakpoints are empty after querying database for them");
1314 }
1315
1316 let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();
1317
1318 for (path, breakpoint) in bp {
1319 let path: Arc<Path> = path.into();
1320 map.entry(path.clone()).or_default().push(SourceBreakpoint {
1321 row: breakpoint.position,
1322 path,
1323 message: breakpoint.message,
1324 condition: breakpoint.condition,
1325 hit_condition: breakpoint.hit_condition,
1326 state: breakpoint.state,
1327 });
1328 }
1329
1330 for (path, bps) in map.iter() {
1331 log::info!(
1332 "Got {} breakpoints from database at path: {}",
1333 bps.len(),
1334 path.to_string_lossy()
1335 );
1336 }
1337
1338 map
1339 }
1340 Err(msg) => {
1341 log::error!("Breakpoints query failed with msg: {msg}");
1342 Default::default()
1343 }
1344 }
1345 }
1346
    /// Loads user-selected toolchains visible to a workspace, grouped by scope.
    ///
    /// Rows stored under workspace id 0 are globally scoped; rows stored under
    /// the actual workspace id are project- or subproject-scoped depending on
    /// whether worktree path columns were recorded (see the matching writes in
    /// `save_workspace`).
    fn user_toolchains(
        &self,
        workspace_id: WorkspaceId,
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
        type RowKind = (WorkspaceId, String, String, String, String, String, String);

        let toolchains: Vec<RowKind> = self
            .select_bound(sql! {
                SELECT workspace_id, worktree_root_path, relative_worktree_path,
                language_name, name, path, raw_json
                FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
                    workspace_id IN (0, ?2)
                )
            })
            .and_then(|mut statement| {
                (statement)((remote_connection_id.map(|id| id.0), workspace_id))
            })
            // Query failures degrade to "no toolchains" rather than erroring.
            .unwrap_or_default();
        let mut ret = BTreeMap::<_, IndexSet<_>>::default();

        for (
            _workspace_id,
            worktree_root_path,
            relative_worktree_path,
            language_name,
            name,
            path,
            raw_json,
        ) in toolchains
        {
            // INTEGER's that are primary keys (like workspace ids, remote connection ids
            // and such) start at 1, so we're safe to use workspace id 0 as the sentinel
            // for globally-scoped toolchains (the save path writes WorkspaceId(0) for
            // ToolchainScope::Global).
            let scope = if _workspace_id == WorkspaceId(0) {
                debug_assert_eq!(worktree_root_path, String::default());
                debug_assert_eq!(relative_worktree_path, String::default());
                ToolchainScope::Global
            } else {
                debug_assert_eq!(workspace_id, _workspace_id);
                // Both path columns must be empty together (Project scope) or
                // non-empty together (Subproject scope).
                debug_assert_eq!(
                    worktree_root_path == String::default(),
                    relative_worktree_path == String::default()
                );

                let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
                    continue;
                };
                if worktree_root_path != String::default()
                    && relative_worktree_path != String::default()
                {
                    ToolchainScope::Subproject(
                        Arc::from(worktree_root_path.as_ref()),
                        relative_path.into(),
                    )
                } else {
                    ToolchainScope::Project
                }
            };
            // Rows whose JSON payload no longer parses are silently skipped.
            let Ok(as_json) = serde_json::from_str(&raw_json) else {
                continue;
            };
            let toolchain = Toolchain {
                name: SharedString::from(name),
                path: SharedString::from(path),
                language_name: LanguageName::from_proto(language_name),
                as_json,
            };
            ret.entry(scope).or_default().insert(toolchain);
        }

        ret
    }
1418
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        // All writes happen inside one savepoint so a failure rolls back the
        // whole save instead of leaving partially-deleted child tables.
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                // Resolve (or create) the remote connection row first so its id
                // can be stored on the workspace row below.
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                // Bookmarks are replaced wholesale: delete, then re-insert.
                conn.exec_bound(
                    sql!(
                        DELETE FROM bookmarks WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old bookmarks")?;

                for (path, bookmarks) in workspace.bookmarks {
                    for bookmark in bookmarks {
                        conn.exec_bound(sql!(
                            INSERT INTO bookmarks (workspace_id, path, row)
                            VALUES (?1, ?2, ?3);
                        ))?((workspace.id, path.as_ref(), bookmark.0)).context("Inserting bookmark")?;
                    }
                }

                // Breakpoints follow the same delete-then-insert pattern.
                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        // A single failed insert is logged and skipped rather
                        // than aborting the whole savepoint.
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        // Global toolchains are stored under the sentinel
                        // workspace id 0 with empty path columns; the reader in
                        // `user_toolchains` relies on this encoding.
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                            toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                // 7 tuple elements bind 15 placeholders: `workspace.docks`
                // covers the nine dock columns (?5..?13) via its multi-column
                // Bind impl.
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1594
    /// Looks up (or inserts) the `remote_connections` row matching `options`
    /// and returns its id. The work runs on the database writer.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1602
    /// Flattens a connection's identity into the column set of the
    /// `remote_connections` table and resolves (or creates) the matching row.
    fn get_or_create_remote_connection_internal(
        this: &Connection,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        let identity = remote_connection_identity(&options);
        // Only the columns relevant to the connection kind get populated
        // below; the rest are stored as NULL.
        let kind;
        let user: Option<String>;
        let mut host = None;
        let mut port = None;
        let mut distro = None;
        let mut name = None;
        let mut container_id = None;
        let mut use_podman = None;
        let mut remote_env = None;

        match identity {
            RemoteConnectionIdentity::Ssh {
                host: identity_host,
                username,
                port: identity_port,
            } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(identity_host);
                port = identity_port;
                user = username;
            }
            RemoteConnectionIdentity::Wsl {
                distro_name,
                user: identity_user,
            } => {
                kind = RemoteConnectionKind::Wsl;
                distro = Some(distro_name);
                user = identity_user;
            }
            RemoteConnectionIdentity::Docker {
                container_id: identity_container_id,
                name: identity_name,
                remote_user,
            } => {
                kind = RemoteConnectionKind::Docker;
                container_id = Some(identity_container_id);
                name = Some(identity_name);
                user = Some(remote_user);
            }
            // Test connections are stored as synthetic SSH rows.
            #[cfg(any(test, feature = "test-support"))]
            RemoteConnectionIdentity::Mock { id } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(format!("mock-{}", id));
                user = Some(format!("mock-user-{}", id));
            }
        }

        // Docker-only extras that are stored on the row but are not part of
        // the identity used for matching (see the SELECT in the query helper).
        if let RemoteConnectionOptions::Docker(options) = options {
            use_podman = Some(options.use_podman);
            remote_env = serde_json::to_string(&options.remote_env).ok();
        }

        Self::get_or_create_remote_connection_query(
            this,
            kind,
            host,
            port,
            user,
            distro,
            name,
            container_id,
            use_podman,
            remote_env,
        )
    }
1673
    /// Select-or-insert on `remote_connections`.
    ///
    /// Matching uses only the identity columns (`kind`..`container_id`);
    /// `use_podman` / `remote_env` are written on insert but never compared.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        // IS (not =) so NULL columns match NULL arguments.
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            // No match: insert a fresh row and read back its generated id.
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1737
    // Allocates a fresh workspace id by inserting an otherwise-empty row;
    // RETURNING hands back the generated primary key.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1743
1744 fn recent_workspaces(
1745 &self,
1746 ) -> Result<
1747 Vec<(
1748 WorkspaceId,
1749 PathList,
1750 Option<RemoteConnectionId>,
1751 Option<String>,
1752 DateTime<Utc>,
1753 )>,
1754 > {
1755 Ok(self
1756 .recent_workspaces_query()?
1757 .into_iter()
1758 .map(
1759 |(id, paths, order, remote_connection_id, session_id, timestamp)| {
1760 (
1761 id,
1762 PathList::deserialize(&SerializedPathList { paths, order }),
1763 remote_connection_id.map(RemoteConnectionId),
1764 session_id,
1765 parse_timestamp(×tamp),
1766 )
1767 },
1768 )
1769 .collect())
1770 }
1771
    // Raw rows behind `recent_workspaces`: filters out rows that have neither
    // paths nor a remote connection, ordered newest first.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, Option<String>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, session_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1782
1783 fn session_workspaces(
1784 &self,
1785 session_id: String,
1786 ) -> Result<
1787 Vec<(
1788 WorkspaceId,
1789 PathList,
1790 Option<u64>,
1791 Option<RemoteConnectionId>,
1792 )>,
1793 > {
1794 Ok(self
1795 .session_workspaces_query(session_id)?
1796 .into_iter()
1797 .map(
1798 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1799 (
1800 WorkspaceId(workspace_id),
1801 PathList::deserialize(&SerializedPathList { paths, order }),
1802 window_id,
1803 remote_connection_id.map(RemoteConnectionId),
1804 )
1805 },
1806 )
1807 .collect())
1808 }
1809
    // Raw rows behind `session_workspaces`, ordered newest first.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }
1818
    // Loads the breakpoints recorded for one file in one workspace.
    // NOTE(review): only breakpoint_location is selected even though the
    // result type is `Breakpoint` — presumably its Column impl defaults the
    // remaining fields; confirm against the Breakpoint Column implementation.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1826
1827 query! {
1828 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1829 DELETE FROM breakpoints
1830 WHERE file_path = ?2
1831 }
1832 }
1833
1834 fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
1835 Ok(self.select(sql!(
1836 SELECT
1837 id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
1838 FROM
1839 remote_connections
1840 ))?()?
1841 .into_iter()
1842 .filter_map(
1843 |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
1844 Some((
1845 RemoteConnectionId(id),
1846 Self::remote_connection_from_row(
1847 kind,
1848 host,
1849 port,
1850 user,
1851 distro,
1852 container_id,
1853 name,
1854 use_podman,
1855 remote_env,
1856 )?,
1857 ))
1858 },
1859 )
1860 .collect())
1861 }
1862
1863 pub(crate) fn remote_connection(
1864 &self,
1865 id: RemoteConnectionId,
1866 ) -> Result<RemoteConnectionOptions> {
1867 let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
1868 self.select_row_bound(sql!(
1869 SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
1870 FROM remote_connections
1871 WHERE id = ?
1872 ))?(id.0)?
1873 .context("no such remote connection")?;
1874 Self::remote_connection_from_row(
1875 kind,
1876 host,
1877 port,
1878 user,
1879 distro,
1880 container_id,
1881 name,
1882 use_podman,
1883 remote_env,
1884 )
1885 .context("invalid remote_connection row")
1886 }
1887
1888 fn remote_connection_from_row(
1889 kind: String,
1890 host: Option<String>,
1891 port: Option<u16>,
1892 user: Option<String>,
1893 distro: Option<String>,
1894 container_id: Option<String>,
1895 name: Option<String>,
1896 use_podman: Option<bool>,
1897 remote_env: Option<String>,
1898 ) -> Option<RemoteConnectionOptions> {
1899 match RemoteConnectionKind::deserialize(&kind)? {
1900 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1901 distro_name: distro?,
1902 user: user,
1903 })),
1904 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1905 host: host?.into(),
1906 port,
1907 username: user,
1908 ..Default::default()
1909 })),
1910 RemoteConnectionKind::Docker => {
1911 let remote_env: BTreeMap<String, String> =
1912 serde_json::from_str(&remote_env?).ok()?;
1913 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1914 container_id: container_id?,
1915 name: name?,
1916 remote_user: user?,
1917 upload_binary_over_docker_exec: false,
1918 use_podman: use_podman?,
1919 remote_env,
1920 }))
1921 }
1922 }
1923 }
1924
    // Deletes one workspace row. Child rows are cleaned up through foreign
    // keys — bookmarks declares ON DELETE CASCADE (see its schema above);
    // presumably the other child tables do too — confirm against their
    // migrations.
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1931
1932 async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool {
1933 let mut any_dir = false;
1934 for path in paths {
1935 match fs.metadata(path).await.ok().flatten() {
1936 None => return false,
1937 Some(meta) => {
1938 if meta.is_dir {
1939 any_dir = true;
1940 }
1941 }
1942 }
1943 }
1944 any_dir
1945 }
1946
    // Returns the recent project workspaces suitable for showing in the recent-projects UI.
    // Scratch workspaces (no paths) are filtered out - they aren't really "projects" and
    // are restored separately by `last_session_workspace_locations`.
    //
    // Results come back newest-first (the underlying query orders by
    // timestamp DESC).
    pub async fn recent_project_workspaces(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let remote_connections = self.remote_connections()?;
        let mut result = Vec::new();
        for (id, paths, remote_connection_id, _session_id, timestamp) in self.recent_workspaces()? {
            // Remote workspaces are kept only if their connection row still
            // exists; their paths are never checked against the local disk.
            if let Some(remote_connection_id) = remote_connection_id {
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                }
                continue;
            }

            // WSL paths are skipped: probing them can block on the WSL VM
            // booting (see garbage_collect_workspaces for the same rationale).
            if paths.paths().is_empty() || contains_wsl_path(&paths) {
                continue;
            }

            // Local workspaces are only listed if all their paths still exist
            // and at least one is a directory.
            if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            }
        }
        Ok(result)
    }
1986
    // Deletes workspace rows that can no longer be restored from. Remote workspaces whose
    // connection was removed, and (on Windows) workspaces pointing at WSL paths, are cleaned
    // up immediately. Local workspaces with no valid paths on disk are kept for seven days
    // after going stale. Workspaces belonging to the current session or the last session are
    // always preserved so that an in-progress restore can rehydrate them.
    pub async fn garbage_collect_workspaces(
        &self,
        fs: &dyn Fs,
        current_session_id: &str,
        last_session_id: Option<&str>,
    ) -> Result<()> {
        let remote_connections = self.remote_connections()?;
        let now = Utc::now();
        let mut workspaces_to_delete = Vec::new();
        for (id, paths, remote_connection_id, session_id, timestamp) in self.recent_workspaces()? {
            // Never collect workspaces from the current or previous session.
            if let Some(session_id) = session_id.as_deref() {
                if session_id == current_session_id || Some(session_id) == last_session_id {
                    continue;
                }
            }

            // Remote workspace: restorable only while its connection row exists.
            if let Some(remote_connection_id) = remote_connection_id {
                if !remote_connections.contains_key(&remote_connection_id) {
                    workspaces_to_delete.push(id);
                }
                continue;
            }

            // Delete the workspace if any of the paths are WSL paths. If a
            // local workspace points to WSL, attempting to read its metadata
            // will wait for the WSL VM and file server to boot up. This can
            // block for many seconds. Supported scenarios use remote
            // workspaces.
            if contains_wsl_path(&paths) {
                workspaces_to_delete.push(id);
                continue;
            }

            // Local workspace with missing/invalid paths: grant a 7-day grace
            // period (based on the row's last-updated timestamp) before deletion.
            if !Self::all_paths_exist_with_a_directory(paths.paths(), fs).await
                && now - timestamp >= chrono::Duration::days(7)
            {
                workspaces_to_delete.push(id);
            }
        }

        // Deletion errors are ignored; join_all just drives every delete to
        // completion.
        futures::future::join_all(
            workspaces_to_delete
                .into_iter()
                .map(|id| self.delete_workspace_by_id(id)),
        )
        .await;
        Ok(())
    }
2040
    /// Returns the most recently used restorable project workspace, if any.
    /// `recent_project_workspaces` is ordered newest-first (the underlying
    /// query sorts by timestamp DESC), so the first entry is the latest.
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        Ok(self.recent_project_workspaces(fs).await?.into_iter().next())
    }
2054
    // Returns the locations of the workspaces that were still opened when the last
    // session was closed (i.e. when Zed was quit).
    // If `last_session_window_order` is provided, the returned locations are ordered
    // according to that.
    pub async fn last_session_workspace_locations(
        &self,
        last_session_id: &str,
        last_session_window_stack: Option<Vec<WindowId>>,
        fs: &dyn Fs,
    ) -> Result<Vec<SessionWorkspace>> {
        let mut workspaces = Vec::new();

        for (workspace_id, paths, window_id, remote_connection_id) in
            self.session_workspaces(last_session_id.to_owned())?
        {
            let window_id = window_id.map(WindowId::from);

            // Remote workspaces: a missing connection row is a hard error here
            // (unlike recent_project_workspaces, which skips them).
            if let Some(remote_connection_id) = remote_connection_id {
                workspaces.push(SessionWorkspace {
                    workspace_id,
                    location: SerializedWorkspaceLocation::Remote(
                        self.remote_connection(remote_connection_id)?,
                    ),
                    paths,
                    window_id,
                });
                continue;
            }

            // Local workspaces: empty (scratch) workspaces are always kept;
            // ones with paths must still resolve to at least one directory.
            if paths.is_empty() || Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
                workspaces.push(SessionWorkspace {
                    workspace_id,
                    location: SerializedWorkspaceLocation::Local,
                    paths,
                    window_id,
                });
            }
        }

        // Reorder to match the last session's window stacking; windows not in
        // the stack sort last.
        if let Some(stack) = last_session_window_stack {
            workspaces.sort_by_key(|workspace| {
                workspace
                    .window_id
                    .and_then(|id| stack.iter().position(|&order_id| order_id == id))
                    .unwrap_or(usize::MAX)
            });
        }

        Ok(workspaces)
    }
2105
2106 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
2107 Ok(self
2108 .get_pane_group(workspace_id, None)?
2109 .into_iter()
2110 .next()
2111 .unwrap_or_else(|| {
2112 SerializedPaneGroup::Pane(SerializedPane {
2113 active: true,
2114 children: vec![],
2115 pinned_count: 0,
2116 })
2117 }))
2118 }
2119
    /// Recursively loads the pane tree rooted at `group_id` (the whole center
    /// area when `None`).
    ///
    /// One UNION query returns both kinds of children of the parent group:
    /// pane-group rows carry (group_id, axis, flexes) and pane rows carry
    /// (pane_id, active, pinned_count); the unused columns are NULL, which is
    /// how the decode step below tells them apart.
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
            FROM (SELECT
                    group_id,
                    axis,
                    NULL as pane_id,
                    NULL as active,
                    NULL as pinned_count,
                    position,
                    parent_group_id,
                    workspace_id,
                    flexes
                  FROM pane_groups
                UNION
                  SELECT
                    NULL,
                    NULL,
                    center_panes.pane_id,
                    panes.active as active,
                    pinned_count,
                    position,
                    parent_group_id,
                    panes.workspace_id as workspace_id,
                    NULL
                  FROM center_panes
                  JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            // A row is a pane iff all three pane columns are non-NULL.
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // Flex factors are stored as a JSON array of f32.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                // Recurse to load this nested group's own children.
                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2194
2195 fn save_pane_group(
2196 conn: &Connection,
2197 workspace_id: WorkspaceId,
2198 pane_group: &SerializedPaneGroup,
2199 parent: Option<(GroupId, usize)>,
2200 ) -> Result<()> {
2201 if parent.is_none() {
2202 log::debug!("Saving a pane group for workspace {workspace_id:?}");
2203 }
2204 match pane_group {
2205 SerializedPaneGroup::Group {
2206 axis,
2207 children,
2208 flexes,
2209 } => {
2210 let (parent_id, position) = parent.unzip();
2211
2212 let flex_string = flexes
2213 .as_ref()
2214 .map(|flexes| serde_json::json!(flexes).to_string());
2215
2216 let group_id = conn.select_row_bound::<_, i64>(sql!(
2217 INSERT INTO pane_groups(
2218 workspace_id,
2219 parent_group_id,
2220 position,
2221 axis,
2222 flexes
2223 )
2224 VALUES (?, ?, ?, ?, ?)
2225 RETURNING group_id
2226 ))?((
2227 workspace_id,
2228 parent_id,
2229 position,
2230 *axis,
2231 flex_string,
2232 ))?
2233 .context("Couldn't retrieve group_id from inserted pane_group")?;
2234
2235 for (position, group) in children.iter().enumerate() {
2236 Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
2237 }
2238
2239 Ok(())
2240 }
2241 SerializedPaneGroup::Pane(pane) => {
2242 Self::save_pane(conn, workspace_id, pane, parent)?;
2243 Ok(())
2244 }
2245 }
2246 }
2247
2248 fn save_pane(
2249 conn: &Connection,
2250 workspace_id: WorkspaceId,
2251 pane: &SerializedPane,
2252 parent: Option<(GroupId, usize)>,
2253 ) -> Result<PaneId> {
2254 let pane_id = conn.select_row_bound::<_, i64>(sql!(
2255 INSERT INTO panes(workspace_id, active, pinned_count)
2256 VALUES (?, ?, ?)
2257 RETURNING pane_id
2258 ))?((workspace_id, pane.active, pane.pinned_count))?
2259 .context("Could not retrieve inserted pane_id")?;
2260
2261 let (parent_id, order) = parent.unzip();
2262 conn.exec_bound(sql!(
2263 INSERT INTO center_panes(pane_id, parent_group_id, position)
2264 VALUES (?, ?, ?)
2265 ))?((pane_id, parent_id, order))?;
2266
2267 Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;
2268
2269 Ok(pane_id)
2270 }
2271
    /// Loads the serialized items of one pane, ordered by their stored
    /// position within the pane.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2279
2280 fn save_items(
2281 conn: &Connection,
2282 workspace_id: WorkspaceId,
2283 pane_id: PaneId,
2284 items: &[SerializedItem],
2285 ) -> Result<()> {
2286 let mut insert = conn.exec_bound(sql!(
2287 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2288 )).context("Preparing insertion")?;
2289 for (position, item) in items.iter().enumerate() {
2290 insert((workspace_id, pane_id, position, item))?;
2291 }
2292
2293 Ok(())
2294 }
2295
    // Bumps a workspace's `timestamp` to now, so recency-based ordering
    // (e.g. "recent projects") reflects the latest use.
    query! {
        pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
            UPDATE workspaces
            SET timestamp = CURRENT_TIMESTAMP
            WHERE workspace_id = ?
        }
    }
2303
    // Test-only escape hatch: overwrite a workspace's timestamp directly so
    // tests can simulate arbitrary recency without sleeping.
    #[cfg(test)]
    query! {
        pub(crate) async fn set_timestamp_for_tests(workspace_id: WorkspaceId, timestamp: String) -> Result<()> {
            UPDATE workspaces
            SET timestamp = ?2
            WHERE workspace_id = ?1
        }
    }
2312
    // Persists the window's bounds and display for a workspace.
    // NOTE: `bounds` appears to expand to five columns (?2–?6: state, x, y,
    // width, height) via its multi-column Bind impl, with `display` as ?7 —
    // confirm against SerializedWindowBounds's StaticColumnCount.
    query! {
        pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
            UPDATE workspaces
            SET window_state = ?2,
                window_x = ?3,
                window_y = ?4,
                window_width = ?5,
                window_height = ?6,
                display = ?7
            WHERE workspace_id = ?1
        }
    }
2325
    // Records whether the workspace uses the centered-layout mode.
    query! {
        pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
            UPDATE workspaces
            SET centered_layout = ?2
            WHERE workspace_id = ?1
        }
    }
2333
    // Associates (or clears, when `None`) the session a workspace belongs to.
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }
2341
    // Binds a workspace to both a session and a window id in one update;
    // passing `None` for either clears that association.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2349
    /// Loads every toolchain stored for `workspace_id`.
    ///
    /// Each element pairs the toolchain with the worktree root path and the
    /// worktree-relative path it was recorded under. Rows whose stored JSON
    /// fails to parse are silently dropped; rows whose relative path fails to
    /// decode are dropped with the error logged (`log_err`).
    pub(crate) async fn toolchains(
        &self,
        workspace_id: WorkspaceId,
    ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
        self.write(move |this| {
            let mut select = this
                .select_bound(sql!(
                    SELECT
                        name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
                    FROM toolchains
                    WHERE workspace_id = ?
                ))
                .context("select toolchains")?;

            // Every column comes back as a raw string; decoding happens below.
            let toolchain: Vec<(String, String, String, String, String, String)> =
                select(workspace_id)?;

            Ok(toolchain
                .into_iter()
                .filter_map(
                    |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
                        Some((
                            Toolchain {
                                name: name.into(),
                                path: path.into(),
                                language_name: LanguageName::new(&language),
                                // Invalid persisted JSON drops the whole row.
                                as_json: serde_json::Value::from_str(&json).ok()?,
                            },
                            Arc::from(worktree_root_path.as_ref()),
                            RelPath::from_proto(&relative_worktree_path).log_err()?,
                        ))
                    },
                )
                .collect())
        })
        .await
    }
2387
    /// Persists (or updates) the selected toolchain for a workspace, worktree
    /// root, and worktree-relative path.
    ///
    /// Uses an upsert: on conflict with an existing row, only the toolchain's
    /// name, path, and raw JSON are refreshed — positional parameters ?5–?7
    /// reuse the values from the insert list.
    pub async fn set_toolchain(
        &self,
        workspace_id: WorkspaceId,
        worktree_root_path: Arc<Path>,
        relative_worktree_path: Arc<RelPath>,
        toolchain: Toolchain,
    ) -> Result<()> {
        log::debug!(
            "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
            toolchain.name
        );
        self.write(move |conn| {
            let mut insert = conn
                .exec_bound(sql!(
                    INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
                    ON CONFLICT DO
                    UPDATE SET
                        name = ?5,
                        path = ?6,
                        raw_json = ?7
                ))
                .context("Preparing insertion")?;

            // Paths are stored as strings; the relative path is kept in its
            // platform-independent unix form.
            insert((
                workspace_id,
                worktree_root_path.to_string_lossy().into_owned(),
                relative_worktree_path.as_unix_str(),
                toolchain.language_name.as_ref(),
                toolchain.name.as_ref(),
                toolchain.path.as_ref(),
                toolchain.as_json.to_string(),
            ))?;

            Ok(())
        }).await
    }
2424
    /// Replaces the persisted trusted-worktree state with `trusted_worktrees`.
    ///
    /// All previous rows are deleted first, then the new (path, user, host)
    /// triples are inserted in batches sized so that no single statement
    /// exceeds SQLite's bound-parameter limit (three placeholders per row,
    /// `MAX_QUERY_PLACEHOLDERS` per statement). A `None` host means a local
    /// (non-remote) worktree and is stored as NULL user/host columns.
    pub(crate) async fn save_trusted_worktrees(
        &self,
        trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
    ) -> anyhow::Result<()> {
        use anyhow::Context as _;
        use db::sqlez::statement::Statement;
        use itertools::Itertools as _;

        self.clear_trusted_worktrees()
            .await
            .context("clearing previous trust state")?;

        // Flatten the map into one (path, host) pair per trusted path.
        let trusted_worktrees = trusted_worktrees
            .into_iter()
            .flat_map(|(host, abs_paths)| {
                abs_paths
                    .into_iter()
                    .map(move |abs_path| (Some(abs_path), host.clone()))
            })
            .collect::<Vec<_>>();
        // Build "(?, ?, ?)" groups — one per row — chunked so each statement
        // stays under the placeholder limit; `count` is the number of rows
        // covered by each chunk.
        let mut first_worktree;
        let mut last_worktree = 0_usize;
        for (count, placeholders) in std::iter::once("(?, ?, ?)")
            .cycle()
            .take(trusted_worktrees.len())
            .chunks(MAX_QUERY_PLACEHOLDERS / 3)
            .into_iter()
            .map(|chunk| {
                let mut count = 0;
                let placeholders = chunk
                    .inspect(|_| {
                        count += 1;
                    })
                    .join(", ");
                (count, placeholders)
            })
            .collect::<Vec<_>>()
        {
            // Window of rows this statement covers.
            first_worktree = last_worktree;
            last_worktree = last_worktree + count;
            let query = format!(
                r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
VALUES {placeholders};"#
            );

            let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
            self.write(move |conn| {
                let mut statement = Statement::prepare(conn, query)?;
                // Bind three values per row: path, optional user, optional host.
                let mut next_index = 1;
                for (abs_path, host) in trusted_worktrees {
                    let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
                    next_index = statement.bind(
                        &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host
                            .as_ref()
                            .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host.as_ref().map(|host| host.host_identifier.as_str()),
                        next_index,
                    )?;
                }
                statement.exec()
            })
            .await
            .context("inserting new trusted state")?;
        }
        Ok(())
    }
2498
2499 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2500 let trusted_worktrees = self.trusted_worktrees()?;
2501 Ok(trusted_worktrees
2502 .into_iter()
2503 .filter_map(|(abs_path, user_name, host_name)| {
2504 let db_host = match (user_name, host_name) {
2505 (None, Some(host_name)) => Some(RemoteHostLocation {
2506 user_name: None,
2507 host_identifier: SharedString::new(host_name),
2508 }),
2509 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2510 user_name: Some(SharedString::new(user_name)),
2511 host_identifier: SharedString::new(host_name),
2512 }),
2513 _ => None,
2514 };
2515 Some((db_host, abs_path?))
2516 })
2517 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2518 acc.entry(remote_host)
2519 .or_insert_with(HashSet::default)
2520 .insert(abs_path);
2521 acc
2522 }))
2523 }
2524
    // Raw dump of the trust table; consumed by `fetch_trusted_worktrees`,
    // which groups and filters the rows.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }
2531
    // Wipes all persisted trust state; used before re-saving the full set.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2537}
2538
/// A stored workspace row: its id, where it lives (local/remote), the set of
/// root paths it was opened with, and its last-updated timestamp.
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2545
2546/// Resolves workspace entries whose paths are git linked worktree checkouts
2547/// to their main repository paths.
2548///
2549/// For each workspace entry:
2550/// - If any path is a linked worktree checkout, all worktree paths in that
2551/// entry are resolved to their main repository paths, producing a new
2552/// `PathList`.
2553/// - The resolved entry is then deduplicated against existing entries: if a
2554/// workspace with the same paths already exists, the entry with the most
2555/// recent timestamp is kept.
2556pub async fn resolve_worktree_workspaces(
2557 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2558 fs: &dyn Fs,
2559) -> Vec<WorkspaceEntry> {
2560 // First pass: resolve worktree paths to main repo paths concurrently.
2561 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2562 let paths = entry.2.paths();
2563 if paths.is_empty() {
2564 return entry;
2565 }
2566
2567 // Resolve each path concurrently
2568 let resolved_paths = futures::future::join_all(
2569 paths
2570 .iter()
2571 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2572 )
2573 .await;
2574
2575 // If no paths were resolved, this entry is not a worktree — keep as-is
2576 if resolved_paths.iter().all(|r| r.is_none()) {
2577 return entry;
2578 }
2579
2580 // Build new path list, substituting resolved paths
2581 let new_paths: Vec<PathBuf> = paths
2582 .iter()
2583 .zip(resolved_paths.iter())
2584 .map(|(original, resolved)| {
2585 resolved
2586 .as_ref()
2587 .cloned()
2588 .unwrap_or_else(|| original.clone())
2589 })
2590 .collect();
2591
2592 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2593 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2594 }))
2595 .await;
2596
2597 // Second pass: deduplicate by PathList.
2598 // When two entries resolve to the same paths, keep the one with the
2599 // more recent timestamp.
2600 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2601 let mut result: Vec<WorkspaceEntry> = Vec::new();
2602
2603 for entry in resolved {
2604 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2605 if let Some(&existing_idx) = seen.get(&key) {
2606 // Keep the entry with the more recent timestamp
2607 if entry.3 > result[existing_idx].3 {
2608 result[existing_idx] = entry;
2609 }
2610 } else {
2611 seen.insert(key, result.len());
2612 result.push(entry);
2613 }
2614 }
2615
2616 result
2617}
2618
2619pub fn delete_unloaded_items(
2620 alive_items: Vec<ItemId>,
2621 workspace_id: WorkspaceId,
2622 table: &'static str,
2623 db: &ThreadSafeConnection,
2624 cx: &mut App,
2625) -> Task<Result<()>> {
2626 let db = db.clone();
2627 cx.spawn(async move |_| {
2628 let placeholders = alive_items
2629 .iter()
2630 .map(|_| "?")
2631 .collect::<Vec<&str>>()
2632 .join(", ");
2633
2634 let query = format!(
2635 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2636 );
2637
2638 db.write(move |conn| {
2639 let mut statement = Statement::prepare(conn, query)?;
2640 let mut next_index = statement.bind(&workspace_id, 1)?;
2641 for id in alive_items {
2642 next_index = statement.bind(&id, next_index)?;
2643 }
2644 statement.exec()
2645 })
2646 .await
2647 })
2648}
2649
2650#[cfg(test)]
2651mod tests {
2652 use super::*;
2653 use crate::OpenMode;
2654 use crate::PathList;
2655 use crate::ProjectGroupKey;
2656 use crate::{
2657 multi_workspace::MultiWorkspace,
2658 persistence::{
2659 model::{
2660 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2661 SessionWorkspace,
2662 },
2663 read_multi_workspace_state,
2664 },
2665 };
2666
2667 use gpui::AppContext as _;
2668 use pretty_assertions::assert_eq;
2669 use project::Project;
2670 use remote::SshConnectionOptions;
2671 use serde_json::json;
2672 use std::{thread, time::Duration};
2673
2674 /// Creates a unique directory in a FakeFs, returning the path.
2675 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2676 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2677 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2678 fs.insert_tree(&dir, json!({})).await;
2679 dir
2680 }
2681
    /// Verifies that activating a new workspace and removing one both trigger
    /// serialization, so the persisted `active_workspace_id` tracks the live
    /// active workspace of the window.
    #[gpui::test]
    async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // Random ids keep this test isolated from others sharing the global DB.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // --- Add a second workspace ---
        let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws, _cx| ws.set_random_database_id());
            mw.activate(workspace.clone(), None, window, cx);
            workspace
        });

        // Run background tasks so serialize has a chance to flush.
        cx.run_until_parked();

        // Read back the persisted state and check that the active workspace ID was written.
        let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
        assert_eq!(
            state_after_add.active_workspace_id, active_workspace2_db_id,
            "After adding a second workspace, the serialized active_workspace_id should match \
             the newly activated workspace's database id"
        );

        // --- Remove the non-active workspace ---
        multi_workspace.update_in(cx, |mw, _window, cx| {
            let active = mw.workspace().clone();
            let ws = mw
                .workspaces()
                .find(|ws| *ws != &active)
                .expect("should have a non-active workspace");
            mw.remove([ws.clone()], |_, _, _| unreachable!(), _window, cx)
                .detach_and_log_err(cx);
        });

        cx.run_until_parked();

        let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let remaining_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert_eq!(
            state_after_remove.active_workspace_id, remaining_db_id,
            "After removing a workspace, the serialized active_workspace_id should match \
             the remaining active workspace's database id"
        );
    }
2746
    /// Round-trips five breakpoint variants (plain, log-message, disabled,
    /// conditional, hit-conditional) through workspace serialization and
    /// asserts every field survives.
    #[gpui::test]
    async fn test_breakpoints() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_breakpoints").await;
        let id = db.next_id().await.unwrap();

        let path = Path::new("/tmp/test.rs");

        let breakpoint = Breakpoint {
            position: 123,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        let log_breakpoint = Breakpoint {
            position: 456,
            message: Some("Test log message".into()),
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        let disable_breakpoint = Breakpoint {
            position: 578,
            message: None,
            state: BreakpointState::Disabled,
            condition: None,
            hit_condition: None,
        };

        let condition_breakpoint = Breakpoint {
            position: 789,
            message: None,
            state: BreakpointState::Enabled,
            condition: Some("x > 5".into()),
            hit_condition: None,
        };

        let hit_condition_breakpoint = Breakpoint {
            position: 999,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: Some(">= 3".into()),
        };

        // Workspace whose breakpoint map holds all five variants under one file path.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: {
                let mut map = collections::BTreeMap::default();
                map.insert(
                    Arc::from(path),
                    vec![
                        SourceBreakpoint {
                            row: breakpoint.position,
                            path: Arc::from(path),
                            message: breakpoint.message.clone(),
                            state: breakpoint.state,
                            condition: breakpoint.condition.clone(),
                            hit_condition: breakpoint.hit_condition.clone(),
                        },
                        SourceBreakpoint {
                            row: log_breakpoint.position,
                            path: Arc::from(path),
                            message: log_breakpoint.message.clone(),
                            state: log_breakpoint.state,
                            condition: log_breakpoint.condition.clone(),
                            hit_condition: log_breakpoint.hit_condition.clone(),
                        },
                        SourceBreakpoint {
                            row: disable_breakpoint.position,
                            path: Arc::from(path),
                            message: disable_breakpoint.message.clone(),
                            state: disable_breakpoint.state,
                            condition: disable_breakpoint.condition.clone(),
                            hit_condition: disable_breakpoint.hit_condition.clone(),
                        },
                        SourceBreakpoint {
                            row: condition_breakpoint.position,
                            path: Arc::from(path),
                            message: condition_breakpoint.message.clone(),
                            state: condition_breakpoint.state,
                            condition: condition_breakpoint.condition.clone(),
                            hit_condition: condition_breakpoint.hit_condition.clone(),
                        },
                        SourceBreakpoint {
                            row: hit_condition_breakpoint.position,
                            path: Arc::from(path),
                            message: hit_condition_breakpoint.message.clone(),
                            state: hit_condition_breakpoint.state,
                            condition: hit_condition_breakpoint.condition.clone(),
                            hit_condition: hit_condition_breakpoint.hit_condition.clone(),
                        },
                    ],
                );
                map
            },
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Reload by root paths and check the stored order/fields survived.
        let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
        let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();

        assert_eq!(loaded_breakpoints.len(), 5);

        // normal breakpoint
        assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
        assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
        assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
        assert_eq!(
            loaded_breakpoints[0].hit_condition,
            breakpoint.hit_condition
        );
        assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
        assert_eq!(loaded_breakpoints[0].path, Arc::from(path));

        // log breakpoint
        assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
        assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
        assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
        assert_eq!(
            loaded_breakpoints[1].hit_condition,
            log_breakpoint.hit_condition
        );
        assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
        assert_eq!(loaded_breakpoints[1].path, Arc::from(path));

        // disable breakpoint
        assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
        assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
        assert_eq!(
            loaded_breakpoints[2].condition,
            disable_breakpoint.condition
        );
        assert_eq!(
            loaded_breakpoints[2].hit_condition,
            disable_breakpoint.hit_condition
        );
        assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
        assert_eq!(loaded_breakpoints[2].path, Arc::from(path));

        // condition breakpoint
        assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
        assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
        assert_eq!(
            loaded_breakpoints[3].condition,
            condition_breakpoint.condition
        );
        assert_eq!(
            loaded_breakpoints[3].hit_condition,
            condition_breakpoint.hit_condition
        );
        assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
        assert_eq!(loaded_breakpoints[3].path, Arc::from(path));

        // hit condition breakpoint
        assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
        assert_eq!(
            loaded_breakpoints[4].message,
            hit_condition_breakpoint.message
        );
        assert_eq!(
            loaded_breakpoints[4].condition,
            hit_condition_breakpoint.condition
        );
        assert_eq!(
            loaded_breakpoints[4].hit_condition,
            hit_condition_breakpoint.hit_condition
        );
        assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
        assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
    }
2934
    /// Verifies that re-saving a workspace with an empty breakpoint map
    /// removes previously persisted breakpoints for that path.
    #[gpui::test]
    async fn test_remove_last_breakpoint() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
        let id = db.next_id().await.unwrap();

        let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");

        let breakpoint_to_remove = Breakpoint {
            position: 100,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        // First save: one breakpoint on the file.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: {
                let mut map = collections::BTreeMap::default();
                map.insert(
                    Arc::from(singular_path),
                    vec![SourceBreakpoint {
                        row: breakpoint_to_remove.position,
                        path: Arc::from(singular_path),
                        message: None,
                        state: BreakpointState::Enabled,
                        condition: None,
                        hit_condition: None,
                    }],
                );
                map
            },
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Sanity-check that the breakpoint round-trips before removing it.
        let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
        let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();

        assert_eq!(loaded_breakpoints.len(), 1);
        assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
        assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
        assert_eq!(
            loaded_breakpoints[0].condition,
            breakpoint_to_remove.condition
        );
        assert_eq!(
            loaded_breakpoints[0].hit_condition,
            breakpoint_to_remove.hit_condition
        );
        assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
        assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));

        // Second save: same workspace id, no breakpoints at all.
        let workspace_without_breakpoint = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: collections::BTreeMap::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_without_breakpoint.clone())
            .await;

        // The persisted breakpoint must be gone after the overwrite.
        let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
        let empty_breakpoints = loaded_after_remove
            .breakpoints
            .get(&Arc::from(singular_path));

        assert!(empty_breakpoints.is_none());
    }
3027
    /// Checks that `next_id` pre-inserts an (empty) workspace row whose id can
    /// immediately be used as a foreign key from other tables.
    #[gpui::test]
    async fn test_next_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_next_id_stability").await;

        // Side table with a cascading FK onto workspaces, to exercise the id.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;
                )],
                &mut |_, _, _| false,
            )
            .unwrap();
        })
        .await;

        let id = db.next_id().await.unwrap();
        // Assert the empty row got inserted
        assert_eq!(
            Some(id),
            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
            ))
            .unwrap()(id)
            .unwrap()
        );

        db.write(move |conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", id))
                .unwrap()
        })
        .await;

        // NOTE(review): queries with literal workspace_id 1, assuming the
        // first id allocated in a fresh test DB is 1 — confirm if next_id's
        // allocation strategy changes.
        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3076
    /// Ensures workspace ids stay stable across path changes and repeated
    /// saves, so rows in other tables keyed by workspace_id survive.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        // Side table with a cascading FK onto workspaces.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                            REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Change workspace_1's paths and save repeatedly: its id — and thus
        // the FK'd test_table rows — must survive.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3169
3170 fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
3171 SerializedPaneGroup::Group {
3172 axis: SerializedAxis(axis),
3173 flexes: None,
3174 children,
3175 }
3176 }
3177
    /// Round-trips a non-trivial pane-group tree (nested groups with multiple
    /// items) through save/load, including repeated saves with the same id.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // Layout under test (numbers are the item ids below):
        // -----------------
        // |  5,6  | 9,10 |
        // | - - - |      |
        // |  7,8  |      |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Roots are matched as a set, so reversed order still finds the workspace.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3252
    // Exercises the roots -> workspace mapping: set semantics of path lists,
    // and the two ways a mapping can be reassigned (updating an existing id's
    // paths, and saving a different id with the same paths).
    #[gpui::test]
    async fn test_workspace_assignment() {
        zlog::init_test();

        // NOTE(review): db name doesn't match the test name — confirm it is
        // still unique across tests so test DBs don't collide.
        let db = WorkspaceDb::open_test_db("test_basic_functionality").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(1),
            user_toolchains: Default::default(),
        };

        let mut workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: Some(2),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_2.clone()).await;

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);

        db.save_workspace(workspace_2.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        let mut workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp2", "/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(3),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_3.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
        db.save_workspace(workspace_3.clone()).await;
        // NOTE(review): "tmp" has no leading slash — possibly a typo for
        // "/tmp"; as written it can never match a stored root. Confirm intent.
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }
3350
    // Verifies that session_workspaces() groups workspaces by session id and
    // returns them most-recently-saved first, including a remote workspace's
    // connection id.
    #[gpui::test]
    async fn test_session_workspaces() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp1"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(10),
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(20),
            user_toolchains: Default::default(),
        };

        let workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp3"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(30),
            user_toolchains: Default::default(),
        };

        // No session id at all: must not show up in any session query below.
        let workspace_4 = SerializedWorkspace {
            id: WorkspaceId(4),
            paths: PathList::new(&["/tmp4"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let connection_id = db
            .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: "my-host".into(),
                port: Some(1234),
                ..Default::default()
            }))
            .await
            .unwrap();

        // Remote workspace: its connection id should come back in the
        // session query (checked against locations[0].3 below).
        let workspace_5 = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(
                db.remote_connection(connection_id).unwrap(),
            ),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(50),
            user_toolchains: Default::default(),
        };

        let workspace_6 = SerializedWorkspace {
            id: WorkspaceId(6),
            paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("session-id-3".to_owned()),
            window_id: Some(60),
            user_toolchains: Default::default(),
        };

        // Timestamps are stored at one-second resolution (see
        // parse_timestamp's "%Y-%m-%d %H:%M:%S" format), so sleep a full
        // second wherever a later save must sort strictly after an earlier one.
        db.save_workspace(workspace_1.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_2.clone()).await;
        db.save_workspace(workspace_3.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_4.clone()).await;
        db.save_workspace(workspace_5.clone()).await;
        db.save_workspace(workspace_6.clone()).await;

        // Most recently saved first: workspace 2 before workspace 1.
        let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(2));
        assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
        assert_eq!(locations[0].2, Some(20));
        assert_eq!(locations[1].0, WorkspaceId(1));
        assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
        assert_eq!(locations[1].2, Some(10));

        // Remote workspace 5 was saved after workspace 3, so it sorts first.
        let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(5));
        assert_eq!(locations[0].1, PathList::default());
        assert_eq!(locations[0].2, Some(50));
        assert_eq!(locations[0].3, Some(connection_id));
        assert_eq!(locations[1].0, WorkspaceId(3));
        assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
        assert_eq!(locations[1].2, Some(30));

        // Multi-root path list round-trips in its original order.
        let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
        assert_eq!(locations.len(), 1);
        assert_eq!(locations[0].0, WorkspaceId(6));
        assert_eq!(
            locations[0].1,
            PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
        );
        assert_eq!(locations[0].2, Some(60));
    }
3501
3502 fn default_workspace<P: AsRef<Path>>(
3503 paths: &[P],
3504 center_group: &SerializedPaneGroup,
3505 ) -> SerializedWorkspace {
3506 SerializedWorkspace {
3507 id: WorkspaceId(4),
3508 paths: PathList::new(paths),
3509 location: SerializedWorkspaceLocation::Local,
3510 center_group: center_group.clone(),
3511 window_bounds: Default::default(),
3512 display: Default::default(),
3513 docks: Default::default(),
3514 bookmarks: Default::default(),
3515 breakpoints: Default::default(),
3516 centered_layout: false,
3517 session_id: None,
3518 window_id: None,
3519 user_toolchains: Default::default(),
3520 }
3521 }
3522
    // Verifies that last_session_workspace_locations() returns the session's
    // workspaces ordered by the supplied window stack (top of stack first).
    #[gpui::test]
    async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();

        // Register each directory with the fake fs so the paths are treated
        // as existing on disk.
        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;

        // (workspace id, root paths, window id) — all in the same session.
        let workspaces = [
            (1, vec![dir1.path()], 9),
            (2, vec![dir2.path()], 5),
            (3, vec![dir3.path()], 8),
            (4, vec![dir4.path()], 2),
            (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
            (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
        ]
        .into_iter()
        .map(|(id, paths, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::new(paths.as_slice()),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        // Window stacking order, which should dictate the result order below.
        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9),
            WindowId::from(3),
            WindowId::from(4), // Bottom
        ]));

        let locations = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        // Expected order follows the stack: windows 2, 8, 5, 9, 3, 4 map to
        // workspaces 4, 3, 2, 1, 5, 6 respectively.
        assert_eq!(
            locations,
            [
                SessionWorkspace {
                    workspace_id: WorkspaceId(4),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path()]),
                    window_id: Some(WindowId::from(2u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(3),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir3.path()]),
                    window_id: Some(WindowId::from(8u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(2),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir2.path()]),
                    window_id: Some(WindowId::from(5u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(1),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path()]),
                    window_id: Some(WindowId::from(9u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(5),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
                    window_id: Some(WindowId::from(3u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(6),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
                    window_id: Some(WindowId::from(4u64)),
                },
            ]
        );
    }
3624
3625 fn pane_with_items(item_ids: &[ItemId]) -> SerializedPaneGroup {
3626 SerializedPaneGroup::Pane(SerializedPane::new(
3627 item_ids
3628 .iter()
3629 .map(|id| SerializedItem::new("Terminal", *id, true, false))
3630 .collect(),
3631 true,
3632 0,
3633 ))
3634 }
3635
3636 fn empty_pane_group() -> SerializedPaneGroup {
3637 SerializedPaneGroup::Pane(SerializedPane::default())
3638 }
3639
3640 fn workspace_with(
3641 id: u64,
3642 paths: &[&Path],
3643 center_group: SerializedPaneGroup,
3644 session_id: Option<&str>,
3645 ) -> SerializedWorkspace {
3646 SerializedWorkspace {
3647 id: WorkspaceId(id as i64),
3648 paths: PathList::new(paths),
3649 location: SerializedWorkspaceLocation::Local,
3650 center_group,
3651 window_bounds: Default::default(),
3652 display: Default::default(),
3653 docks: Default::default(),
3654 bookmarks: Default::default(),
3655 breakpoints: Default::default(),
3656 centered_layout: false,
3657 session_id: session_id.map(|s| s.to_owned()),
3658 window_id: Some(id),
3659 user_toolchains: Default::default(),
3660 }
3661 }
3662
    // A workspace with no root paths ("scratch-only") but open items must be
    // restorable from its last session, yet must stay out of recent projects.
    #[gpui::test]
    async fn test_scratch_only_workspace_restores_from_last_session(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_scratch_only_workspace_restores_from_last_session")
                .await;

        // No paths, one item, attached to session "s1".
        db.save_workspace(workspace_with(1, &[], pane_with_items(&[100]), Some("s1")))
            .await;

        let sessions = db
            .last_session_workspace_locations("s1", None, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(sessions.len(), 1);
        assert_eq!(sessions[0].workspace_id, WorkspaceId(1));
        assert!(sessions[0].paths.is_empty());

        let recents = db.recent_project_workspaces(fs.as_ref()).await.unwrap();
        assert!(
            recents.iter().all(|(id, ..)| *id != WorkspaceId(1)),
            "scratch-only workspace must not appear in the recent-projects UI"
        );
    }
3687
    // GC must not touch a freshly-saved empty workspace: its timestamp is
    // current, so it is still inside the retention window.
    #[gpui::test]
    async fn test_gc_preserves_scratch_inside_window(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db = WorkspaceDb::open_test_db("test_gc_preserves_scratch_inside_window").await;

        // Empty paths, no items, no session — maximally GC-eligible except age.
        db.save_workspace(workspace_with(1, &[], empty_pane_group(), None))
            .await;

        db.garbage_collect_workspaces(fs.as_ref(), "current", None)
            .await
            .unwrap();
        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_some(),
            "fresh stale workspace must not be deleted before the 7-day window"
        );
    }
3704
    // GC must delete an empty workspace once its timestamp falls outside the
    // retention window (backdated here to year 2000).
    #[gpui::test]
    async fn test_gc_deletes_stale_outside_window(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db = WorkspaceDb::open_test_db("test_gc_deletes_stale_outside_window").await;

        db.save_workspace(workspace_with(1, &[], empty_pane_group(), None))
            .await;
        // Backdate the workspace so it is unambiguously past the window.
        db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
            .await
            .unwrap();

        db.garbage_collect_workspaces(fs.as_ref(), "current", None)
            .await
            .unwrap();
        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_none(),
            "stale empty workspace older than the retention window must be deleted"
        );
    }
3724
    // A workspace whose root directory no longer exists is kept while inside
    // the retention window and deleted once backdated past it.
    #[gpui::test]
    async fn test_gc_preserves_directory_workspace_with_missing_path(
        cx: &mut gpui::TestAppContext,
    ) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_gc_preserves_directory_workspace_with_missing_path")
                .await;

        // Path deliberately never inserted into the fake fs.
        let missing_dir = PathBuf::from("/missing-project-dir");
        db.save_workspace(workspace_with(
            1,
            &[missing_dir.as_path()],
            empty_pane_group(),
            None,
        ))
        .await;

        // Phase 1: fresh timestamp — must survive GC.
        db.garbage_collect_workspaces(fs.as_ref(), "current", None)
            .await
            .unwrap();
        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_some(),
            "a stale workspace within the retention window must be kept"
        );

        // Phase 2: backdated past the window — must now be collected.
        db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
            .await
            .unwrap();
        db.garbage_collect_workspaces(fs.as_ref(), "current", None)
            .await
            .unwrap();
        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_none(),
            "a stale workspace past the retention window must be deleted"
        );
    }
3762
    // GC is given the current and last session ids; workspaces in either
    // session survive even when backdated, while other stale ones are deleted.
    #[gpui::test]
    async fn test_gc_preserves_current_and_last_sessions(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db = WorkspaceDb::open_test_db("test_gc_preserves_current_and_last_sessions").await;

        db.save_workspace(workspace_with(1, &[], empty_pane_group(), Some("current")))
            .await;
        db.save_workspace(workspace_with(2, &[], empty_pane_group(), Some("last")))
            .await;
        db.save_workspace(workspace_with(3, &[], empty_pane_group(), Some("stale")))
            .await;

        // Backdate all three so that session membership is the only thing
        // standing between them and deletion.
        for id in [1, 2, 3] {
            db.set_timestamp_for_tests(WorkspaceId(id), "2000-01-01 00:00:00".to_owned())
                .await
                .unwrap();
        }

        db.garbage_collect_workspaces(fs.as_ref(), "current", Some("last"))
            .await
            .unwrap();

        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_some(),
            "GC must not delete workspaces belonging to the current session"
        );
        assert!(
            db.workspace_for_id(WorkspaceId(2)).is_some(),
            "GC must not delete workspaces belonging to the last session"
        );
        assert!(
            db.workspace_for_id(WorkspaceId(3)).is_none(),
            "GC should still delete stale workspaces from other sessions"
        );
    }
3798
    // Having open items does not protect a path-less workspace from GC once
    // it is past the retention window.
    #[gpui::test]
    async fn test_gc_deletes_empty_workspace_with_items(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db = WorkspaceDb::open_test_db("test_gc_deletes_empty_workspace_with_items").await;

        // Empty paths but one item in the pane; backdated past the window.
        db.save_workspace(workspace_with(1, &[], pane_with_items(&[100]), None))
            .await;
        db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
            .await
            .unwrap();

        db.garbage_collect_workspaces(fs.as_ref(), "current", None)
            .await
            .unwrap();
        assert!(
            db.workspace_for_id(WorkspaceId(1)).is_none(),
            "a stale empty-path workspace must be deleted regardless of its items"
        );
    }
3818
    // Session restore checks paths against the filesystem: a workspace whose
    // roots are all gone must be filtered out of the restore list.
    #[gpui::test]
    async fn test_last_session_restores_workspace_with_missing_paths(
        cx: &mut gpui::TestAppContext,
    ) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_last_session_restores_workspace_with_missing_paths")
                .await;

        // Path never inserted into the fake fs, so it does not exist on disk.
        let missing = PathBuf::from("/gone/file.rs");
        db.save_workspace(workspace_with(
            1,
            &[missing.as_path()],
            empty_pane_group(),
            Some("s"),
        ))
        .await;

        let sessions = db
            .last_session_workspace_locations("s", None, fs.as_ref())
            .await
            .unwrap();
        assert!(
            sessions.is_empty(),
            "workspaces whose paths no longer exist on disk must not restore"
        );
    }
3846
    // Same window-stack ordering check as the local variant, but for remote
    // (SSH) workspaces, whose locations carry connection options rather than
    // on-disk paths.
    #[gpui::test]
    async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
                .await;

        // Register four distinct SSH connections; each future registers the
        // connection in the db and yields the options it used.
        let remote_connections = [
            ("host-1", "my-user-1"),
            ("host-2", "my-user-2"),
            ("host-3", "my-user-3"),
            ("host-4", "my-user-4"),
        ]
        .into_iter()
        .map(|(host, user)| async {
            let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: host.into(),
                username: Some(user.to_string()),
                ..Default::default()
            });
            db.get_or_create_remote_connection(options.clone())
                .await
                .unwrap();
            options
        })
        .collect::<Vec<_>>();

        // join_all preserves input order, so remote_connections[i] matches
        // host-(i+1).
        let remote_connections = futures::future::join_all(remote_connections).await;

        // (workspace id, connection, window id) — all in the same session.
        let workspaces = [
            (1, remote_connections[0].clone(), 9),
            (2, remote_connections[1].clone(), 5),
            (3, remote_connections[2].clone(), 8),
            (4, remote_connections[3].clone(), 2),
        ]
        .into_iter()
        .map(|(id, remote_connection, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(remote_connection),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        // Window stacking order dictates the result order below.
        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9), // Bottom
        ]));

        let have = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        // Stack windows 2, 8, 5, 9 map to workspaces 4, 3, 2, 1.
        assert_eq!(have.len(), 4);
        assert_eq!(
            have[0],
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(2u64)),
            }
        );
        assert_eq!(
            have[1],
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(8u64)),
            }
        );
        assert_eq!(
            have[2],
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(5u64)),
            }
        );
        assert_eq!(
            have[3],
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(9u64)),
            }
        );
    }
3953
3954 #[gpui::test]
3955 async fn test_get_or_create_ssh_project() {
3956 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3957
3958 let host = "example.com".to_string();
3959 let port = Some(22_u16);
3960 let user = Some("user".to_string());
3961
3962 let connection_id = db
3963 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3964 host: host.clone().into(),
3965 port,
3966 username: user.clone(),
3967 ..Default::default()
3968 }))
3969 .await
3970 .unwrap();
3971
3972 // Test that calling the function again with the same parameters returns the same project
3973 let same_connection = db
3974 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3975 host: host.clone().into(),
3976 port,
3977 username: user.clone(),
3978 ..Default::default()
3979 }))
3980 .await
3981 .unwrap();
3982
3983 assert_eq!(connection_id, same_connection);
3984
3985 // Test with different parameters
3986 let host2 = "otherexample.com".to_string();
3987 let port2 = None;
3988 let user2 = Some("otheruser".to_string());
3989
3990 let different_connection = db
3991 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3992 host: host2.clone().into(),
3993 port: port2,
3994 username: user2.clone(),
3995 ..Default::default()
3996 }))
3997 .await
3998 .unwrap();
3999
4000 assert_ne!(connection_id, different_connection);
4001 }
4002
4003 #[gpui::test]
4004 async fn test_get_or_create_ssh_project_with_null_user() {
4005 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
4006
4007 let (host, port, user) = ("example.com".to_string(), None, None);
4008
4009 let connection_id = db
4010 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
4011 host: host.clone().into(),
4012 port,
4013 username: None,
4014 ..Default::default()
4015 }))
4016 .await
4017 .unwrap();
4018
4019 let same_connection_id = db
4020 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
4021 host: host.clone().into(),
4022 port,
4023 username: user.clone(),
4024 ..Default::default()
4025 }))
4026 .await
4027 .unwrap();
4028
4029 assert_eq!(connection_id, same_connection_id);
4030 }
4031
    // remote_connections() must return every stored connection keyed by the
    // id that get_or_create_remote_connection handed out.
    #[gpui::test]
    async fn test_get_remote_connections() {
        let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;

        // (host, port, username) triples covering all optional-field shapes.
        let connections = [
            ("example.com".to_string(), None, None),
            (
                "anotherexample.com".to_string(),
                Some(123_u16),
                Some("user2".to_string()),
            ),
            ("yetanother.com".to_string(), Some(345_u16), None),
        ];

        // ids[i] corresponds to connections[i].
        let mut ids = Vec::new();
        for (host, port, user) in connections.iter() {
            ids.push(
                db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
                    SshConnectionOptions {
                        host: host.clone().into(),
                        port: *port,
                        username: user.clone(),
                        ..Default::default()
                    },
                ))
                .await
                .unwrap(),
            );
        }

        // Compare as a map so the check is insensitive to iteration order.
        let stored_connections = db.remote_connections().unwrap();
        assert_eq!(
            stored_connections,
            [
                (
                    ids[0],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "example.com".into(),
                        port: None,
                        username: None,
                        ..Default::default()
                    }),
                ),
                (
                    ids[1],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "anotherexample.com".into(),
                        port: Some(123),
                        username: Some("user2".into()),
                        ..Default::default()
                    }),
                ),
                (
                    ids[2],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "yetanother.com".into(),
                        port: Some(345),
                        username: None,
                        ..Default::default()
                    }),
                ),
            ]
            .into_iter()
            .collect::<HashMap<_, _>>(),
        );
    }
4098
    // Round-trips a nested split layout and checks the pane tree (including
    // per-pane active flags) survives serialization intact.
    #[gpui::test]
    async fn test_simple_split() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("simple_split").await;

        // Pane layout under test (numbers are the Terminal item ids):
        // -----------------
        // | 1,2   | 5,6   |
        // | - - - |       |
        // | 3,4   |       |
        // -----------------
        let center_pane = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 1, false, false),
                                SerializedItem::new("Terminal", 2, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 4, false, false),
                                SerializedItem::new("Terminal", 3, true, false),
                            ],
                            true,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 5, true, false),
                        SerializedItem::new("Terminal", 6, false, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = default_workspace(&["/tmp"], &center_pane);

        db.save_workspace(workspace.clone()).await;

        let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();

        assert_eq!(workspace.center_group, new_workspace.center_group);
    }
4153
    // Saving a workspace whose pane tree shrank must replace the old tree:
    // the re-read layout equals the new, smaller one with no leftovers.
    #[gpui::test]
    async fn test_cleanup_panes() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;

        // Initial layout: three panes across a horizontal/vertical split.
        let center_pane = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 1, false, false),
                                SerializedItem::new("Terminal", 2, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 4, false, false),
                                SerializedItem::new("Terminal", 3, true, false),
                            ],
                            true,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 5, false, false),
                        SerializedItem::new("Terminal", 6, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let id = &["/tmp"];

        let mut workspace = default_workspace(id, &center_pane);

        db.save_workspace(workspace.clone()).await;

        // Shrink to two panes (items 5/6 dropped) and save again under the
        // same roots.
        workspace.center_group = group(
            Axis::Vertical,
            vec![
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 1, false, false),
                        SerializedItem::new("Terminal", 2, true, false),
                    ],
                    false,
                    0,
                )),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 4, true, false),
                        SerializedItem::new("Terminal", 3, false, false),
                    ],
                    true,
                    0,
                )),
            ],
        );

        db.save_workspace(workspace.clone()).await;

        let new_workspace = db.workspace_for_roots(id).unwrap();

        assert_eq!(workspace.center_group, new_workspace.center_group);
    }
4229
    // Window bounds set via set_window_open_status() on a path-less workspace
    // must persist and be retrievable by workspace id (not by roots).
    #[gpui::test]
    async fn test_empty_workspace_window_bounds() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
        let id = db.next_id().await.unwrap();

        // Create a workspace with empty paths (empty workspace)
        let empty_paths: &[&str] = &[];
        let display_uuid = Uuid::new_v4();
        let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
            origin: point(px(100.0), px(200.0)),
            size: size(px(800.0), px(600.0)),
        }));

        // window_bounds/display deliberately left None here: they are set
        // afterwards via set_window_open_status, mirroring production flow.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(empty_paths),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: None,
            display: None,
            docks: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Save the workspace (this creates the record with empty paths)
        db.save_workspace(workspace.clone()).await;

        // Save window bounds separately (as the actual code does via set_window_open_status)
        db.set_window_open_status(id, window_bounds, display_uuid)
            .await
            .unwrap();

        // Empty workspaces cannot be retrieved by paths (they'd all match).
        // They must be retrieved by workspace_id.
        assert!(db.workspace_for_roots(empty_paths).is_none());

        // Retrieve using workspace_for_id instead
        let retrieved = db.workspace_for_id(id).unwrap();

        // Verify window bounds were persisted
        assert_eq!(retrieved.id, id);
        assert!(retrieved.window_bounds.is_some());
        assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
        assert!(retrieved.display.is_some());
        assert_eq!(retrieved.display.unwrap(), display_uuid);
    }
4283
4284 #[gpui::test]
4285 async fn test_last_session_workspace_locations_groups_by_window_id(
4286 cx: &mut gpui::TestAppContext,
4287 ) {
4288 let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
4289 let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
4290 let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
4291 let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
4292 let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();
4293
4294 let fs = fs::FakeFs::new(cx.executor());
4295 fs.insert_tree(dir1.path(), json!({})).await;
4296 fs.insert_tree(dir2.path(), json!({})).await;
4297 fs.insert_tree(dir3.path(), json!({})).await;
4298 fs.insert_tree(dir4.path(), json!({})).await;
4299 fs.insert_tree(dir5.path(), json!({})).await;
4300
4301 let db =
4302 WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
4303 .await;
4304
4305 // Simulate two MultiWorkspace windows each containing two workspaces,
4306 // plus one single-workspace window:
4307 // Window 10: workspace 1, workspace 2
4308 // Window 20: workspace 3, workspace 4
4309 // Window 30: workspace 5 (only one)
4310 //
4311 // On session restore, the caller should be able to group these by
4312 // window_id to reconstruct the MultiWorkspace windows.
4313 let workspaces_data: Vec<(i64, &Path, u64)> = vec![
4314 (1, dir1.path(), 10),
4315 (2, dir2.path(), 10),
4316 (3, dir3.path(), 20),
4317 (4, dir4.path(), 20),
4318 (5, dir5.path(), 30),
4319 ];
4320
4321 for (id, dir, window_id) in &workspaces_data {
4322 db.save_workspace(SerializedWorkspace {
4323 id: WorkspaceId(*id),
4324 paths: PathList::new(&[*dir]),
4325 location: SerializedWorkspaceLocation::Local,
4326 center_group: Default::default(),
4327 window_bounds: Default::default(),
4328 display: Default::default(),
4329 docks: Default::default(),
4330 centered_layout: false,
4331 session_id: Some("test-session".to_owned()),
4332 bookmarks: Default::default(),
4333 breakpoints: Default::default(),
4334 window_id: Some(*window_id),
4335 user_toolchains: Default::default(),
4336 })
4337 .await;
4338 }
4339
4340 let locations = db
4341 .last_session_workspace_locations("test-session", None, fs.as_ref())
4342 .await
4343 .unwrap();
4344
4345 // All 5 workspaces should be returned with their window_ids.
4346 assert_eq!(locations.len(), 5);
4347
4348 // Every entry should have a window_id so the caller can group them.
4349 for session_workspace in &locations {
4350 assert!(
4351 session_workspace.window_id.is_some(),
4352 "workspace {:?} missing window_id",
4353 session_workspace.workspace_id
4354 );
4355 }
4356
4357 // Group by window_id, simulating what the restoration code should do.
4358 let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
4359 for session_workspace in &locations {
4360 if let Some(window_id) = session_workspace.window_id {
4361 by_window
4362 .entry(window_id)
4363 .or_default()
4364 .push(session_workspace.workspace_id);
4365 }
4366 }
4367
4368 // Should produce 3 windows, not 5.
4369 assert_eq!(
4370 by_window.len(),
4371 3,
4372 "Expected 3 window groups, got {}: {:?}",
4373 by_window.len(),
4374 by_window
4375 );
4376
4377 // Window 10 should contain workspaces 1 and 2.
4378 let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
4379 assert_eq!(window_10.len(), 2);
4380 assert!(window_10.contains(&WorkspaceId(1)));
4381 assert!(window_10.contains(&WorkspaceId(2)));
4382
4383 // Window 20 should contain workspaces 3 and 4.
4384 let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
4385 assert_eq!(window_20.len(), 2);
4386 assert!(window_20.contains(&WorkspaceId(3)));
4387 assert!(window_20.contains(&WorkspaceId(4)));
4388
4389 // Window 30 should contain only workspace 5.
4390 let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
4391 assert_eq!(window_30.len(), 1);
4392 assert!(window_30.contains(&WorkspaceId(5)));
4393 }
4394
4395 #[gpui::test]
4396 async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
4397 use crate::persistence::model::MultiWorkspaceState;
4398
4399 // Write multi-workspace state for two windows via the scoped KVP.
4400 let window_10 = WindowId::from(10u64);
4401 let window_20 = WindowId::from(20u64);
4402
4403 let kvp = cx.update(|cx| KeyValueStore::global(cx));
4404
4405 write_multi_workspace_state(
4406 &kvp,
4407 window_10,
4408 MultiWorkspaceState {
4409 active_workspace_id: Some(WorkspaceId(2)),
4410 project_groups: vec![],
4411 sidebar_open: true,
4412 sidebar_state: None,
4413 },
4414 )
4415 .await;
4416
4417 write_multi_workspace_state(
4418 &kvp,
4419 window_20,
4420 MultiWorkspaceState {
4421 active_workspace_id: Some(WorkspaceId(3)),
4422 project_groups: vec![],
4423 sidebar_open: false,
4424 sidebar_state: None,
4425 },
4426 )
4427 .await;
4428
4429 // Build session workspaces: two in window 10, one in window 20, one with no window.
4430 let session_workspaces = vec![
4431 SessionWorkspace {
4432 workspace_id: WorkspaceId(1),
4433 location: SerializedWorkspaceLocation::Local,
4434 paths: PathList::new(&["/a"]),
4435 window_id: Some(window_10),
4436 },
4437 SessionWorkspace {
4438 workspace_id: WorkspaceId(2),
4439 location: SerializedWorkspaceLocation::Local,
4440 paths: PathList::new(&["/b"]),
4441 window_id: Some(window_10),
4442 },
4443 SessionWorkspace {
4444 workspace_id: WorkspaceId(3),
4445 location: SerializedWorkspaceLocation::Local,
4446 paths: PathList::new(&["/c"]),
4447 window_id: Some(window_20),
4448 },
4449 SessionWorkspace {
4450 workspace_id: WorkspaceId(4),
4451 location: SerializedWorkspaceLocation::Local,
4452 paths: PathList::new(&["/d"]),
4453 window_id: None,
4454 },
4455 ];
4456
4457 let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));
4458
4459 // Should produce 3 results: window 10, window 20, and the orphan.
4460 assert_eq!(results.len(), 3);
4461
4462 // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
4463 let group_10 = &results[0];
4464 assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
4465 assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
4466 assert_eq!(group_10.state.sidebar_open, true);
4467
4468 // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
4469 let group_20 = &results[1];
4470 assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
4471 assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
4472 assert_eq!(group_20.state.sidebar_open, false);
4473
4474 // Orphan: no active_workspace_id, falls back to first workspace (id 4).
4475 let group_none = &results[2];
4476 assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
4477 assert_eq!(group_none.state.active_workspace_id, None);
4478 assert_eq!(group_none.state.sidebar_open, false);
4479 }
4480
    // Regression test for the quit path: awaiting the task returned by
    // `flush_serialization` alone must be enough for the workspace to reach
    // the DB — no `run_until_parked` allowed between flush and read-back.
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state so there is something to flush.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4520
    // Verifies that a workspace created through the MultiWorkspace API is
    // asynchronously assigned a database_id, is recorded as the active
    // workspace in the window's multi-workspace state, and gets a fully
    // serialized DB row (not just the bare DEFAULT VALUES row from next_id).
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4572
4573 #[gpui::test]
4574 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4575 crate::tests::init_test(cx);
4576
4577 let fs = fs::FakeFs::new(cx.executor());
4578 let dir = unique_test_dir(&fs, "remove").await;
4579 let project1 = Project::test(fs.clone(), [], cx).await;
4580 let project2 = Project::test(fs.clone(), [], cx).await;
4581
4582 let (multi_workspace, cx) =
4583 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4584
4585 multi_workspace.update(cx, |mw, cx| {
4586 mw.open_sidebar(cx);
4587 });
4588
4589 multi_workspace.update_in(cx, |mw, _, cx| {
4590 mw.set_random_database_id(cx);
4591 });
4592
4593 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4594
4595 // Get a real DB id for workspace2 so the row actually exists.
4596 let workspace2_db_id = db.next_id().await.unwrap();
4597
4598 multi_workspace.update_in(cx, |mw, window, cx| {
4599 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4600 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4601 ws.set_database_id(workspace2_db_id)
4602 });
4603 mw.add(workspace.clone(), window, cx);
4604 });
4605
4606 // Save a full workspace row to the DB directly.
4607 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4608 db.save_workspace(SerializedWorkspace {
4609 id: workspace2_db_id,
4610 paths: PathList::new(&[&dir]),
4611 location: SerializedWorkspaceLocation::Local,
4612 center_group: Default::default(),
4613 window_bounds: Default::default(),
4614 display: Default::default(),
4615 docks: Default::default(),
4616 centered_layout: false,
4617 session_id: Some(session_id.clone()),
4618 bookmarks: Default::default(),
4619 breakpoints: Default::default(),
4620 window_id: Some(99),
4621 user_toolchains: Default::default(),
4622 })
4623 .await;
4624
4625 assert!(
4626 db.workspace_for_id(workspace2_db_id).is_some(),
4627 "Workspace2 should exist in DB before removal"
4628 );
4629
4630 // Remove workspace at index 1 (the second workspace).
4631 multi_workspace.update_in(cx, |mw, window, cx| {
4632 let ws = mw.workspaces().nth(1).unwrap().clone();
4633 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4634 .detach_and_log_err(cx);
4635 });
4636
4637 cx.run_until_parked();
4638
4639 // The row should still exist so it continues to appear in recent
4640 // projects, but the session binding should be cleared so it is not
4641 // restored as part of any future session.
4642 assert!(
4643 db.workspace_for_id(workspace2_db_id).is_some(),
4644 "Removed workspace's DB row should be preserved for recent projects"
4645 );
4646
4647 let session_workspaces = db
4648 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4649 .await
4650 .unwrap();
4651 let restored_ids: Vec<WorkspaceId> = session_workspaces
4652 .iter()
4653 .map(|sw| sw.workspace_id)
4654 .collect();
4655 assert!(
4656 !restored_ids.contains(&workspace2_db_id),
4657 "Removed workspace should not appear in session restoration"
4658 );
4659 }
4660
4661 #[gpui::test]
4662 async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
4663 crate::tests::init_test(cx);
4664
4665 let fs = fs::FakeFs::new(cx.executor());
4666 let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
4667 let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
4668 fs.insert_tree(dir1.path(), json!({})).await;
4669 fs.insert_tree(dir2.path(), json!({})).await;
4670
4671 let project1 = Project::test(fs.clone(), [], cx).await;
4672 let project2 = Project::test(fs.clone(), [], cx).await;
4673
4674 let db = cx.update(|cx| WorkspaceDb::global(cx));
4675
4676 // Get real DB ids so the rows actually exist.
4677 let ws1_id = db.next_id().await.unwrap();
4678 let ws2_id = db.next_id().await.unwrap();
4679
4680 let (multi_workspace, cx) =
4681 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4682
4683 multi_workspace.update(cx, |mw, cx| {
4684 mw.open_sidebar(cx);
4685 });
4686
4687 multi_workspace.update_in(cx, |mw, _, cx| {
4688 mw.workspace().update(cx, |ws, _cx| {
4689 ws.set_database_id(ws1_id);
4690 });
4691 });
4692
4693 multi_workspace.update_in(cx, |mw, window, cx| {
4694 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4695 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4696 ws.set_database_id(ws2_id)
4697 });
4698 mw.add(workspace.clone(), window, cx);
4699 });
4700
4701 let session_id = "test-zombie-session";
4702 let window_id_val: u64 = 42;
4703
4704 db.save_workspace(SerializedWorkspace {
4705 id: ws1_id,
4706 paths: PathList::new(&[dir1.path()]),
4707 location: SerializedWorkspaceLocation::Local,
4708 center_group: Default::default(),
4709 window_bounds: Default::default(),
4710 display: Default::default(),
4711 docks: Default::default(),
4712 centered_layout: false,
4713 session_id: Some(session_id.to_owned()),
4714 bookmarks: Default::default(),
4715 breakpoints: Default::default(),
4716 window_id: Some(window_id_val),
4717 user_toolchains: Default::default(),
4718 })
4719 .await;
4720
4721 db.save_workspace(SerializedWorkspace {
4722 id: ws2_id,
4723 paths: PathList::new(&[dir2.path()]),
4724 location: SerializedWorkspaceLocation::Local,
4725 center_group: Default::default(),
4726 window_bounds: Default::default(),
4727 display: Default::default(),
4728 docks: Default::default(),
4729 centered_layout: false,
4730 session_id: Some(session_id.to_owned()),
4731 bookmarks: Default::default(),
4732 breakpoints: Default::default(),
4733 window_id: Some(window_id_val),
4734 user_toolchains: Default::default(),
4735 })
4736 .await;
4737
4738 // Remove workspace2 (index 1).
4739 multi_workspace.update_in(cx, |mw, window, cx| {
4740 let ws = mw.workspaces().nth(1).unwrap().clone();
4741 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4742 .detach_and_log_err(cx);
4743 });
4744
4745 cx.run_until_parked();
4746
4747 // The removed workspace should NOT appear in session restoration.
4748 let locations = db
4749 .last_session_workspace_locations(session_id, None, fs.as_ref())
4750 .await
4751 .unwrap();
4752
4753 let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
4754 assert!(
4755 !restored_ids.contains(&ws2_id),
4756 "Removed workspace should not appear in session restoration list. Found: {:?}",
4757 restored_ids
4758 );
4759 assert!(
4760 restored_ids.contains(&ws1_id),
4761 "Remaining workspace should still appear in session restoration list"
4762 );
4763 }
4764
4765 #[gpui::test]
4766 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4767 crate::tests::init_test(cx);
4768
4769 let fs = fs::FakeFs::new(cx.executor());
4770 let dir = unique_test_dir(&fs, "pending-removal").await;
4771 let project1 = Project::test(fs.clone(), [], cx).await;
4772 let project2 = Project::test(fs.clone(), [], cx).await;
4773
4774 let db = cx.update(|cx| WorkspaceDb::global(cx));
4775
4776 // Get a real DB id for workspace2 so the row actually exists.
4777 let workspace2_db_id = db.next_id().await.unwrap();
4778
4779 let (multi_workspace, cx) =
4780 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4781
4782 multi_workspace.update(cx, |mw, cx| {
4783 mw.open_sidebar(cx);
4784 });
4785
4786 multi_workspace.update_in(cx, |mw, _, cx| {
4787 mw.set_random_database_id(cx);
4788 });
4789
4790 multi_workspace.update_in(cx, |mw, window, cx| {
4791 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4792 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4793 ws.set_database_id(workspace2_db_id)
4794 });
4795 mw.add(workspace.clone(), window, cx);
4796 });
4797
4798 // Save a full workspace row to the DB directly and let it settle.
4799 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4800 db.save_workspace(SerializedWorkspace {
4801 id: workspace2_db_id,
4802 paths: PathList::new(&[&dir]),
4803 location: SerializedWorkspaceLocation::Local,
4804 center_group: Default::default(),
4805 window_bounds: Default::default(),
4806 display: Default::default(),
4807 docks: Default::default(),
4808 centered_layout: false,
4809 session_id: Some(session_id.clone()),
4810 bookmarks: Default::default(),
4811 breakpoints: Default::default(),
4812 window_id: Some(88),
4813 user_toolchains: Default::default(),
4814 })
4815 .await;
4816 cx.run_until_parked();
4817
4818 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4819 multi_workspace.update_in(cx, |mw, window, cx| {
4820 let ws = mw.workspaces().nth(1).unwrap().clone();
4821 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4822 .detach_and_log_err(cx);
4823 });
4824
4825 // Simulate the quit handler pattern: collect flush tasks + pending
4826 // removal tasks and await them all.
4827 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4828 let mut tasks: Vec<Task<()>> = mw
4829 .workspaces()
4830 .map(|workspace| {
4831 workspace.update(cx, |workspace, cx| {
4832 workspace.flush_serialization(window, cx)
4833 })
4834 })
4835 .collect();
4836 let mut removal_tasks = mw.take_pending_removal_tasks();
4837 // Note: removal_tasks may be empty if the background task already
4838 // completed (take_pending_removal_tasks filters out ready tasks).
4839 tasks.append(&mut removal_tasks);
4840 tasks.push(mw.flush_serialization());
4841 tasks
4842 });
4843 futures::future::join_all(all_tasks).await;
4844
4845 // The row should still exist (for recent projects), but the session
4846 // binding should have been cleared by the pending removal task.
4847 assert!(
4848 db.workspace_for_id(workspace2_db_id).is_some(),
4849 "Workspace row should be preserved for recent projects"
4850 );
4851
4852 let session_workspaces = db
4853 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4854 .await
4855 .unwrap();
4856 let restored_ids: Vec<WorkspaceId> = session_workspaces
4857 .iter()
4858 .map(|sw| sw.workspace_id)
4859 .collect();
4860 assert!(
4861 !restored_ids.contains(&workspace2_db_id),
4862 "Pending removal task should have cleared the session binding"
4863 );
4864 }
4865
    // Verifies the window-bounds observer writes bounds under the workspace's
    // real database ID, even when that ID is assigned asynchronously after
    // the workspace was constructed via create_workspace.
    #[gpui::test]
    async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        // Awaiting the creation task ensures the async database_id
        // assignment has completed before we resize.
        let task =
            multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
        task.await;

        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "After run_until_parked, the workspace should have a database_id"
        );

        let workspace_id = new_workspace_db_id.unwrap();

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        assert!(
            db.workspace_for_id(workspace_id).is_some(),
            "The workspace row should exist in the DB"
        );

        // Trigger the bounds observer with a simulated window resize.
        cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));

        // Advance the clock past the 100ms debounce timer so the bounds
        // observer task fires
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let serialized = db
            .workspace_for_id(workspace_id)
            .expect("workspace row should still exist");
        assert!(
            serialized.window_bounds.is_some(),
            "The bounds observer should write bounds for the workspace's real DB ID, \
             even when the workspace was created via create_workspace (where the ID \
             is assigned asynchronously after construction)."
        );
    }
4917
4918 #[gpui::test]
4919 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4920 crate::tests::init_test(cx);
4921
4922 let fs = fs::FakeFs::new(cx.executor());
4923 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4924 fs.insert_tree(dir.path(), json!({})).await;
4925
4926 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4927
4928 let (multi_workspace, cx) =
4929 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4930
4931 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4932 let workspace_id = db.next_id().await.unwrap();
4933 multi_workspace.update_in(cx, |mw, _, cx| {
4934 mw.workspace().update(cx, |ws, _cx| {
4935 ws.set_database_id(workspace_id);
4936 });
4937 });
4938
4939 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4940 mw.workspace()
4941 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4942 });
4943 task.await;
4944
4945 let after = db
4946 .workspace_for_id(workspace_id)
4947 .expect("workspace row should exist after flush_serialization");
4948 assert!(
4949 !after.paths.is_empty(),
4950 "flush_serialization should have written paths via save_workspace"
4951 );
4952 assert!(
4953 after.window_bounds.is_some(),
4954 "flush_serialization should ensure window bounds are persisted to the DB \
4955 before the process exits."
4956 );
4957 }
4958
4959 #[gpui::test]
4960 async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
4961 let fs = fs::FakeFs::new(cx.executor());
4962
4963 // Main repo with a linked worktree entry
4964 fs.insert_tree(
4965 "/repo",
4966 json!({
4967 ".git": {
4968 "worktrees": {
4969 "feature": {
4970 "commondir": "../../",
4971 "HEAD": "ref: refs/heads/feature"
4972 }
4973 }
4974 },
4975 "src": { "main.rs": "" }
4976 }),
4977 )
4978 .await;
4979
4980 // Linked worktree checkout pointing back to /repo
4981 fs.insert_tree(
4982 "/worktree",
4983 json!({
4984 ".git": "gitdir: /repo/.git/worktrees/feature",
4985 "src": { "main.rs": "" }
4986 }),
4987 )
4988 .await;
4989
4990 // A plain non-git project
4991 fs.insert_tree(
4992 "/plain-project",
4993 json!({
4994 "src": { "main.rs": "" }
4995 }),
4996 )
4997 .await;
4998
4999 // Another normal git repo (used in mixed-path entry)
5000 fs.insert_tree(
5001 "/other-repo",
5002 json!({
5003 ".git": {},
5004 "src": { "lib.rs": "" }
5005 }),
5006 )
5007 .await;
5008
5009 let t0 = Utc::now() - chrono::Duration::hours(4);
5010 let t1 = Utc::now() - chrono::Duration::hours(3);
5011 let t2 = Utc::now() - chrono::Duration::hours(2);
5012 let t3 = Utc::now() - chrono::Duration::hours(1);
5013
5014 let workspaces = vec![
5015 // 1: Main checkout of /repo (opened earlier)
5016 (
5017 WorkspaceId(1),
5018 SerializedWorkspaceLocation::Local,
5019 PathList::new(&["/repo"]),
5020 t0,
5021 ),
5022 // 2: Linked worktree of /repo (opened more recently)
5023 // Should dedup with #1; more recent timestamp wins.
5024 (
5025 WorkspaceId(2),
5026 SerializedWorkspaceLocation::Local,
5027 PathList::new(&["/worktree"]),
5028 t1,
5029 ),
5030 // 3: Mixed-path workspace: one root is a linked worktree,
5031 // the other is a normal repo. The worktree path should be
5032 // resolved; the normal path kept as-is.
5033 (
5034 WorkspaceId(3),
5035 SerializedWorkspaceLocation::Local,
5036 PathList::new(&["/other-repo", "/worktree"]),
5037 t2,
5038 ),
5039 // 4: Non-git project — passed through unchanged.
5040 (
5041 WorkspaceId(4),
5042 SerializedWorkspaceLocation::Local,
5043 PathList::new(&["/plain-project"]),
5044 t3,
5045 ),
5046 ];
5047
5048 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
5049
5050 // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
5051 assert_eq!(result.len(), 3);
5052
5053 // First entry: /repo — deduplicated from #1 and #2.
5054 // Keeps the position of #1 (first seen), but with #2's later timestamp.
5055 assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
5056 assert_eq!(result[0].3, t1);
5057
5058 // Second entry: mixed-path workspace with worktree resolved.
5059 // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
5060 assert_eq!(
5061 result[1].2.paths(),
5062 &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
5063 );
5064 assert_eq!(result[1].0, WorkspaceId(3));
5065
5066 // Third entry: non-git project, unchanged.
5067 assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
5068 assert_eq!(result[2].0, WorkspaceId(4));
5069 }
5070
5071 #[gpui::test]
5072 async fn test_resolve_worktree_workspaces_bare_repo(cx: &mut gpui::TestAppContext) {
5073 let fs = fs::FakeFs::new(cx.executor());
5074
5075 // Bare repo at /foo/.bare (commondir doesn't end with .git)
5076 fs.insert_tree(
5077 "/foo/.bare",
5078 json!({
5079 "worktrees": {
5080 "my-feature": {
5081 "commondir": "../../",
5082 "HEAD": "ref: refs/heads/my-feature"
5083 }
5084 }
5085 }),
5086 )
5087 .await;
5088
5089 // Linked worktree whose commondir resolves to a bare repo (/foo/.bare)
5090 fs.insert_tree(
5091 "/foo/my-feature",
5092 json!({
5093 ".git": "gitdir: /foo/.bare/worktrees/my-feature",
5094 "src": { "main.rs": "" }
5095 }),
5096 )
5097 .await;
5098
5099 let t0 = Utc::now();
5100
5101 let workspaces = vec![(
5102 WorkspaceId(1),
5103 SerializedWorkspaceLocation::Local,
5104 PathList::new(&["/foo/my-feature"]),
5105 t0,
5106 )];
5107
5108 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
5109
5110 // The worktree path must be preserved unchanged — /foo/.bare is a bare repo
5111 // and cannot serve as a working-tree root, so resolution must return None.
5112 assert_eq!(result.len(), 1);
5113 assert_eq!(result[0].2.paths(), &[PathBuf::from("/foo/my-feature")]);
5114 }
5115
    // End-to-end round-trip test: serialize a MultiWorkspace window holding a
    // main git repo, its linked worktree checkout, and an unrelated project;
    // read the session state back; restore the window; and verify the project
    // groups and the active workspace survive the trip.
    #[gpui::test]
    async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
        cx: &mut gpui::TestAppContext,
    ) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());

        // Main git repo at /repo
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "HEAD": "ref: refs/heads/main",
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree-feature",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // --- Phase 1: Set up the original multi-workspace window ---

        let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
        let project_1_linked_worktree =
            Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;

        // Wait for git discovery to finish.
        cx.run_until_parked();

        // Create a second, unrelated project so we have two distinct project groups.
        fs.insert_tree(
            "/other-project",
            json!({
                ".git": { "HEAD": "ref: refs/heads/main" },
                "readme.md": ""
            }),
        )
        .await;
        let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
        cx.run_until_parked();

        // Create the MultiWorkspace with project_2, then add the main repo
        // and its linked worktree. The linked worktree is added last and
        // becomes the active workspace.
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1.clone(), window, cx);
        });

        let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
        });

        // Flush everything; the park/await/park sequence lets the flush
        // tasks and any follow-on serialization work settle.
        let tasks =
            multi_workspace.update_in(cx, |mw, window, cx| mw.flush_all_serialization(window, cx));
        cx.run_until_parked();
        for task in tasks {
            task.await;
        }
        cx.run_until_parked();

        let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
        assert!(
            active_db_id.is_some(),
            "Active workspace should have a database ID"
        );

        // --- Phase 2: Read back and verify the serialized state ---

        let session_id = multi_workspace
            .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
            .unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let session_workspaces = db
            .last_session_workspace_locations(&session_id, None, fs.as_ref())
            .await
            .expect("should load session workspaces");
        assert!(
            !session_workspaces.is_empty(),
            "Should have at least one session workspace"
        );

        let multi_workspaces =
            cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
        assert_eq!(
            multi_workspaces.len(),
            1,
            "All workspaces share one window, so there should be exactly one multi-workspace"
        );

        let serialized = &multi_workspaces[0];
        assert_eq!(
            serialized.active_workspace.workspace_id,
            active_db_id.unwrap(),
        );
        // Two groups: /repo (with its linked worktree) and /other-project.
        assert_eq!(serialized.state.project_groups.len(), 2,);

        // Verify the serialized project group keys round-trip back to the
        // originals.
        let restored_keys: Vec<ProjectGroupKey> = serialized
            .state
            .project_groups
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        // NOTE(review): the worktree-feature workspace is grouped under
        // /repo here — its group key appears to use the resolved main-repo
        // path rather than the checkout path.
        let expected_keys = vec![
            ProjectGroupKey::new(None, PathList::new(&["/repo"])),
            ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
        ];
        assert_eq!(
            restored_keys, expected_keys,
            "Deserialized project group keys should match the originals"
        );

        // --- Phase 3: Restore the window and verify the result ---

        let app_state =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());

        let serialized_mw = multi_workspaces.into_iter().next().unwrap();
        let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
            .update(|_, cx| {
                cx.spawn(async move |mut cx| {
                    crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
                })
            })
            .await
            .expect("restore_multiworkspace should succeed");

        cx.run_until_parked();

        // The restored window should have the same project group keys.
        let restored_keys: Vec<ProjectGroupKey> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, _cx| mw.project_group_keys())
            .unwrap();
        assert_eq!(
            restored_keys, expected_keys,
            "Restored window should have the same project group keys as the original"
        );

        // The active workspace in the restored window should have the linked
        // worktree paths.
        let active_paths: Vec<PathBuf> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, cx| {
                mw.workspace()
                    .read(cx)
                    .root_paths(cx)
                    .into_iter()
                    .map(|p: Arc<Path>| p.to_path_buf())
                    .collect()
            })
            .unwrap();
        assert_eq!(
            active_paths,
            vec![PathBuf::from("/worktree-feature")],
            "The restored active workspace should be the linked worktree project"
        );
    }
5297
5298 #[gpui::test]
5299 async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
5300 crate::tests::init_test(cx);
5301
5302 let fs = fs::FakeFs::new(cx.executor());
5303 let dir_a = unique_test_dir(&fs, "group-a").await;
5304 let dir_b = unique_test_dir(&fs, "group-b").await;
5305 let dir_c = unique_test_dir(&fs, "group-c").await;
5306
5307 let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
5308 let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
5309 let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;
5310
5311 // Create a multi-workspace with project A, then add B and C.
5312 // project_groups stores newest first: [C, B, A].
5313 // Sidebar displays in the same order: C (top), B (middle), A (bottom).
5314 let (multi_workspace, cx) = cx
5315 .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));
5316
5317 multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));
5318
5319 let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
5320 mw.test_add_workspace(project_b.clone(), window, cx)
5321 });
5322 let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
5323 mw.test_add_workspace(project_c.clone(), window, cx)
5324 });
5325 cx.run_until_parked();
5326
5327 let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
5328 let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
5329 let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));
5330
5331 // Activate workspace B so removing its group exercises the fallback.
5332 multi_workspace.update_in(cx, |mw, window, cx| {
5333 mw.activate(workspace_b.clone(), None, window, cx);
5334 });
5335 cx.run_until_parked();
5336
5337 // --- Remove group B (the middle one). ---
5338 // In the sidebar [C, B, A], "below" B is A.
5339 multi_workspace.update_in(cx, |mw, window, cx| {
5340 mw.remove_project_group(&key_b, window, cx)
5341 .detach_and_log_err(cx);
5342 });
5343 cx.run_until_parked();
5344
5345 let active_paths =
5346 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5347 assert_eq!(
5348 active_paths
5349 .iter()
5350 .map(|p| p.to_path_buf())
5351 .collect::<Vec<_>>(),
5352 vec![dir_a.clone()],
5353 "After removing the middle group, should fall back to the group below (A)"
5354 );
5355
5356 // After removing B, keys = [A, C], sidebar = [C, A].
5357 // Activate workspace A (the bottom) so removing it tests the
5358 // "fall back upward" path.
5359 let workspace_a =
5360 multi_workspace.read_with(cx, |mw, _cx| mw.workspaces().next().unwrap().clone());
5361 multi_workspace.update_in(cx, |mw, window, cx| {
5362 mw.activate(workspace_a.clone(), None, window, cx);
5363 });
5364 cx.run_until_parked();
5365
5366 // --- Remove group A (the bottom one in sidebar). ---
5367 // Nothing below A, so should fall back upward to C.
5368 multi_workspace.update_in(cx, |mw, window, cx| {
5369 mw.remove_project_group(&key_a, window, cx)
5370 .detach_and_log_err(cx);
5371 });
5372 cx.run_until_parked();
5373
5374 let active_paths =
5375 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5376 assert_eq!(
5377 active_paths
5378 .iter()
5379 .map(|p| p.to_path_buf())
5380 .collect::<Vec<_>>(),
5381 vec![dir_c.clone()],
5382 "After removing the bottom group, should fall back to the group above (C)"
5383 );
5384
5385 // --- Remove group C (the only one remaining). ---
5386 // Should create an empty workspace.
5387 multi_workspace.update_in(cx, |mw, window, cx| {
5388 mw.remove_project_group(&key_c, window, cx)
5389 .detach_and_log_err(cx);
5390 });
5391 cx.run_until_parked();
5392
5393 let active_paths =
5394 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5395 assert!(
5396 active_paths.is_empty(),
5397 "After removing the only remaining group, should have an empty workspace"
5398 );
5399 }
5400
5401 /// Regression test for a crash where `find_or_create_local_workspace`
5402 /// returned a workspace that was about to be removed, hitting an assert
5403 /// in `MultiWorkspace::remove`.
5404 ///
5405 /// The scenario: two workspaces share the same root paths (e.g. due to
5406 /// a provisional key mismatch). When the first is removed and the
5407 /// fallback searches for the same paths, `workspace_for_paths` must
5408 /// skip the doomed workspace so the assert in `remove` is satisfied.
5409 #[gpui::test]
5410 async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
5411 crate::tests::init_test(cx);
5412
5413 let fs = fs::FakeFs::new(cx.executor());
5414 let dir = unique_test_dir(&fs, "shared").await;
5415
5416 // Two projects that open the same directory — this creates two
5417 // workspaces whose root_paths are identical.
5418 let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
5419 let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;
5420
5421 let (multi_workspace, cx) = cx
5422 .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));
5423
5424 multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));
5425
5426 let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
5427 mw.test_add_workspace(project_b.clone(), window, cx)
5428 });
5429 cx.run_until_parked();
5430
5431 // workspace_a is first in the workspaces vec.
5432 let workspace_a =
5433 multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
5434 assert_ne!(workspace_a, workspace_b);
5435
5436 // Activate workspace_a so removing it triggers the fallback path.
5437 multi_workspace.update_in(cx, |mw, window, cx| {
5438 mw.activate(workspace_a.clone(), None, window, cx);
5439 });
5440 cx.run_until_parked();
5441
5442 // Remove workspace_a. The fallback searches for the same paths.
5443 // Without the `excluding` parameter, `workspace_for_paths` would
5444 // return workspace_a (first match) and the assert in `remove`
5445 // would fire. With the fix, workspace_a is skipped and
5446 // workspace_b is found instead.
5447 let path_list = PathList::new(std::slice::from_ref(&dir));
5448 let excluded = vec![workspace_a.clone()];
5449 multi_workspace.update_in(cx, |mw, window, cx| {
5450 mw.remove(
5451 vec![workspace_a.clone()],
5452 move |this, window, cx| {
5453 this.find_or_create_local_workspace(
5454 path_list,
5455 None,
5456 &excluded,
5457 None,
5458 OpenMode::Activate,
5459 window,
5460 cx,
5461 )
5462 },
5463 window,
5464 cx,
5465 )
5466 .detach_and_log_err(cx);
5467 });
5468 cx.run_until_parked();
5469
5470 // workspace_b should now be active — workspace_a was removed.
5471 multi_workspace.read_with(cx, |mw, _cx| {
5472 assert_eq!(
5473 mw.workspace(),
5474 &workspace_b,
5475 "fallback should have found workspace_b, not the excluded workspace_a"
5476 );
5477 });
5478 }
5479}