1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 bookmark_store::SerializedBookmark,
25 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
26 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
27};
28
29use language::{LanguageName, Toolchain, ToolchainScope};
30use remote::{
31 DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
32 SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
33};
34use serde::{Deserialize, Serialize};
35use sqlez::{
36 bindable::{Bind, Column, StaticColumnCount},
37 statement::Statement,
38 thread_safe_connection::ThreadSafeConnection,
39};
40
41use ui::{App, SharedString, px};
42use util::{ResultExt, maybe, rel_path::RelPath};
43use uuid::Uuid;
44
45use crate::{
46 WorkspaceId,
47 path_list::{PathList, SerializedPathList},
48 persistence::model::RemoteConnectionKind,
49};
50
51use model::{
52 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
53 SerializedPaneGroup, SerializedWorkspace,
54};
55
56use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
57
58// https://www.sqlite.org/limits.html
59// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
60// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
61const MAX_QUERY_PLACEHOLDERS: usize = 32000;
62
63fn parse_timestamp(text: &str) -> DateTime<Utc> {
64 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
65 .map(|naive| naive.and_utc())
66 .unwrap_or_else(|_| Utc::now())
67}
68
69fn contains_wsl_path(paths: &PathList) -> bool {
70 cfg!(windows)
71 && paths
72 .paths()
73 .iter()
74 .any(|path| util::paths::WslPath::from_path(path).is_some())
75}
76
77#[derive(Copy, Clone, Debug, PartialEq)]
78pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
79impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
80impl sqlez::bindable::Bind for SerializedAxis {
81 fn bind(
82 &self,
83 statement: &sqlez::statement::Statement,
84 start_index: i32,
85 ) -> anyhow::Result<i32> {
86 match self.0 {
87 gpui::Axis::Horizontal => "Horizontal",
88 gpui::Axis::Vertical => "Vertical",
89 }
90 .bind(statement, start_index)
91 }
92}
93
94impl sqlez::bindable::Column for SerializedAxis {
95 fn column(
96 statement: &mut sqlez::statement::Statement,
97 start_index: i32,
98 ) -> anyhow::Result<(Self, i32)> {
99 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
100 Ok((
101 match axis_text.as_str() {
102 "Horizontal" => Self(Axis::Horizontal),
103 "Vertical" => Self(Axis::Vertical),
104 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
105 },
106 next_index,
107 ))
108 })
109 }
110}
111
112#[derive(Copy, Clone, Debug, PartialEq, Default)]
113pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);
114
115impl StaticColumnCount for SerializedWindowBounds {
116 fn column_count() -> usize {
117 5
118 }
119}
120
121impl Bind for SerializedWindowBounds {
122 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
123 match self.0 {
124 WindowBounds::Windowed(bounds) => {
125 let next_index = statement.bind(&"Windowed", start_index)?;
126 statement.bind(
127 &(
128 SerializedPixels(bounds.origin.x),
129 SerializedPixels(bounds.origin.y),
130 SerializedPixels(bounds.size.width),
131 SerializedPixels(bounds.size.height),
132 ),
133 next_index,
134 )
135 }
136 WindowBounds::Maximized(bounds) => {
137 let next_index = statement.bind(&"Maximized", start_index)?;
138 statement.bind(
139 &(
140 SerializedPixels(bounds.origin.x),
141 SerializedPixels(bounds.origin.y),
142 SerializedPixels(bounds.size.width),
143 SerializedPixels(bounds.size.height),
144 ),
145 next_index,
146 )
147 }
148 WindowBounds::Fullscreen(bounds) => {
149 let next_index = statement.bind(&"FullScreen", start_index)?;
150 statement.bind(
151 &(
152 SerializedPixels(bounds.origin.x),
153 SerializedPixels(bounds.origin.y),
154 SerializedPixels(bounds.size.width),
155 SerializedPixels(bounds.size.height),
156 ),
157 next_index,
158 )
159 }
160 }
161 }
162}
163
164impl Column for SerializedWindowBounds {
165 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
166 let (window_state, next_index) = String::column(statement, start_index)?;
167 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
168 Column::column(statement, next_index)?;
169 let bounds = Bounds {
170 origin: point(px(x as f32), px(y as f32)),
171 size: size(px(width as f32), px(height as f32)),
172 };
173
174 let status = match window_state.as_str() {
175 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
176 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
177 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
178 _ => bail!("Window State did not have a valid string"),
179 };
180
181 Ok((status, next_index + 4))
182 }
183}
184
185const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
186
187pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
188 let json_str = kvp
189 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
190 .log_err()
191 .flatten()?;
192
193 let (display_uuid, persisted) =
194 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
195 Some((display_uuid, persisted.into()))
196}
197
198pub async fn write_default_window_bounds(
199 kvp: &KeyValueStore,
200 bounds: WindowBounds,
201 display_uuid: Uuid,
202) -> anyhow::Result<()> {
203 let persisted = WindowBoundsJson::from(bounds);
204 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
205 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
206 .await?;
207 Ok(())
208}
209
210#[derive(Serialize, Deserialize)]
211pub enum WindowBoundsJson {
212 Windowed {
213 x: i32,
214 y: i32,
215 width: i32,
216 height: i32,
217 },
218 Maximized {
219 x: i32,
220 y: i32,
221 width: i32,
222 height: i32,
223 },
224 Fullscreen {
225 x: i32,
226 y: i32,
227 width: i32,
228 height: i32,
229 },
230}
231
232impl From<WindowBounds> for WindowBoundsJson {
233 fn from(b: WindowBounds) -> Self {
234 match b {
235 WindowBounds::Windowed(bounds) => {
236 let origin = bounds.origin;
237 let size = bounds.size;
238 WindowBoundsJson::Windowed {
239 x: f32::from(origin.x).round() as i32,
240 y: f32::from(origin.y).round() as i32,
241 width: f32::from(size.width).round() as i32,
242 height: f32::from(size.height).round() as i32,
243 }
244 }
245 WindowBounds::Maximized(bounds) => {
246 let origin = bounds.origin;
247 let size = bounds.size;
248 WindowBoundsJson::Maximized {
249 x: f32::from(origin.x).round() as i32,
250 y: f32::from(origin.y).round() as i32,
251 width: f32::from(size.width).round() as i32,
252 height: f32::from(size.height).round() as i32,
253 }
254 }
255 WindowBounds::Fullscreen(bounds) => {
256 let origin = bounds.origin;
257 let size = bounds.size;
258 WindowBoundsJson::Fullscreen {
259 x: f32::from(origin.x).round() as i32,
260 y: f32::from(origin.y).round() as i32,
261 width: f32::from(size.width).round() as i32,
262 height: f32::from(size.height).round() as i32,
263 }
264 }
265 }
266 }
267}
268
269impl From<WindowBoundsJson> for WindowBounds {
270 fn from(n: WindowBoundsJson) -> Self {
271 match n {
272 WindowBoundsJson::Windowed {
273 x,
274 y,
275 width,
276 height,
277 } => WindowBounds::Windowed(Bounds {
278 origin: point(px(x as f32), px(y as f32)),
279 size: size(px(width as f32), px(height as f32)),
280 }),
281 WindowBoundsJson::Maximized {
282 x,
283 y,
284 width,
285 height,
286 } => WindowBounds::Maximized(Bounds {
287 origin: point(px(x as f32), px(y as f32)),
288 size: size(px(width as f32), px(height as f32)),
289 }),
290 WindowBoundsJson::Fullscreen {
291 x,
292 y,
293 width,
294 height,
295 } => WindowBounds::Fullscreen(Bounds {
296 origin: point(px(x as f32), px(y as f32)),
297 size: size(px(width as f32), px(height as f32)),
298 }),
299 }
300 }
301}
302
303fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
304 let kvp = KeyValueStore::global(cx);
305 kvp.scoped("multi_workspace_state")
306 .read(&window_id.as_u64().to_string())
307 .log_err()
308 .flatten()
309 .and_then(|json| serde_json::from_str(&json).ok())
310 .unwrap_or_default()
311}
312
313pub async fn write_multi_workspace_state(
314 kvp: &KeyValueStore,
315 window_id: WindowId,
316 state: model::MultiWorkspaceState,
317) {
318 if let Ok(json_str) = serde_json::to_string(&state) {
319 kvp.scoped("multi_workspace_state")
320 .write(window_id.as_u64().to_string(), json_str)
321 .await
322 .log_err();
323 }
324}
325
326pub fn read_serialized_multi_workspaces(
327 session_workspaces: Vec<model::SessionWorkspace>,
328 cx: &App,
329) -> Vec<model::SerializedMultiWorkspace> {
330 let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
331 let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();
332
333 for session_workspace in session_workspaces {
334 match session_workspace.window_id {
335 Some(window_id) => {
336 let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
337 window_groups.push(Vec::new());
338 window_groups.len() - 1
339 });
340 window_groups[group_index].push(session_workspace);
341 }
342 None => {
343 window_groups.push(vec![session_workspace]);
344 }
345 }
346 }
347
348 window_groups
349 .into_iter()
350 .filter_map(|group| {
351 let window_id = group.first().and_then(|sw| sw.window_id);
352 let state = window_id
353 .map(|wid| read_multi_workspace_state(wid, cx))
354 .unwrap_or_default();
355 let active_workspace = state
356 .active_workspace_id
357 .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
358 .or(Some(0))
359 .and_then(|index| group.into_iter().nth(index))?;
360 Some(model::SerializedMultiWorkspace {
361 active_workspace,
362 state,
363 })
364 })
365 .collect()
366}
367
368const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
369
370pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
371 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
372
373 serde_json::from_str::<DockStructure>(&json_str).ok()
374}
375
376pub async fn write_default_dock_state(
377 kvp: &KeyValueStore,
378 docks: DockStructure,
379) -> anyhow::Result<()> {
380 let json_str = serde_json::to_string(&docks)?;
381 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
382 .await?;
383 Ok(())
384}
385
386#[derive(Debug)]
387pub struct Bookmark {
388 pub row: u32,
389}
390
391impl sqlez::bindable::StaticColumnCount for Bookmark {
392 fn column_count() -> usize {
393 // row
394 1
395 }
396}
397
398impl sqlez::bindable::Bind for Bookmark {
399 fn bind(
400 &self,
401 statement: &sqlez::statement::Statement,
402 start_index: i32,
403 ) -> anyhow::Result<i32> {
404 statement.bind(&self.row, start_index)
405 }
406}
407
408impl Column for Bookmark {
409 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
410 let row = statement
411 .column_int(start_index)
412 .with_context(|| format!("Failed to read bookmark at index {start_index}"))?
413 as u32;
414
415 Ok((Bookmark { row }, start_index + 1))
416 }
417}
418
419#[derive(Debug)]
420pub struct Breakpoint {
421 pub position: u32,
422 pub message: Option<Arc<str>>,
423 pub condition: Option<Arc<str>>,
424 pub hit_condition: Option<Arc<str>>,
425 pub state: BreakpointState,
426}
427
428/// Wrapper for DB type of a breakpoint
429struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);
430
431impl From<BreakpointState> for BreakpointStateWrapper<'static> {
432 fn from(kind: BreakpointState) -> Self {
433 BreakpointStateWrapper(Cow::Owned(kind))
434 }
435}
436
437impl StaticColumnCount for BreakpointStateWrapper<'_> {
438 fn column_count() -> usize {
439 1
440 }
441}
442
443impl Bind for BreakpointStateWrapper<'_> {
444 fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
445 statement.bind(&self.0.to_int(), start_index)
446 }
447}
448
449impl Column for BreakpointStateWrapper<'_> {
450 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
451 let state = statement.column_int(start_index)?;
452
453 match state {
454 0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
455 1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
456 _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
457 }
458 }
459}
460
461impl sqlez::bindable::StaticColumnCount for Breakpoint {
462 fn column_count() -> usize {
463 // Position, log message, condition message, and hit condition message
464 4 + BreakpointStateWrapper::column_count()
465 }
466}
467
468impl sqlez::bindable::Bind for Breakpoint {
469 fn bind(
470 &self,
471 statement: &sqlez::statement::Statement,
472 start_index: i32,
473 ) -> anyhow::Result<i32> {
474 let next_index = statement.bind(&self.position, start_index)?;
475 let next_index = statement.bind(&self.message, next_index)?;
476 let next_index = statement.bind(&self.condition, next_index)?;
477 let next_index = statement.bind(&self.hit_condition, next_index)?;
478 statement.bind(
479 &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
480 next_index,
481 )
482 }
483}
484
485impl Column for Breakpoint {
486 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
487 let position = statement
488 .column_int(start_index)
489 .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
490 as u32;
491 let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
492 let (condition, next_index) = Option::<String>::column(statement, next_index)?;
493 let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
494 let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;
495
496 Ok((
497 Breakpoint {
498 position,
499 message: message.map(Arc::from),
500 condition: condition.map(Arc::from),
501 hit_condition: hit_condition.map(Arc::from),
502 state: state.0.into_owned(),
503 },
504 next_index,
505 ))
506 }
507}
508
509#[derive(Clone, Debug, PartialEq)]
510struct SerializedPixels(gpui::Pixels);
511impl sqlez::bindable::StaticColumnCount for SerializedPixels {}
512
513impl sqlez::bindable::Bind for SerializedPixels {
514 fn bind(
515 &self,
516 statement: &sqlez::statement::Statement,
517 start_index: i32,
518 ) -> anyhow::Result<i32> {
519 let this: i32 = u32::from(self.0) as _;
520 this.bind(statement, start_index)
521 }
522}
523
524pub struct WorkspaceDb(ThreadSafeConnection);
525
526impl Domain for WorkspaceDb {
527 const NAME: &str = stringify!(WorkspaceDb);
528
529 const MIGRATIONS: &[&str] = &[
530 sql!(
531 CREATE TABLE workspaces(
532 workspace_id INTEGER PRIMARY KEY,
533 workspace_location BLOB UNIQUE,
534 dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
535 dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
536 dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
537 left_sidebar_open INTEGER, // Boolean
538 timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
539 FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
540 ) STRICT;
541
542 CREATE TABLE pane_groups(
543 group_id INTEGER PRIMARY KEY,
544 workspace_id INTEGER NOT NULL,
545 parent_group_id INTEGER, // NULL indicates that this is a root node
546 position INTEGER, // NULL indicates that this is a root node
547 axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
548 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
549 ON DELETE CASCADE
550 ON UPDATE CASCADE,
551 FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
552 ) STRICT;
553
554 CREATE TABLE panes(
555 pane_id INTEGER PRIMARY KEY,
556 workspace_id INTEGER NOT NULL,
557 active INTEGER NOT NULL, // Boolean
558 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
559 ON DELETE CASCADE
560 ON UPDATE CASCADE
561 ) STRICT;
562
563 CREATE TABLE center_panes(
564 pane_id INTEGER PRIMARY KEY,
565 parent_group_id INTEGER, // NULL means that this is a root pane
566 position INTEGER, // NULL means that this is a root pane
567 FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
568 ON DELETE CASCADE,
569 FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
570 ) STRICT;
571
572 CREATE TABLE items(
573 item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
574 workspace_id INTEGER NOT NULL,
575 pane_id INTEGER NOT NULL,
576 kind TEXT NOT NULL,
577 position INTEGER NOT NULL,
578 active INTEGER NOT NULL,
579 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
580 ON DELETE CASCADE
581 ON UPDATE CASCADE,
582 FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
583 ON DELETE CASCADE,
584 PRIMARY KEY(item_id, workspace_id)
585 ) STRICT;
586 ),
587 sql!(
588 ALTER TABLE workspaces ADD COLUMN window_state TEXT;
589 ALTER TABLE workspaces ADD COLUMN window_x REAL;
590 ALTER TABLE workspaces ADD COLUMN window_y REAL;
591 ALTER TABLE workspaces ADD COLUMN window_width REAL;
592 ALTER TABLE workspaces ADD COLUMN window_height REAL;
593 ALTER TABLE workspaces ADD COLUMN display BLOB;
594 ),
595 // Drop foreign key constraint from workspaces.dock_pane to panes table.
596 sql!(
597 CREATE TABLE workspaces_2(
598 workspace_id INTEGER PRIMARY KEY,
599 workspace_location BLOB UNIQUE,
600 dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
601 dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
602 dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
603 left_sidebar_open INTEGER, // Boolean
604 timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
605 window_state TEXT,
606 window_x REAL,
607 window_y REAL,
608 window_width REAL,
609 window_height REAL,
610 display BLOB
611 ) STRICT;
612 INSERT INTO workspaces_2 SELECT * FROM workspaces;
613 DROP TABLE workspaces;
614 ALTER TABLE workspaces_2 RENAME TO workspaces;
615 ),
616 // Add panels related information
617 sql!(
618 ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
619 ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
620 ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
621 ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
622 ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
623 ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
624 ),
625 // Add panel zoom persistence
626 sql!(
627 ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
628 ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
629 ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
630 ),
631 // Add pane group flex data
632 sql!(
633 ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
634 ),
635 // Add fullscreen field to workspace
636 // Deprecated, `WindowBounds` holds the fullscreen state now.
637 // Preserving so users can downgrade Zed.
638 sql!(
639 ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
640 ),
641 // Add preview field to items
642 sql!(
643 ALTER TABLE items ADD COLUMN preview INTEGER; //bool
644 ),
645 // Add centered_layout field to workspace
646 sql!(
647 ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
648 ),
649 sql!(
650 CREATE TABLE remote_projects (
651 remote_project_id INTEGER NOT NULL UNIQUE,
652 path TEXT,
653 dev_server_name TEXT
654 );
655 ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
656 ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
657 ),
658 sql!(
659 DROP TABLE remote_projects;
660 CREATE TABLE dev_server_projects (
661 id INTEGER NOT NULL UNIQUE,
662 path TEXT,
663 dev_server_name TEXT
664 );
665 ALTER TABLE workspaces DROP COLUMN remote_project_id;
666 ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
667 ),
668 sql!(
669 ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
670 ),
671 sql!(
672 ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
673 ),
674 sql!(
675 ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
676 ),
677 sql!(
678 ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
679 ),
680 sql!(
681 CREATE TABLE ssh_projects (
682 id INTEGER PRIMARY KEY,
683 host TEXT NOT NULL,
684 port INTEGER,
685 path TEXT NOT NULL,
686 user TEXT
687 );
688 ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
689 ),
690 sql!(
691 ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
692 ),
693 sql!(
694 CREATE TABLE toolchains (
695 workspace_id INTEGER,
696 worktree_id INTEGER,
697 language_name TEXT NOT NULL,
698 name TEXT NOT NULL,
699 path TEXT NOT NULL,
700 PRIMARY KEY (workspace_id, worktree_id, language_name)
701 );
702 ),
703 sql!(
704 ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
705 ),
706 sql!(
707 CREATE TABLE breakpoints (
708 workspace_id INTEGER NOT NULL,
709 path TEXT NOT NULL,
710 breakpoint_location INTEGER NOT NULL,
711 kind INTEGER NOT NULL,
712 log_message TEXT,
713 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
714 ON DELETE CASCADE
715 ON UPDATE CASCADE
716 );
717 ),
718 sql!(
719 ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
720 CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
721 ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
722 ),
723 sql!(
724 ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
725 ),
726 sql!(
727 ALTER TABLE breakpoints DROP COLUMN kind
728 ),
729 sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
730 sql!(
731 ALTER TABLE breakpoints ADD COLUMN condition TEXT;
732 ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
733 ),
734 sql!(CREATE TABLE toolchains2 (
735 workspace_id INTEGER,
736 worktree_id INTEGER,
737 language_name TEXT NOT NULL,
738 name TEXT NOT NULL,
739 path TEXT NOT NULL,
740 raw_json TEXT NOT NULL,
741 relative_worktree_path TEXT NOT NULL,
742 PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
743 INSERT INTO toolchains2
744 SELECT * FROM toolchains;
745 DROP TABLE toolchains;
746 ALTER TABLE toolchains2 RENAME TO toolchains;
747 ),
748 sql!(
749 CREATE TABLE ssh_connections (
750 id INTEGER PRIMARY KEY,
751 host TEXT NOT NULL,
752 port INTEGER,
753 user TEXT
754 );
755
756 INSERT INTO ssh_connections (host, port, user)
757 SELECT DISTINCT host, port, user
758 FROM ssh_projects;
759
760 CREATE TABLE workspaces_2(
761 workspace_id INTEGER PRIMARY KEY,
762 paths TEXT,
763 paths_order TEXT,
764 ssh_connection_id INTEGER REFERENCES ssh_connections(id),
765 timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
766 window_state TEXT,
767 window_x REAL,
768 window_y REAL,
769 window_width REAL,
770 window_height REAL,
771 display BLOB,
772 left_dock_visible INTEGER,
773 left_dock_active_panel TEXT,
774 right_dock_visible INTEGER,
775 right_dock_active_panel TEXT,
776 bottom_dock_visible INTEGER,
777 bottom_dock_active_panel TEXT,
778 left_dock_zoom INTEGER,
779 right_dock_zoom INTEGER,
780 bottom_dock_zoom INTEGER,
781 fullscreen INTEGER,
782 centered_layout INTEGER,
783 session_id TEXT,
784 window_id INTEGER
785 ) STRICT;
786
787 INSERT
788 INTO workspaces_2
789 SELECT
790 workspaces.workspace_id,
791 CASE
792 WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
793 ELSE
794 CASE
795 WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
796 NULL
797 ELSE
798 replace(workspaces.local_paths_array, ',', CHAR(10))
799 END
800 END as paths,
801
802 CASE
803 WHEN ssh_projects.id IS NOT NULL THEN ""
804 ELSE workspaces.local_paths_order_array
805 END as paths_order,
806
807 CASE
808 WHEN ssh_projects.id IS NOT NULL THEN (
809 SELECT ssh_connections.id
810 FROM ssh_connections
811 WHERE
812 ssh_connections.host IS ssh_projects.host AND
813 ssh_connections.port IS ssh_projects.port AND
814 ssh_connections.user IS ssh_projects.user
815 )
816 ELSE NULL
817 END as ssh_connection_id,
818
819 workspaces.timestamp,
820 workspaces.window_state,
821 workspaces.window_x,
822 workspaces.window_y,
823 workspaces.window_width,
824 workspaces.window_height,
825 workspaces.display,
826 workspaces.left_dock_visible,
827 workspaces.left_dock_active_panel,
828 workspaces.right_dock_visible,
829 workspaces.right_dock_active_panel,
830 workspaces.bottom_dock_visible,
831 workspaces.bottom_dock_active_panel,
832 workspaces.left_dock_zoom,
833 workspaces.right_dock_zoom,
834 workspaces.bottom_dock_zoom,
835 workspaces.fullscreen,
836 workspaces.centered_layout,
837 workspaces.session_id,
838 workspaces.window_id
839 FROM
840 workspaces LEFT JOIN
841 ssh_projects ON
842 workspaces.ssh_project_id = ssh_projects.id;
843
844 DELETE FROM workspaces_2
845 WHERE workspace_id NOT IN (
846 SELECT MAX(workspace_id)
847 FROM workspaces_2
848 GROUP BY ssh_connection_id, paths
849 );
850
851 DROP TABLE ssh_projects;
852 DROP TABLE workspaces;
853 ALTER TABLE workspaces_2 RENAME TO workspaces;
854
855 CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
856 ),
857 // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
858 sql!(
859 UPDATE workspaces
860 SET paths = CASE
861 WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
862 replace(
863 substr(paths, 3, length(paths) - 4),
864 '"' || ',' || '"',
865 CHAR(10)
866 )
867 ELSE
868 replace(paths, ',', CHAR(10))
869 END
870 WHERE paths IS NOT NULL
871 ),
872 sql!(
873 CREATE TABLE remote_connections(
874 id INTEGER PRIMARY KEY,
875 kind TEXT NOT NULL,
876 host TEXT,
877 port INTEGER,
878 user TEXT,
879 distro TEXT
880 );
881
882 CREATE TABLE workspaces_2(
883 workspace_id INTEGER PRIMARY KEY,
884 paths TEXT,
885 paths_order TEXT,
886 remote_connection_id INTEGER REFERENCES remote_connections(id),
887 timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
888 window_state TEXT,
889 window_x REAL,
890 window_y REAL,
891 window_width REAL,
892 window_height REAL,
893 display BLOB,
894 left_dock_visible INTEGER,
895 left_dock_active_panel TEXT,
896 right_dock_visible INTEGER,
897 right_dock_active_panel TEXT,
898 bottom_dock_visible INTEGER,
899 bottom_dock_active_panel TEXT,
900 left_dock_zoom INTEGER,
901 right_dock_zoom INTEGER,
902 bottom_dock_zoom INTEGER,
903 fullscreen INTEGER,
904 centered_layout INTEGER,
905 session_id TEXT,
906 window_id INTEGER
907 ) STRICT;
908
909 INSERT INTO remote_connections
910 SELECT
911 id,
912 "ssh" as kind,
913 host,
914 port,
915 user,
916 NULL as distro
917 FROM ssh_connections;
918
919 INSERT
920 INTO workspaces_2
921 SELECT
922 workspace_id,
923 paths,
924 paths_order,
925 ssh_connection_id as remote_connection_id,
926 timestamp,
927 window_state,
928 window_x,
929 window_y,
930 window_width,
931 window_height,
932 display,
933 left_dock_visible,
934 left_dock_active_panel,
935 right_dock_visible,
936 right_dock_active_panel,
937 bottom_dock_visible,
938 bottom_dock_active_panel,
939 left_dock_zoom,
940 right_dock_zoom,
941 bottom_dock_zoom,
942 fullscreen,
943 centered_layout,
944 session_id,
945 window_id
946 FROM
947 workspaces;
948
949 DROP TABLE workspaces;
950 ALTER TABLE workspaces_2 RENAME TO workspaces;
951
952 CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
953 ),
954 sql!(CREATE TABLE user_toolchains (
955 remote_connection_id INTEGER,
956 workspace_id INTEGER NOT NULL,
957 worktree_id INTEGER NOT NULL,
958 relative_worktree_path TEXT NOT NULL,
959 language_name TEXT NOT NULL,
960 name TEXT NOT NULL,
961 path TEXT NOT NULL,
962 raw_json TEXT NOT NULL,
963
964 PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
965 ) STRICT;),
966 sql!(
967 DROP TABLE ssh_connections;
968 ),
969 sql!(
970 ALTER TABLE remote_connections ADD COLUMN name TEXT;
971 ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
972 ),
973 sql!(
974 CREATE TABLE IF NOT EXISTS trusted_worktrees (
975 trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
976 absolute_path TEXT,
977 user_name TEXT,
978 host_name TEXT
979 ) STRICT;
980 ),
981 sql!(CREATE TABLE toolchains2 (
982 workspace_id INTEGER,
983 worktree_root_path TEXT NOT NULL,
984 language_name TEXT NOT NULL,
985 name TEXT NOT NULL,
986 path TEXT NOT NULL,
987 raw_json TEXT NOT NULL,
988 relative_worktree_path TEXT NOT NULL,
989 PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
990 INSERT OR REPLACE INTO toolchains2
991 // The `instr(paths, '\n') = 0` part allows us to find all
992 // workspaces that have a single worktree, as `\n` is used as a
993 // separator when serializing the workspace paths, so if no `\n` is
994 // found, we know we have a single worktree.
995 SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
996 DROP TABLE toolchains;
997 ALTER TABLE toolchains2 RENAME TO toolchains;
998 ),
999 sql!(CREATE TABLE user_toolchains2 (
1000 remote_connection_id INTEGER,
1001 workspace_id INTEGER NOT NULL,
1002 worktree_root_path TEXT NOT NULL,
1003 relative_worktree_path TEXT NOT NULL,
1004 language_name TEXT NOT NULL,
1005 name TEXT NOT NULL,
1006 path TEXT NOT NULL,
1007 raw_json TEXT NOT NULL,
1008
1009 PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
1010 INSERT OR REPLACE INTO user_toolchains2
1011 // The `instr(paths, '\n') = 0` part allows us to find all
1012 // workspaces that have a single worktree, as `\n` is used as a
1013 // separator when serializing the workspace paths, so if no `\n` is
1014 // found, we know we have a single worktree.
1015 SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
1016 DROP TABLE user_toolchains;
1017 ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
1018 ),
1019 sql!(
1020 ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
1021 ),
1022 sql!(
1023 ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
1024 ),
1025 sql!(
1026 CREATE TABLE bookmarks (
1027 workspace_id INTEGER NOT NULL,
1028 path TEXT NOT NULL,
1029 row INTEGER NOT NULL,
1030 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
1031 ON DELETE CASCADE
1032 ON UPDATE CASCADE
1033 );
1034 ),
1035 ];
1036
1037 // Allow recovering from bad migration that was initially shipped to nightly
1038 // when introducing the ssh_connections table.
1039 fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
1040 old.starts_with("CREATE TABLE ssh_connections")
1041 && new.starts_with("CREATE TABLE ssh_connections")
1042 }
1043}
1044
1045db::static_connection!(WorkspaceDb, []);
1046
1047impl WorkspaceDb {
1048 /// Returns a serialized workspace for the given worktree_roots. If the passed array
1049 /// is empty, the most recent workspace is returned instead. If no workspace for the
1050 /// passed roots is stored, returns none.
1051 pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
1052 &self,
1053 worktree_roots: &[P],
1054 ) -> Option<SerializedWorkspace> {
1055 self.workspace_for_roots_internal(worktree_roots, None)
1056 }
1057
1058 pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
1059 &self,
1060 worktree_roots: &[P],
1061 remote_project_id: RemoteConnectionId,
1062 ) -> Option<SerializedWorkspace> {
1063 self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
1064 }
1065
1066 pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
1067 &self,
1068 worktree_roots: &[P],
1069 remote_connection_id: Option<RemoteConnectionId>,
1070 ) -> Option<SerializedWorkspace> {
1071 // paths are sorted before db interactions to ensure that the order of the paths
1072 // doesn't affect the workspace selection for existing workspaces
1073 let root_paths = PathList::new(worktree_roots);
1074
1075 // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
1076 // They should only be restored via workspace_for_id during session restoration.
1077 if root_paths.is_empty() && remote_connection_id.is_none() {
1078 return None;
1079 }
1080
1081 // Note that we re-assign the workspace_id here in case it's empty
1082 // and we've grabbed the most recent workspace
1083 let (
1084 workspace_id,
1085 paths,
1086 paths_order,
1087 window_bounds,
1088 display,
1089 centered_layout,
1090 docks,
1091 window_id,
1092 ): (
1093 WorkspaceId,
1094 String,
1095 String,
1096 Option<SerializedWindowBounds>,
1097 Option<Uuid>,
1098 Option<bool>,
1099 DockStructure,
1100 Option<u64>,
1101 ) = self
1102 .select_row_bound(sql! {
1103 SELECT
1104 workspace_id,
1105 paths,
1106 paths_order,
1107 window_state,
1108 window_x,
1109 window_y,
1110 window_width,
1111 window_height,
1112 display,
1113 centered_layout,
1114 left_dock_visible,
1115 left_dock_active_panel,
1116 left_dock_zoom,
1117 right_dock_visible,
1118 right_dock_active_panel,
1119 right_dock_zoom,
1120 bottom_dock_visible,
1121 bottom_dock_active_panel,
1122 bottom_dock_zoom,
1123 window_id
1124 FROM workspaces
1125 WHERE
1126 paths IS ? AND
1127 remote_connection_id IS ?
1128 LIMIT 1
1129 })
1130 .and_then(|mut prepared_statement| {
1131 (prepared_statement)((
1132 root_paths.serialize().paths,
1133 remote_connection_id.map(|id| id.0 as i32),
1134 ))
1135 })
1136 .context("No workspaces found")
1137 .warn_on_err()
1138 .flatten()?;
1139
1140 let paths = PathList::deserialize(&SerializedPathList {
1141 paths,
1142 order: paths_order,
1143 });
1144
1145 let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
1146 self.remote_connection(remote_connection_id)
1147 .context("Get remote connection")
1148 .log_err()
1149 } else {
1150 None
1151 };
1152
1153 Some(SerializedWorkspace {
1154 id: workspace_id,
1155 location: match remote_connection_options {
1156 Some(options) => SerializedWorkspaceLocation::Remote(options),
1157 None => SerializedWorkspaceLocation::Local,
1158 },
1159 paths,
1160 center_group: self
1161 .get_center_pane_group(workspace_id)
1162 .context("Getting center group")
1163 .log_err()?,
1164 window_bounds,
1165 centered_layout: centered_layout.unwrap_or(false),
1166 display,
1167 docks,
1168 session_id: None,
1169 bookmarks: self.bookmarks(workspace_id),
1170 breakpoints: self.breakpoints(workspace_id),
1171 window_id,
1172 user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
1173 })
1174 }
1175
1176 /// Returns the workspace with the given ID, loading all associated data.
1177 pub(crate) fn workspace_for_id(
1178 &self,
1179 workspace_id: WorkspaceId,
1180 ) -> Option<SerializedWorkspace> {
1181 let (
1182 paths,
1183 paths_order,
1184 window_bounds,
1185 display,
1186 centered_layout,
1187 docks,
1188 window_id,
1189 remote_connection_id,
1190 ): (
1191 String,
1192 String,
1193 Option<SerializedWindowBounds>,
1194 Option<Uuid>,
1195 Option<bool>,
1196 DockStructure,
1197 Option<u64>,
1198 Option<i32>,
1199 ) = self
1200 .select_row_bound(sql! {
1201 SELECT
1202 paths,
1203 paths_order,
1204 window_state,
1205 window_x,
1206 window_y,
1207 window_width,
1208 window_height,
1209 display,
1210 centered_layout,
1211 left_dock_visible,
1212 left_dock_active_panel,
1213 left_dock_zoom,
1214 right_dock_visible,
1215 right_dock_active_panel,
1216 right_dock_zoom,
1217 bottom_dock_visible,
1218 bottom_dock_active_panel,
1219 bottom_dock_zoom,
1220 window_id,
1221 remote_connection_id
1222 FROM workspaces
1223 WHERE workspace_id = ?
1224 })
1225 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
1226 .context("No workspace found for id")
1227 .warn_on_err()
1228 .flatten()?;
1229
1230 let paths = PathList::deserialize(&SerializedPathList {
1231 paths,
1232 order: paths_order,
1233 });
1234
1235 let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
1236 let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
1237 self.remote_connection(remote_connection_id)
1238 .context("Get remote connection")
1239 .log_err()
1240 } else {
1241 None
1242 };
1243
1244 Some(SerializedWorkspace {
1245 id: workspace_id,
1246 location: match remote_connection_options {
1247 Some(options) => SerializedWorkspaceLocation::Remote(options),
1248 None => SerializedWorkspaceLocation::Local,
1249 },
1250 paths,
1251 center_group: self
1252 .get_center_pane_group(workspace_id)
1253 .context("Getting center group")
1254 .log_err()?,
1255 window_bounds,
1256 centered_layout: centered_layout.unwrap_or(false),
1257 display,
1258 docks,
1259 session_id: None,
1260 bookmarks: self.bookmarks(workspace_id),
1261 breakpoints: self.breakpoints(workspace_id),
1262 window_id,
1263 user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
1264 })
1265 }
1266
1267 fn bookmarks(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SerializedBookmark>> {
1268 let bookmarks: Result<Vec<(PathBuf, Bookmark)>> = self
1269 .select_bound(sql! {
1270 SELECT path, row
1271 FROM bookmarks
1272 WHERE workspace_id = ?
1273 ORDER BY path, row
1274 })
1275 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1276
1277 match bookmarks {
1278 Ok(bookmarks) => {
1279 if bookmarks.is_empty() {
1280 log::debug!("Bookmarks are empty after querying database for them");
1281 }
1282
1283 let mut map: BTreeMap<_, Vec<_>> = BTreeMap::default();
1284
1285 for (path, bookmark) in bookmarks {
1286 let path: Arc<Path> = path.into();
1287 map.entry(path.clone())
1288 .or_default()
1289 .push(SerializedBookmark(bookmark.row))
1290 }
1291
1292 map
1293 }
1294 Err(e) => {
1295 log::error!("Failed to load bookmarks: {}", e);
1296 BTreeMap::default()
1297 }
1298 }
1299 }
1300
1301 fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
1302 let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
1303 .select_bound(sql! {
1304 SELECT path, breakpoint_location, log_message, condition, hit_condition, state
1305 FROM breakpoints
1306 WHERE workspace_id = ?
1307 })
1308 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1309
1310 match breakpoints {
1311 Ok(bp) => {
1312 if bp.is_empty() {
1313 log::debug!("Breakpoints are empty after querying database for them");
1314 }
1315
1316 let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();
1317
1318 for (path, breakpoint) in bp {
1319 let path: Arc<Path> = path.into();
1320 map.entry(path.clone()).or_default().push(SourceBreakpoint {
1321 row: breakpoint.position,
1322 path,
1323 message: breakpoint.message,
1324 condition: breakpoint.condition,
1325 hit_condition: breakpoint.hit_condition,
1326 state: breakpoint.state,
1327 });
1328 }
1329
1330 for (path, bps) in map.iter() {
1331 log::info!(
1332 "Got {} breakpoints from database at path: {}",
1333 bps.len(),
1334 path.to_string_lossy()
1335 );
1336 }
1337
1338 map
1339 }
1340 Err(msg) => {
1341 log::error!("Breakpoints query failed with msg: {msg}");
1342 Default::default()
1343 }
1344 }
1345 }
1346
1347 fn user_toolchains(
1348 &self,
1349 workspace_id: WorkspaceId,
1350 remote_connection_id: Option<RemoteConnectionId>,
1351 ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
1352 type RowKind = (WorkspaceId, String, String, String, String, String, String);
1353
1354 let toolchains: Vec<RowKind> = self
1355 .select_bound(sql! {
1356 SELECT workspace_id, worktree_root_path, relative_worktree_path,
1357 language_name, name, path, raw_json
1358 FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
1359 workspace_id IN (0, ?2)
1360 )
1361 })
1362 .and_then(|mut statement| {
1363 (statement)((remote_connection_id.map(|id| id.0), workspace_id))
1364 })
1365 .unwrap_or_default();
1366 let mut ret = BTreeMap::<_, IndexSet<_>>::default();
1367
1368 for (
1369 _workspace_id,
1370 worktree_root_path,
1371 relative_worktree_path,
1372 language_name,
1373 name,
1374 path,
1375 raw_json,
1376 ) in toolchains
1377 {
1378 // INTEGER's that are primary keys (like workspace ids, remote connection ids and such) start at 1, so we're safe to
1379 let scope = if _workspace_id == WorkspaceId(0) {
1380 debug_assert_eq!(worktree_root_path, String::default());
1381 debug_assert_eq!(relative_worktree_path, String::default());
1382 ToolchainScope::Global
1383 } else {
1384 debug_assert_eq!(workspace_id, _workspace_id);
1385 debug_assert_eq!(
1386 worktree_root_path == String::default(),
1387 relative_worktree_path == String::default()
1388 );
1389
1390 let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
1391 continue;
1392 };
1393 if worktree_root_path != String::default()
1394 && relative_worktree_path != String::default()
1395 {
1396 ToolchainScope::Subproject(
1397 Arc::from(worktree_root_path.as_ref()),
1398 relative_path.into(),
1399 )
1400 } else {
1401 ToolchainScope::Project
1402 }
1403 };
1404 let Ok(as_json) = serde_json::from_str(&raw_json) else {
1405 continue;
1406 };
1407 let toolchain = Toolchain {
1408 name: SharedString::from(name),
1409 path: SharedString::from(path),
1410 language_name: LanguageName::from_proto(language_name),
1411 as_json,
1412 };
1413 ret.entry(scope).or_default().insert(toolchain);
1414 }
1415
1416 ret
1417 }
1418
1419 /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
1420 /// that used this workspace previously
1421 pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
1422 let paths = workspace.paths.serialize();
1423 log::debug!("Saving workspace at location: {:?}", workspace.location);
1424 self.write(move |conn| {
1425 conn.with_savepoint("update_worktrees", || {
1426 let remote_connection_id = match workspace.location.clone() {
1427 SerializedWorkspaceLocation::Local => None,
1428 SerializedWorkspaceLocation::Remote(connection_options) => {
1429 Some(Self::get_or_create_remote_connection_internal(
1430 conn,
1431 connection_options
1432 )?.0)
1433 }
1434 };
1435
1436 // Clear out panes and pane_groups
1437 conn.exec_bound(sql!(
1438 DELETE FROM pane_groups WHERE workspace_id = ?1;
1439 DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
1440 .context("Clearing old panes")?;
1441
1442 conn.exec_bound(
1443 sql!(
1444 DELETE FROM bookmarks WHERE workspace_id = ?1;
1445 )
1446 )?(workspace.id).context("Clearing old bookmarks")?;
1447
1448 for (path, bookmarks) in workspace.bookmarks {
1449 for bookmark in bookmarks {
1450 conn.exec_bound(sql!(
1451 INSERT INTO bookmarks (workspace_id, path, row)
1452 VALUES (?1, ?2, ?3);
1453 ))?((workspace.id, path.as_ref(), bookmark.0)).context("Inserting bookmark")?;
1454 }
1455 }
1456
1457 conn.exec_bound(
1458 sql!(
1459 DELETE FROM breakpoints WHERE workspace_id = ?1;
1460 )
1461 )?(workspace.id).context("Clearing old breakpoints")?;
1462
1463 for (path, breakpoints) in workspace.breakpoints {
1464 for bp in breakpoints {
1465 let state = BreakpointStateWrapper::from(bp.state);
1466 match conn.exec_bound(sql!(
1467 INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
1468 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?
1469
1470 ((
1471 workspace.id,
1472 path.as_ref(),
1473 bp.row,
1474 bp.message,
1475 bp.condition,
1476 bp.hit_condition,
1477 state,
1478 )) {
1479 Ok(_) => {
1480 log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
1481 }
1482 Err(err) => {
1483 log::error!("{err}");
1484 continue;
1485 }
1486 }
1487 }
1488 }
1489
1490 conn.exec_bound(
1491 sql!(
1492 DELETE FROM user_toolchains WHERE workspace_id = ?1;
1493 )
1494 )?(workspace.id).context("Clearing old user toolchains")?;
1495
1496 for (scope, toolchains) in workspace.user_toolchains {
1497 for toolchain in toolchains {
1498 let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
1499 let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
1500 ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
1501 ToolchainScope::Project => (Some(workspace.id), None, None),
1502 ToolchainScope::Global => (None, None, None),
1503 };
1504 let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
1505 toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
1506 if let Err(err) = conn.exec_bound(query)?(args) {
1507 log::error!("{err}");
1508 continue;
1509 }
1510 }
1511 }
1512
1513 // Clear out old workspaces with the same paths.
1514 // Skip this for empty workspaces - they are identified by workspace_id, not paths.
1515 // Multiple empty workspaces with different content should coexist.
1516 if !paths.paths.is_empty() {
1517 conn.exec_bound(sql!(
1518 DELETE
1519 FROM workspaces
1520 WHERE
1521 workspace_id != ?1 AND
1522 paths IS ?2 AND
1523 remote_connection_id IS ?3
1524 ))?((
1525 workspace.id,
1526 paths.paths.clone(),
1527 remote_connection_id,
1528 ))
1529 .context("clearing out old locations")?;
1530 }
1531
1532 // If there are no paths in the workspace, make sure to save
1533 // paths as NULL in the database, not as an empty string.
1534 // Otherwise, the workspace may fail to restore, potentially
1535 // losing any unsaved buffers.
1536 let maybe_paths = if !paths.paths.is_empty() {
1537 Some(paths.paths.clone())
1538 } else {
1539 None
1540 };
1541 let maybe_order = if !paths.order.is_empty() {
1542 Some(paths.order.clone())
1543 } else {
1544 None
1545 };
1546
1547 // Upsert
1548 let query = sql!(
1549 INSERT INTO workspaces(
1550 workspace_id,
1551 paths,
1552 paths_order,
1553 remote_connection_id,
1554 left_dock_visible,
1555 left_dock_active_panel,
1556 left_dock_zoom,
1557 right_dock_visible,
1558 right_dock_active_panel,
1559 right_dock_zoom,
1560 bottom_dock_visible,
1561 bottom_dock_active_panel,
1562 bottom_dock_zoom,
1563 session_id,
1564 window_id,
1565 timestamp
1566 )
1567 VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
1568 ON CONFLICT DO
1569 UPDATE SET
1570 paths = ?2,
1571 paths_order = ?3,
1572 remote_connection_id = ?4,
1573 left_dock_visible = ?5,
1574 left_dock_active_panel = ?6,
1575 left_dock_zoom = ?7,
1576 right_dock_visible = ?8,
1577 right_dock_active_panel = ?9,
1578 right_dock_zoom = ?10,
1579 bottom_dock_visible = ?11,
1580 bottom_dock_active_panel = ?12,
1581 bottom_dock_zoom = ?13,
1582 session_id = ?14,
1583 window_id = ?15,
1584 timestamp = CURRENT_TIMESTAMP
1585 );
1586 let mut prepared_query = conn.exec_bound(query)?;
1587 let args = (
1588 workspace.id,
1589 maybe_paths,
1590 maybe_order,
1591 remote_connection_id,
1592 workspace.docks,
1593 workspace.session_id,
1594 workspace.window_id,
1595 );
1596
1597 prepared_query(args).context("Updating workspace")?;
1598
1599 // Save center pane group
1600 Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
1601 .context("save pane group in save workspace")?;
1602
1603 Ok(())
1604 })
1605 .log_err();
1606 })
1607 .await;
1608 }
1609
1610 pub(crate) async fn get_or_create_remote_connection(
1611 &self,
1612 options: RemoteConnectionOptions,
1613 ) -> Result<RemoteConnectionId> {
1614 self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
1615 .await
1616 }
1617
1618 fn get_or_create_remote_connection_internal(
1619 this: &Connection,
1620 options: RemoteConnectionOptions,
1621 ) -> Result<RemoteConnectionId> {
1622 let identity = remote_connection_identity(&options);
1623 let kind;
1624 let user: Option<String>;
1625 let mut host = None;
1626 let mut port = None;
1627 let mut distro = None;
1628 let mut name = None;
1629 let mut container_id = None;
1630 let mut use_podman = None;
1631 let mut remote_env = None;
1632
1633 match identity {
1634 RemoteConnectionIdentity::Ssh {
1635 host: identity_host,
1636 username,
1637 port: identity_port,
1638 } => {
1639 kind = RemoteConnectionKind::Ssh;
1640 host = Some(identity_host);
1641 port = identity_port;
1642 user = username;
1643 }
1644 RemoteConnectionIdentity::Wsl {
1645 distro_name,
1646 user: identity_user,
1647 } => {
1648 kind = RemoteConnectionKind::Wsl;
1649 distro = Some(distro_name);
1650 user = identity_user;
1651 }
1652 RemoteConnectionIdentity::Docker {
1653 container_id: identity_container_id,
1654 name: identity_name,
1655 remote_user,
1656 } => {
1657 kind = RemoteConnectionKind::Docker;
1658 container_id = Some(identity_container_id);
1659 name = Some(identity_name);
1660 user = Some(remote_user);
1661 }
1662 #[cfg(any(test, feature = "test-support"))]
1663 RemoteConnectionIdentity::Mock { id } => {
1664 kind = RemoteConnectionKind::Ssh;
1665 host = Some(format!("mock-{}", id));
1666 user = Some(format!("mock-user-{}", id));
1667 }
1668 }
1669
1670 if let RemoteConnectionOptions::Docker(options) = options {
1671 use_podman = Some(options.use_podman);
1672 remote_env = serde_json::to_string(&options.remote_env).ok();
1673 }
1674
1675 Self::get_or_create_remote_connection_query(
1676 this,
1677 kind,
1678 host,
1679 port,
1680 user,
1681 distro,
1682 name,
1683 container_id,
1684 use_podman,
1685 remote_env,
1686 )
1687 }
1688
1689 fn get_or_create_remote_connection_query(
1690 this: &Connection,
1691 kind: RemoteConnectionKind,
1692 host: Option<String>,
1693 port: Option<u16>,
1694 user: Option<String>,
1695 distro: Option<String>,
1696 name: Option<String>,
1697 container_id: Option<String>,
1698 use_podman: Option<bool>,
1699 remote_env: Option<String>,
1700 ) -> Result<RemoteConnectionId> {
1701 if let Some(id) = this.select_row_bound(sql!(
1702 SELECT id
1703 FROM remote_connections
1704 WHERE
1705 kind IS ? AND
1706 host IS ? AND
1707 port IS ? AND
1708 user IS ? AND
1709 distro IS ? AND
1710 name IS ? AND
1711 container_id IS ?
1712 LIMIT 1
1713 ))?((
1714 kind.serialize(),
1715 host.clone(),
1716 port,
1717 user.clone(),
1718 distro.clone(),
1719 name.clone(),
1720 container_id.clone(),
1721 ))? {
1722 Ok(RemoteConnectionId(id))
1723 } else {
1724 let id = this.select_row_bound(sql!(
1725 INSERT INTO remote_connections (
1726 kind,
1727 host,
1728 port,
1729 user,
1730 distro,
1731 name,
1732 container_id,
1733 use_podman,
1734 remote_env
1735 ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
1736 RETURNING id
1737 ))?((
1738 kind.serialize(),
1739 host,
1740 port,
1741 user,
1742 distro,
1743 name,
1744 container_id,
1745 use_podman,
1746 remote_env,
1747 ))?
1748 .context("failed to insert remote project")?;
1749 Ok(RemoteConnectionId(id))
1750 }
1751 }
1752
1753 query! {
1754 pub async fn next_id() -> Result<WorkspaceId> {
1755 INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
1756 }
1757 }
1758
1759 fn recent_workspaces(
1760 &self,
1761 ) -> Result<
1762 Vec<(
1763 WorkspaceId,
1764 PathList,
1765 Option<RemoteConnectionId>,
1766 Option<String>,
1767 DateTime<Utc>,
1768 )>,
1769 > {
1770 Ok(self
1771 .recent_workspaces_query()?
1772 .into_iter()
1773 .map(
1774 |(id, paths, order, remote_connection_id, session_id, timestamp)| {
1775 (
1776 id,
1777 PathList::deserialize(&SerializedPathList { paths, order }),
1778 remote_connection_id.map(RemoteConnectionId),
1779 session_id,
1780 parse_timestamp(×tamp),
1781 )
1782 },
1783 )
1784 .collect())
1785 }
1786
1787 query! {
1788 fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, Option<String>, String)>> {
1789 SELECT workspace_id, paths, paths_order, remote_connection_id, session_id, timestamp
1790 FROM workspaces
1791 WHERE
1792 paths IS NOT NULL OR
1793 remote_connection_id IS NOT NULL
1794 ORDER BY timestamp DESC
1795 }
1796 }
1797
1798 fn session_workspaces(
1799 &self,
1800 session_id: String,
1801 ) -> Result<
1802 Vec<(
1803 WorkspaceId,
1804 PathList,
1805 Option<u64>,
1806 Option<RemoteConnectionId>,
1807 )>,
1808 > {
1809 Ok(self
1810 .session_workspaces_query(session_id)?
1811 .into_iter()
1812 .map(
1813 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1814 (
1815 WorkspaceId(workspace_id),
1816 PathList::deserialize(&SerializedPathList { paths, order }),
1817 window_id,
1818 remote_connection_id.map(RemoteConnectionId),
1819 )
1820 },
1821 )
1822 .collect())
1823 }
1824
1825 query! {
1826 fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
1827 SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
1828 FROM workspaces
1829 WHERE session_id = ?1
1830 ORDER BY timestamp DESC
1831 }
1832 }
1833
1834 query! {
1835 pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
1836 SELECT breakpoint_location
1837 FROM breakpoints
1838 WHERE workspace_id= ?1 AND path = ?2
1839 }
1840 }
1841
1842 query! {
1843 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1844 DELETE FROM breakpoints
1845 WHERE file_path = ?2
1846 }
1847 }
1848
1849 fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
1850 Ok(self.select(sql!(
1851 SELECT
1852 id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
1853 FROM
1854 remote_connections
1855 ))?()?
1856 .into_iter()
1857 .filter_map(
1858 |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
1859 Some((
1860 RemoteConnectionId(id),
1861 Self::remote_connection_from_row(
1862 kind,
1863 host,
1864 port,
1865 user,
1866 distro,
1867 container_id,
1868 name,
1869 use_podman,
1870 remote_env,
1871 )?,
1872 ))
1873 },
1874 )
1875 .collect())
1876 }
1877
1878 pub(crate) fn remote_connection(
1879 &self,
1880 id: RemoteConnectionId,
1881 ) -> Result<RemoteConnectionOptions> {
1882 let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
1883 self.select_row_bound(sql!(
1884 SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
1885 FROM remote_connections
1886 WHERE id = ?
1887 ))?(id.0)?
1888 .context("no such remote connection")?;
1889 Self::remote_connection_from_row(
1890 kind,
1891 host,
1892 port,
1893 user,
1894 distro,
1895 container_id,
1896 name,
1897 use_podman,
1898 remote_env,
1899 )
1900 .context("invalid remote_connection row")
1901 }
1902
1903 fn remote_connection_from_row(
1904 kind: String,
1905 host: Option<String>,
1906 port: Option<u16>,
1907 user: Option<String>,
1908 distro: Option<String>,
1909 container_id: Option<String>,
1910 name: Option<String>,
1911 use_podman: Option<bool>,
1912 remote_env: Option<String>,
1913 ) -> Option<RemoteConnectionOptions> {
1914 match RemoteConnectionKind::deserialize(&kind)? {
1915 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1916 distro_name: distro?,
1917 user: user,
1918 })),
1919 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1920 host: host?.into(),
1921 port,
1922 username: user,
1923 ..Default::default()
1924 })),
1925 RemoteConnectionKind::Docker => {
1926 let remote_env: BTreeMap<String, String> =
1927 serde_json::from_str(&remote_env?).ok()?;
1928 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1929 container_id: container_id?,
1930 name: name?,
1931 remote_user: user?,
1932 upload_binary_over_docker_exec: false,
1933 use_podman: use_podman?,
1934 remote_env,
1935 }))
1936 }
1937 }
1938 }
1939
1940 query! {
1941 pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
1942 DELETE FROM workspaces
1943 WHERE workspace_id IS ?
1944 }
1945 }
1946
1947 async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool {
1948 let mut any_dir = false;
1949 for path in paths {
1950 match fs.metadata(path).await.ok().flatten() {
1951 None => return false,
1952 Some(meta) => {
1953 if meta.is_dir {
1954 any_dir = true;
1955 }
1956 }
1957 }
1958 }
1959 any_dir
1960 }
1961
1962 // Returns the recent project workspaces suitable for showing in the recent-projects UI.
1963 // Scratch workspaces (no paths) are filtered out - they aren't really "projects" and
1964 // are restored separately by `last_session_workspace_locations`.
1965 pub async fn recent_project_workspaces(
1966 &self,
1967 fs: &dyn Fs,
1968 ) -> Result<
1969 Vec<(
1970 WorkspaceId,
1971 SerializedWorkspaceLocation,
1972 PathList,
1973 DateTime<Utc>,
1974 )>,
1975 > {
1976 let remote_connections = self.remote_connections()?;
1977 let mut result = Vec::new();
1978 for (id, paths, remote_connection_id, _session_id, timestamp) in self.recent_workspaces()? {
1979 if let Some(remote_connection_id) = remote_connection_id {
1980 if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
1981 result.push((
1982 id,
1983 SerializedWorkspaceLocation::Remote(connection_options.clone()),
1984 paths,
1985 timestamp,
1986 ));
1987 }
1988 continue;
1989 }
1990
1991 if paths.paths().is_empty() || contains_wsl_path(&paths) {
1992 continue;
1993 }
1994
1995 if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
1996 result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
1997 }
1998 }
1999 Ok(result)
2000 }
2001
2002 // Deletes workspace rows that can no longer be restored from. Remote workspaces whose
2003 // connection was removed, and (on Windows) workspaces pointing at WSL paths, are cleaned
2004 // up immediately. Local workspaces with no valid paths on disk are kept for seven days
2005 // after going stale. Workspaces belonging to the current session or the last session are
2006 // always preserved so that an in-progress restore can rehydrate them.
2007 pub async fn garbage_collect_workspaces(
2008 &self,
2009 fs: &dyn Fs,
2010 current_session_id: &str,
2011 last_session_id: Option<&str>,
2012 ) -> Result<()> {
2013 let remote_connections = self.remote_connections()?;
2014 let now = Utc::now();
2015 let mut workspaces_to_delete = Vec::new();
2016 for (id, paths, remote_connection_id, session_id, timestamp) in self.recent_workspaces()? {
2017 if let Some(session_id) = session_id.as_deref() {
2018 if session_id == current_session_id || Some(session_id) == last_session_id {
2019 continue;
2020 }
2021 }
2022
2023 if let Some(remote_connection_id) = remote_connection_id {
2024 if !remote_connections.contains_key(&remote_connection_id) {
2025 workspaces_to_delete.push(id);
2026 }
2027 continue;
2028 }
2029
2030 // Delete the workspace if any of the paths are WSL paths. If a
2031 // local workspace points to WSL, attempting to read its metadata
2032 // will wait for the WSL VM and file server to boot up. This can
2033 // block for many seconds. Supported scenarios use remote
2034 // workspaces.
2035 if contains_wsl_path(&paths) {
2036 workspaces_to_delete.push(id);
2037 continue;
2038 }
2039
2040 if !Self::all_paths_exist_with_a_directory(paths.paths(), fs).await
2041 && now - timestamp >= chrono::Duration::days(7)
2042 {
2043 workspaces_to_delete.push(id);
2044 }
2045 }
2046
2047 futures::future::join_all(
2048 workspaces_to_delete
2049 .into_iter()
2050 .map(|id| self.delete_workspace_by_id(id)),
2051 )
2052 .await;
2053 Ok(())
2054 }
2055
2056 pub async fn last_workspace(
2057 &self,
2058 fs: &dyn Fs,
2059 ) -> Result<
2060 Option<(
2061 WorkspaceId,
2062 SerializedWorkspaceLocation,
2063 PathList,
2064 DateTime<Utc>,
2065 )>,
2066 > {
2067 Ok(self.recent_project_workspaces(fs).await?.into_iter().next())
2068 }
2069
2070 // Returns the locations of the workspaces that were still opened when the last
2071 // session was closed (i.e. when Zed was quit).
2072 // If `last_session_window_order` is provided, the returned locations are ordered
2073 // according to that.
2074 pub async fn last_session_workspace_locations(
2075 &self,
2076 last_session_id: &str,
2077 last_session_window_stack: Option<Vec<WindowId>>,
2078 fs: &dyn Fs,
2079 ) -> Result<Vec<SessionWorkspace>> {
2080 let mut workspaces = Vec::new();
2081
2082 for (workspace_id, paths, window_id, remote_connection_id) in
2083 self.session_workspaces(last_session_id.to_owned())?
2084 {
2085 let window_id = window_id.map(WindowId::from);
2086
2087 if let Some(remote_connection_id) = remote_connection_id {
2088 workspaces.push(SessionWorkspace {
2089 workspace_id,
2090 location: SerializedWorkspaceLocation::Remote(
2091 self.remote_connection(remote_connection_id)?,
2092 ),
2093 paths,
2094 window_id,
2095 });
2096 continue;
2097 }
2098
2099 if paths.is_empty() || Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
2100 workspaces.push(SessionWorkspace {
2101 workspace_id,
2102 location: SerializedWorkspaceLocation::Local,
2103 paths,
2104 window_id,
2105 });
2106 }
2107 }
2108
2109 if let Some(stack) = last_session_window_stack {
2110 workspaces.sort_by_key(|workspace| {
2111 workspace
2112 .window_id
2113 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
2114 .unwrap_or(usize::MAX)
2115 });
2116 }
2117
2118 Ok(workspaces)
2119 }
2120
2121 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
2122 Ok(self
2123 .get_pane_group(workspace_id, None)?
2124 .into_iter()
2125 .next()
2126 .unwrap_or_else(|| {
2127 SerializedPaneGroup::Pane(SerializedPane {
2128 active: true,
2129 children: vec![],
2130 pinned_count: 0,
2131 })
2132 }))
2133 }
2134
2135 fn get_pane_group(
2136 &self,
2137 workspace_id: WorkspaceId,
2138 group_id: Option<GroupId>,
2139 ) -> Result<Vec<SerializedPaneGroup>> {
2140 type GroupKey = (Option<GroupId>, WorkspaceId);
2141 type GroupOrPane = (
2142 Option<GroupId>,
2143 Option<SerializedAxis>,
2144 Option<PaneId>,
2145 Option<bool>,
2146 Option<usize>,
2147 Option<String>,
2148 );
2149 self.select_bound::<GroupKey, GroupOrPane>(sql!(
2150 SELECT group_id, axis, pane_id, active, pinned_count, flexes
2151 FROM (SELECT
2152 group_id,
2153 axis,
2154 NULL as pane_id,
2155 NULL as active,
2156 NULL as pinned_count,
2157 position,
2158 parent_group_id,
2159 workspace_id,
2160 flexes
2161 FROM pane_groups
2162 UNION
2163 SELECT
2164 NULL,
2165 NULL,
2166 center_panes.pane_id,
2167 panes.active as active,
2168 pinned_count,
2169 position,
2170 parent_group_id,
2171 panes.workspace_id as workspace_id,
2172 NULL
2173 FROM center_panes
2174 JOIN panes ON center_panes.pane_id = panes.pane_id)
2175 WHERE parent_group_id IS ? AND workspace_id = ?
2176 ORDER BY position
2177 ))?((group_id, workspace_id))?
2178 .into_iter()
2179 .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
2180 let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
2181 if let Some((group_id, axis)) = group_id.zip(axis) {
2182 let flexes = flexes
2183 .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
2184 .transpose()?;
2185
2186 Ok(SerializedPaneGroup::Group {
2187 axis,
2188 children: self.get_pane_group(workspace_id, Some(group_id))?,
2189 flexes,
2190 })
2191 } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
2192 Ok(SerializedPaneGroup::Pane(SerializedPane::new(
2193 self.get_items(pane_id)?,
2194 active,
2195 pinned_count,
2196 )))
2197 } else {
2198 bail!("Pane Group Child was neither a pane group or a pane");
2199 }
2200 })
2201 // Filter out panes and pane groups which don't have any children or items
2202 .filter(|pane_group| match pane_group {
2203 Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
2204 Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
2205 _ => true,
2206 })
2207 .collect::<Result<_>>()
2208 }
2209
2210 fn save_pane_group(
2211 conn: &Connection,
2212 workspace_id: WorkspaceId,
2213 pane_group: &SerializedPaneGroup,
2214 parent: Option<(GroupId, usize)>,
2215 ) -> Result<()> {
2216 if parent.is_none() {
2217 log::debug!("Saving a pane group for workspace {workspace_id:?}");
2218 }
2219 match pane_group {
2220 SerializedPaneGroup::Group {
2221 axis,
2222 children,
2223 flexes,
2224 } => {
2225 let (parent_id, position) = parent.unzip();
2226
2227 let flex_string = flexes
2228 .as_ref()
2229 .map(|flexes| serde_json::json!(flexes).to_string());
2230
2231 let group_id = conn.select_row_bound::<_, i64>(sql!(
2232 INSERT INTO pane_groups(
2233 workspace_id,
2234 parent_group_id,
2235 position,
2236 axis,
2237 flexes
2238 )
2239 VALUES (?, ?, ?, ?, ?)
2240 RETURNING group_id
2241 ))?((
2242 workspace_id,
2243 parent_id,
2244 position,
2245 *axis,
2246 flex_string,
2247 ))?
2248 .context("Couldn't retrieve group_id from inserted pane_group")?;
2249
2250 for (position, group) in children.iter().enumerate() {
2251 Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
2252 }
2253
2254 Ok(())
2255 }
2256 SerializedPaneGroup::Pane(pane) => {
2257 Self::save_pane(conn, workspace_id, pane, parent)?;
2258 Ok(())
2259 }
2260 }
2261 }
2262
2263 fn save_pane(
2264 conn: &Connection,
2265 workspace_id: WorkspaceId,
2266 pane: &SerializedPane,
2267 parent: Option<(GroupId, usize)>,
2268 ) -> Result<PaneId> {
2269 let pane_id = conn.select_row_bound::<_, i64>(sql!(
2270 INSERT INTO panes(workspace_id, active, pinned_count)
2271 VALUES (?, ?, ?)
2272 RETURNING pane_id
2273 ))?((workspace_id, pane.active, pane.pinned_count))?
2274 .context("Could not retrieve inserted pane_id")?;
2275
2276 let (parent_id, order) = parent.unzip();
2277 conn.exec_bound(sql!(
2278 INSERT INTO center_panes(pane_id, parent_group_id, position)
2279 VALUES (?, ?, ?)
2280 ))?((pane_id, parent_id, order))?;
2281
2282 Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;
2283
2284 Ok(pane_id)
2285 }
2286
2287 fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
2288 self.select_bound(sql!(
2289 SELECT kind, item_id, active, preview FROM items
2290 WHERE pane_id = ?
2291 ORDER BY position
2292 ))?(pane_id)
2293 }
2294
2295 fn save_items(
2296 conn: &Connection,
2297 workspace_id: WorkspaceId,
2298 pane_id: PaneId,
2299 items: &[SerializedItem],
2300 ) -> Result<()> {
2301 let mut insert = conn.exec_bound(sql!(
2302 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2303 )).context("Preparing insertion")?;
2304 for (position, item) in items.iter().enumerate() {
2305 insert((workspace_id, pane_id, position, item))?;
2306 }
2307
2308 Ok(())
2309 }
2310
2311 query! {
2312 pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
2313 UPDATE workspaces
2314 SET timestamp = CURRENT_TIMESTAMP
2315 WHERE workspace_id = ?
2316 }
2317 }
2318
2319 #[cfg(test)]
2320 query! {
2321 pub(crate) async fn set_timestamp_for_tests(workspace_id: WorkspaceId, timestamp: String) -> Result<()> {
2322 UPDATE workspaces
2323 SET timestamp = ?2
2324 WHERE workspace_id = ?1
2325 }
2326 }
2327
2328 query! {
2329 pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
2330 UPDATE workspaces
2331 SET window_state = ?2,
2332 window_x = ?3,
2333 window_y = ?4,
2334 window_width = ?5,
2335 window_height = ?6,
2336 display = ?7
2337 WHERE workspace_id = ?1
2338 }
2339 }
2340
2341 query! {
2342 pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
2343 UPDATE workspaces
2344 SET centered_layout = ?2
2345 WHERE workspace_id = ?1
2346 }
2347 }
2348
2349 query! {
2350 pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
2351 UPDATE workspaces
2352 SET session_id = ?2
2353 WHERE workspace_id = ?1
2354 }
2355 }
2356
2357 query! {
2358 pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
2359 UPDATE workspaces
2360 SET session_id = ?2, window_id = ?3
2361 WHERE workspace_id = ?1
2362 }
2363 }
2364
2365 pub(crate) async fn toolchains(
2366 &self,
2367 workspace_id: WorkspaceId,
2368 ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
2369 self.write(move |this| {
2370 let mut select = this
2371 .select_bound(sql!(
2372 SELECT
2373 name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
2374 FROM toolchains
2375 WHERE workspace_id = ?
2376 ))
2377 .context("select toolchains")?;
2378
2379 let toolchain: Vec<(String, String, String, String, String, String)> =
2380 select(workspace_id)?;
2381
2382 Ok(toolchain
2383 .into_iter()
2384 .filter_map(
2385 |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
2386 Some((
2387 Toolchain {
2388 name: name.into(),
2389 path: path.into(),
2390 language_name: LanguageName::new(&language),
2391 as_json: serde_json::Value::from_str(&json).ok()?,
2392 },
2393 Arc::from(worktree_root_path.as_ref()),
2394 RelPath::from_proto(&relative_worktree_path).log_err()?,
2395 ))
2396 },
2397 )
2398 .collect())
2399 })
2400 .await
2401 }
2402
2403 pub async fn set_toolchain(
2404 &self,
2405 workspace_id: WorkspaceId,
2406 worktree_root_path: Arc<Path>,
2407 relative_worktree_path: Arc<RelPath>,
2408 toolchain: Toolchain,
2409 ) -> Result<()> {
2410 log::debug!(
2411 "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
2412 toolchain.name
2413 );
2414 self.write(move |conn| {
2415 let mut insert = conn
2416 .exec_bound(sql!(
2417 INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
2418 ON CONFLICT DO
2419 UPDATE SET
2420 name = ?5,
2421 path = ?6,
2422 raw_json = ?7
2423 ))
2424 .context("Preparing insertion")?;
2425
2426 insert((
2427 workspace_id,
2428 worktree_root_path.to_string_lossy().into_owned(),
2429 relative_worktree_path.as_unix_str(),
2430 toolchain.language_name.as_ref(),
2431 toolchain.name.as_ref(),
2432 toolchain.path.as_ref(),
2433 toolchain.as_json.to_string(),
2434 ))?;
2435
2436 Ok(())
2437 }).await
2438 }
2439
2440 pub(crate) async fn save_trusted_worktrees(
2441 &self,
2442 trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
2443 ) -> anyhow::Result<()> {
2444 use anyhow::Context as _;
2445 use db::sqlez::statement::Statement;
2446 use itertools::Itertools as _;
2447
2448 self.clear_trusted_worktrees()
2449 .await
2450 .context("clearing previous trust state")?;
2451
2452 let trusted_worktrees = trusted_worktrees
2453 .into_iter()
2454 .flat_map(|(host, abs_paths)| {
2455 abs_paths
2456 .into_iter()
2457 .map(move |abs_path| (Some(abs_path), host.clone()))
2458 })
2459 .collect::<Vec<_>>();
2460 let mut first_worktree;
2461 let mut last_worktree = 0_usize;
2462 for (count, placeholders) in std::iter::once("(?, ?, ?)")
2463 .cycle()
2464 .take(trusted_worktrees.len())
2465 .chunks(MAX_QUERY_PLACEHOLDERS / 3)
2466 .into_iter()
2467 .map(|chunk| {
2468 let mut count = 0;
2469 let placeholders = chunk
2470 .inspect(|_| {
2471 count += 1;
2472 })
2473 .join(", ");
2474 (count, placeholders)
2475 })
2476 .collect::<Vec<_>>()
2477 {
2478 first_worktree = last_worktree;
2479 last_worktree = last_worktree + count;
2480 let query = format!(
2481 r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
2482VALUES {placeholders};"#
2483 );
2484
2485 let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
2486 self.write(move |conn| {
2487 let mut statement = Statement::prepare(conn, query)?;
2488 let mut next_index = 1;
2489 for (abs_path, host) in trusted_worktrees {
2490 let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
2491 next_index = statement.bind(
2492 &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
2493 next_index,
2494 )?;
2495 next_index = statement.bind(
2496 &host
2497 .as_ref()
2498 .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
2499 next_index,
2500 )?;
2501 next_index = statement.bind(
2502 &host.as_ref().map(|host| host.host_identifier.as_str()),
2503 next_index,
2504 )?;
2505 }
2506 statement.exec()
2507 })
2508 .await
2509 .context("inserting new trusted state")?;
2510 }
2511 Ok(())
2512 }
2513
2514 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2515 let trusted_worktrees = self.trusted_worktrees()?;
2516 Ok(trusted_worktrees
2517 .into_iter()
2518 .filter_map(|(abs_path, user_name, host_name)| {
2519 let db_host = match (user_name, host_name) {
2520 (None, Some(host_name)) => Some(RemoteHostLocation {
2521 user_name: None,
2522 host_identifier: SharedString::new(host_name),
2523 }),
2524 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2525 user_name: Some(SharedString::new(user_name)),
2526 host_identifier: SharedString::new(host_name),
2527 }),
2528 _ => None,
2529 };
2530 Some((db_host, abs_path?))
2531 })
2532 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2533 acc.entry(remote_host)
2534 .or_insert_with(HashSet::default)
2535 .insert(abs_path);
2536 acc
2537 }))
2538 }
2539
2540 query! {
2541 fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
2542 SELECT absolute_path, user_name, host_name
2543 FROM trusted_worktrees
2544 }
2545 }
2546
2547 query! {
2548 pub async fn clear_trusted_worktrees() -> Result<()> {
2549 DELETE FROM trusted_worktrees
2550 }
2551 }
2552}
2553
2554type WorkspaceEntry = (
2555 WorkspaceId,
2556 SerializedWorkspaceLocation,
2557 PathList,
2558 DateTime<Utc>,
2559);
2560
2561/// Resolves workspace entries whose paths are git linked worktree checkouts
2562/// to their main repository paths.
2563///
2564/// For each workspace entry:
2565/// - If any path is a linked worktree checkout, all worktree paths in that
2566/// entry are resolved to their main repository paths, producing a new
2567/// `PathList`.
2568/// - The resolved entry is then deduplicated against existing entries: if a
2569/// workspace with the same paths already exists, the entry with the most
2570/// recent timestamp is kept.
2571pub async fn resolve_worktree_workspaces(
2572 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2573 fs: &dyn Fs,
2574) -> Vec<WorkspaceEntry> {
2575 // First pass: resolve worktree paths to main repo paths concurrently.
2576 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2577 let paths = entry.2.paths();
2578 if paths.is_empty() {
2579 return entry;
2580 }
2581
2582 // Resolve each path concurrently
2583 let resolved_paths = futures::future::join_all(
2584 paths
2585 .iter()
2586 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2587 )
2588 .await;
2589
2590 // If no paths were resolved, this entry is not a worktree — keep as-is
2591 if resolved_paths.iter().all(|r| r.is_none()) {
2592 return entry;
2593 }
2594
2595 // Build new path list, substituting resolved paths
2596 let new_paths: Vec<PathBuf> = paths
2597 .iter()
2598 .zip(resolved_paths.iter())
2599 .map(|(original, resolved)| {
2600 resolved
2601 .as_ref()
2602 .cloned()
2603 .unwrap_or_else(|| original.clone())
2604 })
2605 .collect();
2606
2607 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2608 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2609 }))
2610 .await;
2611
2612 // Second pass: deduplicate by PathList.
2613 // When two entries resolve to the same paths, keep the one with the
2614 // more recent timestamp.
2615 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2616 let mut result: Vec<WorkspaceEntry> = Vec::new();
2617
2618 for entry in resolved {
2619 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2620 if let Some(&existing_idx) = seen.get(&key) {
2621 // Keep the entry with the more recent timestamp
2622 if entry.3 > result[existing_idx].3 {
2623 result[existing_idx] = entry;
2624 }
2625 } else {
2626 seen.insert(key, result.len());
2627 result.push(entry);
2628 }
2629 }
2630
2631 result
2632}
2633
2634pub fn delete_unloaded_items(
2635 alive_items: Vec<ItemId>,
2636 workspace_id: WorkspaceId,
2637 table: &'static str,
2638 db: &ThreadSafeConnection,
2639 cx: &mut App,
2640) -> Task<Result<()>> {
2641 let db = db.clone();
2642 cx.spawn(async move |_| {
2643 let placeholders = alive_items
2644 .iter()
2645 .map(|_| "?")
2646 .collect::<Vec<&str>>()
2647 .join(", ");
2648
2649 let query = format!(
2650 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2651 );
2652
2653 db.write(move |conn| {
2654 let mut statement = Statement::prepare(conn, query)?;
2655 let mut next_index = statement.bind(&workspace_id, 1)?;
2656 for id in alive_items {
2657 next_index = statement.bind(&id, next_index)?;
2658 }
2659 statement.exec()
2660 })
2661 .await
2662 })
2663}
2664
2665#[cfg(test)]
2666mod tests {
2667 use super::*;
2668 use crate::OpenMode;
2669 use crate::PathList;
2670 use crate::ProjectGroupKey;
2671 use crate::{
2672 multi_workspace::MultiWorkspace,
2673 persistence::{
2674 model::{
2675 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2676 SessionWorkspace,
2677 },
2678 read_multi_workspace_state,
2679 },
2680 };
2681
2682 use gpui::AppContext as _;
2683 use pretty_assertions::assert_eq;
2684 use project::Project;
2685 use remote::SshConnectionOptions;
2686 use serde_json::json;
2687 use std::{thread, time::Duration};
2688
2689 /// Creates a unique directory in a FakeFs, returning the path.
2690 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2691 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2692 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2693 fs.insert_tree(&dir, json!({})).await;
2694 dir
2695 }
2696
2697 #[gpui::test]
2698 async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
2699 crate::tests::init_test(cx);
2700
2701 let fs = fs::FakeFs::new(cx.executor());
2702 let project1 = Project::test(fs.clone(), [], cx).await;
2703 let project2 = Project::test(fs.clone(), [], cx).await;
2704
2705 let (multi_workspace, cx) =
2706 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
2707
2708 multi_workspace.update(cx, |mw, cx| {
2709 mw.open_sidebar(cx);
2710 });
2711
2712 multi_workspace.update_in(cx, |mw, _, cx| {
2713 mw.set_random_database_id(cx);
2714 });
2715
2716 let window_id =
2717 multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());
2718
2719 // --- Add a second workspace ---
2720 let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
2721 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
2722 workspace.update(cx, |ws, _cx| ws.set_random_database_id());
2723 mw.activate(workspace.clone(), None, window, cx);
2724 workspace
2725 });
2726
2727 // Run background tasks so serialize has a chance to flush.
2728 cx.run_until_parked();
2729
2730 // Read back the persisted state and check that the active workspace ID was written.
2731 let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2732 let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
2733 assert_eq!(
2734 state_after_add.active_workspace_id, active_workspace2_db_id,
2735 "After adding a second workspace, the serialized active_workspace_id should match \
2736 the newly activated workspace's database id"
2737 );
2738
2739 // --- Remove the non-active workspace ---
2740 multi_workspace.update_in(cx, |mw, _window, cx| {
2741 let active = mw.workspace().clone();
2742 let ws = mw
2743 .workspaces()
2744 .find(|ws| *ws != &active)
2745 .expect("should have a non-active workspace");
2746 mw.remove([ws.clone()], |_, _, _| unreachable!(), _window, cx)
2747 .detach_and_log_err(cx);
2748 });
2749
2750 cx.run_until_parked();
2751
2752 let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2753 let remaining_db_id =
2754 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
2755 assert_eq!(
2756 state_after_remove.active_workspace_id, remaining_db_id,
2757 "After removing a workspace, the serialized active_workspace_id should match \
2758 the remaining active workspace's database id"
2759 );
2760 }
2761
2762 #[gpui::test]
2763 async fn test_breakpoints() {
2764 zlog::init_test();
2765
2766 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2767 let id = db.next_id().await.unwrap();
2768
2769 let path = Path::new("/tmp/test.rs");
2770
2771 let breakpoint = Breakpoint {
2772 position: 123,
2773 message: None,
2774 state: BreakpointState::Enabled,
2775 condition: None,
2776 hit_condition: None,
2777 };
2778
2779 let log_breakpoint = Breakpoint {
2780 position: 456,
2781 message: Some("Test log message".into()),
2782 state: BreakpointState::Enabled,
2783 condition: None,
2784 hit_condition: None,
2785 };
2786
2787 let disable_breakpoint = Breakpoint {
2788 position: 578,
2789 message: None,
2790 state: BreakpointState::Disabled,
2791 condition: None,
2792 hit_condition: None,
2793 };
2794
2795 let condition_breakpoint = Breakpoint {
2796 position: 789,
2797 message: None,
2798 state: BreakpointState::Enabled,
2799 condition: Some("x > 5".into()),
2800 hit_condition: None,
2801 };
2802
2803 let hit_condition_breakpoint = Breakpoint {
2804 position: 999,
2805 message: None,
2806 state: BreakpointState::Enabled,
2807 condition: None,
2808 hit_condition: Some(">= 3".into()),
2809 };
2810
2811 let workspace = SerializedWorkspace {
2812 id,
2813 paths: PathList::new(&["/tmp"]),
2814 location: SerializedWorkspaceLocation::Local,
2815 center_group: Default::default(),
2816 window_bounds: Default::default(),
2817 display: Default::default(),
2818 docks: Default::default(),
2819 centered_layout: false,
2820 bookmarks: Default::default(),
2821 breakpoints: {
2822 let mut map = collections::BTreeMap::default();
2823 map.insert(
2824 Arc::from(path),
2825 vec![
2826 SourceBreakpoint {
2827 row: breakpoint.position,
2828 path: Arc::from(path),
2829 message: breakpoint.message.clone(),
2830 state: breakpoint.state,
2831 condition: breakpoint.condition.clone(),
2832 hit_condition: breakpoint.hit_condition.clone(),
2833 },
2834 SourceBreakpoint {
2835 row: log_breakpoint.position,
2836 path: Arc::from(path),
2837 message: log_breakpoint.message.clone(),
2838 state: log_breakpoint.state,
2839 condition: log_breakpoint.condition.clone(),
2840 hit_condition: log_breakpoint.hit_condition.clone(),
2841 },
2842 SourceBreakpoint {
2843 row: disable_breakpoint.position,
2844 path: Arc::from(path),
2845 message: disable_breakpoint.message.clone(),
2846 state: disable_breakpoint.state,
2847 condition: disable_breakpoint.condition.clone(),
2848 hit_condition: disable_breakpoint.hit_condition.clone(),
2849 },
2850 SourceBreakpoint {
2851 row: condition_breakpoint.position,
2852 path: Arc::from(path),
2853 message: condition_breakpoint.message.clone(),
2854 state: condition_breakpoint.state,
2855 condition: condition_breakpoint.condition.clone(),
2856 hit_condition: condition_breakpoint.hit_condition.clone(),
2857 },
2858 SourceBreakpoint {
2859 row: hit_condition_breakpoint.position,
2860 path: Arc::from(path),
2861 message: hit_condition_breakpoint.message.clone(),
2862 state: hit_condition_breakpoint.state,
2863 condition: hit_condition_breakpoint.condition.clone(),
2864 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2865 },
2866 ],
2867 );
2868 map
2869 },
2870 session_id: None,
2871 window_id: None,
2872 user_toolchains: Default::default(),
2873 };
2874
2875 db.save_workspace(workspace.clone()).await;
2876
2877 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2878 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2879
2880 assert_eq!(loaded_breakpoints.len(), 5);
2881
2882 // normal breakpoint
2883 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2884 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2885 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2886 assert_eq!(
2887 loaded_breakpoints[0].hit_condition,
2888 breakpoint.hit_condition
2889 );
2890 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2891 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2892
2893 // enabled breakpoint
2894 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2895 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2896 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2897 assert_eq!(
2898 loaded_breakpoints[1].hit_condition,
2899 log_breakpoint.hit_condition
2900 );
2901 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2902 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2903
2904 // disable breakpoint
2905 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2906 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2907 assert_eq!(
2908 loaded_breakpoints[2].condition,
2909 disable_breakpoint.condition
2910 );
2911 assert_eq!(
2912 loaded_breakpoints[2].hit_condition,
2913 disable_breakpoint.hit_condition
2914 );
2915 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2916 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2917
2918 // condition breakpoint
2919 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2920 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2921 assert_eq!(
2922 loaded_breakpoints[3].condition,
2923 condition_breakpoint.condition
2924 );
2925 assert_eq!(
2926 loaded_breakpoints[3].hit_condition,
2927 condition_breakpoint.hit_condition
2928 );
2929 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2930 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2931
2932 // hit condition breakpoint
2933 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2934 assert_eq!(
2935 loaded_breakpoints[4].message,
2936 hit_condition_breakpoint.message
2937 );
2938 assert_eq!(
2939 loaded_breakpoints[4].condition,
2940 hit_condition_breakpoint.condition
2941 );
2942 assert_eq!(
2943 loaded_breakpoints[4].hit_condition,
2944 hit_condition_breakpoint.hit_condition
2945 );
2946 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2947 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2948 }
2949
2950 #[gpui::test]
2951 async fn test_remove_last_breakpoint() {
2952 zlog::init_test();
2953
2954 let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
2955 let id = db.next_id().await.unwrap();
2956
2957 let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");
2958
2959 let breakpoint_to_remove = Breakpoint {
2960 position: 100,
2961 message: None,
2962 state: BreakpointState::Enabled,
2963 condition: None,
2964 hit_condition: None,
2965 };
2966
2967 let workspace = SerializedWorkspace {
2968 id,
2969 paths: PathList::new(&["/tmp"]),
2970 location: SerializedWorkspaceLocation::Local,
2971 center_group: Default::default(),
2972 window_bounds: Default::default(),
2973 display: Default::default(),
2974 docks: Default::default(),
2975 centered_layout: false,
2976 bookmarks: Default::default(),
2977 breakpoints: {
2978 let mut map = collections::BTreeMap::default();
2979 map.insert(
2980 Arc::from(singular_path),
2981 vec![SourceBreakpoint {
2982 row: breakpoint_to_remove.position,
2983 path: Arc::from(singular_path),
2984 message: None,
2985 state: BreakpointState::Enabled,
2986 condition: None,
2987 hit_condition: None,
2988 }],
2989 );
2990 map
2991 },
2992 session_id: None,
2993 window_id: None,
2994 user_toolchains: Default::default(),
2995 };
2996
2997 db.save_workspace(workspace.clone()).await;
2998
2999 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
3000 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();
3001
3002 assert_eq!(loaded_breakpoints.len(), 1);
3003 assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
3004 assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
3005 assert_eq!(
3006 loaded_breakpoints[0].condition,
3007 breakpoint_to_remove.condition
3008 );
3009 assert_eq!(
3010 loaded_breakpoints[0].hit_condition,
3011 breakpoint_to_remove.hit_condition
3012 );
3013 assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
3014 assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));
3015
3016 let workspace_without_breakpoint = SerializedWorkspace {
3017 id,
3018 paths: PathList::new(&["/tmp"]),
3019 location: SerializedWorkspaceLocation::Local,
3020 center_group: Default::default(),
3021 window_bounds: Default::default(),
3022 display: Default::default(),
3023 docks: Default::default(),
3024 centered_layout: false,
3025 bookmarks: Default::default(),
3026 breakpoints: collections::BTreeMap::default(),
3027 session_id: None,
3028 window_id: None,
3029 user_toolchains: Default::default(),
3030 };
3031
3032 db.save_workspace(workspace_without_breakpoint.clone())
3033 .await;
3034
3035 let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
3036 let empty_breakpoints = loaded_after_remove
3037 .breakpoints
3038 .get(&Arc::from(singular_path));
3039
3040 assert!(empty_breakpoints.is_none());
3041 }
3042
3043 #[gpui::test]
3044 async fn test_next_id_stability() {
3045 zlog::init_test();
3046
3047 let db = WorkspaceDb::open_test_db("test_next_id_stability").await;
3048
3049 db.write(|conn| {
3050 conn.migrate(
3051 "test_table",
3052 &[sql!(
3053 CREATE TABLE test_table(
3054 text TEXT,
3055 workspace_id INTEGER,
3056 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
3057 ON DELETE CASCADE
3058 ) STRICT;
3059 )],
3060 &mut |_, _, _| false,
3061 )
3062 .unwrap();
3063 })
3064 .await;
3065
3066 let id = db.next_id().await.unwrap();
3067 // Assert the empty row got inserted
3068 assert_eq!(
3069 Some(id),
3070 db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
3071 SELECT workspace_id FROM workspaces WHERE workspace_id = ?
3072 ))
3073 .unwrap()(id)
3074 .unwrap()
3075 );
3076
3077 db.write(move |conn| {
3078 conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
3079 .unwrap()(("test-text-1", id))
3080 .unwrap()
3081 })
3082 .await;
3083
3084 let test_text_1 = db
3085 .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
3086 .unwrap()(1)
3087 .unwrap()
3088 .unwrap();
3089 assert_eq!(test_text_1, "test-text-1");
3090 }
3091
3092 #[gpui::test]
3093 async fn test_workspace_id_stability() {
3094 zlog::init_test();
3095
3096 let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;
3097
3098 db.write(|conn| {
3099 conn.migrate(
3100 "test_table",
3101 &[sql!(
3102 CREATE TABLE test_table(
3103 text TEXT,
3104 workspace_id INTEGER,
3105 FOREIGN KEY(workspace_id)
3106 REFERENCES workspaces(workspace_id)
3107 ON DELETE CASCADE
3108 ) STRICT;)],
3109 &mut |_, _, _| false,
3110 )
3111 })
3112 .await
3113 .unwrap();
3114
3115 let mut workspace_1 = SerializedWorkspace {
3116 id: WorkspaceId(1),
3117 paths: PathList::new(&["/tmp", "/tmp2"]),
3118 location: SerializedWorkspaceLocation::Local,
3119 center_group: Default::default(),
3120 window_bounds: Default::default(),
3121 display: Default::default(),
3122 docks: Default::default(),
3123 centered_layout: false,
3124 bookmarks: Default::default(),
3125 breakpoints: Default::default(),
3126 session_id: None,
3127 window_id: None,
3128 user_toolchains: Default::default(),
3129 };
3130
3131 let workspace_2 = SerializedWorkspace {
3132 id: WorkspaceId(2),
3133 paths: PathList::new(&["/tmp"]),
3134 location: SerializedWorkspaceLocation::Local,
3135 center_group: Default::default(),
3136 window_bounds: Default::default(),
3137 display: Default::default(),
3138 docks: Default::default(),
3139 centered_layout: false,
3140 bookmarks: Default::default(),
3141 breakpoints: Default::default(),
3142 session_id: None,
3143 window_id: None,
3144 user_toolchains: Default::default(),
3145 };
3146
3147 db.save_workspace(workspace_1.clone()).await;
3148
3149 db.write(|conn| {
3150 conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
3151 .unwrap()(("test-text-1", 1))
3152 .unwrap();
3153 })
3154 .await;
3155
3156 db.save_workspace(workspace_2.clone()).await;
3157
3158 db.write(|conn| {
3159 conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
3160 .unwrap()(("test-text-2", 2))
3161 .unwrap();
3162 })
3163 .await;
3164
3165 workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
3166 db.save_workspace(workspace_1.clone()).await;
3167 db.save_workspace(workspace_1).await;
3168 db.save_workspace(workspace_2).await;
3169
3170 let test_text_2 = db
3171 .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
3172 .unwrap()(2)
3173 .unwrap()
3174 .unwrap();
3175 assert_eq!(test_text_2, "test-text-2");
3176
3177 let test_text_1 = db
3178 .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
3179 .unwrap()(1)
3180 .unwrap()
3181 .unwrap();
3182 assert_eq!(test_text_1, "test-text-1");
3183 }
3184
3185 fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
3186 SerializedPaneGroup::Group {
3187 axis: SerializedAxis(axis),
3188 flexes: None,
3189 children,
3190 }
3191 }
3192
3193 #[gpui::test]
3194 async fn test_full_workspace_serialization() {
3195 zlog::init_test();
3196
3197 let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;
3198
3199 // -----------------
3200 // | 1,2 | 5,6 |
3201 // | - - - | |
3202 // | 3,4 | |
3203 // -----------------
3204 let center_group = group(
3205 Axis::Horizontal,
3206 vec![
3207 group(
3208 Axis::Vertical,
3209 vec![
3210 SerializedPaneGroup::Pane(SerializedPane::new(
3211 vec![
3212 SerializedItem::new("Terminal", 5, false, false),
3213 SerializedItem::new("Terminal", 6, true, false),
3214 ],
3215 false,
3216 0,
3217 )),
3218 SerializedPaneGroup::Pane(SerializedPane::new(
3219 vec![
3220 SerializedItem::new("Terminal", 7, true, false),
3221 SerializedItem::new("Terminal", 8, false, false),
3222 ],
3223 false,
3224 0,
3225 )),
3226 ],
3227 ),
3228 SerializedPaneGroup::Pane(SerializedPane::new(
3229 vec![
3230 SerializedItem::new("Terminal", 9, false, false),
3231 SerializedItem::new("Terminal", 10, true, false),
3232 ],
3233 false,
3234 0,
3235 )),
3236 ],
3237 );
3238
3239 let workspace = SerializedWorkspace {
3240 id: WorkspaceId(5),
3241 paths: PathList::new(&["/tmp", "/tmp2"]),
3242 location: SerializedWorkspaceLocation::Local,
3243 center_group,
3244 window_bounds: Default::default(),
3245 bookmarks: Default::default(),
3246 breakpoints: Default::default(),
3247 display: Default::default(),
3248 docks: Default::default(),
3249 centered_layout: false,
3250 session_id: None,
3251 window_id: Some(999),
3252 user_toolchains: Default::default(),
3253 };
3254
3255 db.save_workspace(workspace.clone()).await;
3256
3257 let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
3258 assert_eq!(workspace, round_trip_workspace.unwrap());
3259
3260 // Test guaranteed duplicate IDs
3261 db.save_workspace(workspace.clone()).await;
3262 db.save_workspace(workspace.clone()).await;
3263
3264 let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
3265 assert_eq!(workspace, round_trip_workspace.unwrap());
3266 }
3267
3268 #[gpui::test]
3269 async fn test_workspace_assignment() {
3270 zlog::init_test();
3271
3272 let db = WorkspaceDb::open_test_db("test_basic_functionality").await;
3273
3274 let workspace_1 = SerializedWorkspace {
3275 id: WorkspaceId(1),
3276 paths: PathList::new(&["/tmp", "/tmp2"]),
3277 location: SerializedWorkspaceLocation::Local,
3278 center_group: Default::default(),
3279 window_bounds: Default::default(),
3280 bookmarks: Default::default(),
3281 breakpoints: Default::default(),
3282 display: Default::default(),
3283 docks: Default::default(),
3284 centered_layout: false,
3285 session_id: None,
3286 window_id: Some(1),
3287 user_toolchains: Default::default(),
3288 };
3289
3290 let mut workspace_2 = SerializedWorkspace {
3291 id: WorkspaceId(2),
3292 paths: PathList::new(&["/tmp"]),
3293 location: SerializedWorkspaceLocation::Local,
3294 center_group: Default::default(),
3295 window_bounds: Default::default(),
3296 display: Default::default(),
3297 docks: Default::default(),
3298 centered_layout: false,
3299 bookmarks: Default::default(),
3300 breakpoints: Default::default(),
3301 session_id: None,
3302 window_id: Some(2),
3303 user_toolchains: Default::default(),
3304 };
3305
3306 db.save_workspace(workspace_1.clone()).await;
3307 db.save_workspace(workspace_2.clone()).await;
3308
3309 // Test that paths are treated as a set
3310 assert_eq!(
3311 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3312 workspace_1
3313 );
3314 assert_eq!(
3315 db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
3316 workspace_1
3317 );
3318
3319 // Make sure that other keys work
3320 assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
3321 assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);
3322
3323 // Test 'mutate' case of updating a pre-existing id
3324 workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);
3325
3326 db.save_workspace(workspace_2.clone()).await;
3327 assert_eq!(
3328 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3329 workspace_2
3330 );
3331
3332 // Test other mechanism for mutating
3333 let mut workspace_3 = SerializedWorkspace {
3334 id: WorkspaceId(3),
3335 paths: PathList::new(&["/tmp2", "/tmp"]),
3336 location: SerializedWorkspaceLocation::Local,
3337 center_group: Default::default(),
3338 window_bounds: Default::default(),
3339 bookmarks: Default::default(),
3340 breakpoints: Default::default(),
3341 display: Default::default(),
3342 docks: Default::default(),
3343 centered_layout: false,
3344 session_id: None,
3345 window_id: Some(3),
3346 user_toolchains: Default::default(),
3347 };
3348
3349 db.save_workspace(workspace_3.clone()).await;
3350 assert_eq!(
3351 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3352 workspace_3
3353 );
3354
3355 // Make sure that updating paths differently also works
3356 workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
3357 db.save_workspace(workspace_3.clone()).await;
3358 assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
3359 assert_eq!(
3360 db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
3361 .unwrap(),
3362 workspace_3
3363 );
3364 }
3365
3366 #[gpui::test]
3367 async fn test_session_workspaces() {
3368 zlog::init_test();
3369
3370 let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;
3371
3372 let workspace_1 = SerializedWorkspace {
3373 id: WorkspaceId(1),
3374 paths: PathList::new(&["/tmp1"]),
3375 location: SerializedWorkspaceLocation::Local,
3376 center_group: Default::default(),
3377 window_bounds: Default::default(),
3378 display: Default::default(),
3379 docks: Default::default(),
3380 centered_layout: false,
3381 bookmarks: Default::default(),
3382 breakpoints: Default::default(),
3383 session_id: Some("session-id-1".to_owned()),
3384 window_id: Some(10),
3385 user_toolchains: Default::default(),
3386 };
3387
3388 let workspace_2 = SerializedWorkspace {
3389 id: WorkspaceId(2),
3390 paths: PathList::new(&["/tmp2"]),
3391 location: SerializedWorkspaceLocation::Local,
3392 center_group: Default::default(),
3393 window_bounds: Default::default(),
3394 display: Default::default(),
3395 docks: Default::default(),
3396 centered_layout: false,
3397 bookmarks: Default::default(),
3398 breakpoints: Default::default(),
3399 session_id: Some("session-id-1".to_owned()),
3400 window_id: Some(20),
3401 user_toolchains: Default::default(),
3402 };
3403
3404 let workspace_3 = SerializedWorkspace {
3405 id: WorkspaceId(3),
3406 paths: PathList::new(&["/tmp3"]),
3407 location: SerializedWorkspaceLocation::Local,
3408 center_group: Default::default(),
3409 window_bounds: Default::default(),
3410 display: Default::default(),
3411 docks: Default::default(),
3412 centered_layout: false,
3413 bookmarks: Default::default(),
3414 breakpoints: Default::default(),
3415 session_id: Some("session-id-2".to_owned()),
3416 window_id: Some(30),
3417 user_toolchains: Default::default(),
3418 };
3419
3420 let workspace_4 = SerializedWorkspace {
3421 id: WorkspaceId(4),
3422 paths: PathList::new(&["/tmp4"]),
3423 location: SerializedWorkspaceLocation::Local,
3424 center_group: Default::default(),
3425 window_bounds: Default::default(),
3426 display: Default::default(),
3427 docks: Default::default(),
3428 centered_layout: false,
3429 bookmarks: Default::default(),
3430 breakpoints: Default::default(),
3431 session_id: None,
3432 window_id: None,
3433 user_toolchains: Default::default(),
3434 };
3435
3436 let connection_id = db
3437 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3438 host: "my-host".into(),
3439 port: Some(1234),
3440 ..Default::default()
3441 }))
3442 .await
3443 .unwrap();
3444
3445 let workspace_5 = SerializedWorkspace {
3446 id: WorkspaceId(5),
3447 paths: PathList::default(),
3448 location: SerializedWorkspaceLocation::Remote(
3449 db.remote_connection(connection_id).unwrap(),
3450 ),
3451 center_group: Default::default(),
3452 window_bounds: Default::default(),
3453 display: Default::default(),
3454 docks: Default::default(),
3455 centered_layout: false,
3456 bookmarks: Default::default(),
3457 breakpoints: Default::default(),
3458 session_id: Some("session-id-2".to_owned()),
3459 window_id: Some(50),
3460 user_toolchains: Default::default(),
3461 };
3462
3463 let workspace_6 = SerializedWorkspace {
3464 id: WorkspaceId(6),
3465 paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
3466 location: SerializedWorkspaceLocation::Local,
3467 center_group: Default::default(),
3468 window_bounds: Default::default(),
3469 bookmarks: Default::default(),
3470 breakpoints: Default::default(),
3471 display: Default::default(),
3472 docks: Default::default(),
3473 centered_layout: false,
3474 session_id: Some("session-id-3".to_owned()),
3475 window_id: Some(60),
3476 user_toolchains: Default::default(),
3477 };
3478
3479 db.save_workspace(workspace_1.clone()).await;
3480 thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
3481 db.save_workspace(workspace_2.clone()).await;
3482 db.save_workspace(workspace_3.clone()).await;
3483 thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
3484 db.save_workspace(workspace_4.clone()).await;
3485 db.save_workspace(workspace_5.clone()).await;
3486 db.save_workspace(workspace_6.clone()).await;
3487
3488 let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
3489 assert_eq!(locations.len(), 2);
3490 assert_eq!(locations[0].0, WorkspaceId(2));
3491 assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
3492 assert_eq!(locations[0].2, Some(20));
3493 assert_eq!(locations[1].0, WorkspaceId(1));
3494 assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
3495 assert_eq!(locations[1].2, Some(10));
3496
3497 let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
3498 assert_eq!(locations.len(), 2);
3499 assert_eq!(locations[0].0, WorkspaceId(5));
3500 assert_eq!(locations[0].1, PathList::default());
3501 assert_eq!(locations[0].2, Some(50));
3502 assert_eq!(locations[0].3, Some(connection_id));
3503 assert_eq!(locations[1].0, WorkspaceId(3));
3504 assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
3505 assert_eq!(locations[1].2, Some(30));
3506
3507 let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
3508 assert_eq!(locations.len(), 1);
3509 assert_eq!(locations[0].0, WorkspaceId(6));
3510 assert_eq!(
3511 locations[0].1,
3512 PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
3513 );
3514 assert_eq!(locations[0].2, Some(60));
3515 }
3516
3517 fn default_workspace<P: AsRef<Path>>(
3518 paths: &[P],
3519 center_group: &SerializedPaneGroup,
3520 ) -> SerializedWorkspace {
3521 SerializedWorkspace {
3522 id: WorkspaceId(4),
3523 paths: PathList::new(paths),
3524 location: SerializedWorkspaceLocation::Local,
3525 center_group: center_group.clone(),
3526 window_bounds: Default::default(),
3527 display: Default::default(),
3528 docks: Default::default(),
3529 bookmarks: Default::default(),
3530 breakpoints: Default::default(),
3531 centered_layout: false,
3532 session_id: None,
3533 window_id: None,
3534 user_toolchains: Default::default(),
3535 }
3536 }
3537
3538 #[gpui::test]
3539 async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
3540 let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
3541 let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
3542 let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
3543 let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
3544
3545 let fs = fs::FakeFs::new(cx.executor());
3546 fs.insert_tree(dir1.path(), json!({})).await;
3547 fs.insert_tree(dir2.path(), json!({})).await;
3548 fs.insert_tree(dir3.path(), json!({})).await;
3549 fs.insert_tree(dir4.path(), json!({})).await;
3550
3551 let db =
3552 WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;
3553
3554 let workspaces = [
3555 (1, vec![dir1.path()], 9),
3556 (2, vec![dir2.path()], 5),
3557 (3, vec![dir3.path()], 8),
3558 (4, vec![dir4.path()], 2),
3559 (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
3560 (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
3561 ]
3562 .into_iter()
3563 .map(|(id, paths, window_id)| SerializedWorkspace {
3564 id: WorkspaceId(id),
3565 paths: PathList::new(paths.as_slice()),
3566 location: SerializedWorkspaceLocation::Local,
3567 center_group: Default::default(),
3568 window_bounds: Default::default(),
3569 display: Default::default(),
3570 docks: Default::default(),
3571 centered_layout: false,
3572 session_id: Some("one-session".to_owned()),
3573 bookmarks: Default::default(),
3574 breakpoints: Default::default(),
3575 window_id: Some(window_id),
3576 user_toolchains: Default::default(),
3577 })
3578 .collect::<Vec<_>>();
3579
3580 for workspace in workspaces.iter() {
3581 db.save_workspace(workspace.clone()).await;
3582 }
3583
3584 let stack = Some(Vec::from([
3585 WindowId::from(2), // Top
3586 WindowId::from(8),
3587 WindowId::from(5),
3588 WindowId::from(9),
3589 WindowId::from(3),
3590 WindowId::from(4), // Bottom
3591 ]));
3592
3593 let locations = db
3594 .last_session_workspace_locations("one-session", stack, fs.as_ref())
3595 .await
3596 .unwrap();
3597 assert_eq!(
3598 locations,
3599 [
3600 SessionWorkspace {
3601 workspace_id: WorkspaceId(4),
3602 location: SerializedWorkspaceLocation::Local,
3603 paths: PathList::new(&[dir4.path()]),
3604 window_id: Some(WindowId::from(2u64)),
3605 },
3606 SessionWorkspace {
3607 workspace_id: WorkspaceId(3),
3608 location: SerializedWorkspaceLocation::Local,
3609 paths: PathList::new(&[dir3.path()]),
3610 window_id: Some(WindowId::from(8u64)),
3611 },
3612 SessionWorkspace {
3613 workspace_id: WorkspaceId(2),
3614 location: SerializedWorkspaceLocation::Local,
3615 paths: PathList::new(&[dir2.path()]),
3616 window_id: Some(WindowId::from(5u64)),
3617 },
3618 SessionWorkspace {
3619 workspace_id: WorkspaceId(1),
3620 location: SerializedWorkspaceLocation::Local,
3621 paths: PathList::new(&[dir1.path()]),
3622 window_id: Some(WindowId::from(9u64)),
3623 },
3624 SessionWorkspace {
3625 workspace_id: WorkspaceId(5),
3626 location: SerializedWorkspaceLocation::Local,
3627 paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
3628 window_id: Some(WindowId::from(3u64)),
3629 },
3630 SessionWorkspace {
3631 workspace_id: WorkspaceId(6),
3632 location: SerializedWorkspaceLocation::Local,
3633 paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
3634 window_id: Some(WindowId::from(4u64)),
3635 },
3636 ]
3637 );
3638 }
3639
3640 fn pane_with_items(item_ids: &[ItemId]) -> SerializedPaneGroup {
3641 SerializedPaneGroup::Pane(SerializedPane::new(
3642 item_ids
3643 .iter()
3644 .map(|id| SerializedItem::new("Terminal", *id, true, false))
3645 .collect(),
3646 true,
3647 0,
3648 ))
3649 }
3650
3651 fn empty_pane_group() -> SerializedPaneGroup {
3652 SerializedPaneGroup::Pane(SerializedPane::default())
3653 }
3654
3655 fn workspace_with(
3656 id: u64,
3657 paths: &[&Path],
3658 center_group: SerializedPaneGroup,
3659 session_id: Option<&str>,
3660 ) -> SerializedWorkspace {
3661 SerializedWorkspace {
3662 id: WorkspaceId(id as i64),
3663 paths: PathList::new(paths),
3664 location: SerializedWorkspaceLocation::Local,
3665 center_group,
3666 window_bounds: Default::default(),
3667 display: Default::default(),
3668 docks: Default::default(),
3669 bookmarks: Default::default(),
3670 breakpoints: Default::default(),
3671 centered_layout: false,
3672 session_id: session_id.map(|s| s.to_owned()),
3673 window_id: Some(id),
3674 user_toolchains: Default::default(),
3675 }
3676 }
3677
3678 #[gpui::test]
3679 async fn test_scratch_only_workspace_restores_from_last_session(cx: &mut gpui::TestAppContext) {
3680 let fs = fs::FakeFs::new(cx.executor());
3681 let db =
3682 WorkspaceDb::open_test_db("test_scratch_only_workspace_restores_from_last_session")
3683 .await;
3684
3685 db.save_workspace(workspace_with(1, &[], pane_with_items(&[100]), Some("s1")))
3686 .await;
3687
3688 let sessions = db
3689 .last_session_workspace_locations("s1", None, fs.as_ref())
3690 .await
3691 .unwrap();
3692 assert_eq!(sessions.len(), 1);
3693 assert_eq!(sessions[0].workspace_id, WorkspaceId(1));
3694 assert!(sessions[0].paths.is_empty());
3695
3696 let recents = db.recent_project_workspaces(fs.as_ref()).await.unwrap();
3697 assert!(
3698 recents.iter().all(|(id, ..)| *id != WorkspaceId(1)),
3699 "scratch-only workspace must not appear in the recent-projects UI"
3700 );
3701 }
3702
3703 #[gpui::test]
3704 async fn test_gc_preserves_scratch_inside_window(cx: &mut gpui::TestAppContext) {
3705 let fs = fs::FakeFs::new(cx.executor());
3706 let db = WorkspaceDb::open_test_db("test_gc_preserves_scratch_inside_window").await;
3707
3708 db.save_workspace(workspace_with(1, &[], empty_pane_group(), None))
3709 .await;
3710
3711 db.garbage_collect_workspaces(fs.as_ref(), "current", None)
3712 .await
3713 .unwrap();
3714 assert!(
3715 db.workspace_for_id(WorkspaceId(1)).is_some(),
3716 "fresh stale workspace must not be deleted before the 7-day window"
3717 );
3718 }
3719
3720 #[gpui::test]
3721 async fn test_gc_deletes_stale_outside_window(cx: &mut gpui::TestAppContext) {
3722 let fs = fs::FakeFs::new(cx.executor());
3723 let db = WorkspaceDb::open_test_db("test_gc_deletes_stale_outside_window").await;
3724
3725 db.save_workspace(workspace_with(1, &[], empty_pane_group(), None))
3726 .await;
3727 db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
3728 .await
3729 .unwrap();
3730
3731 db.garbage_collect_workspaces(fs.as_ref(), "current", None)
3732 .await
3733 .unwrap();
3734 assert!(
3735 db.workspace_for_id(WorkspaceId(1)).is_none(),
3736 "stale empty workspace older than the retention window must be deleted"
3737 );
3738 }
3739
3740 #[gpui::test]
3741 async fn test_gc_preserves_directory_workspace_with_missing_path(
3742 cx: &mut gpui::TestAppContext,
3743 ) {
3744 let fs = fs::FakeFs::new(cx.executor());
3745 let db =
3746 WorkspaceDb::open_test_db("test_gc_preserves_directory_workspace_with_missing_path")
3747 .await;
3748
3749 let missing_dir = PathBuf::from("/missing-project-dir");
3750 db.save_workspace(workspace_with(
3751 1,
3752 &[missing_dir.as_path()],
3753 empty_pane_group(),
3754 None,
3755 ))
3756 .await;
3757
3758 db.garbage_collect_workspaces(fs.as_ref(), "current", None)
3759 .await
3760 .unwrap();
3761 assert!(
3762 db.workspace_for_id(WorkspaceId(1)).is_some(),
3763 "a stale workspace within the retention window must be kept"
3764 );
3765
3766 db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
3767 .await
3768 .unwrap();
3769 db.garbage_collect_workspaces(fs.as_ref(), "current", None)
3770 .await
3771 .unwrap();
3772 assert!(
3773 db.workspace_for_id(WorkspaceId(1)).is_none(),
3774 "a stale workspace past the retention window must be deleted"
3775 );
3776 }
3777
3778 #[gpui::test]
3779 async fn test_gc_preserves_current_and_last_sessions(cx: &mut gpui::TestAppContext) {
3780 let fs = fs::FakeFs::new(cx.executor());
3781 let db = WorkspaceDb::open_test_db("test_gc_preserves_current_and_last_sessions").await;
3782
3783 db.save_workspace(workspace_with(1, &[], empty_pane_group(), Some("current")))
3784 .await;
3785 db.save_workspace(workspace_with(2, &[], empty_pane_group(), Some("last")))
3786 .await;
3787 db.save_workspace(workspace_with(3, &[], empty_pane_group(), Some("stale")))
3788 .await;
3789
3790 for id in [1, 2, 3] {
3791 db.set_timestamp_for_tests(WorkspaceId(id), "2000-01-01 00:00:00".to_owned())
3792 .await
3793 .unwrap();
3794 }
3795
3796 db.garbage_collect_workspaces(fs.as_ref(), "current", Some("last"))
3797 .await
3798 .unwrap();
3799
3800 assert!(
3801 db.workspace_for_id(WorkspaceId(1)).is_some(),
3802 "GC must not delete workspaces belonging to the current session"
3803 );
3804 assert!(
3805 db.workspace_for_id(WorkspaceId(2)).is_some(),
3806 "GC must not delete workspaces belonging to the last session"
3807 );
3808 assert!(
3809 db.workspace_for_id(WorkspaceId(3)).is_none(),
3810 "GC should still delete stale workspaces from other sessions"
3811 );
3812 }
3813
3814 #[gpui::test]
3815 async fn test_gc_deletes_empty_workspace_with_items(cx: &mut gpui::TestAppContext) {
3816 let fs = fs::FakeFs::new(cx.executor());
3817 let db = WorkspaceDb::open_test_db("test_gc_deletes_empty_workspace_with_items").await;
3818
3819 db.save_workspace(workspace_with(1, &[], pane_with_items(&[100]), None))
3820 .await;
3821 db.set_timestamp_for_tests(WorkspaceId(1), "2000-01-01 00:00:00".to_owned())
3822 .await
3823 .unwrap();
3824
3825 db.garbage_collect_workspaces(fs.as_ref(), "current", None)
3826 .await
3827 .unwrap();
3828 assert!(
3829 db.workspace_for_id(WorkspaceId(1)).is_none(),
3830 "a stale empty-path workspace must be deleted regardless of its items"
3831 );
3832 }
3833
3834 #[gpui::test]
3835 async fn test_last_session_restores_workspace_with_missing_paths(
3836 cx: &mut gpui::TestAppContext,
3837 ) {
3838 let fs = fs::FakeFs::new(cx.executor());
3839 let db =
3840 WorkspaceDb::open_test_db("test_last_session_restores_workspace_with_missing_paths")
3841 .await;
3842
3843 let missing = PathBuf::from("/gone/file.rs");
3844 db.save_workspace(workspace_with(
3845 1,
3846 &[missing.as_path()],
3847 empty_pane_group(),
3848 Some("s"),
3849 ))
3850 .await;
3851
3852 let sessions = db
3853 .last_session_workspace_locations("s", None, fs.as_ref())
3854 .await
3855 .unwrap();
3856 assert!(
3857 sessions.is_empty(),
3858 "workspaces whose paths no longer exist on disk must not restore"
3859 );
3860 }
3861
3862 #[gpui::test]
3863 async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
3864 let fs = fs::FakeFs::new(cx.executor());
3865 let db =
3866 WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
3867 .await;
3868
3869 let remote_connections = [
3870 ("host-1", "my-user-1"),
3871 ("host-2", "my-user-2"),
3872 ("host-3", "my-user-3"),
3873 ("host-4", "my-user-4"),
3874 ]
3875 .into_iter()
3876 .map(|(host, user)| async {
3877 let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
3878 host: host.into(),
3879 username: Some(user.to_string()),
3880 ..Default::default()
3881 });
3882 db.get_or_create_remote_connection(options.clone())
3883 .await
3884 .unwrap();
3885 options
3886 })
3887 .collect::<Vec<_>>();
3888
3889 let remote_connections = futures::future::join_all(remote_connections).await;
3890
3891 let workspaces = [
3892 (1, remote_connections[0].clone(), 9),
3893 (2, remote_connections[1].clone(), 5),
3894 (3, remote_connections[2].clone(), 8),
3895 (4, remote_connections[3].clone(), 2),
3896 ]
3897 .into_iter()
3898 .map(|(id, remote_connection, window_id)| SerializedWorkspace {
3899 id: WorkspaceId(id),
3900 paths: PathList::default(),
3901 location: SerializedWorkspaceLocation::Remote(remote_connection),
3902 center_group: Default::default(),
3903 window_bounds: Default::default(),
3904 display: Default::default(),
3905 docks: Default::default(),
3906 centered_layout: false,
3907 session_id: Some("one-session".to_owned()),
3908 bookmarks: Default::default(),
3909 breakpoints: Default::default(),
3910 window_id: Some(window_id),
3911 user_toolchains: Default::default(),
3912 })
3913 .collect::<Vec<_>>();
3914
3915 for workspace in workspaces.iter() {
3916 db.save_workspace(workspace.clone()).await;
3917 }
3918
3919 let stack = Some(Vec::from([
3920 WindowId::from(2), // Top
3921 WindowId::from(8),
3922 WindowId::from(5),
3923 WindowId::from(9), // Bottom
3924 ]));
3925
3926 let have = db
3927 .last_session_workspace_locations("one-session", stack, fs.as_ref())
3928 .await
3929 .unwrap();
3930 assert_eq!(have.len(), 4);
3931 assert_eq!(
3932 have[0],
3933 SessionWorkspace {
3934 workspace_id: WorkspaceId(4),
3935 location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
3936 paths: PathList::default(),
3937 window_id: Some(WindowId::from(2u64)),
3938 }
3939 );
3940 assert_eq!(
3941 have[1],
3942 SessionWorkspace {
3943 workspace_id: WorkspaceId(3),
3944 location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
3945 paths: PathList::default(),
3946 window_id: Some(WindowId::from(8u64)),
3947 }
3948 );
3949 assert_eq!(
3950 have[2],
3951 SessionWorkspace {
3952 workspace_id: WorkspaceId(2),
3953 location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
3954 paths: PathList::default(),
3955 window_id: Some(WindowId::from(5u64)),
3956 }
3957 );
3958 assert_eq!(
3959 have[3],
3960 SessionWorkspace {
3961 workspace_id: WorkspaceId(1),
3962 location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
3963 paths: PathList::default(),
3964 window_id: Some(WindowId::from(9u64)),
3965 }
3966 );
3967 }
3968
3969 #[gpui::test]
3970 async fn test_get_or_create_ssh_project() {
3971 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3972
3973 let host = "example.com".to_string();
3974 let port = Some(22_u16);
3975 let user = Some("user".to_string());
3976
3977 let connection_id = db
3978 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3979 host: host.clone().into(),
3980 port,
3981 username: user.clone(),
3982 ..Default::default()
3983 }))
3984 .await
3985 .unwrap();
3986
3987 // Test that calling the function again with the same parameters returns the same project
3988 let same_connection = db
3989 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3990 host: host.clone().into(),
3991 port,
3992 username: user.clone(),
3993 ..Default::default()
3994 }))
3995 .await
3996 .unwrap();
3997
3998 assert_eq!(connection_id, same_connection);
3999
4000 // Test with different parameters
4001 let host2 = "otherexample.com".to_string();
4002 let port2 = None;
4003 let user2 = Some("otheruser".to_string());
4004
4005 let different_connection = db
4006 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
4007 host: host2.clone().into(),
4008 port: port2,
4009 username: user2.clone(),
4010 ..Default::default()
4011 }))
4012 .await
4013 .unwrap();
4014
4015 assert_ne!(connection_id, different_connection);
4016 }
4017
4018 #[gpui::test]
4019 async fn test_get_or_create_ssh_project_with_null_user() {
4020 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
4021
4022 let (host, port, user) = ("example.com".to_string(), None, None);
4023
4024 let connection_id = db
4025 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
4026 host: host.clone().into(),
4027 port,
4028 username: None,
4029 ..Default::default()
4030 }))
4031 .await
4032 .unwrap();
4033
4034 let same_connection_id = db
4035 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
4036 host: host.clone().into(),
4037 port,
4038 username: user.clone(),
4039 ..Default::default()
4040 }))
4041 .await
4042 .unwrap();
4043
4044 assert_eq!(connection_id, same_connection_id);
4045 }
4046
4047 #[gpui::test]
4048 async fn test_get_remote_connections() {
4049 let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;
4050
4051 let connections = [
4052 ("example.com".to_string(), None, None),
4053 (
4054 "anotherexample.com".to_string(),
4055 Some(123_u16),
4056 Some("user2".to_string()),
4057 ),
4058 ("yetanother.com".to_string(), Some(345_u16), None),
4059 ];
4060
4061 let mut ids = Vec::new();
4062 for (host, port, user) in connections.iter() {
4063 ids.push(
4064 db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
4065 SshConnectionOptions {
4066 host: host.clone().into(),
4067 port: *port,
4068 username: user.clone(),
4069 ..Default::default()
4070 },
4071 ))
4072 .await
4073 .unwrap(),
4074 );
4075 }
4076
4077 let stored_connections = db.remote_connections().unwrap();
4078 assert_eq!(
4079 stored_connections,
4080 [
4081 (
4082 ids[0],
4083 RemoteConnectionOptions::Ssh(SshConnectionOptions {
4084 host: "example.com".into(),
4085 port: None,
4086 username: None,
4087 ..Default::default()
4088 }),
4089 ),
4090 (
4091 ids[1],
4092 RemoteConnectionOptions::Ssh(SshConnectionOptions {
4093 host: "anotherexample.com".into(),
4094 port: Some(123),
4095 username: Some("user2".into()),
4096 ..Default::default()
4097 }),
4098 ),
4099 (
4100 ids[2],
4101 RemoteConnectionOptions::Ssh(SshConnectionOptions {
4102 host: "yetanother.com".into(),
4103 port: Some(345),
4104 username: None,
4105 ..Default::default()
4106 }),
4107 ),
4108 ]
4109 .into_iter()
4110 .collect::<HashMap<_, _>>(),
4111 );
4112 }
4113
4114 #[gpui::test]
4115 async fn test_simple_split() {
4116 zlog::init_test();
4117
4118 let db = WorkspaceDb::open_test_db("simple_split").await;
4119
4120 // -----------------
4121 // | 1,2 | 5,6 |
4122 // | - - - | |
4123 // | 3,4 | |
4124 // -----------------
4125 let center_pane = group(
4126 Axis::Horizontal,
4127 vec![
4128 group(
4129 Axis::Vertical,
4130 vec![
4131 SerializedPaneGroup::Pane(SerializedPane::new(
4132 vec![
4133 SerializedItem::new("Terminal", 1, false, false),
4134 SerializedItem::new("Terminal", 2, true, false),
4135 ],
4136 false,
4137 0,
4138 )),
4139 SerializedPaneGroup::Pane(SerializedPane::new(
4140 vec![
4141 SerializedItem::new("Terminal", 4, false, false),
4142 SerializedItem::new("Terminal", 3, true, false),
4143 ],
4144 true,
4145 0,
4146 )),
4147 ],
4148 ),
4149 SerializedPaneGroup::Pane(SerializedPane::new(
4150 vec![
4151 SerializedItem::new("Terminal", 5, true, false),
4152 SerializedItem::new("Terminal", 6, false, false),
4153 ],
4154 false,
4155 0,
4156 )),
4157 ],
4158 );
4159
4160 let workspace = default_workspace(&["/tmp"], ¢er_pane);
4161
4162 db.save_workspace(workspace.clone()).await;
4163
4164 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
4165
4166 assert_eq!(workspace.center_group, new_workspace.center_group);
4167 }
4168
4169 #[gpui::test]
4170 async fn test_cleanup_panes() {
4171 zlog::init_test();
4172
4173 let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;
4174
4175 let center_pane = group(
4176 Axis::Horizontal,
4177 vec![
4178 group(
4179 Axis::Vertical,
4180 vec![
4181 SerializedPaneGroup::Pane(SerializedPane::new(
4182 vec![
4183 SerializedItem::new("Terminal", 1, false, false),
4184 SerializedItem::new("Terminal", 2, true, false),
4185 ],
4186 false,
4187 0,
4188 )),
4189 SerializedPaneGroup::Pane(SerializedPane::new(
4190 vec![
4191 SerializedItem::new("Terminal", 4, false, false),
4192 SerializedItem::new("Terminal", 3, true, false),
4193 ],
4194 true,
4195 0,
4196 )),
4197 ],
4198 ),
4199 SerializedPaneGroup::Pane(SerializedPane::new(
4200 vec![
4201 SerializedItem::new("Terminal", 5, false, false),
4202 SerializedItem::new("Terminal", 6, true, false),
4203 ],
4204 false,
4205 0,
4206 )),
4207 ],
4208 );
4209
4210 let id = &["/tmp"];
4211
4212 let mut workspace = default_workspace(id, ¢er_pane);
4213
4214 db.save_workspace(workspace.clone()).await;
4215
4216 workspace.center_group = group(
4217 Axis::Vertical,
4218 vec![
4219 SerializedPaneGroup::Pane(SerializedPane::new(
4220 vec![
4221 SerializedItem::new("Terminal", 1, false, false),
4222 SerializedItem::new("Terminal", 2, true, false),
4223 ],
4224 false,
4225 0,
4226 )),
4227 SerializedPaneGroup::Pane(SerializedPane::new(
4228 vec![
4229 SerializedItem::new("Terminal", 4, true, false),
4230 SerializedItem::new("Terminal", 3, false, false),
4231 ],
4232 true,
4233 0,
4234 )),
4235 ],
4236 );
4237
4238 db.save_workspace(workspace.clone()).await;
4239
4240 let new_workspace = db.workspace_for_roots(id).unwrap();
4241
4242 assert_eq!(workspace.center_group, new_workspace.center_group);
4243 }
4244
4245 #[gpui::test]
4246 async fn test_empty_workspace_window_bounds() {
4247 zlog::init_test();
4248
4249 let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
4250 let id = db.next_id().await.unwrap();
4251
4252 // Create a workspace with empty paths (empty workspace)
4253 let empty_paths: &[&str] = &[];
4254 let display_uuid = Uuid::new_v4();
4255 let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
4256 origin: point(px(100.0), px(200.0)),
4257 size: size(px(800.0), px(600.0)),
4258 }));
4259
4260 let workspace = SerializedWorkspace {
4261 id,
4262 paths: PathList::new(empty_paths),
4263 location: SerializedWorkspaceLocation::Local,
4264 center_group: Default::default(),
4265 window_bounds: None,
4266 display: None,
4267 docks: Default::default(),
4268 bookmarks: Default::default(),
4269 breakpoints: Default::default(),
4270 centered_layout: false,
4271 session_id: None,
4272 window_id: None,
4273 user_toolchains: Default::default(),
4274 };
4275
4276 // Save the workspace (this creates the record with empty paths)
4277 db.save_workspace(workspace.clone()).await;
4278
4279 // Save window bounds separately (as the actual code does via set_window_open_status)
4280 db.set_window_open_status(id, window_bounds, display_uuid)
4281 .await
4282 .unwrap();
4283
4284 // Empty workspaces cannot be retrieved by paths (they'd all match).
4285 // They must be retrieved by workspace_id.
4286 assert!(db.workspace_for_roots(empty_paths).is_none());
4287
4288 // Retrieve using workspace_for_id instead
4289 let retrieved = db.workspace_for_id(id).unwrap();
4290
4291 // Verify window bounds were persisted
4292 assert_eq!(retrieved.id, id);
4293 assert!(retrieved.window_bounds.is_some());
4294 assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
4295 assert!(retrieved.display.is_some());
4296 assert_eq!(retrieved.display.unwrap(), display_uuid);
4297 }
4298
4299 #[gpui::test]
4300 async fn test_last_session_workspace_locations_groups_by_window_id(
4301 cx: &mut gpui::TestAppContext,
4302 ) {
4303 let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
4304 let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
4305 let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
4306 let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
4307 let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();
4308
4309 let fs = fs::FakeFs::new(cx.executor());
4310 fs.insert_tree(dir1.path(), json!({})).await;
4311 fs.insert_tree(dir2.path(), json!({})).await;
4312 fs.insert_tree(dir3.path(), json!({})).await;
4313 fs.insert_tree(dir4.path(), json!({})).await;
4314 fs.insert_tree(dir5.path(), json!({})).await;
4315
4316 let db =
4317 WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
4318 .await;
4319
4320 // Simulate two MultiWorkspace windows each containing two workspaces,
4321 // plus one single-workspace window:
4322 // Window 10: workspace 1, workspace 2
4323 // Window 20: workspace 3, workspace 4
4324 // Window 30: workspace 5 (only one)
4325 //
4326 // On session restore, the caller should be able to group these by
4327 // window_id to reconstruct the MultiWorkspace windows.
4328 let workspaces_data: Vec<(i64, &Path, u64)> = vec![
4329 (1, dir1.path(), 10),
4330 (2, dir2.path(), 10),
4331 (3, dir3.path(), 20),
4332 (4, dir4.path(), 20),
4333 (5, dir5.path(), 30),
4334 ];
4335
4336 for (id, dir, window_id) in &workspaces_data {
4337 db.save_workspace(SerializedWorkspace {
4338 id: WorkspaceId(*id),
4339 paths: PathList::new(&[*dir]),
4340 location: SerializedWorkspaceLocation::Local,
4341 center_group: Default::default(),
4342 window_bounds: Default::default(),
4343 display: Default::default(),
4344 docks: Default::default(),
4345 centered_layout: false,
4346 session_id: Some("test-session".to_owned()),
4347 bookmarks: Default::default(),
4348 breakpoints: Default::default(),
4349 window_id: Some(*window_id),
4350 user_toolchains: Default::default(),
4351 })
4352 .await;
4353 }
4354
4355 let locations = db
4356 .last_session_workspace_locations("test-session", None, fs.as_ref())
4357 .await
4358 .unwrap();
4359
4360 // All 5 workspaces should be returned with their window_ids.
4361 assert_eq!(locations.len(), 5);
4362
4363 // Every entry should have a window_id so the caller can group them.
4364 for session_workspace in &locations {
4365 assert!(
4366 session_workspace.window_id.is_some(),
4367 "workspace {:?} missing window_id",
4368 session_workspace.workspace_id
4369 );
4370 }
4371
4372 // Group by window_id, simulating what the restoration code should do.
4373 let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
4374 for session_workspace in &locations {
4375 if let Some(window_id) = session_workspace.window_id {
4376 by_window
4377 .entry(window_id)
4378 .or_default()
4379 .push(session_workspace.workspace_id);
4380 }
4381 }
4382
4383 // Should produce 3 windows, not 5.
4384 assert_eq!(
4385 by_window.len(),
4386 3,
4387 "Expected 3 window groups, got {}: {:?}",
4388 by_window.len(),
4389 by_window
4390 );
4391
4392 // Window 10 should contain workspaces 1 and 2.
4393 let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
4394 assert_eq!(window_10.len(), 2);
4395 assert!(window_10.contains(&WorkspaceId(1)));
4396 assert!(window_10.contains(&WorkspaceId(2)));
4397
4398 // Window 20 should contain workspaces 3 and 4.
4399 let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
4400 assert_eq!(window_20.len(), 2);
4401 assert!(window_20.contains(&WorkspaceId(3)));
4402 assert!(window_20.contains(&WorkspaceId(4)));
4403
4404 // Window 30 should contain only workspace 5.
4405 let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
4406 assert_eq!(window_30.len(), 1);
4407 assert!(window_30.contains(&WorkspaceId(5)));
4408 }
4409
4410 #[gpui::test]
4411 async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
4412 use crate::persistence::model::MultiWorkspaceState;
4413
4414 // Write multi-workspace state for two windows via the scoped KVP.
4415 let window_10 = WindowId::from(10u64);
4416 let window_20 = WindowId::from(20u64);
4417
4418 let kvp = cx.update(|cx| KeyValueStore::global(cx));
4419
4420 write_multi_workspace_state(
4421 &kvp,
4422 window_10,
4423 MultiWorkspaceState {
4424 active_workspace_id: Some(WorkspaceId(2)),
4425 project_groups: vec![],
4426 sidebar_open: true,
4427 sidebar_state: None,
4428 },
4429 )
4430 .await;
4431
4432 write_multi_workspace_state(
4433 &kvp,
4434 window_20,
4435 MultiWorkspaceState {
4436 active_workspace_id: Some(WorkspaceId(3)),
4437 project_groups: vec![],
4438 sidebar_open: false,
4439 sidebar_state: None,
4440 },
4441 )
4442 .await;
4443
4444 // Build session workspaces: two in window 10, one in window 20, one with no window.
4445 let session_workspaces = vec![
4446 SessionWorkspace {
4447 workspace_id: WorkspaceId(1),
4448 location: SerializedWorkspaceLocation::Local,
4449 paths: PathList::new(&["/a"]),
4450 window_id: Some(window_10),
4451 },
4452 SessionWorkspace {
4453 workspace_id: WorkspaceId(2),
4454 location: SerializedWorkspaceLocation::Local,
4455 paths: PathList::new(&["/b"]),
4456 window_id: Some(window_10),
4457 },
4458 SessionWorkspace {
4459 workspace_id: WorkspaceId(3),
4460 location: SerializedWorkspaceLocation::Local,
4461 paths: PathList::new(&["/c"]),
4462 window_id: Some(window_20),
4463 },
4464 SessionWorkspace {
4465 workspace_id: WorkspaceId(4),
4466 location: SerializedWorkspaceLocation::Local,
4467 paths: PathList::new(&["/d"]),
4468 window_id: None,
4469 },
4470 ];
4471
4472 let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));
4473
4474 // Should produce 3 results: window 10, window 20, and the orphan.
4475 assert_eq!(results.len(), 3);
4476
4477 // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
4478 let group_10 = &results[0];
4479 assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
4480 assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
4481 assert_eq!(group_10.state.sidebar_open, true);
4482
4483 // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
4484 let group_20 = &results[1];
4485 assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
4486 assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
4487 assert_eq!(group_20.state.sidebar_open, false);
4488
4489 // Orphan: no active_workspace_id, falls back to first workspace (id 4).
4490 let group_none = &results[2];
4491 assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
4492 assert_eq!(group_none.state.active_workspace_id, None);
4493 assert_eq!(group_none.state.sidebar_open, false);
4494 }
4495
4496 #[gpui::test]
4497 async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
4498 crate::tests::init_test(cx);
4499
4500 let fs = fs::FakeFs::new(cx.executor());
4501 let project = Project::test(fs.clone(), [], cx).await;
4502
4503 let (multi_workspace, cx) =
4504 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4505
4506 let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
4507
4508 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4509
4510 // Assign a database_id so serialization will actually persist.
4511 let workspace_id = db.next_id().await.unwrap();
4512 workspace.update(cx, |ws, _cx| {
4513 ws.set_database_id(workspace_id);
4514 });
4515
4516 // Mutate some workspace state.
4517 db.set_centered_layout(workspace_id, true).await.unwrap();
4518
4519 // Call flush_serialization and await the returned task directly
4520 // (without run_until_parked — the point is that awaiting the task
4521 // alone is sufficient).
4522 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4523 mw.workspace()
4524 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4525 });
4526 task.await;
4527
4528 // Read the workspace back from the DB and verify serialization happened.
4529 let serialized = db.workspace_for_id(workspace_id);
4530 assert!(
4531 serialized.is_some(),
4532 "flush_serialization should have persisted the workspace to DB"
4533 );
4534 }
4535
4536 #[gpui::test]
4537 async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
4538 crate::tests::init_test(cx);
4539
4540 let fs = fs::FakeFs::new(cx.executor());
4541 let project = Project::test(fs.clone(), [], cx).await;
4542
4543 let (multi_workspace, cx) =
4544 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4545
4546 // Give the first workspace a database_id.
4547 multi_workspace.update_in(cx, |mw, _, cx| {
4548 mw.set_random_database_id(cx);
4549 });
4550
4551 let window_id =
4552 multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());
4553
4554 // Create a new workspace via the MultiWorkspace API (triggers next_id()).
4555 multi_workspace.update_in(cx, |mw, window, cx| {
4556 mw.create_test_workspace(window, cx).detach();
4557 });
4558
4559 // Let the async next_id() and re-serialization tasks complete.
4560 cx.run_until_parked();
4561
4562 // The new workspace should now have a database_id.
4563 let new_workspace_db_id =
4564 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4565 assert!(
4566 new_workspace_db_id.is_some(),
4567 "New workspace should have a database_id after run_until_parked"
4568 );
4569
4570 // The multi-workspace state should record it as the active workspace.
4571 let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
4572 assert_eq!(
4573 state.active_workspace_id, new_workspace_db_id,
4574 "Serialized active_workspace_id should match the new workspace's database_id"
4575 );
4576
4577 // The individual workspace row should exist with real data
4578 // (not just the bare DEFAULT VALUES row from next_id).
4579 let workspace_id = new_workspace_db_id.unwrap();
4580 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4581 let serialized = db.workspace_for_id(workspace_id);
4582 assert!(
4583 serialized.is_some(),
4584 "Newly created workspace should be fully serialized in the DB after database_id assignment"
4585 );
4586 }
4587
4588 #[gpui::test]
4589 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4590 crate::tests::init_test(cx);
4591
4592 let fs = fs::FakeFs::new(cx.executor());
4593 let dir = unique_test_dir(&fs, "remove").await;
4594 let project1 = Project::test(fs.clone(), [], cx).await;
4595 let project2 = Project::test(fs.clone(), [], cx).await;
4596
4597 let (multi_workspace, cx) =
4598 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4599
4600 multi_workspace.update(cx, |mw, cx| {
4601 mw.open_sidebar(cx);
4602 });
4603
4604 multi_workspace.update_in(cx, |mw, _, cx| {
4605 mw.set_random_database_id(cx);
4606 });
4607
4608 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4609
4610 // Get a real DB id for workspace2 so the row actually exists.
4611 let workspace2_db_id = db.next_id().await.unwrap();
4612
4613 multi_workspace.update_in(cx, |mw, window, cx| {
4614 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4615 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4616 ws.set_database_id(workspace2_db_id)
4617 });
4618 mw.add(workspace.clone(), window, cx);
4619 });
4620
4621 // Save a full workspace row to the DB directly.
4622 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4623 db.save_workspace(SerializedWorkspace {
4624 id: workspace2_db_id,
4625 paths: PathList::new(&[&dir]),
4626 location: SerializedWorkspaceLocation::Local,
4627 center_group: Default::default(),
4628 window_bounds: Default::default(),
4629 display: Default::default(),
4630 docks: Default::default(),
4631 centered_layout: false,
4632 session_id: Some(session_id.clone()),
4633 bookmarks: Default::default(),
4634 breakpoints: Default::default(),
4635 window_id: Some(99),
4636 user_toolchains: Default::default(),
4637 })
4638 .await;
4639
4640 assert!(
4641 db.workspace_for_id(workspace2_db_id).is_some(),
4642 "Workspace2 should exist in DB before removal"
4643 );
4644
4645 // Remove workspace at index 1 (the second workspace).
4646 multi_workspace.update_in(cx, |mw, window, cx| {
4647 let ws = mw.workspaces().nth(1).unwrap().clone();
4648 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4649 .detach_and_log_err(cx);
4650 });
4651
4652 cx.run_until_parked();
4653
4654 // The row should still exist so it continues to appear in recent
4655 // projects, but the session binding should be cleared so it is not
4656 // restored as part of any future session.
4657 assert!(
4658 db.workspace_for_id(workspace2_db_id).is_some(),
4659 "Removed workspace's DB row should be preserved for recent projects"
4660 );
4661
4662 let session_workspaces = db
4663 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4664 .await
4665 .unwrap();
4666 let restored_ids: Vec<WorkspaceId> = session_workspaces
4667 .iter()
4668 .map(|sw| sw.workspace_id)
4669 .collect();
4670 assert!(
4671 !restored_ids.contains(&workspace2_db_id),
4672 "Removed workspace should not appear in session restoration"
4673 );
4674 }
4675
4676 #[gpui::test]
4677 async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
4678 crate::tests::init_test(cx);
4679
4680 let fs = fs::FakeFs::new(cx.executor());
4681 let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
4682 let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
4683 fs.insert_tree(dir1.path(), json!({})).await;
4684 fs.insert_tree(dir2.path(), json!({})).await;
4685
4686 let project1 = Project::test(fs.clone(), [], cx).await;
4687 let project2 = Project::test(fs.clone(), [], cx).await;
4688
4689 let db = cx.update(|cx| WorkspaceDb::global(cx));
4690
4691 // Get real DB ids so the rows actually exist.
4692 let ws1_id = db.next_id().await.unwrap();
4693 let ws2_id = db.next_id().await.unwrap();
4694
4695 let (multi_workspace, cx) =
4696 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4697
4698 multi_workspace.update(cx, |mw, cx| {
4699 mw.open_sidebar(cx);
4700 });
4701
4702 multi_workspace.update_in(cx, |mw, _, cx| {
4703 mw.workspace().update(cx, |ws, _cx| {
4704 ws.set_database_id(ws1_id);
4705 });
4706 });
4707
4708 multi_workspace.update_in(cx, |mw, window, cx| {
4709 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4710 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4711 ws.set_database_id(ws2_id)
4712 });
4713 mw.add(workspace.clone(), window, cx);
4714 });
4715
4716 let session_id = "test-zombie-session";
4717 let window_id_val: u64 = 42;
4718
4719 db.save_workspace(SerializedWorkspace {
4720 id: ws1_id,
4721 paths: PathList::new(&[dir1.path()]),
4722 location: SerializedWorkspaceLocation::Local,
4723 center_group: Default::default(),
4724 window_bounds: Default::default(),
4725 display: Default::default(),
4726 docks: Default::default(),
4727 centered_layout: false,
4728 session_id: Some(session_id.to_owned()),
4729 bookmarks: Default::default(),
4730 breakpoints: Default::default(),
4731 window_id: Some(window_id_val),
4732 user_toolchains: Default::default(),
4733 })
4734 .await;
4735
4736 db.save_workspace(SerializedWorkspace {
4737 id: ws2_id,
4738 paths: PathList::new(&[dir2.path()]),
4739 location: SerializedWorkspaceLocation::Local,
4740 center_group: Default::default(),
4741 window_bounds: Default::default(),
4742 display: Default::default(),
4743 docks: Default::default(),
4744 centered_layout: false,
4745 session_id: Some(session_id.to_owned()),
4746 bookmarks: Default::default(),
4747 breakpoints: Default::default(),
4748 window_id: Some(window_id_val),
4749 user_toolchains: Default::default(),
4750 })
4751 .await;
4752
4753 // Remove workspace2 (index 1).
4754 multi_workspace.update_in(cx, |mw, window, cx| {
4755 let ws = mw.workspaces().nth(1).unwrap().clone();
4756 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4757 .detach_and_log_err(cx);
4758 });
4759
4760 cx.run_until_parked();
4761
4762 // The removed workspace should NOT appear in session restoration.
4763 let locations = db
4764 .last_session_workspace_locations(session_id, None, fs.as_ref())
4765 .await
4766 .unwrap();
4767
4768 let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
4769 assert!(
4770 !restored_ids.contains(&ws2_id),
4771 "Removed workspace should not appear in session restoration list. Found: {:?}",
4772 restored_ids
4773 );
4774 assert!(
4775 restored_ids.contains(&ws1_id),
4776 "Remaining workspace should still appear in session restoration list"
4777 );
4778 }
4779
4780 #[gpui::test]
4781 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4782 crate::tests::init_test(cx);
4783
4784 let fs = fs::FakeFs::new(cx.executor());
4785 let dir = unique_test_dir(&fs, "pending-removal").await;
4786 let project1 = Project::test(fs.clone(), [], cx).await;
4787 let project2 = Project::test(fs.clone(), [], cx).await;
4788
4789 let db = cx.update(|cx| WorkspaceDb::global(cx));
4790
4791 // Get a real DB id for workspace2 so the row actually exists.
4792 let workspace2_db_id = db.next_id().await.unwrap();
4793
4794 let (multi_workspace, cx) =
4795 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4796
4797 multi_workspace.update(cx, |mw, cx| {
4798 mw.open_sidebar(cx);
4799 });
4800
4801 multi_workspace.update_in(cx, |mw, _, cx| {
4802 mw.set_random_database_id(cx);
4803 });
4804
4805 multi_workspace.update_in(cx, |mw, window, cx| {
4806 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4807 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4808 ws.set_database_id(workspace2_db_id)
4809 });
4810 mw.add(workspace.clone(), window, cx);
4811 });
4812
4813 // Save a full workspace row to the DB directly and let it settle.
4814 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4815 db.save_workspace(SerializedWorkspace {
4816 id: workspace2_db_id,
4817 paths: PathList::new(&[&dir]),
4818 location: SerializedWorkspaceLocation::Local,
4819 center_group: Default::default(),
4820 window_bounds: Default::default(),
4821 display: Default::default(),
4822 docks: Default::default(),
4823 centered_layout: false,
4824 session_id: Some(session_id.clone()),
4825 bookmarks: Default::default(),
4826 breakpoints: Default::default(),
4827 window_id: Some(88),
4828 user_toolchains: Default::default(),
4829 })
4830 .await;
4831 cx.run_until_parked();
4832
4833 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4834 multi_workspace.update_in(cx, |mw, window, cx| {
4835 let ws = mw.workspaces().nth(1).unwrap().clone();
4836 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4837 .detach_and_log_err(cx);
4838 });
4839
4840 // Simulate the quit handler pattern: collect flush tasks + pending
4841 // removal tasks and await them all.
4842 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4843 let mut tasks: Vec<Task<()>> = mw
4844 .workspaces()
4845 .map(|workspace| {
4846 workspace.update(cx, |workspace, cx| {
4847 workspace.flush_serialization(window, cx)
4848 })
4849 })
4850 .collect();
4851 let mut removal_tasks = mw.take_pending_removal_tasks();
4852 // Note: removal_tasks may be empty if the background task already
4853 // completed (take_pending_removal_tasks filters out ready tasks).
4854 tasks.append(&mut removal_tasks);
4855 tasks.push(mw.flush_serialization());
4856 tasks
4857 });
4858 futures::future::join_all(all_tasks).await;
4859
4860 // The row should still exist (for recent projects), but the session
4861 // binding should have been cleared by the pending removal task.
4862 assert!(
4863 db.workspace_for_id(workspace2_db_id).is_some(),
4864 "Workspace row should be preserved for recent projects"
4865 );
4866
4867 let session_workspaces = db
4868 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4869 .await
4870 .unwrap();
4871 let restored_ids: Vec<WorkspaceId> = session_workspaces
4872 .iter()
4873 .map(|sw| sw.workspace_id)
4874 .collect();
4875 assert!(
4876 !restored_ids.contains(&workspace2_db_id),
4877 "Pending removal task should have cleared the session binding"
4878 );
4879 }
4880
4881 #[gpui::test]
4882 async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
4883 crate::tests::init_test(cx);
4884
4885 let fs = fs::FakeFs::new(cx.executor());
4886 let project = Project::test(fs.clone(), [], cx).await;
4887
4888 let (multi_workspace, cx) =
4889 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4890
4891 multi_workspace.update_in(cx, |mw, _, cx| {
4892 mw.set_random_database_id(cx);
4893 });
4894
4895 let task =
4896 multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
4897 task.await;
4898
4899 let new_workspace_db_id =
4900 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4901 assert!(
4902 new_workspace_db_id.is_some(),
4903 "After run_until_parked, the workspace should have a database_id"
4904 );
4905
4906 let workspace_id = new_workspace_db_id.unwrap();
4907
4908 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4909
4910 assert!(
4911 db.workspace_for_id(workspace_id).is_some(),
4912 "The workspace row should exist in the DB"
4913 );
4914
4915 cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));
4916
4917 // Advance the clock past the 100ms debounce timer so the bounds
4918 // observer task fires
4919 cx.executor().advance_clock(Duration::from_millis(200));
4920 cx.run_until_parked();
4921
4922 let serialized = db
4923 .workspace_for_id(workspace_id)
4924 .expect("workspace row should still exist");
4925 assert!(
4926 serialized.window_bounds.is_some(),
4927 "The bounds observer should write bounds for the workspace's real DB ID, \
4928 even when the workspace was created via create_workspace (where the ID \
4929 is assigned asynchronously after construction)."
4930 );
4931 }
4932
4933 #[gpui::test]
4934 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4935 crate::tests::init_test(cx);
4936
4937 let fs = fs::FakeFs::new(cx.executor());
4938 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4939 fs.insert_tree(dir.path(), json!({})).await;
4940
4941 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4942
4943 let (multi_workspace, cx) =
4944 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4945
4946 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4947 let workspace_id = db.next_id().await.unwrap();
4948 multi_workspace.update_in(cx, |mw, _, cx| {
4949 mw.workspace().update(cx, |ws, _cx| {
4950 ws.set_database_id(workspace_id);
4951 });
4952 });
4953
4954 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4955 mw.workspace()
4956 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4957 });
4958 task.await;
4959
4960 let after = db
4961 .workspace_for_id(workspace_id)
4962 .expect("workspace row should exist after flush_serialization");
4963 assert!(
4964 !after.paths.is_empty(),
4965 "flush_serialization should have written paths via save_workspace"
4966 );
4967 assert!(
4968 after.window_bounds.is_some(),
4969 "flush_serialization should ensure window bounds are persisted to the DB \
4970 before the process exits."
4971 );
4972 }
4973
4974 #[gpui::test]
4975 async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
4976 let fs = fs::FakeFs::new(cx.executor());
4977
4978 // Main repo with a linked worktree entry
4979 fs.insert_tree(
4980 "/repo",
4981 json!({
4982 ".git": {
4983 "worktrees": {
4984 "feature": {
4985 "commondir": "../../",
4986 "HEAD": "ref: refs/heads/feature"
4987 }
4988 }
4989 },
4990 "src": { "main.rs": "" }
4991 }),
4992 )
4993 .await;
4994
4995 // Linked worktree checkout pointing back to /repo
4996 fs.insert_tree(
4997 "/worktree",
4998 json!({
4999 ".git": "gitdir: /repo/.git/worktrees/feature",
5000 "src": { "main.rs": "" }
5001 }),
5002 )
5003 .await;
5004
5005 // A plain non-git project
5006 fs.insert_tree(
5007 "/plain-project",
5008 json!({
5009 "src": { "main.rs": "" }
5010 }),
5011 )
5012 .await;
5013
5014 // Another normal git repo (used in mixed-path entry)
5015 fs.insert_tree(
5016 "/other-repo",
5017 json!({
5018 ".git": {},
5019 "src": { "lib.rs": "" }
5020 }),
5021 )
5022 .await;
5023
5024 let t0 = Utc::now() - chrono::Duration::hours(4);
5025 let t1 = Utc::now() - chrono::Duration::hours(3);
5026 let t2 = Utc::now() - chrono::Duration::hours(2);
5027 let t3 = Utc::now() - chrono::Duration::hours(1);
5028
5029 let workspaces = vec![
5030 // 1: Main checkout of /repo (opened earlier)
5031 (
5032 WorkspaceId(1),
5033 SerializedWorkspaceLocation::Local,
5034 PathList::new(&["/repo"]),
5035 t0,
5036 ),
5037 // 2: Linked worktree of /repo (opened more recently)
5038 // Should dedup with #1; more recent timestamp wins.
5039 (
5040 WorkspaceId(2),
5041 SerializedWorkspaceLocation::Local,
5042 PathList::new(&["/worktree"]),
5043 t1,
5044 ),
5045 // 3: Mixed-path workspace: one root is a linked worktree,
5046 // the other is a normal repo. The worktree path should be
5047 // resolved; the normal path kept as-is.
5048 (
5049 WorkspaceId(3),
5050 SerializedWorkspaceLocation::Local,
5051 PathList::new(&["/other-repo", "/worktree"]),
5052 t2,
5053 ),
5054 // 4: Non-git project — passed through unchanged.
5055 (
5056 WorkspaceId(4),
5057 SerializedWorkspaceLocation::Local,
5058 PathList::new(&["/plain-project"]),
5059 t3,
5060 ),
5061 ];
5062
5063 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
5064
5065 // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
5066 assert_eq!(result.len(), 3);
5067
5068 // First entry: /repo — deduplicated from #1 and #2.
5069 // Keeps the position of #1 (first seen), but with #2's later timestamp.
5070 assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
5071 assert_eq!(result[0].3, t1);
5072
5073 // Second entry: mixed-path workspace with worktree resolved.
5074 // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
5075 assert_eq!(
5076 result[1].2.paths(),
5077 &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
5078 );
5079 assert_eq!(result[1].0, WorkspaceId(3));
5080
5081 // Third entry: non-git project, unchanged.
5082 assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
5083 assert_eq!(result[2].0, WorkspaceId(4));
5084 }
5085
5086 #[gpui::test]
5087 async fn test_resolve_worktree_workspaces_bare_repo(cx: &mut gpui::TestAppContext) {
5088 let fs = fs::FakeFs::new(cx.executor());
5089
5090 // Bare repo at /foo/.bare (commondir doesn't end with .git)
5091 fs.insert_tree(
5092 "/foo/.bare",
5093 json!({
5094 "worktrees": {
5095 "my-feature": {
5096 "commondir": "../../",
5097 "HEAD": "ref: refs/heads/my-feature"
5098 }
5099 }
5100 }),
5101 )
5102 .await;
5103
5104 // Linked worktree whose commondir resolves to a bare repo (/foo/.bare)
5105 fs.insert_tree(
5106 "/foo/my-feature",
5107 json!({
5108 ".git": "gitdir: /foo/.bare/worktrees/my-feature",
5109 "src": { "main.rs": "" }
5110 }),
5111 )
5112 .await;
5113
5114 let t0 = Utc::now();
5115
5116 let workspaces = vec![(
5117 WorkspaceId(1),
5118 SerializedWorkspaceLocation::Local,
5119 PathList::new(&["/foo/my-feature"]),
5120 t0,
5121 )];
5122
5123 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
5124
5125 // The worktree path must be preserved unchanged — /foo/.bare is a bare repo
5126 // and cannot serve as a working-tree root, so resolution must return None.
5127 assert_eq!(result.len(), 1);
5128 assert_eq!(result[0].2.paths(), &[PathBuf::from("/foo/my-feature")]);
5129 }
5130
5131 #[gpui::test]
5132 async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
5133 cx: &mut gpui::TestAppContext,
5134 ) {
5135 crate::tests::init_test(cx);
5136
5137 let fs = fs::FakeFs::new(cx.executor());
5138
5139 // Main git repo at /repo
5140 fs.insert_tree(
5141 "/repo",
5142 json!({
5143 ".git": {
5144 "HEAD": "ref: refs/heads/main",
5145 "worktrees": {
5146 "feature": {
5147 "commondir": "../../",
5148 "HEAD": "ref: refs/heads/feature"
5149 }
5150 }
5151 },
5152 "src": { "main.rs": "" }
5153 }),
5154 )
5155 .await;
5156
5157 // Linked worktree checkout pointing back to /repo
5158 fs.insert_tree(
5159 "/worktree-feature",
5160 json!({
5161 ".git": "gitdir: /repo/.git/worktrees/feature",
5162 "src": { "lib.rs": "" }
5163 }),
5164 )
5165 .await;
5166
5167 // --- Phase 1: Set up the original multi-workspace window ---
5168
5169 let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
5170 let project_1_linked_worktree =
5171 Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;
5172
5173 // Wait for git discovery to finish.
5174 cx.run_until_parked();
5175
5176 // Create a second, unrelated project so we have two distinct project groups.
5177 fs.insert_tree(
5178 "/other-project",
5179 json!({
5180 ".git": { "HEAD": "ref: refs/heads/main" },
5181 "readme.md": ""
5182 }),
5183 )
5184 .await;
5185 let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
5186 cx.run_until_parked();
5187
5188 // Create the MultiWorkspace with project_2, then add the main repo
5189 // and its linked worktree. The linked worktree is added last and
5190 // becomes the active workspace.
5191 let (multi_workspace, cx) = cx
5192 .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));
5193
5194 multi_workspace.update(cx, |mw, cx| {
5195 mw.open_sidebar(cx);
5196 });
5197
5198 multi_workspace.update_in(cx, |mw, window, cx| {
5199 mw.test_add_workspace(project_1.clone(), window, cx);
5200 });
5201
5202 let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
5203 mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
5204 });
5205
5206 let tasks =
5207 multi_workspace.update_in(cx, |mw, window, cx| mw.flush_all_serialization(window, cx));
5208 cx.run_until_parked();
5209 for task in tasks {
5210 task.await;
5211 }
5212 cx.run_until_parked();
5213
5214 let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
5215 assert!(
5216 active_db_id.is_some(),
5217 "Active workspace should have a database ID"
5218 );
5219
5220 // --- Phase 2: Read back and verify the serialized state ---
5221
5222 let session_id = multi_workspace
5223 .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
5224 .unwrap();
5225 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
5226 let session_workspaces = db
5227 .last_session_workspace_locations(&session_id, None, fs.as_ref())
5228 .await
5229 .expect("should load session workspaces");
5230 assert!(
5231 !session_workspaces.is_empty(),
5232 "Should have at least one session workspace"
5233 );
5234
5235 let multi_workspaces =
5236 cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
5237 assert_eq!(
5238 multi_workspaces.len(),
5239 1,
5240 "All workspaces share one window, so there should be exactly one multi-workspace"
5241 );
5242
5243 let serialized = &multi_workspaces[0];
5244 assert_eq!(
5245 serialized.active_workspace.workspace_id,
5246 active_db_id.unwrap(),
5247 );
5248 assert_eq!(serialized.state.project_groups.len(), 2,);
5249
5250 // Verify the serialized project group keys round-trip back to the
5251 // originals.
5252 let restored_keys: Vec<ProjectGroupKey> = serialized
5253 .state
5254 .project_groups
5255 .iter()
5256 .cloned()
5257 .map(Into::into)
5258 .collect();
5259 let expected_keys = vec![
5260 ProjectGroupKey::new(None, PathList::new(&["/repo"])),
5261 ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
5262 ];
5263 assert_eq!(
5264 restored_keys, expected_keys,
5265 "Deserialized project group keys should match the originals"
5266 );
5267
5268 // --- Phase 3: Restore the window and verify the result ---
5269
5270 let app_state =
5271 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());
5272
5273 let serialized_mw = multi_workspaces.into_iter().next().unwrap();
5274 let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
5275 .update(|_, cx| {
5276 cx.spawn(async move |mut cx| {
5277 crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
5278 })
5279 })
5280 .await
5281 .expect("restore_multiworkspace should succeed");
5282
5283 cx.run_until_parked();
5284
5285 // The restored window should have the same project group keys.
5286 let restored_keys: Vec<ProjectGroupKey> = restored_handle
5287 .read_with(cx, |mw: &MultiWorkspace, _cx| mw.project_group_keys())
5288 .unwrap();
5289 assert_eq!(
5290 restored_keys, expected_keys,
5291 "Restored window should have the same project group keys as the original"
5292 );
5293
5294 // The active workspace in the restored window should have the linked
5295 // worktree paths.
5296 let active_paths: Vec<PathBuf> = restored_handle
5297 .read_with(cx, |mw: &MultiWorkspace, cx| {
5298 mw.workspace()
5299 .read(cx)
5300 .root_paths(cx)
5301 .into_iter()
5302 .map(|p: Arc<Path>| p.to_path_buf())
5303 .collect()
5304 })
5305 .unwrap();
5306 assert_eq!(
5307 active_paths,
5308 vec![PathBuf::from("/worktree-feature")],
5309 "The restored active workspace should be the linked worktree project"
5310 );
5311 }
5312
5313 #[gpui::test]
5314 async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
5315 crate::tests::init_test(cx);
5316
5317 let fs = fs::FakeFs::new(cx.executor());
5318 let dir_a = unique_test_dir(&fs, "group-a").await;
5319 let dir_b = unique_test_dir(&fs, "group-b").await;
5320 let dir_c = unique_test_dir(&fs, "group-c").await;
5321
5322 let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
5323 let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
5324 let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;
5325
5326 // Create a multi-workspace with project A, then add B and C.
5327 // project_groups stores newest first: [C, B, A].
5328 // Sidebar displays in the same order: C (top), B (middle), A (bottom).
5329 let (multi_workspace, cx) = cx
5330 .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));
5331
5332 multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));
5333
5334 let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
5335 mw.test_add_workspace(project_b.clone(), window, cx)
5336 });
5337 let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
5338 mw.test_add_workspace(project_c.clone(), window, cx)
5339 });
5340 cx.run_until_parked();
5341
5342 let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
5343 let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
5344 let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));
5345
5346 // Activate workspace B so removing its group exercises the fallback.
5347 multi_workspace.update_in(cx, |mw, window, cx| {
5348 mw.activate(workspace_b.clone(), None, window, cx);
5349 });
5350 cx.run_until_parked();
5351
5352 // --- Remove group B (the middle one). ---
5353 // In the sidebar [C, B, A], "below" B is A.
5354 multi_workspace.update_in(cx, |mw, window, cx| {
5355 mw.remove_project_group(&key_b, window, cx)
5356 .detach_and_log_err(cx);
5357 });
5358 cx.run_until_parked();
5359
5360 let active_paths =
5361 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5362 assert_eq!(
5363 active_paths
5364 .iter()
5365 .map(|p| p.to_path_buf())
5366 .collect::<Vec<_>>(),
5367 vec![dir_a.clone()],
5368 "After removing the middle group, should fall back to the group below (A)"
5369 );
5370
5371 // After removing B, keys = [A, C], sidebar = [C, A].
5372 // Activate workspace A (the bottom) so removing it tests the
5373 // "fall back upward" path.
5374 let workspace_a =
5375 multi_workspace.read_with(cx, |mw, _cx| mw.workspaces().next().unwrap().clone());
5376 multi_workspace.update_in(cx, |mw, window, cx| {
5377 mw.activate(workspace_a.clone(), None, window, cx);
5378 });
5379 cx.run_until_parked();
5380
5381 // --- Remove group A (the bottom one in sidebar). ---
5382 // Nothing below A, so should fall back upward to C.
5383 multi_workspace.update_in(cx, |mw, window, cx| {
5384 mw.remove_project_group(&key_a, window, cx)
5385 .detach_and_log_err(cx);
5386 });
5387 cx.run_until_parked();
5388
5389 let active_paths =
5390 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5391 assert_eq!(
5392 active_paths
5393 .iter()
5394 .map(|p| p.to_path_buf())
5395 .collect::<Vec<_>>(),
5396 vec![dir_c.clone()],
5397 "After removing the bottom group, should fall back to the group above (C)"
5398 );
5399
5400 // --- Remove group C (the only one remaining). ---
5401 // Should create an empty workspace.
5402 multi_workspace.update_in(cx, |mw, window, cx| {
5403 mw.remove_project_group(&key_c, window, cx)
5404 .detach_and_log_err(cx);
5405 });
5406 cx.run_until_parked();
5407
5408 let active_paths =
5409 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
5410 assert!(
5411 active_paths.is_empty(),
5412 "After removing the only remaining group, should have an empty workspace"
5413 );
5414 }
5415
5416 /// Regression test for a crash where `find_or_create_local_workspace`
5417 /// returned a workspace that was about to be removed, hitting an assert
5418 /// in `MultiWorkspace::remove`.
5419 ///
5420 /// The scenario: two workspaces share the same root paths (e.g. due to
5421 /// a provisional key mismatch). When the first is removed and the
5422 /// fallback searches for the same paths, `workspace_for_paths` must
5423 /// skip the doomed workspace so the assert in `remove` is satisfied.
5424 #[gpui::test]
5425 async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
5426 crate::tests::init_test(cx);
5427
5428 let fs = fs::FakeFs::new(cx.executor());
5429 let dir = unique_test_dir(&fs, "shared").await;
5430
5431 // Two projects that open the same directory — this creates two
5432 // workspaces whose root_paths are identical.
5433 let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
5434 let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;
5435
5436 let (multi_workspace, cx) = cx
5437 .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));
5438
5439 multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));
5440
5441 let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
5442 mw.test_add_workspace(project_b.clone(), window, cx)
5443 });
5444 cx.run_until_parked();
5445
5446 // workspace_a is first in the workspaces vec.
5447 let workspace_a =
5448 multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
5449 assert_ne!(workspace_a, workspace_b);
5450
5451 // Activate workspace_a so removing it triggers the fallback path.
5452 multi_workspace.update_in(cx, |mw, window, cx| {
5453 mw.activate(workspace_a.clone(), None, window, cx);
5454 });
5455 cx.run_until_parked();
5456
5457 // Remove workspace_a. The fallback searches for the same paths.
5458 // Without the `excluding` parameter, `workspace_for_paths` would
5459 // return workspace_a (first match) and the assert in `remove`
5460 // would fire. With the fix, workspace_a is skipped and
5461 // workspace_b is found instead.
5462 let path_list = PathList::new(std::slice::from_ref(&dir));
5463 let excluded = vec![workspace_a.clone()];
5464 multi_workspace.update_in(cx, |mw, window, cx| {
5465 mw.remove(
5466 vec![workspace_a.clone()],
5467 move |this, window, cx| {
5468 this.find_or_create_local_workspace(
5469 path_list,
5470 None,
5471 &excluded,
5472 None,
5473 OpenMode::Activate,
5474 window,
5475 cx,
5476 )
5477 },
5478 window,
5479 cx,
5480 )
5481 .detach_and_log_err(cx);
5482 });
5483 cx.run_until_parked();
5484
5485 // workspace_b should now be active — workspace_a was removed.
5486 multi_workspace.read_with(cx, |mw, _cx| {
5487 assert_eq!(
5488 mw.workspace(),
5489 &workspace_b,
5490 "fallback should have found workspace_b, not the excluded workspace_a"
5491 );
5492 });
5493 }
5494}