1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
25 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
26};
27
28use language::{LanguageName, Toolchain, ToolchainScope};
29use remote::{
30 DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
31 SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
32};
33use serde::{Deserialize, Serialize};
34use sqlez::{
35 bindable::{Bind, Column, StaticColumnCount},
36 statement::Statement,
37 thread_safe_connection::ThreadSafeConnection,
38};
39
40use ui::{App, SharedString, px};
41use util::{ResultExt, maybe, rel_path::RelPath};
42use uuid::Uuid;
43
44use crate::{
45 WorkspaceId,
46 path_list::{PathList, SerializedPathList},
47 persistence::model::RemoteConnectionKind,
48};
49
50use model::{
51 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
52 SerializedPaneGroup, SerializedWorkspace,
53};
54
55use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
56
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept below the documented 32766 limit to leave headroom when batching
// multi-row statements.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
61
62fn parse_timestamp(text: &str) -> DateTime<Utc> {
63 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
64 .map(|naive| naive.and_utc())
65 .unwrap_or_else(|_| Utc::now())
66}
67
/// Wrapper that lets a [`gpui::Axis`] be persisted to / read from the
/// database as the strings "Horizontal" / "Vertical".
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
// Relies on the trait's default `column_count` (the axis occupies a single
// text column).
impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
71impl sqlez::bindable::Bind for SerializedAxis {
72 fn bind(
73 &self,
74 statement: &sqlez::statement::Statement,
75 start_index: i32,
76 ) -> anyhow::Result<i32> {
77 match self.0 {
78 gpui::Axis::Horizontal => "Horizontal",
79 gpui::Axis::Vertical => "Vertical",
80 }
81 .bind(statement, start_index)
82 }
83}
84
85impl sqlez::bindable::Column for SerializedAxis {
86 fn column(
87 statement: &mut sqlez::statement::Statement,
88 start_index: i32,
89 ) -> anyhow::Result<(Self, i32)> {
90 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
91 Ok((
92 match axis_text.as_str() {
93 "Horizontal" => Self(Axis::Horizontal),
94 "Vertical" => Self(Axis::Vertical),
95 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
96 },
97 next_index,
98 ))
99 })
100 }
101}
102
/// Wrapper that persists a [`WindowBounds`] as five columns: a state string
/// ("Windowed" / "Maximized" / "FullScreen") followed by x/y/width/height.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);

impl StaticColumnCount for SerializedWindowBounds {
    fn column_count() -> usize {
        // One state-string column + four bounds-component columns.
        5
    }
}
111
112impl Bind for SerializedWindowBounds {
113 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
114 match self.0 {
115 WindowBounds::Windowed(bounds) => {
116 let next_index = statement.bind(&"Windowed", start_index)?;
117 statement.bind(
118 &(
119 SerializedPixels(bounds.origin.x),
120 SerializedPixels(bounds.origin.y),
121 SerializedPixels(bounds.size.width),
122 SerializedPixels(bounds.size.height),
123 ),
124 next_index,
125 )
126 }
127 WindowBounds::Maximized(bounds) => {
128 let next_index = statement.bind(&"Maximized", start_index)?;
129 statement.bind(
130 &(
131 SerializedPixels(bounds.origin.x),
132 SerializedPixels(bounds.origin.y),
133 SerializedPixels(bounds.size.width),
134 SerializedPixels(bounds.size.height),
135 ),
136 next_index,
137 )
138 }
139 WindowBounds::Fullscreen(bounds) => {
140 let next_index = statement.bind(&"FullScreen", start_index)?;
141 statement.bind(
142 &(
143 SerializedPixels(bounds.origin.x),
144 SerializedPixels(bounds.origin.y),
145 SerializedPixels(bounds.size.width),
146 SerializedPixels(bounds.size.height),
147 ),
148 next_index,
149 )
150 }
151 }
152 }
153}
154
155impl Column for SerializedWindowBounds {
156 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
157 let (window_state, next_index) = String::column(statement, start_index)?;
158 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
159 Column::column(statement, next_index)?;
160 let bounds = Bounds {
161 origin: point(px(x as f32), px(y as f32)),
162 size: size(px(width as f32), px(height as f32)),
163 };
164
165 let status = match window_state.as_str() {
166 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
167 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
168 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
169 _ => bail!("Window State did not have a valid string"),
170 };
171
172 Ok((status, next_index + 4))
173 }
174}
175
// KVP key under which the default (display uuid, window bounds) pair is stored.
const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
177
178pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
179 let json_str = kvp
180 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
181 .log_err()
182 .flatten()?;
183
184 let (display_uuid, persisted) =
185 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
186 Some((display_uuid, persisted.into()))
187}
188
189pub async fn write_default_window_bounds(
190 kvp: &KeyValueStore,
191 bounds: WindowBounds,
192 display_uuid: Uuid,
193) -> anyhow::Result<()> {
194 let persisted = WindowBoundsJson::from(bounds);
195 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
196 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
197 .await?;
198 Ok(())
199}
200
/// JSON-serializable mirror of [`WindowBounds`], used to persist window
/// placement in the key-value store. Variant names and field names are part
/// of the on-disk format — renaming them would break previously stored data.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
222
223impl From<WindowBounds> for WindowBoundsJson {
224 fn from(b: WindowBounds) -> Self {
225 match b {
226 WindowBounds::Windowed(bounds) => {
227 let origin = bounds.origin;
228 let size = bounds.size;
229 WindowBoundsJson::Windowed {
230 x: f32::from(origin.x).round() as i32,
231 y: f32::from(origin.y).round() as i32,
232 width: f32::from(size.width).round() as i32,
233 height: f32::from(size.height).round() as i32,
234 }
235 }
236 WindowBounds::Maximized(bounds) => {
237 let origin = bounds.origin;
238 let size = bounds.size;
239 WindowBoundsJson::Maximized {
240 x: f32::from(origin.x).round() as i32,
241 y: f32::from(origin.y).round() as i32,
242 width: f32::from(size.width).round() as i32,
243 height: f32::from(size.height).round() as i32,
244 }
245 }
246 WindowBounds::Fullscreen(bounds) => {
247 let origin = bounds.origin;
248 let size = bounds.size;
249 WindowBoundsJson::Fullscreen {
250 x: f32::from(origin.x).round() as i32,
251 y: f32::from(origin.y).round() as i32,
252 width: f32::from(size.width).round() as i32,
253 height: f32::from(size.height).round() as i32,
254 }
255 }
256 }
257 }
258}
259
260impl From<WindowBoundsJson> for WindowBounds {
261 fn from(n: WindowBoundsJson) -> Self {
262 match n {
263 WindowBoundsJson::Windowed {
264 x,
265 y,
266 width,
267 height,
268 } => WindowBounds::Windowed(Bounds {
269 origin: point(px(x as f32), px(y as f32)),
270 size: size(px(width as f32), px(height as f32)),
271 }),
272 WindowBoundsJson::Maximized {
273 x,
274 y,
275 width,
276 height,
277 } => WindowBounds::Maximized(Bounds {
278 origin: point(px(x as f32), px(y as f32)),
279 size: size(px(width as f32), px(height as f32)),
280 }),
281 WindowBoundsJson::Fullscreen {
282 x,
283 y,
284 width,
285 height,
286 } => WindowBounds::Fullscreen(Bounds {
287 origin: point(px(x as f32), px(y as f32)),
288 size: size(px(width as f32), px(height as f32)),
289 }),
290 }
291 }
292}
293
294fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
295 let kvp = KeyValueStore::global(cx);
296 kvp.scoped("multi_workspace_state")
297 .read(&window_id.as_u64().to_string())
298 .log_err()
299 .flatten()
300 .and_then(|json| serde_json::from_str(&json).ok())
301 .unwrap_or_default()
302}
303
304pub async fn write_multi_workspace_state(
305 kvp: &KeyValueStore,
306 window_id: WindowId,
307 state: model::MultiWorkspaceState,
308) {
309 if let Ok(json_str) = serde_json::to_string(&state) {
310 kvp.scoped("multi_workspace_state")
311 .write(window_id.as_u64().to_string(), json_str)
312 .await
313 .log_err();
314 }
315}
316
/// Groups the session's workspaces by the window they were open in and, for
/// each window, selects the workspace to restore as active along with that
/// window's persisted multi-workspace state.
pub fn read_serialized_multi_workspaces(
    session_workspaces: Vec<model::SessionWorkspace>,
    cx: &App,
) -> Vec<model::SerializedMultiWorkspace> {
    // Groups preserve first-appearance order; the map tracks which group a
    // window id was assigned to.
    let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
    let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();

    for session_workspace in session_workspaces {
        match session_workspace.window_id {
            Some(window_id) => {
                let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
                    window_groups.push(Vec::new());
                    window_groups.len() - 1
                });
                window_groups[group_index].push(session_workspace);
            }
            None => {
                // A workspace without a recorded window forms its own group.
                window_groups.push(vec![session_workspace]);
            }
        }
    }

    window_groups
        .into_iter()
        .filter_map(|group| {
            // Every member of a group shares the same window id (or the group
            // is a windowless singleton), so the first entry is representative.
            let window_id = group.first().and_then(|sw| sw.window_id);
            let state = window_id
                .map(|wid| read_multi_workspace_state(wid, cx))
                .unwrap_or_default();
            // Prefer the workspace the state records as active; fall back to
            // the group's first entry. Empty groups are dropped by the `?`.
            let active_workspace = state
                .active_workspace_id
                .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
                .or(Some(0))
                .and_then(|index| group.into_iter().nth(index))?;
            Some(model::SerializedMultiWorkspace {
                active_workspace,
                state,
            })
        })
        .collect()
}
358
// KVP key under which the default dock layout is stored.
const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
360
361pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
362 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
363
364 serde_json::from_str::<DockStructure>(&json_str).ok()
365}
366
367pub async fn write_default_dock_state(
368 kvp: &KeyValueStore,
369 docks: DockStructure,
370) -> anyhow::Result<()> {
371 let json_str = serde_json::to_string(&docks)?;
372 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
373 .await?;
374 Ok(())
375}
376
/// A breakpoint as stored in, and read back from, the workspace database
/// (see the `breakpoints` table in the migrations below).
#[derive(Debug)]
pub struct Breakpoint {
    // Location within the file (stored in the breakpoint_location column).
    pub position: u32,
    // Optional message (stored in the log_message column).
    pub message: Option<Arc<str>>,
    // Optional condition expression.
    pub condition: Option<Arc<str>>,
    // Optional hit-count condition.
    pub hit_condition: Option<Arc<str>>,
    // Enabled/disabled state.
    pub state: BreakpointState,
}
385
/// Wrapper for DB type of a breakpoint's state; the `Cow` lets the same type
/// bind either a borrowed or an owned [`BreakpointState`].
struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);

impl From<BreakpointState> for BreakpointStateWrapper<'static> {
    fn from(kind: BreakpointState) -> Self {
        BreakpointStateWrapper(Cow::Owned(kind))
    }
}
394
impl StaticColumnCount for BreakpointStateWrapper<'_> {
    fn column_count() -> usize {
        // The state occupies a single integer column.
        1
    }
}

impl Bind for BreakpointStateWrapper<'_> {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        // Persists the state's integer form; the `Column` impl below performs
        // the reverse mapping.
        statement.bind(&self.0.to_int(), start_index)
    }
}

impl Column for BreakpointStateWrapper<'_> {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let state = statement.column_int(start_index)?;

        // NOTE(review): this mapping (0 = Enabled, 1 = Disabled) must stay in
        // sync with `BreakpointState::to_int` — confirm against its definition.
        match state {
            0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
            1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
            _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
        }
    }
}
418
impl sqlez::bindable::StaticColumnCount for Breakpoint {
    fn column_count() -> usize {
        // Position, log message, condition message, and hit condition message
        4 + BreakpointStateWrapper::column_count()
    }
}
425
426impl sqlez::bindable::Bind for Breakpoint {
427 fn bind(
428 &self,
429 statement: &sqlez::statement::Statement,
430 start_index: i32,
431 ) -> anyhow::Result<i32> {
432 let next_index = statement.bind(&self.position, start_index)?;
433 let next_index = statement.bind(&self.message, next_index)?;
434 let next_index = statement.bind(&self.condition, next_index)?;
435 let next_index = statement.bind(&self.hit_condition, next_index)?;
436 statement.bind(
437 &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
438 next_index,
439 )
440 }
441}
442
impl Column for Breakpoint {
    /// Reads the five breakpoint columns written by the `Bind` impl above.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        // `column_int` reads in place without advancing an index, so the next
        // read starts explicitly at `start_index + 1`.
        let position = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
            as u32;
        let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
        let (condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;

        Ok((
            Breakpoint {
                position,
                message: message.map(Arc::from),
                condition: condition.map(Arc::from),
                hit_condition: hit_condition.map(Arc::from),
                state: state.0.into_owned(),
            },
            next_index,
        ))
    }
}
466
/// Newtype allowing pixel values to be bound as SQL parameters.
#[derive(Clone, Debug, PartialEq)]
struct SerializedPixels(gpui::Pixels);
// Relies on the trait's default `column_count` (single integer column).
impl sqlez::bindable::StaticColumnCount for SerializedPixels {}

impl sqlez::bindable::Bind for SerializedPixels {
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        // NOTE(review): goes through `u32::from(Pixels)` before the i32 cast —
        // presumably window coordinates here are non-negative; confirm against
        // gpui's `u32: From<Pixels>` conversion semantics.
        let this: i32 = u32::from(self.0) as _;
        this.bind(statement, start_index)
    }
}
481
/// Handle to the workspace persistence database; the schema and its
/// migration history live in the [`Domain`] impl below.
pub struct WorkspaceDb(ThreadSafeConnection);
483
// Schema definition and migration history for the workspace database.
impl Domain for WorkspaceDb {
    const NAME: &str = stringify!(WorkspaceDb);

    // Append-only migration list: each entry has already run on user machines
    // and is identified by its index, so existing entries must never be
    // edited (see `should_allow_migration_change` below for the single
    // sanctioned exception). New schema changes are appended at the end.
    const MIGRATIONS: &[&str] = &[
        // Initial schema: workspaces, pane tree (pane_groups / panes /
        // center_panes), and the items within each pane.
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                    ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                    ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Persist window placement per workspace.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        // First (since superseded) shape of remote project support.
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        // Rename remote projects to dev-server projects.
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        // SSH projects, later folded into ssh_connections / remote_connections.
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        // Per-workspace toolchain selections.
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        // Per-workspace breakpoints.
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE
            );
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains with relative_worktree_path in the primary key.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Consolidate ssh_projects into ssh_connections and rebuild the
        // workspaces table around (connection, paths) as its identity,
        // dropping duplicate rows for the same location.
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into remote_connections (ssh + other
        // kinds) and rebuild workspaces to reference them.
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        // User-selected toolchains, possibly scoped to a remote connection.
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        sql!(
            DROP TABLE ssh_connections;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        // Worktrees the user has marked as trusted.
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Re-key toolchains by worktree root path instead of worktree id.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains.
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
992
// Registers `WorkspaceDb` as a statically accessible database connection.
// NOTE(review): the `[]` argument presumably lists dependent domains (none
// here) — confirm against the `static_connection!` macro definition.
db::static_connection!(WorkspaceDb, []);
994
995impl WorkspaceDb {
    /// Returns a serialized workspace for the given worktree_roots. If the passed array
    /// is empty, the most recent workspace is returned instead. If no workspace for the
    /// passed roots is stored, returns none.
    ///
    /// Convenience wrapper over `workspace_for_roots_internal` for local
    /// (non-remote) workspaces.
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, None)
    }
1005
    /// Like `workspace_for_roots`, but scoped to a specific remote
    /// connection's workspaces.
    pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_project_id: RemoteConnectionId,
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
    }
1013
    /// Looks up the stored workspace whose (sorted) path list and optional
    /// remote connection match the arguments, and assembles the full
    /// [`SerializedWorkspace`] (pane tree, docks, breakpoints, toolchains).
    /// Returns `None` when no matching row exists or reassembly fails.
    pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> Option<SerializedWorkspace> {
        // paths are sorted before db interactions to ensure that the order of the paths
        // doesn't affect the workspace selection for existing workspaces
        let root_paths = PathList::new(worktree_roots);

        // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
        // They should only be restored via workspace_for_id during session restoration.
        if root_paths.is_empty() && remote_connection_id.is_none() {
            return None;
        }

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (
            workspace_id,
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
        ): (
            WorkspaceId,
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    workspace_id,
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id
                FROM workspaces
                WHERE
                    paths IS ? AND
                    remote_connection_id IS ?
                LIMIT 1
            })
            .and_then(|mut prepared_statement| {
                (prepared_statement)((
                    root_paths.serialize().paths,
                    // NOTE(review): narrows the id via `as i32` — presumably
                    // connection ids stay within i32 range; confirm.
                    remote_connection_id.map(|id| id.0 as i32),
                ))
            })
            .context("No workspaces found")
            .warn_on_err()
            .flatten()?;

        // Rebuild the path list from the row (which may differ in order from
        // the query argument).
        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // Resolve the connection options for remote workspaces; lookup
        // failures are logged and degrade to a local location below.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1122
    /// Returns the workspace with the given ID, loading all associated data.
    ///
    /// Unlike the path-based lookups above, this also reads the stored
    /// remote_connection_id back out of the row itself.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        // `SerializedWindowBounds` decodes five columns and `DockStructure`
        // nine, so the 20 selected columns map onto this 8-element tuple.
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        // A failed remote-connection lookup degrades to a local location.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            // session_id is not restored from the row here.
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1212
1213 fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
1214 let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
1215 .select_bound(sql! {
1216 SELECT path, breakpoint_location, log_message, condition, hit_condition, state
1217 FROM breakpoints
1218 WHERE workspace_id = ?
1219 })
1220 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1221
1222 match breakpoints {
1223 Ok(bp) => {
1224 if bp.is_empty() {
1225 log::debug!("Breakpoints are empty after querying database for them");
1226 }
1227
1228 let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();
1229
1230 for (path, breakpoint) in bp {
1231 let path: Arc<Path> = path.into();
1232 map.entry(path.clone()).or_default().push(SourceBreakpoint {
1233 row: breakpoint.position,
1234 path,
1235 message: breakpoint.message,
1236 condition: breakpoint.condition,
1237 hit_condition: breakpoint.hit_condition,
1238 state: breakpoint.state,
1239 });
1240 }
1241
1242 for (path, bps) in map.iter() {
1243 log::info!(
1244 "Got {} breakpoints from database at path: {}",
1245 bps.len(),
1246 path.to_string_lossy()
1247 );
1248 }
1249
1250 map
1251 }
1252 Err(msg) => {
1253 log::error!("Breakpoints query failed with msg: {msg}");
1254 Default::default()
1255 }
1256 }
1257 }
1258
    /// Loads user-selected toolchains for a workspace (plus globally-scoped
    /// ones, stored under the sentinel workspace id 0), grouped by scope.
    fn user_toolchains(
        &self,
        workspace_id: WorkspaceId,
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
        type RowKind = (WorkspaceId, String, String, String, String, String, String);

        let toolchains: Vec<RowKind> = self
            .select_bound(sql! {
                SELECT workspace_id, worktree_root_path, relative_worktree_path,
                language_name, name, path, raw_json
                FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
                    workspace_id IN (0, ?2)
                )
            })
            .and_then(|mut statement| {
                (statement)((remote_connection_id.map(|id| id.0), workspace_id))
            })
            .unwrap_or_default();
        let mut ret = BTreeMap::<_, IndexSet<_>>::default();

        for (
            _workspace_id,
            worktree_root_path,
            relative_worktree_path,
            language_name,
            name,
            path,
            raw_json,
        ) in toolchains
        {
            // INTEGER's that are primary keys (like workspace ids, remote connection ids and such) start at 1,
            // so we're safe to use workspace id 0 as the sentinel for globally-scoped toolchains.
            let scope = if _workspace_id == WorkspaceId(0) {
                debug_assert_eq!(worktree_root_path, String::default());
                debug_assert_eq!(relative_worktree_path, String::default());
                ToolchainScope::Global
            } else {
                debug_assert_eq!(workspace_id, _workspace_id);
                // Either both path columns are set (subproject) or both are
                // empty (project scope) — never just one of them.
                debug_assert_eq!(
                    worktree_root_path == String::default(),
                    relative_worktree_path == String::default()
                );

                let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
                    continue;
                };
                if worktree_root_path != String::default()
                    && relative_worktree_path != String::default()
                {
                    ToolchainScope::Subproject(
                        Arc::from(worktree_root_path.as_ref()),
                        relative_path.into(),
                    )
                } else {
                    ToolchainScope::Project
                }
            };
            // Rows with unparsable JSON payloads are skipped silently.
            let Ok(as_json) = serde_json::from_str(&raw_json) else {
                continue;
            };
            let toolchain = Toolchain {
                name: SharedString::from(name),
                path: SharedString::from(path),
                language_name: LanguageName::from_proto(language_name),
                as_json,
            };
            ret.entry(scope).or_default().insert(toolchain);
        }

        ret
    }
1330
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously
    ///
    /// Everything runs inside one savepoint: pane layout, breakpoints and
    /// user toolchains are deleted and re-inserted, stale rows with the same
    /// paths are pruned, then the workspace row itself is upserted.
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                // Resolve (or lazily create) the remote_connections row first,
                // so its id can be stored on the workspace row below.
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                // Re-insert breakpoints; a failed insert is logged and skipped
                // so one bad row doesn't abort the whole save.
                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                // Re-insert toolchains; Global scope uses the sentinel
                // workspace id 0 and empty path columns.
                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                        toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                // `workspace.docks` binds the nine dock columns (?5..?13).
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1491
    /// Looks up (or inserts) the `remote_connections` row matching `options`
    /// and returns its id. Thin async wrapper over the internal helper.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1499
    /// Flattens a [`RemoteConnectionOptions`] into the column values of the
    /// `remote_connections` table and delegates to the lookup/insert query.
    /// Columns not used by a connection kind stay `None`.
    fn get_or_create_remote_connection_internal(
        this: &Connection,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        let identity = remote_connection_identity(&options);
        let kind;
        let user: Option<String>;
        let mut host = None;
        let mut port = None;
        let mut distro = None;
        let mut name = None;
        let mut container_id = None;
        let mut use_podman = None;
        let mut remote_env = None;

        match identity {
            RemoteConnectionIdentity::Ssh {
                host: identity_host,
                username,
                port: identity_port,
            } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(identity_host);
                port = identity_port;
                user = username;
            }
            RemoteConnectionIdentity::Wsl {
                distro_name,
                user: identity_user,
            } => {
                kind = RemoteConnectionKind::Wsl;
                distro = Some(distro_name);
                user = identity_user;
            }
            RemoteConnectionIdentity::Docker {
                container_id: identity_container_id,
                name: identity_name,
                remote_user,
            } => {
                kind = RemoteConnectionKind::Docker;
                container_id = Some(identity_container_id);
                name = Some(identity_name);
                user = Some(remote_user);
            }
            // Tests store mock connections as SSH rows with synthetic values.
            #[cfg(any(test, feature = "test-support"))]
            RemoteConnectionIdentity::Mock { id } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(format!("mock-{}", id));
                user = Some(format!("mock-user-{}", id));
            }
        }

        // Docker-only extras that are persisted but not part of the identity.
        if let RemoteConnectionOptions::Docker(options) = options {
            use_podman = Some(options.use_podman);
            remote_env = serde_json::to_string(&options.remote_env).ok();
        }

        Self::get_or_create_remote_connection_query(
            this,
            kind,
            host,
            port,
            user,
            distro,
            name,
            container_id,
            use_podman,
            remote_env,
        )
    }
1570
    /// Returns the id of an existing `remote_connections` row matching all
    /// identity columns (`IS` so NULLs compare equal), inserting a new row
    /// when none matches. `use_podman`/`remote_env` are stored but do not
    /// participate in the match.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1634
    // Allocates a fresh workspace id by inserting an otherwise-default row.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1640
1641 fn recent_workspaces(
1642 &self,
1643 ) -> Result<
1644 Vec<(
1645 WorkspaceId,
1646 PathList,
1647 Option<RemoteConnectionId>,
1648 DateTime<Utc>,
1649 )>,
1650 > {
1651 Ok(self
1652 .recent_workspaces_query()?
1653 .into_iter()
1654 .map(|(id, paths, order, remote_connection_id, timestamp)| {
1655 (
1656 id,
1657 PathList::deserialize(&SerializedPathList { paths, order }),
1658 remote_connection_id.map(RemoteConnectionId),
1659 parse_timestamp(×tamp),
1660 )
1661 })
1662 .collect())
1663 }
1664
    // Raw rows behind `recent_workspaces`: serialized paths/order, the remote
    // connection id and the last-used timestamp, newest first. Rows with NULL
    // paths and no remote connection are excluded.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1675
1676 fn session_workspaces(
1677 &self,
1678 session_id: String,
1679 ) -> Result<
1680 Vec<(
1681 WorkspaceId,
1682 PathList,
1683 Option<u64>,
1684 Option<RemoteConnectionId>,
1685 )>,
1686 > {
1687 Ok(self
1688 .session_workspaces_query(session_id)?
1689 .into_iter()
1690 .map(
1691 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1692 (
1693 WorkspaceId(workspace_id),
1694 PathList::deserialize(&SerializedPathList { paths, order }),
1695 window_id,
1696 remote_connection_id.map(RemoteConnectionId),
1697 )
1698 },
1699 )
1700 .collect())
1701 }
1702
    // Raw rows behind `session_workspaces`, filtered by session id.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }
1711
    // Returns the stored breakpoints for one file in one workspace.
    // NOTE(review): only `breakpoint_location` is selected, so the remaining
    // `Breakpoint` fields presumably come from defaults in its `Column`
    // implementation — confirm against `Breakpoint`'s decoder.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1719
1720 query! {
1721 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1722 DELETE FROM breakpoints
1723 WHERE file_path = ?2
1724 }
1725 }
1726
    /// Loads all stored remote connections keyed by id. Rows that fail to
    /// decode into [`RemoteConnectionOptions`] are silently dropped by the
    /// `filter_map`.
    fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
        Ok(self.select(sql!(
            SELECT
                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
            FROM
                remote_connections
        ))?()?
        .into_iter()
        .filter_map(
            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                Some((
                    RemoteConnectionId(id),
                    Self::remote_connection_from_row(
                        kind,
                        host,
                        port,
                        user,
                        distro,
                        container_id,
                        name,
                        use_podman,
                        remote_env,
                    )?,
                ))
            },
        )
        .collect())
    }
1755
    /// Loads a single remote connection by id, erroring when the row is
    /// missing or cannot be decoded into [`RemoteConnectionOptions`].
    pub(crate) fn remote_connection(
        &self,
        id: RemoteConnectionId,
    ) -> Result<RemoteConnectionOptions> {
        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
            self.select_row_bound(sql!(
                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                FROM remote_connections
                WHERE id = ?
            ))?(id.0)?
            .context("no such remote connection")?;
        Self::remote_connection_from_row(
            kind,
            host,
            port,
            user,
            distro,
            container_id,
            name,
            use_podman,
            remote_env,
        )
        .context("invalid remote_connection row")
    }
1780
1781 fn remote_connection_from_row(
1782 kind: String,
1783 host: Option<String>,
1784 port: Option<u16>,
1785 user: Option<String>,
1786 distro: Option<String>,
1787 container_id: Option<String>,
1788 name: Option<String>,
1789 use_podman: Option<bool>,
1790 remote_env: Option<String>,
1791 ) -> Option<RemoteConnectionOptions> {
1792 match RemoteConnectionKind::deserialize(&kind)? {
1793 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1794 distro_name: distro?,
1795 user: user,
1796 })),
1797 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1798 host: host?.into(),
1799 port,
1800 username: user,
1801 ..Default::default()
1802 })),
1803 RemoteConnectionKind::Docker => {
1804 let remote_env: BTreeMap<String, String> =
1805 serde_json::from_str(&remote_env?).ok()?;
1806 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1807 container_id: container_id?,
1808 name: name?,
1809 remote_user: user?,
1810 upload_binary_over_docker_exec: false,
1811 use_podman: use_podman?,
1812 remote_env,
1813 }))
1814 }
1815 }
1816 }
1817
    // Removes a workspace row entirely. NOTE(review): dependent rows (panes,
    // items, breakpoints, …) are presumably removed via foreign-key cascade —
    // confirm against the schema migrations.
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1824
1825 async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool {
1826 let mut any_dir = false;
1827 for path in paths {
1828 match fs.metadata(path).await.ok().flatten() {
1829 None => {
1830 return false;
1831 }
1832 Some(meta) => {
1833 if meta.is_dir {
1834 any_dir = true;
1835 }
1836 }
1837 }
1838 }
1839 any_dir
1840 }
1841
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist.
    pub async fn recent_workspaces_on_disk(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let mut result = Vec::new();
        let mut workspaces_to_delete = Vec::new();
        let remote_connections = self.remote_connections()?;
        let now = Utc::now();
        for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? {
            // Remote workspaces: kept when their connection row still decodes,
            // deleted when it doesn't. Their paths are never checked on disk.
            if let Some(remote_connection_id) = remote_connection_id {
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                } else {
                    workspaces_to_delete.push(id);
                }
                continue;
            }

            // Delete the workspace if any of the paths are WSL paths. If a
            // local workspace points to WSL, attempting to read its metadata
            // will wait for the WSL VM and file server to boot up. This can
            // block for many seconds. Supported scenarios use remote
            // workspaces.
            if cfg!(windows) {
                let has_wsl_path = paths
                    .paths()
                    .iter()
                    .any(|path| util::paths::WslPath::from_path(path).is_some());
                if has_wsl_path {
                    workspaces_to_delete.push(id);
                    continue;
                }
            }

            // Local workspaces whose paths vanished are only garbage-collected
            // after a week, so transiently unavailable paths survive.
            if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            } else if now - timestamp >= chrono::Duration::days(7) {
                workspaces_to_delete.push(id);
            }
        }

        futures::future::join_all(
            workspaces_to_delete
                .into_iter()
                .map(|id| self.delete_workspace_by_id(id)),
        )
        .await;
        Ok(result)
    }
1905
    /// Returns the most recently used workspace that is still valid on disk,
    /// or `None` when no recent workspace survives the on-disk check.
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        // `recent_workspaces_on_disk` is already ordered newest-first.
        Ok(self.recent_workspaces_on_disk(fs).await?.into_iter().next())
    }
1919
1920 // Returns the locations of the workspaces that were still opened when the last
1921 // session was closed (i.e. when Zed was quit).
1922 // If `last_session_window_order` is provided, the returned locations are ordered
1923 // according to that.
1924 pub async fn last_session_workspace_locations(
1925 &self,
1926 last_session_id: &str,
1927 last_session_window_stack: Option<Vec<WindowId>>,
1928 fs: &dyn Fs,
1929 ) -> Result<Vec<SessionWorkspace>> {
1930 let mut workspaces = Vec::new();
1931
1932 for (workspace_id, paths, window_id, remote_connection_id) in
1933 self.session_workspaces(last_session_id.to_owned())?
1934 {
1935 let window_id = window_id.map(WindowId::from);
1936
1937 if let Some(remote_connection_id) = remote_connection_id {
1938 workspaces.push(SessionWorkspace {
1939 workspace_id,
1940 location: SerializedWorkspaceLocation::Remote(
1941 self.remote_connection(remote_connection_id)?,
1942 ),
1943 paths,
1944 window_id,
1945 });
1946 } else if paths.is_empty() {
1947 // Empty workspace with items (drafts, files) - include for restoration
1948 workspaces.push(SessionWorkspace {
1949 workspace_id,
1950 location: SerializedWorkspaceLocation::Local,
1951 paths,
1952 window_id,
1953 });
1954 } else {
1955 if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
1956 workspaces.push(SessionWorkspace {
1957 workspace_id,
1958 location: SerializedWorkspaceLocation::Local,
1959 paths,
1960 window_id,
1961 });
1962 }
1963 }
1964 }
1965
1966 if let Some(stack) = last_session_window_stack {
1967 workspaces.sort_by_key(|workspace| {
1968 workspace
1969 .window_id
1970 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
1971 .unwrap_or(usize::MAX)
1972 });
1973 }
1974
1975 Ok(workspaces)
1976 }
1977
1978 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
1979 Ok(self
1980 .get_pane_group(workspace_id, None)?
1981 .into_iter()
1982 .next()
1983 .unwrap_or_else(|| {
1984 SerializedPaneGroup::Pane(SerializedPane {
1985 active: true,
1986 children: vec![],
1987 pinned_count: 0,
1988 })
1989 }))
1990 }
1991
    /// Recursively loads the children of a pane group (or the roots, when
    /// `group_id` is `None`). A single UNION query yields both sub-groups
    /// (group columns set, pane columns NULL) and leaf panes (the reverse),
    /// ordered by position; each row is then decoded into the matching
    /// [`SerializedPaneGroup`] variant.
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
                FROM (SELECT
                        group_id,
                        axis,
                        NULL as pane_id,
                        NULL as active,
                        NULL as pinned_count,
                        position,
                        parent_group_id,
                        workspace_id,
                        flexes
                      FROM pane_groups
                    UNION
                      SELECT
                        NULL,
                        NULL,
                        center_panes.pane_id,
                        panes.active as active,
                        pinned_count,
                        position,
                        parent_group_id,
                        panes.workspace_id as workspace_id,
                        NULL
                      FROM center_panes
                      JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // Group row: recurse for its children; flexes are stored as a
                // JSON-encoded Vec<f32>.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2066
    /// Recursively persists a pane group tree. Groups insert a `pane_groups`
    /// row and recurse into children with their position; leaf panes delegate
    /// to [`Self::save_pane`]. `parent` is `None` only for the root.
    fn save_pane_group(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() {
            log::debug!("Saving a pane group for workspace {workspace_id:?}");
        }
        match pane_group {
            SerializedPaneGroup::Group {
                axis,
                children,
                flexes,
            } => {
                let (parent_id, position) = parent.unzip();

                // Flexes are serialized as a JSON array string.
                let flex_string = flexes
                    .as_ref()
                    .map(|flexes| serde_json::json!(flexes).to_string());

                let group_id = conn.select_row_bound::<_, i64>(sql!(
                    INSERT INTO pane_groups(
                        workspace_id,
                        parent_group_id,
                        position,
                        axis,
                        flexes
                    )
                    VALUES (?, ?, ?, ?, ?)
                    RETURNING group_id
                ))?((
                    workspace_id,
                    parent_id,
                    position,
                    *axis,
                    flex_string,
                ))?
                .context("Couldn't retrieve group_id from inserted pane_group")?;

                for (position, group) in children.iter().enumerate() {
                    Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
                }

                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => {
                Self::save_pane(conn, workspace_id, pane, parent)?;
                Ok(())
            }
        }
    }
2119
    /// Persists one pane: inserts the `panes` row, links it into the center
    /// layout tree via `center_panes`, then stores its items. Returns the
    /// freshly assigned pane id.
    fn save_pane(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<PaneId> {
        let pane_id = conn.select_row_bound::<_, i64>(sql!(
            INSERT INTO panes(workspace_id, active, pinned_count)
            VALUES (?, ?, ?)
            RETURNING pane_id
        ))?((workspace_id, pane.active, pane.pinned_count))?
        .context("Could not retrieve inserted pane_id")?;

        // A NULL parent marks the root pane of the center layout.
        let (parent_id, order) = parent.unzip();
        conn.exec_bound(sql!(
            INSERT INTO center_panes(pane_id, parent_group_id, position)
            VALUES (?, ?, ?)
        ))?((pane_id, parent_id, order))?;

        Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;

        Ok(pane_id)
    }
2143
    /// Loads the serialized items of one pane, in stored position order.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2151
2152 fn save_items(
2153 conn: &Connection,
2154 workspace_id: WorkspaceId,
2155 pane_id: PaneId,
2156 items: &[SerializedItem],
2157 ) -> Result<()> {
2158 let mut insert = conn.exec_bound(sql!(
2159 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2160 )).context("Preparing insertion")?;
2161 for (position, item) in items.iter().enumerate() {
2162 insert((workspace_id, pane_id, position, item))?;
2163 }
2164
2165 Ok(())
2166 }
2167
    // Bumps a workspace's last-used timestamp to now.
    query! {
        pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
            UPDATE workspaces
            SET timestamp = CURRENT_TIMESTAMP
            WHERE workspace_id = ?
        }
    }
2175
    // Persists the window placement for a workspace. `bounds` expands into the
    // window_state/x/y/width/height columns (?2..?6).
    query! {
        pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
            UPDATE workspaces
            SET window_state = ?2,
                window_x = ?3,
                window_y = ?4,
                window_width = ?5,
                window_height = ?6,
                display = ?7
            WHERE workspace_id = ?1
        }
    }
2188
    // Persists whether the workspace uses the centered layout.
    query! {
        pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
            UPDATE workspaces
            SET centered_layout = ?2
            WHERE workspace_id = ?1
        }
    }
2196
    // Associates (or clears, via None) the session id of a workspace.
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }
2204
    // Updates both the session id and the window id the workspace is bound to
    // in a single statement.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2212
2213 pub(crate) async fn toolchains(
2214 &self,
2215 workspace_id: WorkspaceId,
2216 ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
2217 self.write(move |this| {
2218 let mut select = this
2219 .select_bound(sql!(
2220 SELECT
2221 name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
2222 FROM toolchains
2223 WHERE workspace_id = ?
2224 ))
2225 .context("select toolchains")?;
2226
2227 let toolchain: Vec<(String, String, String, String, String, String)> =
2228 select(workspace_id)?;
2229
2230 Ok(toolchain
2231 .into_iter()
2232 .filter_map(
2233 |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
2234 Some((
2235 Toolchain {
2236 name: name.into(),
2237 path: path.into(),
2238 language_name: LanguageName::new(&language),
2239 as_json: serde_json::Value::from_str(&json).ok()?,
2240 },
2241 Arc::from(worktree_root_path.as_ref()),
2242 RelPath::from_proto(&relative_worktree_path).log_err()?,
2243 ))
2244 },
2245 )
2246 .collect())
2247 })
2248 .await
2249 }
2250
    /// Persists the user's toolchain selection for a (workspace, worktree,
    /// relative path) key, overwriting any previous selection for that key.
    pub async fn set_toolchain(
        &self,
        workspace_id: WorkspaceId,
        worktree_root_path: Arc<Path>,
        relative_worktree_path: Arc<RelPath>,
        toolchain: Toolchain,
    ) -> Result<()> {
        log::debug!(
            "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
            toolchain.name
        );
        self.write(move |conn| {
            // UPSERT: the ON CONFLICT arm reuses positional parameters ?5..?7
            // (name, path, raw_json) from the INSERT's value list, so the
            // tuple order below is load-bearing.
            let mut insert = conn
                .exec_bound(sql!(
                    INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
                    ON CONFLICT DO
                    UPDATE SET
                        name = ?5,
                        path = ?6,
                        raw_json = ?7
                ))
                .context("Preparing insertion")?;

            insert((
                workspace_id,
                // Paths are stored as TEXT; they are read back as Strings in
                // `toolchains` above.
                worktree_root_path.to_string_lossy().into_owned(),
                relative_worktree_path.as_unix_str(),
                toolchain.language_name.as_ref(),
                toolchain.name.as_ref(),
                toolchain.path.as_ref(),
                toolchain.as_json.to_string(),
            ))?;

            Ok(())
        }).await
    }
2287
2288 pub(crate) async fn save_trusted_worktrees(
2289 &self,
2290 trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
2291 ) -> anyhow::Result<()> {
2292 use anyhow::Context as _;
2293 use db::sqlez::statement::Statement;
2294 use itertools::Itertools as _;
2295
2296 self.clear_trusted_worktrees()
2297 .await
2298 .context("clearing previous trust state")?;
2299
2300 let trusted_worktrees = trusted_worktrees
2301 .into_iter()
2302 .flat_map(|(host, abs_paths)| {
2303 abs_paths
2304 .into_iter()
2305 .map(move |abs_path| (Some(abs_path), host.clone()))
2306 })
2307 .collect::<Vec<_>>();
2308 let mut first_worktree;
2309 let mut last_worktree = 0_usize;
2310 for (count, placeholders) in std::iter::once("(?, ?, ?)")
2311 .cycle()
2312 .take(trusted_worktrees.len())
2313 .chunks(MAX_QUERY_PLACEHOLDERS / 3)
2314 .into_iter()
2315 .map(|chunk| {
2316 let mut count = 0;
2317 let placeholders = chunk
2318 .inspect(|_| {
2319 count += 1;
2320 })
2321 .join(", ");
2322 (count, placeholders)
2323 })
2324 .collect::<Vec<_>>()
2325 {
2326 first_worktree = last_worktree;
2327 last_worktree = last_worktree + count;
2328 let query = format!(
2329 r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
2330VALUES {placeholders};"#
2331 );
2332
2333 let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
2334 self.write(move |conn| {
2335 let mut statement = Statement::prepare(conn, query)?;
2336 let mut next_index = 1;
2337 for (abs_path, host) in trusted_worktrees {
2338 let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
2339 next_index = statement.bind(
2340 &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
2341 next_index,
2342 )?;
2343 next_index = statement.bind(
2344 &host
2345 .as_ref()
2346 .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
2347 next_index,
2348 )?;
2349 next_index = statement.bind(
2350 &host.as_ref().map(|host| host.host_identifier.as_str()),
2351 next_index,
2352 )?;
2353 }
2354 statement.exec()
2355 })
2356 .await
2357 .context("inserting new trusted state")?;
2358 }
2359 Ok(())
2360 }
2361
2362 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2363 let trusted_worktrees = self.trusted_worktrees()?;
2364 Ok(trusted_worktrees
2365 .into_iter()
2366 .filter_map(|(abs_path, user_name, host_name)| {
2367 let db_host = match (user_name, host_name) {
2368 (None, Some(host_name)) => Some(RemoteHostLocation {
2369 user_name: None,
2370 host_identifier: SharedString::new(host_name),
2371 }),
2372 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2373 user_name: Some(SharedString::new(user_name)),
2374 host_identifier: SharedString::new(host_name),
2375 }),
2376 _ => None,
2377 };
2378 Some((db_host, abs_path?))
2379 })
2380 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2381 acc.entry(remote_host)
2382 .or_insert_with(HashSet::default)
2383 .insert(abs_path);
2384 acc
2385 }))
2386 }
2387
    // Reads every row of the `trusted_worktrees` table; each column may be
    // NULL, hence the `Option` wrappers.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }
2394
    // Deletes all persisted worktree-trust rows; used to reset state before
    // re-inserting in `save_trusted_worktrees`.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2400}
2401
/// A single persisted workspace record: its id, serialized (local or remote)
/// location, root path list, and timestamp (the more recent entry wins when
/// deduplicating in `resolve_worktree_workspaces`).
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2408
2409/// Resolves workspace entries whose paths are git linked worktree checkouts
2410/// to their main repository paths.
2411///
2412/// For each workspace entry:
2413/// - If any path is a linked worktree checkout, all worktree paths in that
2414/// entry are resolved to their main repository paths, producing a new
2415/// `PathList`.
2416/// - The resolved entry is then deduplicated against existing entries: if a
2417/// workspace with the same paths already exists, the entry with the most
2418/// recent timestamp is kept.
2419pub async fn resolve_worktree_workspaces(
2420 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2421 fs: &dyn Fs,
2422) -> Vec<WorkspaceEntry> {
2423 // First pass: resolve worktree paths to main repo paths concurrently.
2424 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2425 let paths = entry.2.paths();
2426 if paths.is_empty() {
2427 return entry;
2428 }
2429
2430 // Resolve each path concurrently
2431 let resolved_paths = futures::future::join_all(
2432 paths
2433 .iter()
2434 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2435 )
2436 .await;
2437
2438 // If no paths were resolved, this entry is not a worktree — keep as-is
2439 if resolved_paths.iter().all(|r| r.is_none()) {
2440 return entry;
2441 }
2442
2443 // Build new path list, substituting resolved paths
2444 let new_paths: Vec<PathBuf> = paths
2445 .iter()
2446 .zip(resolved_paths.iter())
2447 .map(|(original, resolved)| {
2448 resolved
2449 .as_ref()
2450 .cloned()
2451 .unwrap_or_else(|| original.clone())
2452 })
2453 .collect();
2454
2455 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2456 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2457 }))
2458 .await;
2459
2460 // Second pass: deduplicate by PathList.
2461 // When two entries resolve to the same paths, keep the one with the
2462 // more recent timestamp.
2463 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2464 let mut result: Vec<WorkspaceEntry> = Vec::new();
2465
2466 for entry in resolved {
2467 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2468 if let Some(&existing_idx) = seen.get(&key) {
2469 // Keep the entry with the more recent timestamp
2470 if entry.3 > result[existing_idx].3 {
2471 result[existing_idx] = entry;
2472 }
2473 } else {
2474 seen.insert(key, result.len());
2475 result.push(entry);
2476 }
2477 }
2478
2479 result
2480}
2481
2482pub fn delete_unloaded_items(
2483 alive_items: Vec<ItemId>,
2484 workspace_id: WorkspaceId,
2485 table: &'static str,
2486 db: &ThreadSafeConnection,
2487 cx: &mut App,
2488) -> Task<Result<()>> {
2489 let db = db.clone();
2490 cx.spawn(async move |_| {
2491 let placeholders = alive_items
2492 .iter()
2493 .map(|_| "?")
2494 .collect::<Vec<&str>>()
2495 .join(", ");
2496
2497 let query = format!(
2498 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2499 );
2500
2501 db.write(move |conn| {
2502 let mut statement = Statement::prepare(conn, query)?;
2503 let mut next_index = statement.bind(&workspace_id, 1)?;
2504 for id in alive_items {
2505 next_index = statement.bind(&id, next_index)?;
2506 }
2507 statement.exec()
2508 })
2509 .await
2510 })
2511}
2512
2513#[cfg(test)]
2514mod tests {
2515 use super::*;
2516 use crate::OpenMode;
2517 use crate::PathList;
2518 use crate::ProjectGroupKey;
2519 use crate::{
2520 multi_workspace::MultiWorkspace,
2521 persistence::{
2522 model::{
2523 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2524 SessionWorkspace,
2525 },
2526 read_multi_workspace_state,
2527 },
2528 };
2529
2530 use gpui::AppContext as _;
2531 use pretty_assertions::assert_eq;
2532 use project::Project;
2533 use remote::SshConnectionOptions;
2534 use serde_json::json;
2535 use std::{thread, time::Duration};
2536
2537 /// Creates a unique directory in a FakeFs, returning the path.
2538 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2539 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2540 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2541 fs.insert_tree(&dir, json!({})).await;
2542 dir
2543 }
2544
2545 #[gpui::test]
2546 async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
2547 crate::tests::init_test(cx);
2548
2549 let fs = fs::FakeFs::new(cx.executor());
2550 let project1 = Project::test(fs.clone(), [], cx).await;
2551 let project2 = Project::test(fs.clone(), [], cx).await;
2552
2553 let (multi_workspace, cx) =
2554 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
2555
2556 multi_workspace.update(cx, |mw, cx| {
2557 mw.open_sidebar(cx);
2558 });
2559
2560 multi_workspace.update_in(cx, |mw, _, cx| {
2561 mw.set_random_database_id(cx);
2562 });
2563
2564 let window_id =
2565 multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());
2566
2567 // --- Add a second workspace ---
2568 let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
2569 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
2570 workspace.update(cx, |ws, _cx| ws.set_random_database_id());
2571 mw.activate(workspace.clone(), window, cx);
2572 workspace
2573 });
2574
2575 // Run background tasks so serialize has a chance to flush.
2576 cx.run_until_parked();
2577
2578 // Read back the persisted state and check that the active workspace ID was written.
2579 let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2580 let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
2581 assert_eq!(
2582 state_after_add.active_workspace_id, active_workspace2_db_id,
2583 "After adding a second workspace, the serialized active_workspace_id should match \
2584 the newly activated workspace's database id"
2585 );
2586
2587 // --- Remove the non-active workspace ---
2588 multi_workspace.update_in(cx, |mw, _window, cx| {
2589 let active = mw.workspace().clone();
2590 let ws = mw
2591 .workspaces()
2592 .find(|ws| *ws != &active)
2593 .expect("should have a non-active workspace");
2594 mw.remove([ws.clone()], |_, _, _| unreachable!(), _window, cx)
2595 .detach_and_log_err(cx);
2596 });
2597
2598 cx.run_until_parked();
2599
2600 let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2601 let remaining_db_id =
2602 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
2603 assert_eq!(
2604 state_after_remove.active_workspace_id, remaining_db_id,
2605 "After removing a workspace, the serialized active_workspace_id should match \
2606 the remaining active workspace's database id"
2607 );
2608 }
2609
2610 #[gpui::test]
2611 async fn test_breakpoints() {
2612 zlog::init_test();
2613
2614 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2615 let id = db.next_id().await.unwrap();
2616
2617 let path = Path::new("/tmp/test.rs");
2618
2619 let breakpoint = Breakpoint {
2620 position: 123,
2621 message: None,
2622 state: BreakpointState::Enabled,
2623 condition: None,
2624 hit_condition: None,
2625 };
2626
2627 let log_breakpoint = Breakpoint {
2628 position: 456,
2629 message: Some("Test log message".into()),
2630 state: BreakpointState::Enabled,
2631 condition: None,
2632 hit_condition: None,
2633 };
2634
2635 let disable_breakpoint = Breakpoint {
2636 position: 578,
2637 message: None,
2638 state: BreakpointState::Disabled,
2639 condition: None,
2640 hit_condition: None,
2641 };
2642
2643 let condition_breakpoint = Breakpoint {
2644 position: 789,
2645 message: None,
2646 state: BreakpointState::Enabled,
2647 condition: Some("x > 5".into()),
2648 hit_condition: None,
2649 };
2650
2651 let hit_condition_breakpoint = Breakpoint {
2652 position: 999,
2653 message: None,
2654 state: BreakpointState::Enabled,
2655 condition: None,
2656 hit_condition: Some(">= 3".into()),
2657 };
2658
2659 let workspace = SerializedWorkspace {
2660 id,
2661 paths: PathList::new(&["/tmp"]),
2662 location: SerializedWorkspaceLocation::Local,
2663 center_group: Default::default(),
2664 window_bounds: Default::default(),
2665 display: Default::default(),
2666 docks: Default::default(),
2667 centered_layout: false,
2668 breakpoints: {
2669 let mut map = collections::BTreeMap::default();
2670 map.insert(
2671 Arc::from(path),
2672 vec![
2673 SourceBreakpoint {
2674 row: breakpoint.position,
2675 path: Arc::from(path),
2676 message: breakpoint.message.clone(),
2677 state: breakpoint.state,
2678 condition: breakpoint.condition.clone(),
2679 hit_condition: breakpoint.hit_condition.clone(),
2680 },
2681 SourceBreakpoint {
2682 row: log_breakpoint.position,
2683 path: Arc::from(path),
2684 message: log_breakpoint.message.clone(),
2685 state: log_breakpoint.state,
2686 condition: log_breakpoint.condition.clone(),
2687 hit_condition: log_breakpoint.hit_condition.clone(),
2688 },
2689 SourceBreakpoint {
2690 row: disable_breakpoint.position,
2691 path: Arc::from(path),
2692 message: disable_breakpoint.message.clone(),
2693 state: disable_breakpoint.state,
2694 condition: disable_breakpoint.condition.clone(),
2695 hit_condition: disable_breakpoint.hit_condition.clone(),
2696 },
2697 SourceBreakpoint {
2698 row: condition_breakpoint.position,
2699 path: Arc::from(path),
2700 message: condition_breakpoint.message.clone(),
2701 state: condition_breakpoint.state,
2702 condition: condition_breakpoint.condition.clone(),
2703 hit_condition: condition_breakpoint.hit_condition.clone(),
2704 },
2705 SourceBreakpoint {
2706 row: hit_condition_breakpoint.position,
2707 path: Arc::from(path),
2708 message: hit_condition_breakpoint.message.clone(),
2709 state: hit_condition_breakpoint.state,
2710 condition: hit_condition_breakpoint.condition.clone(),
2711 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2712 },
2713 ],
2714 );
2715 map
2716 },
2717 session_id: None,
2718 window_id: None,
2719 user_toolchains: Default::default(),
2720 };
2721
2722 db.save_workspace(workspace.clone()).await;
2723
2724 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2725 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2726
2727 assert_eq!(loaded_breakpoints.len(), 5);
2728
2729 // normal breakpoint
2730 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2731 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2732 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2733 assert_eq!(
2734 loaded_breakpoints[0].hit_condition,
2735 breakpoint.hit_condition
2736 );
2737 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2738 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2739
2740 // enabled breakpoint
2741 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2742 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2743 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2744 assert_eq!(
2745 loaded_breakpoints[1].hit_condition,
2746 log_breakpoint.hit_condition
2747 );
2748 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2749 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2750
2751 // disable breakpoint
2752 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2753 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2754 assert_eq!(
2755 loaded_breakpoints[2].condition,
2756 disable_breakpoint.condition
2757 );
2758 assert_eq!(
2759 loaded_breakpoints[2].hit_condition,
2760 disable_breakpoint.hit_condition
2761 );
2762 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2763 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2764
2765 // condition breakpoint
2766 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2767 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2768 assert_eq!(
2769 loaded_breakpoints[3].condition,
2770 condition_breakpoint.condition
2771 );
2772 assert_eq!(
2773 loaded_breakpoints[3].hit_condition,
2774 condition_breakpoint.hit_condition
2775 );
2776 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2777 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2778
2779 // hit condition breakpoint
2780 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2781 assert_eq!(
2782 loaded_breakpoints[4].message,
2783 hit_condition_breakpoint.message
2784 );
2785 assert_eq!(
2786 loaded_breakpoints[4].condition,
2787 hit_condition_breakpoint.condition
2788 );
2789 assert_eq!(
2790 loaded_breakpoints[4].hit_condition,
2791 hit_condition_breakpoint.hit_condition
2792 );
2793 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2794 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2795 }
2796
    // Verifies that saving a workspace whose breakpoint map is empty removes a
    // previously persisted breakpoint for that workspace, rather than leaving
    // the stale row behind.
    #[gpui::test]
    async fn test_remove_last_breakpoint() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
        let id = db.next_id().await.unwrap();

        let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");

        let breakpoint_to_remove = Breakpoint {
            position: 100,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        // Save a workspace carrying exactly one breakpoint.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: {
                let mut map = collections::BTreeMap::default();
                map.insert(
                    Arc::from(singular_path),
                    vec![SourceBreakpoint {
                        row: breakpoint_to_remove.position,
                        path: Arc::from(singular_path),
                        message: None,
                        state: BreakpointState::Enabled,
                        condition: None,
                        hit_condition: None,
                    }],
                );
                map
            },
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Sanity-check the round trip before testing the removal.
        let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
        let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();

        assert_eq!(loaded_breakpoints.len(), 1);
        assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
        assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
        assert_eq!(
            loaded_breakpoints[0].condition,
            breakpoint_to_remove.condition
        );
        assert_eq!(
            loaded_breakpoints[0].hit_condition,
            breakpoint_to_remove.hit_condition
        );
        assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
        assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));

        // Re-save the same workspace id with no breakpoints at all.
        let workspace_without_breakpoint = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: collections::BTreeMap::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_without_breakpoint.clone())
            .await;

        // The breakpoint for that path must be gone after the second save.
        let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
        let empty_breakpoints = loaded_after_remove
            .breakpoints
            .get(&Arc::from(singular_path));

        assert!(empty_breakpoints.is_none());
    }
2887
2888 #[gpui::test]
2889 async fn test_next_id_stability() {
2890 zlog::init_test();
2891
2892 let db = WorkspaceDb::open_test_db("test_next_id_stability").await;
2893
2894 db.write(|conn| {
2895 conn.migrate(
2896 "test_table",
2897 &[sql!(
2898 CREATE TABLE test_table(
2899 text TEXT,
2900 workspace_id INTEGER,
2901 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
2902 ON DELETE CASCADE
2903 ) STRICT;
2904 )],
2905 &mut |_, _, _| false,
2906 )
2907 .unwrap();
2908 })
2909 .await;
2910
2911 let id = db.next_id().await.unwrap();
2912 // Assert the empty row got inserted
2913 assert_eq!(
2914 Some(id),
2915 db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
2916 SELECT workspace_id FROM workspaces WHERE workspace_id = ?
2917 ))
2918 .unwrap()(id)
2919 .unwrap()
2920 );
2921
2922 db.write(move |conn| {
2923 conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
2924 .unwrap()(("test-text-1", id))
2925 .unwrap()
2926 })
2927 .await;
2928
2929 let test_text_1 = db
2930 .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
2931 .unwrap()(1)
2932 .unwrap()
2933 .unwrap();
2934 assert_eq!(test_text_1, "test-text-1");
2935 }
2936
    // Verifies that a workspace's id stays stable across repeated saves and
    // path changes: rows in another table that reference the workspace via
    // FOREIGN KEY ... ON DELETE CASCADE must survive, which would fail if a
    // save deleted and recreated the workspace row.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        // Auxiliary table whose rows cascade-delete with their workspace.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                            REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Interleave saves with referencing-row inserts for both workspaces.
        db.save_workspace(workspace_1.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Mutate workspace 1's paths and re-save both (twice for workspace 1)
        // to exercise repeated saves of pre-existing ids.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        // Both referencing rows must still exist — no cascade fired.
        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3027
    /// Convenience constructor for a `SerializedPaneGroup::Group` with no
    /// stored flex sizes.
    fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
        SerializedPaneGroup::Group {
            axis: SerializedAxis(axis),
            flexes: None,
            children,
        }
    }
3035
    // Round-trips a workspace with a nested pane layout through the database,
    // checks that path order does not affect lookup, and that saving the same
    // workspace (same item ids) repeatedly does not corrupt the stored state.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // -----------------
        // | 1,2   | 5,6   |
        // | - - - |       |
        // | 3,4   |       |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Roots are looked up as a set: reversed order must find the same row.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3109
    // Exercises workspace lookup by root-path set: order-insensitivity,
    // non-matching sets, and the two ways a workspace's path set can change
    // (mutating an existing id, and saving a new id over the same path set).
    #[gpui::test]
    async fn test_workspace_assignment() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_basic_functionality").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(1),
            user_toolchains: Default::default(),
        };

        let mut workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: Some(2),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_2.clone()).await;

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);

        db.save_workspace(workspace_2.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        let mut workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp2", "/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(3),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_3.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
        db.save_workspace(workspace_3.clone()).await;
        // NOTE(review): "tmp" below has no leading slash, unlike every other
        // root in this test — confirm whether "/tmp" was intended; as written
        // the query trivially matches no stored workspace.
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }
3204
    #[gpui::test]
    async fn test_session_workspaces() {
        // Verifies that `session_workspaces` returns only the workspaces saved
        // under the queried session id, most-recently-saved first (forced by the
        // sleeps between saves below), and that a remote workspace's tuple also
        // carries its remote connection id.
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;

        // session-id-1, window 10.
        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp1"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(10),
            user_toolchains: Default::default(),
        };

        // session-id-1, window 20 (same session as workspace_1).
        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(20),
            user_toolchains: Default::default(),
        };

        // session-id-2, window 30.
        let workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp3"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(30),
            user_toolchains: Default::default(),
        };

        // No session at all — must never show up in any session query.
        let workspace_4 = SerializedWorkspace {
            id: WorkspaceId(4),
            paths: PathList::new(&["/tmp4"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let connection_id = db
            .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: "my-host".into(),
                port: Some(1234),
                ..Default::default()
            }))
            .await
            .unwrap();

        // session-id-2, window 50, remote: exercises the connection-id column.
        let workspace_5 = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(
                db.remote_connection(connection_id).unwrap(),
            ),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(50),
            user_toolchains: Default::default(),
        };

        // session-id-3, multiple roots: checks path-list round-tripping.
        let workspace_6 = SerializedWorkspace {
            id: WorkspaceId(6),
            paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("session-id-3".to_owned()),
            window_id: Some(60),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_2.clone()).await;
        db.save_workspace(workspace_3.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_4.clone()).await;
        db.save_workspace(workspace_5.clone()).await;
        db.save_workspace(workspace_6.clone()).await;

        // workspace_2 was saved after workspace_1, so it comes back first.
        let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(2));
        assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
        assert_eq!(locations[0].2, Some(20));
        assert_eq!(locations[1].0, WorkspaceId(1));
        assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
        assert_eq!(locations[1].2, Some(10));

        // The remote workspace (5) also reports its remote connection id in
        // the tuple's 4th element; local workspaces don't assert it.
        let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(5));
        assert_eq!(locations[0].1, PathList::default());
        assert_eq!(locations[0].2, Some(50));
        assert_eq!(locations[0].3, Some(connection_id));
        assert_eq!(locations[1].0, WorkspaceId(3));
        assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
        assert_eq!(locations[1].2, Some(30));

        let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
        assert_eq!(locations.len(), 1);
        assert_eq!(locations[0].0, WorkspaceId(6));
        assert_eq!(
            locations[0].1,
            PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
        );
        assert_eq!(locations[0].2, Some(60));
    }
3349
3350 fn default_workspace<P: AsRef<Path>>(
3351 paths: &[P],
3352 center_group: &SerializedPaneGroup,
3353 ) -> SerializedWorkspace {
3354 SerializedWorkspace {
3355 id: WorkspaceId(4),
3356 paths: PathList::new(paths),
3357 location: SerializedWorkspaceLocation::Local,
3358 center_group: center_group.clone(),
3359 window_bounds: Default::default(),
3360 display: Default::default(),
3361 docks: Default::default(),
3362 breakpoints: Default::default(),
3363 centered_layout: false,
3364 session_id: None,
3365 window_id: None,
3366 user_toolchains: Default::default(),
3367 }
3368 }
3369
    #[gpui::test]
    async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
        // Verifies that `last_session_workspace_locations` returns the
        // session's workspaces ordered by the provided window stack
        // (top-of-stack first): stack [2,8,5,9,3,4] maps to workspaces
        // [4,3,2,1,5,6] via the window ids assigned below.
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();

        // The dirs must exist on the (fake) filesystem for the workspaces to
        // be considered restorable.
        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;

        // (workspace id, root paths, window id) — all in the same session.
        let workspaces = [
            (1, vec![dir1.path()], 9),
            (2, vec![dir2.path()], 5),
            (3, vec![dir3.path()], 8),
            (4, vec![dir4.path()], 2),
            (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
            (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
        ]
        .into_iter()
        .map(|(id, paths, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::new(paths.as_slice()),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9),
            WindowId::from(3),
            WindowId::from(4), // Bottom
        ]));

        let locations = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(
            locations,
            [
                SessionWorkspace {
                    workspace_id: WorkspaceId(4),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path()]),
                    window_id: Some(WindowId::from(2u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(3),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir3.path()]),
                    window_id: Some(WindowId::from(8u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(2),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir2.path()]),
                    window_id: Some(WindowId::from(5u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(1),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path()]),
                    window_id: Some(WindowId::from(9u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(5),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
                    window_id: Some(WindowId::from(3u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(6),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
                    window_id: Some(WindowId::from(4u64)),
                },
            ]
        );
    }
3470
    #[gpui::test]
    async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
        // Same window-stack ordering check as the local variant above, but for
        // remote (SSH) workspaces: stack [2,8,5,9] maps to workspaces
        // [4,3,2,1], each carrying its own remote connection options.
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
                .await;

        // Register four distinct SSH connections; the futures are collected
        // first and then driven together via join_all below.
        let remote_connections = [
            ("host-1", "my-user-1"),
            ("host-2", "my-user-2"),
            ("host-3", "my-user-3"),
            ("host-4", "my-user-4"),
        ]
        .into_iter()
        .map(|(host, user)| async {
            let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: host.into(),
                username: Some(user.to_string()),
                ..Default::default()
            });
            db.get_or_create_remote_connection(options.clone())
                .await
                .unwrap();
            options
        })
        .collect::<Vec<_>>();

        let remote_connections = futures::future::join_all(remote_connections).await;

        // (workspace id, connection, window id) — all in the same session.
        let workspaces = [
            (1, remote_connections[0].clone(), 9),
            (2, remote_connections[1].clone(), 5),
            (3, remote_connections[2].clone(), 8),
            (4, remote_connections[3].clone(), 2),
        ]
        .into_iter()
        .map(|(id, remote_connection, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(remote_connection),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9), // Bottom
        ]));

        let have = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(have.len(), 4);
        assert_eq!(
            have[0],
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(2u64)),
            }
        );
        assert_eq!(
            have[1],
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(8u64)),
            }
        );
        assert_eq!(
            have[2],
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(5u64)),
            }
        );
        assert_eq!(
            have[3],
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(9u64)),
            }
        );
    }
3576
3577 #[gpui::test]
3578 async fn test_get_or_create_ssh_project() {
3579 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3580
3581 let host = "example.com".to_string();
3582 let port = Some(22_u16);
3583 let user = Some("user".to_string());
3584
3585 let connection_id = db
3586 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3587 host: host.clone().into(),
3588 port,
3589 username: user.clone(),
3590 ..Default::default()
3591 }))
3592 .await
3593 .unwrap();
3594
3595 // Test that calling the function again with the same parameters returns the same project
3596 let same_connection = db
3597 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3598 host: host.clone().into(),
3599 port,
3600 username: user.clone(),
3601 ..Default::default()
3602 }))
3603 .await
3604 .unwrap();
3605
3606 assert_eq!(connection_id, same_connection);
3607
3608 // Test with different parameters
3609 let host2 = "otherexample.com".to_string();
3610 let port2 = None;
3611 let user2 = Some("otheruser".to_string());
3612
3613 let different_connection = db
3614 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3615 host: host2.clone().into(),
3616 port: port2,
3617 username: user2.clone(),
3618 ..Default::default()
3619 }))
3620 .await
3621 .unwrap();
3622
3623 assert_ne!(connection_id, different_connection);
3624 }
3625
3626 #[gpui::test]
3627 async fn test_get_or_create_ssh_project_with_null_user() {
3628 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
3629
3630 let (host, port, user) = ("example.com".to_string(), None, None);
3631
3632 let connection_id = db
3633 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3634 host: host.clone().into(),
3635 port,
3636 username: None,
3637 ..Default::default()
3638 }))
3639 .await
3640 .unwrap();
3641
3642 let same_connection_id = db
3643 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3644 host: host.clone().into(),
3645 port,
3646 username: user.clone(),
3647 ..Default::default()
3648 }))
3649 .await
3650 .unwrap();
3651
3652 assert_eq!(connection_id, same_connection_id);
3653 }
3654
    #[gpui::test]
    async fn test_get_remote_connections() {
        // Stores three SSH connections (covering present/absent port and
        // username) and verifies `remote_connections` returns all of them,
        // keyed by the ids handed out at creation time.
        let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;

        // (host, port, username) triples to persist.
        let connections = [
            ("example.com".to_string(), None, None),
            (
                "anotherexample.com".to_string(),
                Some(123_u16),
                Some("user2".to_string()),
            ),
            ("yetanother.com".to_string(), Some(345_u16), None),
        ];

        let mut ids = Vec::new();
        for (host, port, user) in connections.iter() {
            ids.push(
                db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
                    SshConnectionOptions {
                        host: host.clone().into(),
                        port: *port,
                        username: user.clone(),
                        ..Default::default()
                    },
                ))
                .await
                .unwrap(),
            );
        }

        // Compare as a map: id -> options, order-independent.
        let stored_connections = db.remote_connections().unwrap();
        assert_eq!(
            stored_connections,
            [
                (
                    ids[0],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "example.com".into(),
                        port: None,
                        username: None,
                        ..Default::default()
                    }),
                ),
                (
                    ids[1],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "anotherexample.com".into(),
                        port: Some(123),
                        username: Some("user2".into()),
                        ..Default::default()
                    }),
                ),
                (
                    ids[2],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "yetanother.com".into(),
                        port: Some(345),
                        username: None,
                        ..Default::default()
                    }),
                ),
            ]
            .into_iter()
            .collect::<HashMap<_, _>>(),
        );
    }
3721
3722 #[gpui::test]
3723 async fn test_simple_split() {
3724 zlog::init_test();
3725
3726 let db = WorkspaceDb::open_test_db("simple_split").await;
3727
3728 // -----------------
3729 // | 1,2 | 5,6 |
3730 // | - - - | |
3731 // | 3,4 | |
3732 // -----------------
3733 let center_pane = group(
3734 Axis::Horizontal,
3735 vec![
3736 group(
3737 Axis::Vertical,
3738 vec![
3739 SerializedPaneGroup::Pane(SerializedPane::new(
3740 vec![
3741 SerializedItem::new("Terminal", 1, false, false),
3742 SerializedItem::new("Terminal", 2, true, false),
3743 ],
3744 false,
3745 0,
3746 )),
3747 SerializedPaneGroup::Pane(SerializedPane::new(
3748 vec![
3749 SerializedItem::new("Terminal", 4, false, false),
3750 SerializedItem::new("Terminal", 3, true, false),
3751 ],
3752 true,
3753 0,
3754 )),
3755 ],
3756 ),
3757 SerializedPaneGroup::Pane(SerializedPane::new(
3758 vec![
3759 SerializedItem::new("Terminal", 5, true, false),
3760 SerializedItem::new("Terminal", 6, false, false),
3761 ],
3762 false,
3763 0,
3764 )),
3765 ],
3766 );
3767
3768 let workspace = default_workspace(&["/tmp"], ¢er_pane);
3769
3770 db.save_workspace(workspace.clone()).await;
3771
3772 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
3773
3774 assert_eq!(workspace.center_group, new_workspace.center_group);
3775 }
3776
    #[gpui::test]
    async fn test_cleanup_panes() {
        // Saves a workspace with a nested pane tree, then re-saves it with a
        // smaller tree: the read-back center group must equal the latest save
        // exactly, i.e. panes from the first save must not leak through.
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;

        // First save: horizontal split whose left half is itself split.
        let center_pane = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 1, false, false),
                                SerializedItem::new("Terminal", 2, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 4, false, false),
                                SerializedItem::new("Terminal", 3, true, false),
                            ],
                            true,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 5, false, false),
                        SerializedItem::new("Terminal", 6, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let id = &["/tmp"];

        let mut workspace = default_workspace(id, &center_pane);

        db.save_workspace(workspace.clone()).await;

        // Second save: simpler tree (one vertical split, third pane dropped).
        workspace.center_group = group(
            Axis::Vertical,
            vec![
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 1, false, false),
                        SerializedItem::new("Terminal", 2, true, false),
                    ],
                    false,
                    0,
                )),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 4, true, false),
                        SerializedItem::new("Terminal", 3, false, false),
                    ],
                    true,
                    0,
                )),
            ],
        );

        db.save_workspace(workspace.clone()).await;

        let new_workspace = db.workspace_for_roots(id).unwrap();

        assert_eq!(workspace.center_group, new_workspace.center_group);
    }
3852
3853 #[gpui::test]
3854 async fn test_empty_workspace_window_bounds() {
3855 zlog::init_test();
3856
3857 let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
3858 let id = db.next_id().await.unwrap();
3859
3860 // Create a workspace with empty paths (empty workspace)
3861 let empty_paths: &[&str] = &[];
3862 let display_uuid = Uuid::new_v4();
3863 let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
3864 origin: point(px(100.0), px(200.0)),
3865 size: size(px(800.0), px(600.0)),
3866 }));
3867
3868 let workspace = SerializedWorkspace {
3869 id,
3870 paths: PathList::new(empty_paths),
3871 location: SerializedWorkspaceLocation::Local,
3872 center_group: Default::default(),
3873 window_bounds: None,
3874 display: None,
3875 docks: Default::default(),
3876 breakpoints: Default::default(),
3877 centered_layout: false,
3878 session_id: None,
3879 window_id: None,
3880 user_toolchains: Default::default(),
3881 };
3882
3883 // Save the workspace (this creates the record with empty paths)
3884 db.save_workspace(workspace.clone()).await;
3885
3886 // Save window bounds separately (as the actual code does via set_window_open_status)
3887 db.set_window_open_status(id, window_bounds, display_uuid)
3888 .await
3889 .unwrap();
3890
3891 // Empty workspaces cannot be retrieved by paths (they'd all match).
3892 // They must be retrieved by workspace_id.
3893 assert!(db.workspace_for_roots(empty_paths).is_none());
3894
3895 // Retrieve using workspace_for_id instead
3896 let retrieved = db.workspace_for_id(id).unwrap();
3897
3898 // Verify window bounds were persisted
3899 assert_eq!(retrieved.id, id);
3900 assert!(retrieved.window_bounds.is_some());
3901 assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
3902 assert!(retrieved.display.is_some());
3903 assert_eq!(retrieved.display.unwrap(), display_uuid);
3904 }
3905
    #[gpui::test]
    async fn test_last_session_workspace_locations_groups_by_window_id(
        cx: &mut gpui::TestAppContext,
    ) {
        // Verifies that every workspace returned for a session carries its
        // window_id, so the caller can regroup workspaces that shared a
        // window (MultiWorkspace) during the previous session.
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
        let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();

        // Roots must exist on the fake fs for the workspaces to be returned.
        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;
        fs.insert_tree(dir5.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
                .await;

        // Simulate two MultiWorkspace windows each containing two workspaces,
        // plus one single-workspace window:
        // Window 10: workspace 1, workspace 2
        // Window 20: workspace 3, workspace 4
        // Window 30: workspace 5 (only one)
        //
        // On session restore, the caller should be able to group these by
        // window_id to reconstruct the MultiWorkspace windows.
        let workspaces_data: Vec<(i64, &Path, u64)> = vec![
            (1, dir1.path(), 10),
            (2, dir2.path(), 10),
            (3, dir3.path(), 20),
            (4, dir4.path(), 20),
            (5, dir5.path(), 30),
        ];

        for (id, dir, window_id) in &workspaces_data {
            db.save_workspace(SerializedWorkspace {
                id: WorkspaceId(*id),
                paths: PathList::new(&[*dir]),
                location: SerializedWorkspaceLocation::Local,
                center_group: Default::default(),
                window_bounds: Default::default(),
                display: Default::default(),
                docks: Default::default(),
                centered_layout: false,
                session_id: Some("test-session".to_owned()),
                breakpoints: Default::default(),
                window_id: Some(*window_id),
                user_toolchains: Default::default(),
            })
            .await;
        }

        // No window stack supplied: ordering is not asserted here, only
        // completeness and the window_id groupings.
        let locations = db
            .last_session_workspace_locations("test-session", None, fs.as_ref())
            .await
            .unwrap();

        // All 5 workspaces should be returned with their window_ids.
        assert_eq!(locations.len(), 5);

        // Every entry should have a window_id so the caller can group them.
        for session_workspace in &locations {
            assert!(
                session_workspace.window_id.is_some(),
                "workspace {:?} missing window_id",
                session_workspace.workspace_id
            );
        }

        // Group by window_id, simulating what the restoration code should do.
        let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
        for session_workspace in &locations {
            if let Some(window_id) = session_workspace.window_id {
                by_window
                    .entry(window_id)
                    .or_default()
                    .push(session_workspace.workspace_id);
            }
        }

        // Should produce 3 windows, not 5.
        assert_eq!(
            by_window.len(),
            3,
            "Expected 3 window groups, got {}: {:?}",
            by_window.len(),
            by_window
        );

        // Window 10 should contain workspaces 1 and 2.
        let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
        assert_eq!(window_10.len(), 2);
        assert!(window_10.contains(&WorkspaceId(1)));
        assert!(window_10.contains(&WorkspaceId(2)));

        // Window 20 should contain workspaces 3 and 4.
        let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
        assert_eq!(window_20.len(), 2);
        assert!(window_20.contains(&WorkspaceId(3)));
        assert!(window_20.contains(&WorkspaceId(4)));

        // Window 30 should contain only workspace 5.
        let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
        assert_eq!(window_30.len(), 1);
        assert!(window_30.contains(&WorkspaceId(5)));
    }
4015
    #[gpui::test]
    async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
        // Verifies that `read_serialized_multi_workspaces` groups session
        // workspaces by window, applies each window's persisted
        // MultiWorkspaceState (active workspace, sidebar flag), and collects
        // window-less workspaces into their own group with a default state.
        use crate::persistence::model::MultiWorkspaceState;

        // Write multi-workspace state for two windows via the scoped KVP.
        let window_10 = WindowId::from(10u64);
        let window_20 = WindowId::from(20u64);

        let kvp = cx.update(|cx| KeyValueStore::global(cx));

        write_multi_workspace_state(
            &kvp,
            window_10,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(2)),
                project_groups: vec![],
                sidebar_open: true,
                sidebar_state: None,
            },
        )
        .await;

        write_multi_workspace_state(
            &kvp,
            window_20,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(3)),
                project_groups: vec![],
                sidebar_open: false,
                sidebar_state: None,
            },
        )
        .await;

        // Build session workspaces: two in window 10, one in window 20, one with no window.
        let session_workspaces = vec![
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/a"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/b"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/c"]),
                window_id: Some(window_20),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/d"]),
                window_id: None,
            },
        ];

        let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));

        // Should produce 3 results: window 10, window 20, and the orphan.
        assert_eq!(results.len(), 3);

        // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
        let group_10 = &results[0];
        assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
        assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
        assert_eq!(group_10.state.sidebar_open, true);

        // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
        let group_20 = &results[1];
        assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
        assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
        assert_eq!(group_20.state.sidebar_open, false);

        // Orphan: no active_workspace_id, falls back to first workspace (id 4).
        let group_none = &results[2];
        assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
        assert_eq!(group_none.state.active_workspace_id, None);
        assert_eq!(group_none.state.sidebar_open, false);
    }
4101
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        // Verifies that awaiting `flush_serialization`'s task alone — with no
        // extra executor pumping — is enough to guarantee the workspace has
        // been written to the DB (e.g. on app quit).
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4141
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        // Verifies that creating a workspace through the MultiWorkspace API
        // asynchronously assigns it a database_id, records it as the active
        // workspace in the per-window state, and fully serializes its row.
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4193
4194 #[gpui::test]
4195 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4196 crate::tests::init_test(cx);
4197
4198 let fs = fs::FakeFs::new(cx.executor());
4199 let dir = unique_test_dir(&fs, "remove").await;
4200 let project1 = Project::test(fs.clone(), [], cx).await;
4201 let project2 = Project::test(fs.clone(), [], cx).await;
4202
4203 let (multi_workspace, cx) =
4204 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4205
4206 multi_workspace.update(cx, |mw, cx| {
4207 mw.open_sidebar(cx);
4208 });
4209
4210 multi_workspace.update_in(cx, |mw, _, cx| {
4211 mw.set_random_database_id(cx);
4212 });
4213
4214 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4215
4216 // Get a real DB id for workspace2 so the row actually exists.
4217 let workspace2_db_id = db.next_id().await.unwrap();
4218
4219 multi_workspace.update_in(cx, |mw, window, cx| {
4220 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4221 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4222 ws.set_database_id(workspace2_db_id)
4223 });
4224 mw.add(workspace.clone(), window, cx);
4225 });
4226
4227 // Save a full workspace row to the DB directly.
4228 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4229 db.save_workspace(SerializedWorkspace {
4230 id: workspace2_db_id,
4231 paths: PathList::new(&[&dir]),
4232 location: SerializedWorkspaceLocation::Local,
4233 center_group: Default::default(),
4234 window_bounds: Default::default(),
4235 display: Default::default(),
4236 docks: Default::default(),
4237 centered_layout: false,
4238 session_id: Some(session_id.clone()),
4239 breakpoints: Default::default(),
4240 window_id: Some(99),
4241 user_toolchains: Default::default(),
4242 })
4243 .await;
4244
4245 assert!(
4246 db.workspace_for_id(workspace2_db_id).is_some(),
4247 "Workspace2 should exist in DB before removal"
4248 );
4249
4250 // Remove workspace at index 1 (the second workspace).
4251 multi_workspace.update_in(cx, |mw, window, cx| {
4252 let ws = mw.workspaces().nth(1).unwrap().clone();
4253 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4254 .detach_and_log_err(cx);
4255 });
4256
4257 cx.run_until_parked();
4258
4259 // The row should still exist so it continues to appear in recent
4260 // projects, but the session binding should be cleared so it is not
4261 // restored as part of any future session.
4262 assert!(
4263 db.workspace_for_id(workspace2_db_id).is_some(),
4264 "Removed workspace's DB row should be preserved for recent projects"
4265 );
4266
4267 let session_workspaces = db
4268 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4269 .await
4270 .unwrap();
4271 let restored_ids: Vec<WorkspaceId> = session_workspaces
4272 .iter()
4273 .map(|sw| sw.workspace_id)
4274 .collect();
4275 assert!(
4276 !restored_ids.contains(&workspace2_db_id),
4277 "Removed workspace should not appear in session restoration"
4278 );
4279 }
4280
4281 #[gpui::test]
4282 async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
4283 crate::tests::init_test(cx);
4284
4285 let fs = fs::FakeFs::new(cx.executor());
4286 let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
4287 let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
4288 fs.insert_tree(dir1.path(), json!({})).await;
4289 fs.insert_tree(dir2.path(), json!({})).await;
4290
4291 let project1 = Project::test(fs.clone(), [], cx).await;
4292 let project2 = Project::test(fs.clone(), [], cx).await;
4293
4294 let db = cx.update(|cx| WorkspaceDb::global(cx));
4295
4296 // Get real DB ids so the rows actually exist.
4297 let ws1_id = db.next_id().await.unwrap();
4298 let ws2_id = db.next_id().await.unwrap();
4299
4300 let (multi_workspace, cx) =
4301 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4302
4303 multi_workspace.update(cx, |mw, cx| {
4304 mw.open_sidebar(cx);
4305 });
4306
4307 multi_workspace.update_in(cx, |mw, _, cx| {
4308 mw.workspace().update(cx, |ws, _cx| {
4309 ws.set_database_id(ws1_id);
4310 });
4311 });
4312
4313 multi_workspace.update_in(cx, |mw, window, cx| {
4314 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4315 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4316 ws.set_database_id(ws2_id)
4317 });
4318 mw.add(workspace.clone(), window, cx);
4319 });
4320
4321 let session_id = "test-zombie-session";
4322 let window_id_val: u64 = 42;
4323
4324 db.save_workspace(SerializedWorkspace {
4325 id: ws1_id,
4326 paths: PathList::new(&[dir1.path()]),
4327 location: SerializedWorkspaceLocation::Local,
4328 center_group: Default::default(),
4329 window_bounds: Default::default(),
4330 display: Default::default(),
4331 docks: Default::default(),
4332 centered_layout: false,
4333 session_id: Some(session_id.to_owned()),
4334 breakpoints: Default::default(),
4335 window_id: Some(window_id_val),
4336 user_toolchains: Default::default(),
4337 })
4338 .await;
4339
4340 db.save_workspace(SerializedWorkspace {
4341 id: ws2_id,
4342 paths: PathList::new(&[dir2.path()]),
4343 location: SerializedWorkspaceLocation::Local,
4344 center_group: Default::default(),
4345 window_bounds: Default::default(),
4346 display: Default::default(),
4347 docks: Default::default(),
4348 centered_layout: false,
4349 session_id: Some(session_id.to_owned()),
4350 breakpoints: Default::default(),
4351 window_id: Some(window_id_val),
4352 user_toolchains: Default::default(),
4353 })
4354 .await;
4355
4356 // Remove workspace2 (index 1).
4357 multi_workspace.update_in(cx, |mw, window, cx| {
4358 let ws = mw.workspaces().nth(1).unwrap().clone();
4359 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4360 .detach_and_log_err(cx);
4361 });
4362
4363 cx.run_until_parked();
4364
4365 // The removed workspace should NOT appear in session restoration.
4366 let locations = db
4367 .last_session_workspace_locations(session_id, None, fs.as_ref())
4368 .await
4369 .unwrap();
4370
4371 let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
4372 assert!(
4373 !restored_ids.contains(&ws2_id),
4374 "Removed workspace should not appear in session restoration list. Found: {:?}",
4375 restored_ids
4376 );
4377 assert!(
4378 restored_ids.contains(&ws1_id),
4379 "Remaining workspace should still appear in session restoration list"
4380 );
4381 }
4382
4383 #[gpui::test]
4384 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4385 crate::tests::init_test(cx);
4386
4387 let fs = fs::FakeFs::new(cx.executor());
4388 let dir = unique_test_dir(&fs, "pending-removal").await;
4389 let project1 = Project::test(fs.clone(), [], cx).await;
4390 let project2 = Project::test(fs.clone(), [], cx).await;
4391
4392 let db = cx.update(|cx| WorkspaceDb::global(cx));
4393
4394 // Get a real DB id for workspace2 so the row actually exists.
4395 let workspace2_db_id = db.next_id().await.unwrap();
4396
4397 let (multi_workspace, cx) =
4398 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4399
4400 multi_workspace.update(cx, |mw, cx| {
4401 mw.open_sidebar(cx);
4402 });
4403
4404 multi_workspace.update_in(cx, |mw, _, cx| {
4405 mw.set_random_database_id(cx);
4406 });
4407
4408 multi_workspace.update_in(cx, |mw, window, cx| {
4409 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4410 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4411 ws.set_database_id(workspace2_db_id)
4412 });
4413 mw.add(workspace.clone(), window, cx);
4414 });
4415
4416 // Save a full workspace row to the DB directly and let it settle.
4417 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4418 db.save_workspace(SerializedWorkspace {
4419 id: workspace2_db_id,
4420 paths: PathList::new(&[&dir]),
4421 location: SerializedWorkspaceLocation::Local,
4422 center_group: Default::default(),
4423 window_bounds: Default::default(),
4424 display: Default::default(),
4425 docks: Default::default(),
4426 centered_layout: false,
4427 session_id: Some(session_id.clone()),
4428 breakpoints: Default::default(),
4429 window_id: Some(88),
4430 user_toolchains: Default::default(),
4431 })
4432 .await;
4433 cx.run_until_parked();
4434
4435 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4436 multi_workspace.update_in(cx, |mw, window, cx| {
4437 let ws = mw.workspaces().nth(1).unwrap().clone();
4438 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4439 .detach_and_log_err(cx);
4440 });
4441
4442 // Simulate the quit handler pattern: collect flush tasks + pending
4443 // removal tasks and await them all.
4444 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4445 let mut tasks: Vec<Task<()>> = mw
4446 .workspaces()
4447 .map(|workspace| {
4448 workspace.update(cx, |workspace, cx| {
4449 workspace.flush_serialization(window, cx)
4450 })
4451 })
4452 .collect();
4453 let mut removal_tasks = mw.take_pending_removal_tasks();
4454 // Note: removal_tasks may be empty if the background task already
4455 // completed (take_pending_removal_tasks filters out ready tasks).
4456 tasks.append(&mut removal_tasks);
4457 tasks.push(mw.flush_serialization());
4458 tasks
4459 });
4460 futures::future::join_all(all_tasks).await;
4461
4462 // The row should still exist (for recent projects), but the session
4463 // binding should have been cleared by the pending removal task.
4464 assert!(
4465 db.workspace_for_id(workspace2_db_id).is_some(),
4466 "Workspace row should be preserved for recent projects"
4467 );
4468
4469 let session_workspaces = db
4470 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4471 .await
4472 .unwrap();
4473 let restored_ids: Vec<WorkspaceId> = session_workspaces
4474 .iter()
4475 .map(|sw| sw.workspace_id)
4476 .collect();
4477 assert!(
4478 !restored_ids.contains(&workspace2_db_id),
4479 "Pending removal task should have cleared the session binding"
4480 );
4481 }
4482
4483 #[gpui::test]
4484 async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
4485 crate::tests::init_test(cx);
4486
4487 let fs = fs::FakeFs::new(cx.executor());
4488 let project = Project::test(fs.clone(), [], cx).await;
4489
4490 let (multi_workspace, cx) =
4491 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4492
4493 multi_workspace.update_in(cx, |mw, _, cx| {
4494 mw.set_random_database_id(cx);
4495 });
4496
4497 let task =
4498 multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
4499 task.await;
4500
4501 let new_workspace_db_id =
4502 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4503 assert!(
4504 new_workspace_db_id.is_some(),
4505 "After run_until_parked, the workspace should have a database_id"
4506 );
4507
4508 let workspace_id = new_workspace_db_id.unwrap();
4509
4510 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4511
4512 assert!(
4513 db.workspace_for_id(workspace_id).is_some(),
4514 "The workspace row should exist in the DB"
4515 );
4516
4517 cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));
4518
4519 // Advance the clock past the 100ms debounce timer so the bounds
4520 // observer task fires
4521 cx.executor().advance_clock(Duration::from_millis(200));
4522 cx.run_until_parked();
4523
4524 let serialized = db
4525 .workspace_for_id(workspace_id)
4526 .expect("workspace row should still exist");
4527 assert!(
4528 serialized.window_bounds.is_some(),
4529 "The bounds observer should write bounds for the workspace's real DB ID, \
4530 even when the workspace was created via create_workspace (where the ID \
4531 is assigned asynchronously after construction)."
4532 );
4533 }
4534
4535 #[gpui::test]
4536 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4537 crate::tests::init_test(cx);
4538
4539 let fs = fs::FakeFs::new(cx.executor());
4540 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4541 fs.insert_tree(dir.path(), json!({})).await;
4542
4543 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4544
4545 let (multi_workspace, cx) =
4546 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4547
4548 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4549 let workspace_id = db.next_id().await.unwrap();
4550 multi_workspace.update_in(cx, |mw, _, cx| {
4551 mw.workspace().update(cx, |ws, _cx| {
4552 ws.set_database_id(workspace_id);
4553 });
4554 });
4555
4556 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4557 mw.workspace()
4558 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4559 });
4560 task.await;
4561
4562 let after = db
4563 .workspace_for_id(workspace_id)
4564 .expect("workspace row should exist after flush_serialization");
4565 assert!(
4566 !after.paths.is_empty(),
4567 "flush_serialization should have written paths via save_workspace"
4568 );
4569 assert!(
4570 after.window_bounds.is_some(),
4571 "flush_serialization should ensure window bounds are persisted to the DB \
4572 before the process exits."
4573 );
4574 }
4575
    /// Verifies that `resolve_worktree_workspaces`:
    /// * dedupes a linked git worktree with its main checkout — keeping the
    ///   first-seen position but the most recent timestamp,
    /// * resolves only the worktree roots inside mixed-path workspaces, and
    /// * passes non-git projects through untouched.
    #[gpui::test]
    async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());

        // Main repo with a linked worktree entry
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // A plain non-git project
        fs.insert_tree(
            "/plain-project",
            json!({
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Another normal git repo (used in mixed-path entry)
        fs.insert_tree(
            "/other-repo",
            json!({
                ".git": {},
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // Strictly increasing "last opened" timestamps, t0 oldest.
        let t0 = Utc::now() - chrono::Duration::hours(4);
        let t1 = Utc::now() - chrono::Duration::hours(3);
        let t2 = Utc::now() - chrono::Duration::hours(2);
        let t3 = Utc::now() - chrono::Duration::hours(1);

        // Each entry: (workspace id, location, root paths, last-opened time).
        let workspaces = vec![
            // 1: Main checkout of /repo (opened earlier)
            (
                WorkspaceId(1),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/repo"]),
                t0,
            ),
            // 2: Linked worktree of /repo (opened more recently)
            // Should dedup with #1; more recent timestamp wins.
            (
                WorkspaceId(2),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/worktree"]),
                t1,
            ),
            // 3: Mixed-path workspace: one root is a linked worktree,
            // the other is a normal repo. The worktree path should be
            // resolved; the normal path kept as-is.
            (
                WorkspaceId(3),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/other-repo", "/worktree"]),
                t2,
            ),
            // 4: Non-git project — passed through unchanged.
            (
                WorkspaceId(4),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/plain-project"]),
                t3,
            ),
        ];

        let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;

        // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
        assert_eq!(result.len(), 3);

        // First entry: /repo — deduplicated from #1 and #2.
        // Keeps the position of #1 (first seen), but with #2's later timestamp.
        assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
        assert_eq!(result[0].3, t1);

        // Second entry: mixed-path workspace with worktree resolved.
        // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
        assert_eq!(
            result[1].2.paths(),
            &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
        );
        assert_eq!(result[1].0, WorkspaceId(3));

        // Third entry: non-git project, unchanged.
        assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
        assert_eq!(result[2].0, WorkspaceId(4));
    }
4687
4688 #[gpui::test]
4689 async fn test_resolve_worktree_workspaces_bare_repo(cx: &mut gpui::TestAppContext) {
4690 let fs = fs::FakeFs::new(cx.executor());
4691
4692 // Bare repo at /foo/.bare (commondir doesn't end with .git)
4693 fs.insert_tree(
4694 "/foo/.bare",
4695 json!({
4696 "worktrees": {
4697 "my-feature": {
4698 "commondir": "../../",
4699 "HEAD": "ref: refs/heads/my-feature"
4700 }
4701 }
4702 }),
4703 )
4704 .await;
4705
4706 // Linked worktree whose commondir resolves to a bare repo (/foo/.bare)
4707 fs.insert_tree(
4708 "/foo/my-feature",
4709 json!({
4710 ".git": "gitdir: /foo/.bare/worktrees/my-feature",
4711 "src": { "main.rs": "" }
4712 }),
4713 )
4714 .await;
4715
4716 let t0 = Utc::now();
4717
4718 let workspaces = vec![(
4719 WorkspaceId(1),
4720 SerializedWorkspaceLocation::Local,
4721 PathList::new(&["/foo/my-feature"]),
4722 t0,
4723 )];
4724
4725 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4726
4727 // The worktree path must be preserved unchanged — /foo/.bare is a bare repo
4728 // and cannot serve as a working-tree root, so resolution must return None.
4729 assert_eq!(result.len(), 1);
4730 assert_eq!(result[0].2.paths(), &[PathBuf::from("/foo/my-feature")]);
4731 }
4732
    /// End-to-end serialization round-trip for a multi-workspace window
    /// containing a linked git worktree and two distinct project groups:
    /// phase 1 builds and serializes the window, phase 2 reads the state
    /// back and checks it, phase 3 restores a new window from it.
    #[gpui::test]
    async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
        cx: &mut gpui::TestAppContext,
    ) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());

        // Main git repo at /repo
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "HEAD": "ref: refs/heads/main",
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree-feature",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // --- Phase 1: Set up the original multi-workspace window ---

        let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
        let project_1_linked_worktree =
            Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;

        // Wait for git discovery to finish.
        cx.run_until_parked();

        // Create a second, unrelated project so we have two distinct project groups.
        fs.insert_tree(
            "/other-project",
            json!({
                ".git": { "HEAD": "ref: refs/heads/main" },
                "readme.md": ""
            }),
        )
        .await;
        let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
        cx.run_until_parked();

        // Create the MultiWorkspace with project_2, then add the main repo
        // and its linked worktree. The linked worktree is added last and
        // becomes the active workspace.
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1.clone(), window, cx);
        });

        let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
        });

        // Persist everything before reading the DB back.
        let tasks =
            multi_workspace.update_in(cx, |mw, window, cx| mw.flush_all_serialization(window, cx));
        cx.run_until_parked();
        for task in tasks {
            task.await;
        }
        cx.run_until_parked();

        let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
        assert!(
            active_db_id.is_some(),
            "Active workspace should have a database ID"
        );

        // --- Phase 2: Read back and verify the serialized state ---

        let session_id = multi_workspace
            .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
            .unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let session_workspaces = db
            .last_session_workspace_locations(&session_id, None, fs.as_ref())
            .await
            .expect("should load session workspaces");
        assert!(
            !session_workspaces.is_empty(),
            "Should have at least one session workspace"
        );

        let multi_workspaces =
            cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
        assert_eq!(
            multi_workspaces.len(),
            1,
            "All workspaces share one window, so there should be exactly one multi-workspace"
        );

        let serialized = &multi_workspaces[0];
        assert_eq!(
            serialized.active_workspace.workspace_id,
            active_db_id.unwrap(),
        );
        assert_eq!(serialized.state.project_groups.len(), 2,);

        // Verify the serialized project group keys round-trip back to the
        // originals.
        let restored_keys: Vec<ProjectGroupKey> = serialized
            .state
            .project_groups
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        // /worktree-feature resolves to /repo, so both repo workspaces share
        // one group key; /other-project forms the second group.
        let expected_keys = vec![
            ProjectGroupKey::new(None, PathList::new(&["/repo"])),
            ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
        ];
        assert_eq!(
            restored_keys, expected_keys,
            "Deserialized project group keys should match the originals"
        );

        // --- Phase 3: Restore the window and verify the result ---

        let app_state =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());

        let serialized_mw = multi_workspaces.into_iter().next().unwrap();
        let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
            .update(|_, cx| {
                cx.spawn(async move |mut cx| {
                    crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
                })
            })
            .await
            .expect("restore_multiworkspace should succeed");

        cx.run_until_parked();

        // The restored window should have the same project group keys.
        let restored_keys: Vec<ProjectGroupKey> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, _cx| mw.project_group_keys())
            .unwrap();
        assert_eq!(
            restored_keys, expected_keys,
            "Restored window should have the same project group keys as the original"
        );

        // The active workspace in the restored window should have the linked
        // worktree paths.
        let active_paths: Vec<PathBuf> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, cx| {
                mw.workspace()
                    .read(cx)
                    .root_paths(cx)
                    .into_iter()
                    .map(|p: Arc<Path>| p.to_path_buf())
                    .collect()
            })
            .unwrap();
        assert_eq!(
            active_paths,
            vec![PathBuf::from("/worktree-feature")],
            "The restored active workspace should be the linked worktree project"
        );
    }
4914
    /// When a project group is removed, the active workspace should fall
    /// back to the neighboring group below in the sidebar; when nothing is
    /// below, fall back upward; and when no group remains, end up with an
    /// empty workspace.
    #[gpui::test]
    async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir_a = unique_test_dir(&fs, "group-a").await;
        let dir_b = unique_test_dir(&fs, "group-b").await;
        let dir_c = unique_test_dir(&fs, "group-c").await;

        let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
        let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;

        // Create a multi-workspace with project A, then add B and C.
        // project_groups stores newest first: [C, B, A].
        // Sidebar displays in the same order: C (top), B (middle), A (bottom).
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_c.clone(), window, cx)
        });
        cx.run_until_parked();

        // Group keys identify each project's group for removal below.
        let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));

        // Activate workspace B so removing its group exercises the fallback.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_b.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group B (the middle one). ---
        // In the sidebar [C, B, A], "below" B is A.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_b, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_a.clone()],
            "After removing the middle group, should fall back to the group below (A)"
        );

        // After removing B, keys = [A, C], sidebar = [C, A].
        // Activate workspace A (the bottom) so removing it tests the
        // "fall back upward" path.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _cx| mw.workspaces().next().unwrap().clone());
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group A (the bottom one in sidebar). ---
        // Nothing below A, so should fall back upward to C.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_a, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_c.clone()],
            "After removing the bottom group, should fall back to the group above (C)"
        );

        // --- Remove group C (the only one remaining). ---
        // Should create an empty workspace.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_c, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert!(
            active_paths.is_empty(),
            "After removing the only remaining group, should have an empty workspace"
        );
    }
5017
    /// Regression test for a crash where `find_or_create_local_workspace`
    /// returned a workspace that was about to be removed, hitting an assert
    /// in `MultiWorkspace::remove`.
    ///
    /// The scenario: two workspaces share the same root paths (e.g. due to
    /// a provisional key mismatch). When the first is removed and the
    /// fallback searches for the same paths, `workspace_for_paths` must
    /// skip the doomed workspace so the assert in `remove` is satisfied.
    #[gpui::test]
    async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir = unique_test_dir(&fs, "shared").await;

        // Two projects that open the same directory — this creates two
        // workspaces whose root_paths are identical.
        let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;

        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        // Adding project_b creates the second workspace with the same roots.
        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        cx.run_until_parked();

        // workspace_a is first in the workspaces vec.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
        assert_ne!(workspace_a, workspace_b);

        // Activate workspace_a so removing it triggers the fallback path.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // Remove workspace_a. The fallback searches for the same paths.
        // Without the `excluding` parameter, `workspace_for_paths` would
        // return workspace_a (first match) and the assert in `remove`
        // would fire. With the fix, workspace_a is skipped and
        // workspace_b is found instead.
        let path_list = PathList::new(std::slice::from_ref(&dir));
        let excluded = vec![workspace_a.clone()];
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove(
                vec![workspace_a.clone()],
                move |this, window, cx| {
                    this.find_or_create_local_workspace(
                        path_list,
                        None,
                        &excluded,
                        None,
                        OpenMode::Activate,
                        window,
                        cx,
                    )
                },
                window,
                cx,
            )
            .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        // workspace_b should now be active — workspace_a was removed.
        multi_workspace.read_with(cx, |mw, _cx| {
            assert_eq!(
                mw.workspace(),
                &workspace_b,
                "fallback should have found workspace_b, not the excluded workspace_a"
            );
        });
    }
5096}