1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
25 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
26};
27
28use language::{LanguageName, Toolchain, ToolchainScope};
29use remote::{
30 DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
31 SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
32};
33use serde::{Deserialize, Serialize};
34use sqlez::{
35 bindable::{Bind, Column, StaticColumnCount},
36 statement::Statement,
37 thread_safe_connection::ThreadSafeConnection,
38};
39
40use ui::{App, SharedString, px};
41use util::{ResultExt, maybe, rel_path::RelPath};
42use uuid::Uuid;
43
44use crate::{
45 WorkspaceId,
46 path_list::{PathList, SerializedPathList},
47 persistence::model::RemoteConnectionKind,
48};
49
50use model::{
51 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
52 SerializedPaneGroup, SerializedWorkspace,
53};
54
55use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
56
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept slightly below the SQLite default so batched statements built in this
// module never exceed the host-parameter limit.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
61
62fn parse_timestamp(text: &str) -> DateTime<Utc> {
63 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
64 .map(|naive| naive.and_utc())
65 .unwrap_or_else(|_| Utc::now())
66}
67
68#[derive(Copy, Clone, Debug, PartialEq)]
69pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
70impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
71impl sqlez::bindable::Bind for SerializedAxis {
72 fn bind(
73 &self,
74 statement: &sqlez::statement::Statement,
75 start_index: i32,
76 ) -> anyhow::Result<i32> {
77 match self.0 {
78 gpui::Axis::Horizontal => "Horizontal",
79 gpui::Axis::Vertical => "Vertical",
80 }
81 .bind(statement, start_index)
82 }
83}
84
85impl sqlez::bindable::Column for SerializedAxis {
86 fn column(
87 statement: &mut sqlez::statement::Statement,
88 start_index: i32,
89 ) -> anyhow::Result<(Self, i32)> {
90 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
91 Ok((
92 match axis_text.as_str() {
93 "Horizontal" => Self(Axis::Horizontal),
94 "Vertical" => Self(Axis::Vertical),
95 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
96 },
97 next_index,
98 ))
99 })
100 }
101}
102
103#[derive(Copy, Clone, Debug, PartialEq, Default)]
104pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);
105
106impl StaticColumnCount for SerializedWindowBounds {
107 fn column_count() -> usize {
108 5
109 }
110}
111
112impl Bind for SerializedWindowBounds {
113 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
114 match self.0 {
115 WindowBounds::Windowed(bounds) => {
116 let next_index = statement.bind(&"Windowed", start_index)?;
117 statement.bind(
118 &(
119 SerializedPixels(bounds.origin.x),
120 SerializedPixels(bounds.origin.y),
121 SerializedPixels(bounds.size.width),
122 SerializedPixels(bounds.size.height),
123 ),
124 next_index,
125 )
126 }
127 WindowBounds::Maximized(bounds) => {
128 let next_index = statement.bind(&"Maximized", start_index)?;
129 statement.bind(
130 &(
131 SerializedPixels(bounds.origin.x),
132 SerializedPixels(bounds.origin.y),
133 SerializedPixels(bounds.size.width),
134 SerializedPixels(bounds.size.height),
135 ),
136 next_index,
137 )
138 }
139 WindowBounds::Fullscreen(bounds) => {
140 let next_index = statement.bind(&"FullScreen", start_index)?;
141 statement.bind(
142 &(
143 SerializedPixels(bounds.origin.x),
144 SerializedPixels(bounds.origin.y),
145 SerializedPixels(bounds.size.width),
146 SerializedPixels(bounds.size.height),
147 ),
148 next_index,
149 )
150 }
151 }
152 }
153}
154
155impl Column for SerializedWindowBounds {
156 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
157 let (window_state, next_index) = String::column(statement, start_index)?;
158 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
159 Column::column(statement, next_index)?;
160 let bounds = Bounds {
161 origin: point(px(x as f32), px(y as f32)),
162 size: size(px(width as f32), px(height as f32)),
163 };
164
165 let status = match window_state.as_str() {
166 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
167 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
168 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
169 _ => bail!("Window State did not have a valid string"),
170 };
171
172 Ok((status, next_index + 4))
173 }
174}
175
176const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
177
178pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
179 let json_str = kvp
180 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
181 .log_err()
182 .flatten()?;
183
184 let (display_uuid, persisted) =
185 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
186 Some((display_uuid, persisted.into()))
187}
188
189pub async fn write_default_window_bounds(
190 kvp: &KeyValueStore,
191 bounds: WindowBounds,
192 display_uuid: Uuid,
193) -> anyhow::Result<()> {
194 let persisted = WindowBoundsJson::from(bounds);
195 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
196 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
197 .await?;
198 Ok(())
199}
200
/// JSON-serializable mirror of [`WindowBounds`], used when persisting window
/// bounds in the key-value store. Coordinates are stored as whole pixels.
/// The variant names and field names are part of the stored format — do not
/// rename them without a data migration.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    /// A normal floating window at the given origin and size.
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    /// A maximized window; bounds record its pre-maximized rectangle.
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    /// A fullscreen window; bounds record its pre-fullscreen rectangle.
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
222
223impl From<WindowBounds> for WindowBoundsJson {
224 fn from(b: WindowBounds) -> Self {
225 match b {
226 WindowBounds::Windowed(bounds) => {
227 let origin = bounds.origin;
228 let size = bounds.size;
229 WindowBoundsJson::Windowed {
230 x: f32::from(origin.x).round() as i32,
231 y: f32::from(origin.y).round() as i32,
232 width: f32::from(size.width).round() as i32,
233 height: f32::from(size.height).round() as i32,
234 }
235 }
236 WindowBounds::Maximized(bounds) => {
237 let origin = bounds.origin;
238 let size = bounds.size;
239 WindowBoundsJson::Maximized {
240 x: f32::from(origin.x).round() as i32,
241 y: f32::from(origin.y).round() as i32,
242 width: f32::from(size.width).round() as i32,
243 height: f32::from(size.height).round() as i32,
244 }
245 }
246 WindowBounds::Fullscreen(bounds) => {
247 let origin = bounds.origin;
248 let size = bounds.size;
249 WindowBoundsJson::Fullscreen {
250 x: f32::from(origin.x).round() as i32,
251 y: f32::from(origin.y).round() as i32,
252 width: f32::from(size.width).round() as i32,
253 height: f32::from(size.height).round() as i32,
254 }
255 }
256 }
257 }
258}
259
260impl From<WindowBoundsJson> for WindowBounds {
261 fn from(n: WindowBoundsJson) -> Self {
262 match n {
263 WindowBoundsJson::Windowed {
264 x,
265 y,
266 width,
267 height,
268 } => WindowBounds::Windowed(Bounds {
269 origin: point(px(x as f32), px(y as f32)),
270 size: size(px(width as f32), px(height as f32)),
271 }),
272 WindowBoundsJson::Maximized {
273 x,
274 y,
275 width,
276 height,
277 } => WindowBounds::Maximized(Bounds {
278 origin: point(px(x as f32), px(y as f32)),
279 size: size(px(width as f32), px(height as f32)),
280 }),
281 WindowBoundsJson::Fullscreen {
282 x,
283 y,
284 width,
285 height,
286 } => WindowBounds::Fullscreen(Bounds {
287 origin: point(px(x as f32), px(y as f32)),
288 size: size(px(width as f32), px(height as f32)),
289 }),
290 }
291 }
292}
293
294fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
295 let kvp = KeyValueStore::global(cx);
296 kvp.scoped("multi_workspace_state")
297 .read(&window_id.as_u64().to_string())
298 .log_err()
299 .flatten()
300 .and_then(|json| serde_json::from_str(&json).ok())
301 .unwrap_or_default()
302}
303
304pub async fn write_multi_workspace_state(
305 kvp: &KeyValueStore,
306 window_id: WindowId,
307 state: model::MultiWorkspaceState,
308) {
309 if let Ok(json_str) = serde_json::to_string(&state) {
310 kvp.scoped("multi_workspace_state")
311 .write(window_id.as_u64().to_string(), json_str)
312 .await
313 .log_err();
314 }
315}
316
/// Groups the given session workspaces by the window they were open in and
/// returns one `SerializedMultiWorkspace` per window, each carrying that
/// window's persisted state and its active workspace.
pub fn read_serialized_multi_workspaces(
    session_workspaces: Vec<model::SessionWorkspace>,
    cx: &App,
) -> Vec<model::SerializedMultiWorkspace> {
    // Workspaces sharing a window id are grouped together, preserving first-
    // seen order; workspaces without a window id each form their own group.
    let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
    let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();

    for session_workspace in session_workspaces {
        match session_workspace.window_id {
            Some(window_id) => {
                let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
                    window_groups.push(Vec::new());
                    window_groups.len() - 1
                });
                window_groups[group_index].push(session_workspace);
            }
            None => {
                window_groups.push(vec![session_workspace]);
            }
        }
    }

    window_groups
        .into_iter()
        .filter_map(|group| {
            // Per-window state is looked up by window id; groups without one
            // (workspaces that had no window) use the default state.
            let window_id = group.first().and_then(|sw| sw.window_id);
            let state = window_id
                .map(|wid| read_multi_workspace_state(wid, cx))
                .unwrap_or_default();
            // Prefer the workspace recorded as active in the window state;
            // fall back to the first workspace in the group. An empty group
            // makes `nth` return None, dropping the group via `?`.
            let active_workspace = state
                .active_workspace_id
                .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
                .or(Some(0))
                .and_then(|index| group.into_iter().nth(index))?;
            Some(model::SerializedMultiWorkspace {
                active_workspace,
                state,
            })
        })
        .collect()
}
358
359const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
360
361pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
362 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
363
364 serde_json::from_str::<DockStructure>(&json_str).ok()
365}
366
367pub async fn write_default_dock_state(
368 kvp: &KeyValueStore,
369 docks: DockStructure,
370) -> anyhow::Result<()> {
371 let json_str = serde_json::to_string(&docks)?;
372 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
373 .await?;
374 Ok(())
375}
376
/// A deserialized breakpoint row, as stored in the `breakpoints` table.
#[derive(Debug)]
pub struct Breakpoint {
    // Location of the breakpoint within its file (stored in the
    // breakpoint_location column — presumably a row number; confirm against
    // callers that construct SourceBreakpoint values).
    pub position: u32,
    // Optional log message emitted when the breakpoint is hit.
    pub message: Option<Arc<str>>,
    // Optional condition expression gating the breakpoint.
    pub condition: Option<Arc<str>>,
    // Optional hit-count condition.
    pub hit_condition: Option<Arc<str>>,
    // Enabled/disabled state (persisted as an integer; see
    // BreakpointStateWrapper).
    pub state: BreakpointState,
}
385
386/// Wrapper for DB type of a breakpoint
387struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);
388
389impl From<BreakpointState> for BreakpointStateWrapper<'static> {
390 fn from(kind: BreakpointState) -> Self {
391 BreakpointStateWrapper(Cow::Owned(kind))
392 }
393}
394
395impl StaticColumnCount for BreakpointStateWrapper<'_> {
396 fn column_count() -> usize {
397 1
398 }
399}
400
401impl Bind for BreakpointStateWrapper<'_> {
402 fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
403 statement.bind(&self.0.to_int(), start_index)
404 }
405}
406
407impl Column for BreakpointStateWrapper<'_> {
408 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
409 let state = statement.column_int(start_index)?;
410
411 match state {
412 0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
413 1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
414 _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
415 }
416 }
417}
418
419impl sqlez::bindable::StaticColumnCount for Breakpoint {
420 fn column_count() -> usize {
421 // Position, log message, condition message, and hit condition message
422 4 + BreakpointStateWrapper::column_count()
423 }
424}
425
426impl sqlez::bindable::Bind for Breakpoint {
427 fn bind(
428 &self,
429 statement: &sqlez::statement::Statement,
430 start_index: i32,
431 ) -> anyhow::Result<i32> {
432 let next_index = statement.bind(&self.position, start_index)?;
433 let next_index = statement.bind(&self.message, next_index)?;
434 let next_index = statement.bind(&self.condition, next_index)?;
435 let next_index = statement.bind(&self.hit_condition, next_index)?;
436 statement.bind(
437 &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
438 next_index,
439 )
440 }
441}
442
impl Column for Breakpoint {
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        // The position is read manually (not via `u32::column`) so a failure
        // can be annotated with the column index it occurred at.
        let position = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
            as u32;
        // Remaining columns are read in table order; each read yields the
        // next column index to consume.
        let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
        let (condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;

        Ok((
            Breakpoint {
                position,
                message: message.map(Arc::from),
                condition: condition.map(Arc::from),
                hit_condition: hit_condition.map(Arc::from),
                state: state.0.into_owned(),
            },
            next_index,
        ))
    }
}
466
467#[derive(Clone, Debug, PartialEq)]
468struct SerializedPixels(gpui::Pixels);
469impl sqlez::bindable::StaticColumnCount for SerializedPixels {}
470
471impl sqlez::bindable::Bind for SerializedPixels {
472 fn bind(
473 &self,
474 statement: &sqlez::statement::Statement,
475 start_index: i32,
476 ) -> anyhow::Result<i32> {
477 let this: i32 = u32::from(self.0) as _;
478 this.bind(statement, start_index)
479 }
480}
481
/// Handle to the SQLite database that persists workspace state: window
/// placement, dock layout, pane trees, items, breakpoints, toolchains, and
/// remote-connection metadata.
pub struct WorkspaceDb(ThreadSafeConnection);

impl Domain for WorkspaceDb {
    const NAME: &str = stringify!(WorkspaceDb);

    // Append-only list of schema migrations, applied in order; each entry
    // runs at most once per database. Existing entries must not be edited
    // (see `should_allow_migration_change` below for the single historical
    // exception).
    const MIGRATIONS: &[&str] = &[
        // Initial schema: workspaces plus the pane/item tree.
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                    ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                    ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Add per-workspace window placement columns.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                    ON DELETE CASCADE
                    ON UPDATE CASCADE
            );
        ),
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains to include relative_worktree_path in the
        // primary key.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Replace per-workspace ssh_projects with shared ssh_connections and
        // move paths onto the workspaces table, deduplicating workspaces that
        // now share a (connection, paths) location.
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into kind-discriminated
        // remote_connections (ssh / wsl / docker etc.).
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        sql!(
            DROP TABLE ssh_connections;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Re-key toolchains by worktree root path instead of worktree id.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains.
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
992
// Declare the global, lazily-initialized database connection for `WorkspaceDb`.
db::static_connection!(WorkspaceDb, []);
994
995impl WorkspaceDb {
    /// Returns a serialized workspace for the given worktree_roots. If the passed array
    /// is empty, the most recent workspace is returned instead. If no workspace for the
    /// passed roots is stored, returns none.
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        // Local lookup: no remote connection id to match on.
        self.workspace_for_roots_internal(worktree_roots, None)
    }
1005
1006 pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
1007 &self,
1008 worktree_roots: &[P],
1009 remote_project_id: RemoteConnectionId,
1010 ) -> Option<SerializedWorkspace> {
1011 self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
1012 }
1013
1014 pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
1015 &self,
1016 worktree_roots: &[P],
1017 remote_connection_id: Option<RemoteConnectionId>,
1018 ) -> Option<SerializedWorkspace> {
1019 // paths are sorted before db interactions to ensure that the order of the paths
1020 // doesn't affect the workspace selection for existing workspaces
1021 let root_paths = PathList::new(worktree_roots);
1022
1023 // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
1024 // They should only be restored via workspace_for_id during session restoration.
1025 if root_paths.is_empty() && remote_connection_id.is_none() {
1026 return None;
1027 }
1028
1029 // Note that we re-assign the workspace_id here in case it's empty
1030 // and we've grabbed the most recent workspace
1031 let (
1032 workspace_id,
1033 paths,
1034 paths_order,
1035 window_bounds,
1036 display,
1037 centered_layout,
1038 docks,
1039 window_id,
1040 ): (
1041 WorkspaceId,
1042 String,
1043 String,
1044 Option<SerializedWindowBounds>,
1045 Option<Uuid>,
1046 Option<bool>,
1047 DockStructure,
1048 Option<u64>,
1049 ) = self
1050 .select_row_bound(sql! {
1051 SELECT
1052 workspace_id,
1053 paths,
1054 paths_order,
1055 window_state,
1056 window_x,
1057 window_y,
1058 window_width,
1059 window_height,
1060 display,
1061 centered_layout,
1062 left_dock_visible,
1063 left_dock_active_panel,
1064 left_dock_zoom,
1065 right_dock_visible,
1066 right_dock_active_panel,
1067 right_dock_zoom,
1068 bottom_dock_visible,
1069 bottom_dock_active_panel,
1070 bottom_dock_zoom,
1071 window_id
1072 FROM workspaces
1073 WHERE
1074 paths IS ? AND
1075 remote_connection_id IS ?
1076 LIMIT 1
1077 })
1078 .and_then(|mut prepared_statement| {
1079 (prepared_statement)((
1080 root_paths.serialize().paths,
1081 remote_connection_id.map(|id| id.0 as i32),
1082 ))
1083 })
1084 .context("No workspaces found")
1085 .warn_on_err()
1086 .flatten()?;
1087
1088 let paths = PathList::deserialize(&SerializedPathList {
1089 paths,
1090 order: paths_order,
1091 });
1092
1093 let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
1094 self.remote_connection(remote_connection_id)
1095 .context("Get remote connection")
1096 .log_err()
1097 } else {
1098 None
1099 };
1100
1101 Some(SerializedWorkspace {
1102 id: workspace_id,
1103 location: match remote_connection_options {
1104 Some(options) => SerializedWorkspaceLocation::Remote(options),
1105 None => SerializedWorkspaceLocation::Local,
1106 },
1107 paths,
1108 center_group: self
1109 .get_center_pane_group(workspace_id)
1110 .context("Getting center group")
1111 .log_err()?,
1112 window_bounds,
1113 centered_layout: centered_layout.unwrap_or(false),
1114 display,
1115 docks,
1116 session_id: None,
1117 breakpoints: self.breakpoints(workspace_id),
1118 window_id,
1119 user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
1120 })
1121 }
1122
    /// Returns the workspace with the given ID, loading all associated data.
    ///
    /// Unlike `workspace_for_roots`, this also restores empty workspaces and
    /// reads the remote connection id from the row itself.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // The column stores the id as INTEGER; widen back to the u64 newtype.
        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1212
    /// Loads all breakpoints stored for the given workspace, grouped by file
    /// path. A failed query is logged and yields an empty map rather than an
    /// error, so workspace restoration still proceeds.
    fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
        let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
            .select_bound(sql! {
                SELECT path, breakpoint_location, log_message, condition, hit_condition, state
                FROM breakpoints
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));

        match breakpoints {
            Ok(bp) => {
                if bp.is_empty() {
                    log::debug!("Breakpoints are empty after querying database for them");
                }

                let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();

                for (path, breakpoint) in bp {
                    // Share one Arc<Path> between the map key and the entry.
                    let path: Arc<Path> = path.into();
                    map.entry(path.clone()).or_default().push(SourceBreakpoint {
                        row: breakpoint.position,
                        path,
                        message: breakpoint.message,
                        condition: breakpoint.condition,
                        hit_condition: breakpoint.hit_condition,
                        state: breakpoint.state,
                    });
                }

                for (path, bps) in map.iter() {
                    log::info!(
                        "Got {} breakpoints from database at path: {}",
                        bps.len(),
                        path.to_string_lossy()
                    );
                }

                map
            }
            Err(msg) => {
                log::error!("Breakpoints query failed with msg: {msg}");
                Default::default()
            }
        }
    }
1258
    /// Loads user-selected toolchains for a workspace, bucketed by scope
    /// (global / project / subproject). Rows with workspace_id 0 are the
    /// globally-scoped entries shared by all workspaces.
    fn user_toolchains(
        &self,
        workspace_id: WorkspaceId,
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
        type RowKind = (WorkspaceId, String, String, String, String, String, String);

        let toolchains: Vec<RowKind> = self
            .select_bound(sql! {
                SELECT workspace_id, worktree_root_path, relative_worktree_path,
                language_name, name, path, raw_json
                FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
                    workspace_id IN (0, ?2)
                )
            })
            .and_then(|mut statement| {
                (statement)((remote_connection_id.map(|id| id.0), workspace_id))
            })
            .unwrap_or_default();
        let mut ret = BTreeMap::<_, IndexSet<_>>::default();

        for (
            _workspace_id,
            worktree_root_path,
            relative_worktree_path,
            language_name,
            name,
            path,
            raw_json,
        ) in toolchains
        {
            // INTEGER's that are primary keys (like workspace ids, remote connection ids and such) start at 1, so we're safe to
            // use 0 as a sentinel workspace id for globally-scoped toolchains.
            let scope = if _workspace_id == WorkspaceId(0) {
                debug_assert_eq!(worktree_root_path, String::default());
                debug_assert_eq!(relative_worktree_path, String::default());
                ToolchainScope::Global
            } else {
                debug_assert_eq!(workspace_id, _workspace_id);
                // Both paths must be either set together or empty together.
                debug_assert_eq!(
                    worktree_root_path == String::default(),
                    relative_worktree_path == String::default()
                );

                // Skip rows whose stored relative path is not valid unix-style.
                let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
                    continue;
                };
                if worktree_root_path != String::default()
                    && relative_worktree_path != String::default()
                {
                    ToolchainScope::Subproject(
                        Arc::from(worktree_root_path.as_ref()),
                        relative_path.into(),
                    )
                } else {
                    ToolchainScope::Project
                }
            };
            // Unparsable JSON invalidates just this row, not the whole load.
            let Ok(as_json) = serde_json::from_str(&raw_json) else {
                continue;
            };
            let toolchain = Toolchain {
                name: SharedString::from(name),
                path: SharedString::from(path),
                language_name: LanguageName::from_proto(language_name),
                as_json,
            };
            ret.entry(scope).or_default().insert(toolchain);
        }

        ret
    }
1330
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously.
    ///
    /// Runs as a single savepoint on the writer thread: resolves/creates the
    /// remote connection row, clears then re-inserts panes, breakpoints, and
    /// per-workspace toolchains, upserts the workspace row, and finally saves
    /// the center pane group. Any error rolls the whole savepoint back.
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                // Re-insert breakpoints; a failed insert is logged and skipped
                // so one bad row doesn't abort the whole save.
                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                // Re-insert toolchains; scope determines which columns are set
                // (workspace_id 0 + empty paths encodes the global scope).
                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                        toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                // `workspace.docks` is a composite Bind covering ?5..?13.
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1491
    /// Looks up (or inserts) the `remote_connections` row matching `options`
    /// and returns its id. Runs on the DB writer thread.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1499
    /// Flattens a connection's identity (SSH / WSL / Docker / mock) into the
    /// nullable column set used by the `remote_connections` table and defers
    /// to `get_or_create_remote_connection_query` for the lookup/insert.
    fn get_or_create_remote_connection_internal(
        this: &Connection,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        let identity = remote_connection_identity(&options);
        let kind;
        let user: Option<String>;
        let mut host = None;
        let mut port = None;
        let mut distro = None;
        let mut name = None;
        let mut container_id = None;
        let mut use_podman = None;
        let mut remote_env = None;

        match identity {
            RemoteConnectionIdentity::Ssh {
                host: identity_host,
                username,
                port: identity_port,
            } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(identity_host);
                port = identity_port;
                user = username;
            }
            RemoteConnectionIdentity::Wsl {
                distro_name,
                user: identity_user,
            } => {
                kind = RemoteConnectionKind::Wsl;
                distro = Some(distro_name);
                user = identity_user;
            }
            RemoteConnectionIdentity::Docker {
                container_id: identity_container_id,
                name: identity_name,
                remote_user,
            } => {
                kind = RemoteConnectionKind::Docker;
                container_id = Some(identity_container_id);
                name = Some(identity_name);
                user = Some(remote_user);
            }
            // Mock connections are persisted as SSH rows with synthetic values.
            #[cfg(any(test, feature = "test-support"))]
            RemoteConnectionIdentity::Mock { id } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(format!("mock-{}", id));
                user = Some(format!("mock-user-{}", id));
            }
        }

        // Docker-only extras that are not part of the identity match above.
        if let RemoteConnectionOptions::Docker(options) = options {
            use_podman = Some(options.use_podman);
            remote_env = serde_json::to_string(&options.remote_env).ok();
        }

        Self::get_or_create_remote_connection_query(
            this,
            kind,
            host,
            port,
            user,
            distro,
            name,
            container_id,
            use_podman,
            remote_env,
        )
    }
1570
    /// Returns the id of the `remote_connections` row matching all identity
    /// columns (`IS` comparisons so NULLs match NULLs), inserting a new row
    /// when none exists. `use_podman`/`remote_env` are stored on insert but
    /// intentionally not part of the match key.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1634
    // Allocates a fresh workspace id by inserting an otherwise-empty row;
    // the caller fills in the row via `save_workspace` later.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1640
1641 fn recent_workspaces(
1642 &self,
1643 ) -> Result<
1644 Vec<(
1645 WorkspaceId,
1646 PathList,
1647 Option<RemoteConnectionId>,
1648 DateTime<Utc>,
1649 )>,
1650 > {
1651 Ok(self
1652 .recent_workspaces_query()?
1653 .into_iter()
1654 .map(|(id, paths, order, remote_connection_id, timestamp)| {
1655 (
1656 id,
1657 PathList::deserialize(&SerializedPathList { paths, order }),
1658 remote_connection_id.map(RemoteConnectionId),
1659 parse_timestamp(×tamp),
1660 )
1661 })
1662 .collect())
1663 }
1664
    // Raw MRU query backing `recent_workspaces`; the WHERE clause filters out
    // rows that have neither local paths nor a remote connection.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1675
    /// Returns the workspaces that belonged to the given session, most recent
    /// first, with path lists deserialized and ids wrapped in their newtypes.
    fn session_workspaces(
        &self,
        session_id: String,
    ) -> Result<
        Vec<(
            WorkspaceId,
            PathList,
            Option<u64>,
            Option<RemoteConnectionId>,
        )>,
    > {
        Ok(self
            .session_workspaces_query(session_id)?
            .into_iter()
            .map(
                |(workspace_id, paths, order, window_id, remote_connection_id)| {
                    (
                        WorkspaceId(workspace_id),
                        PathList::deserialize(&SerializedPathList { paths, order }),
                        window_id,
                        remote_connection_id.map(RemoteConnectionId),
                    )
                },
            )
            .collect())
    }
1702
    // Raw query backing `session_workspaces`.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }

    // Returns the stored breakpoint locations for one file within a workspace.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1719
1720 query! {
1721 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1722 DELETE FROM breakpoints
1723 WHERE file_path = ?2
1724 }
1725 }
1726
    /// Loads every stored remote connection keyed by id. Rows that fail to
    /// decode (unknown kind, missing required column) are silently skipped via
    /// `filter_map` rather than failing the whole load.
    fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
        Ok(self.select(sql!(
            SELECT
                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
            FROM
                remote_connections
        ))?()?
        .into_iter()
        .filter_map(
            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                Some((
                    RemoteConnectionId(id),
                    Self::remote_connection_from_row(
                        kind,
                        host,
                        port,
                        user,
                        distro,
                        container_id,
                        name,
                        use_podman,
                        remote_env,
                    )?,
                ))
            },
        )
        .collect())
    }
1755
    /// Loads a single remote connection by id, erroring if the row is missing
    /// or cannot be decoded into `RemoteConnectionOptions`.
    pub(crate) fn remote_connection(
        &self,
        id: RemoteConnectionId,
    ) -> Result<RemoteConnectionOptions> {
        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
            self.select_row_bound(sql!(
                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                FROM remote_connections
                WHERE id = ?
            ))?(id.0)?
            .context("no such remote connection")?;
        Self::remote_connection_from_row(
            kind,
            host,
            port,
            user,
            distro,
            container_id,
            name,
            use_podman,
            remote_env,
        )
        .context("invalid remote_connection row")
    }
1780
1781 fn remote_connection_from_row(
1782 kind: String,
1783 host: Option<String>,
1784 port: Option<u16>,
1785 user: Option<String>,
1786 distro: Option<String>,
1787 container_id: Option<String>,
1788 name: Option<String>,
1789 use_podman: Option<bool>,
1790 remote_env: Option<String>,
1791 ) -> Option<RemoteConnectionOptions> {
1792 match RemoteConnectionKind::deserialize(&kind)? {
1793 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1794 distro_name: distro?,
1795 user: user,
1796 })),
1797 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1798 host: host?.into(),
1799 port,
1800 username: user,
1801 ..Default::default()
1802 })),
1803 RemoteConnectionKind::Docker => {
1804 let remote_env: BTreeMap<String, String> =
1805 serde_json::from_str(&remote_env?).ok()?;
1806 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1807 container_id: container_id?,
1808 name: name?,
1809 remote_user: user?,
1810 upload_binary_over_docker_exec: false,
1811 use_podman: use_podman?,
1812 remote_env,
1813 }))
1814 }
1815 }
1816 }
1817
    // Deletes one workspace row by id. NOTE(review): associated rows in panes /
    // breakpoints / toolchains are presumably removed by the schema's cascade
    // rules — confirm against the table definitions.
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1824
1825 async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool {
1826 let mut any_dir = false;
1827 for path in paths {
1828 match fs.metadata(path).await.ok().flatten() {
1829 None => {
1830 return false;
1831 }
1832 Some(meta) => {
1833 if meta.is_dir {
1834 any_dir = true;
1835 }
1836 }
1837 }
1838 }
1839 any_dir
1840 }
1841
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist. Remote workspaces are kept if their connection row still exists;
    // local ones are kept if all paths exist and include a directory, and are
    // only garbage-collected after being stale for a week.
    pub async fn recent_workspaces_on_disk(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let mut result = Vec::new();
        let mut workspaces_to_delete = Vec::new();
        let remote_connections = self.remote_connections()?;
        let now = Utc::now();
        for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? {
            if let Some(remote_connection_id) = remote_connection_id {
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                } else {
                    // Orphaned remote workspace: its connection row is gone.
                    workspaces_to_delete.push(id);
                }
                continue;
            }

            // Delete the workspace if any of the paths are WSL paths. If a
            // local workspace points to WSL, attempting to read its metadata
            // will wait for the WSL VM and file server to boot up. This can
            // block for many seconds. Supported scenarios use remote
            // workspaces.
            if cfg!(windows) {
                let has_wsl_path = paths
                    .paths()
                    .iter()
                    .any(|path| util::paths::WslPath::from_path(path).is_some());
                if has_wsl_path {
                    workspaces_to_delete.push(id);
                    continue;
                }
            }

            if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            } else if now - timestamp >= chrono::Duration::days(7) {
                // Missing on disk and stale for a week: garbage-collect.
                workspaces_to_delete.push(id);
            }
        }

        futures::future::join_all(
            workspaces_to_delete
                .into_iter()
                .map(|id| self.delete_workspace_by_id(id)),
        )
        .await;
        Ok(result)
    }
1905
    /// Most recently used workspace that is still valid on disk, if any
    /// (first entry of `recent_workspaces_on_disk`, which orders by recency).
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        Ok(self.recent_workspaces_on_disk(fs).await?.into_iter().next())
    }
1919
1920 // Returns the locations of the workspaces that were still opened when the last
1921 // session was closed (i.e. when Zed was quit).
1922 // If `last_session_window_order` is provided, the returned locations are ordered
1923 // according to that.
1924 pub async fn last_session_workspace_locations(
1925 &self,
1926 last_session_id: &str,
1927 last_session_window_stack: Option<Vec<WindowId>>,
1928 fs: &dyn Fs,
1929 ) -> Result<Vec<SessionWorkspace>> {
1930 let mut workspaces = Vec::new();
1931
1932 for (workspace_id, paths, window_id, remote_connection_id) in
1933 self.session_workspaces(last_session_id.to_owned())?
1934 {
1935 let window_id = window_id.map(WindowId::from);
1936
1937 if let Some(remote_connection_id) = remote_connection_id {
1938 workspaces.push(SessionWorkspace {
1939 workspace_id,
1940 location: SerializedWorkspaceLocation::Remote(
1941 self.remote_connection(remote_connection_id)?,
1942 ),
1943 paths,
1944 window_id,
1945 });
1946 } else if paths.is_empty() {
1947 // Empty workspace with items (drafts, files) - include for restoration
1948 workspaces.push(SessionWorkspace {
1949 workspace_id,
1950 location: SerializedWorkspaceLocation::Local,
1951 paths,
1952 window_id,
1953 });
1954 } else {
1955 if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
1956 workspaces.push(SessionWorkspace {
1957 workspace_id,
1958 location: SerializedWorkspaceLocation::Local,
1959 paths,
1960 window_id,
1961 });
1962 }
1963 }
1964 }
1965
1966 if let Some(stack) = last_session_window_stack {
1967 workspaces.sort_by_key(|workspace| {
1968 workspace
1969 .window_id
1970 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
1971 .unwrap_or(usize::MAX)
1972 });
1973 }
1974
1975 Ok(workspaces)
1976 }
1977
1978 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
1979 Ok(self
1980 .get_pane_group(workspace_id, None)?
1981 .into_iter()
1982 .next()
1983 .unwrap_or_else(|| {
1984 SerializedPaneGroup::Pane(SerializedPane {
1985 active: true,
1986 children: vec![],
1987 pinned_count: 0,
1988 })
1989 }))
1990 }
1991
    /// Recursively loads the pane-group tree rooted at `group_id` (`None` for
    /// the workspace's top level). A UNION of `pane_groups` and `center_panes`
    /// yields both kinds of children in one ordered query; each row is either
    /// a group (group_id + axis set) or a pane (pane columns set).
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
                FROM (SELECT
                        group_id,
                        axis,
                        NULL as pane_id,
                        NULL as active,
                        NULL as pinned_count,
                        position,
                        parent_group_id,
                        workspace_id,
                        flexes
                      FROM pane_groups
                     UNION
                      SELECT
                        NULL,
                        NULL,
                        center_panes.pane_id,
                        panes.active as active,
                        pinned_count,
                        position,
                        parent_group_id,
                        panes.workspace_id as workspace_id,
                        NULL
                      FROM center_panes
                      JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // Group row: flexes are stored as a JSON array of f32.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2066
    /// Recursively persists a pane-group tree. `parent` is `None` for the root;
    /// otherwise it carries the parent group's id and this child's position.
    fn save_pane_group(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() {
            log::debug!("Saving a pane group for workspace {workspace_id:?}");
        }
        match pane_group {
            SerializedPaneGroup::Group {
                axis,
                children,
                flexes,
            } => {
                let (parent_id, position) = parent.unzip();

                // Flexes are serialized as a JSON array to a single column.
                let flex_string = flexes
                    .as_ref()
                    .map(|flexes| serde_json::json!(flexes).to_string());

                let group_id = conn.select_row_bound::<_, i64>(sql!(
                    INSERT INTO pane_groups(
                        workspace_id,
                        parent_group_id,
                        position,
                        axis,
                        flexes
                    )
                    VALUES (?, ?, ?, ?, ?)
                    RETURNING group_id
                ))?((
                    workspace_id,
                    parent_id,
                    position,
                    *axis,
                    flex_string,
                ))?
                .context("Couldn't retrieve group_id from inserted pane_group")?;

                for (position, group) in children.iter().enumerate() {
                    Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
                }

                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => {
                Self::save_pane(conn, workspace_id, pane, parent)?;
                Ok(())
            }
        }
    }
2119
    /// Inserts one pane plus its placement in the center-pane tree, then saves
    /// its items. Returns the freshly assigned pane id.
    fn save_pane(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<PaneId> {
        let pane_id = conn.select_row_bound::<_, i64>(sql!(
            INSERT INTO panes(workspace_id, active, pinned_count)
            VALUES (?, ?, ?)
            RETURNING pane_id
        ))?((workspace_id, pane.active, pane.pinned_count))?
        .context("Could not retrieve inserted pane_id")?;

        // A NULL parent means this pane is the root of the center tree.
        let (parent_id, order) = parent.unzip();
        conn.exec_bound(sql!(
            INSERT INTO center_panes(pane_id, parent_group_id, position)
            VALUES (?, ?, ?)
        ))?((pane_id, parent_id, order))?;

        Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;

        Ok(pane_id)
    }
2143
    /// Loads the serialized items of one pane, in display order.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2151
    /// Inserts a pane's items, using their slice index as the stored position.
    /// The statement is prepared once and re-bound per item.
    fn save_items(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_id: PaneId,
        items: &[SerializedItem],
    ) -> Result<()> {
        let mut insert = conn.exec_bound(sql!(
            INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
        )).context("Preparing insertion")?;
        for (position, item) in items.iter().enumerate() {
            insert((workspace_id, pane_id, position, item))?;
        }

        Ok(())
    }
2167
2168 query! {
2169 pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
2170 UPDATE workspaces
2171 SET timestamp = CURRENT_TIMESTAMP
2172 WHERE workspace_id = ?
2173 }
2174 }
2175
2176 query! {
2177 pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
2178 UPDATE workspaces
2179 SET window_state = ?2,
2180 window_x = ?3,
2181 window_y = ?4,
2182 window_width = ?5,
2183 window_height = ?6,
2184 display = ?7
2185 WHERE workspace_id = ?1
2186 }
2187 }
2188
2189 query! {
2190 pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
2191 UPDATE workspaces
2192 SET centered_layout = ?2
2193 WHERE workspace_id = ?1
2194 }
2195 }
2196
    // Associates (or clears, when `None`) the session this workspace belongs
    // to, used to restore windows of a previous app session.
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }
2204
    // Like `set_session_id`, but also records which window the workspace was
    // shown in, so the session restore can reopen it in the same window slot.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2212
2213 pub(crate) async fn toolchains(
2214 &self,
2215 workspace_id: WorkspaceId,
2216 ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
2217 self.write(move |this| {
2218 let mut select = this
2219 .select_bound(sql!(
2220 SELECT
2221 name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
2222 FROM toolchains
2223 WHERE workspace_id = ?
2224 ))
2225 .context("select toolchains")?;
2226
2227 let toolchain: Vec<(String, String, String, String, String, String)> =
2228 select(workspace_id)?;
2229
2230 Ok(toolchain
2231 .into_iter()
2232 .filter_map(
2233 |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
2234 Some((
2235 Toolchain {
2236 name: name.into(),
2237 path: path.into(),
2238 language_name: LanguageName::new(&language),
2239 as_json: serde_json::Value::from_str(&json).ok()?,
2240 },
2241 Arc::from(worktree_root_path.as_ref()),
2242 RelPath::from_proto(&relative_worktree_path).log_err()?,
2243 ))
2244 },
2245 )
2246 .collect())
2247 })
2248 .await
2249 }
2250
2251 pub async fn set_toolchain(
2252 &self,
2253 workspace_id: WorkspaceId,
2254 worktree_root_path: Arc<Path>,
2255 relative_worktree_path: Arc<RelPath>,
2256 toolchain: Toolchain,
2257 ) -> Result<()> {
2258 log::debug!(
2259 "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
2260 toolchain.name
2261 );
2262 self.write(move |conn| {
2263 let mut insert = conn
2264 .exec_bound(sql!(
2265 INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
2266 ON CONFLICT DO
2267 UPDATE SET
2268 name = ?5,
2269 path = ?6,
2270 raw_json = ?7
2271 ))
2272 .context("Preparing insertion")?;
2273
2274 insert((
2275 workspace_id,
2276 worktree_root_path.to_string_lossy().into_owned(),
2277 relative_worktree_path.as_unix_str(),
2278 toolchain.language_name.as_ref(),
2279 toolchain.name.as_ref(),
2280 toolchain.path.as_ref(),
2281 toolchain.as_json.to_string(),
2282 ))?;
2283
2284 Ok(())
2285 }).await
2286 }
2287
    /// Replaces the persisted worktree-trust state with `trusted_worktrees`,
    /// a map from remote host (`None` = local machine) to the set of trusted
    /// absolute paths.
    ///
    /// The table is cleared first, then rows are inserted in batches sized so
    /// a single INSERT never exceeds SQLite's host-parameter limit
    /// (`MAX_QUERY_PLACEHOLDERS`; each row binds 3 parameters).
    pub(crate) async fn save_trusted_worktrees(
        &self,
        trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
    ) -> anyhow::Result<()> {
        use anyhow::Context as _;
        use db::sqlez::statement::Statement;
        use itertools::Itertools as _;

        self.clear_trusted_worktrees()
            .await
            .context("clearing previous trust state")?;

        // Flatten the host -> paths map into one (abs_path, host) pair per
        // trusted path.
        let trusted_worktrees = trusted_worktrees
            .into_iter()
            .flat_map(|(host, abs_paths)| {
                abs_paths
                    .into_iter()
                    .map(move |abs_path| (Some(abs_path), host.clone()))
            })
            .collect::<Vec<_>>();
        // [first_worktree, last_worktree) delimits the slice of rows covered
        // by the current batch.
        let mut first_worktree;
        let mut last_worktree = 0_usize;
        // Emit one "(?, ?, ?)" placeholder tuple per row and chunk them so
        // each statement stays within the placeholder limit; `count` is the
        // number of rows in the chunk.
        for (count, placeholders) in std::iter::once("(?, ?, ?)")
            .cycle()
            .take(trusted_worktrees.len())
            .chunks(MAX_QUERY_PLACEHOLDERS / 3)
            .into_iter()
            .map(|chunk| {
                let mut count = 0;
                let placeholders = chunk
                    .inspect(|_| {
                        count += 1;
                    })
                    .join(", ");
                (count, placeholders)
            })
            .collect::<Vec<_>>()
        {
            first_worktree = last_worktree;
            last_worktree = last_worktree + count;
            let query = format!(
                r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
VALUES {placeholders};"#
            );

            // Bind this batch's rows. Each row binds (absolute_path,
            // user_name, host_name), any of which may be NULL.
            let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
            self.write(move |conn| {
                let mut statement = Statement::prepare(conn, query)?;
                let mut next_index = 1;
                for (abs_path, host) in trusted_worktrees {
                    let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
                    next_index = statement.bind(
                        &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host
                            .as_ref()
                            .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host.as_ref().map(|host| host.host_identifier.as_str()),
                        next_index,
                    )?;
                }
                statement.exec()
            })
            .await
            .context("inserting new trusted state")?;
        }
        Ok(())
    }
2361
2362 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2363 let trusted_worktrees = self.trusted_worktrees()?;
2364 Ok(trusted_worktrees
2365 .into_iter()
2366 .filter_map(|(abs_path, user_name, host_name)| {
2367 let db_host = match (user_name, host_name) {
2368 (None, Some(host_name)) => Some(RemoteHostLocation {
2369 user_name: None,
2370 host_identifier: SharedString::new(host_name),
2371 }),
2372 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2373 user_name: Some(SharedString::new(user_name)),
2374 host_identifier: SharedString::new(host_name),
2375 }),
2376 _ => None,
2377 };
2378 Some((db_host, abs_path?))
2379 })
2380 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2381 acc.entry(remote_host)
2382 .or_insert_with(HashSet::default)
2383 .insert(abs_path);
2384 acc
2385 }))
2386 }
2387
    // Raw dump of the trust table; every column is nullable, so each field
    // comes back as an Option. Consumed by `fetch_trusted_worktrees`.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }
2394
    // Wipes all persisted trust state; used before re-saving the full set.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2400}
2401
/// One persisted workspace row: its id, location (local or remote), root
/// paths, and last-updated timestamp.
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2408
2409/// Resolves workspace entries whose paths are git linked worktree checkouts
2410/// to their main repository paths.
2411///
2412/// For each workspace entry:
2413/// - If any path is a linked worktree checkout, all worktree paths in that
2414/// entry are resolved to their main repository paths, producing a new
2415/// `PathList`.
2416/// - The resolved entry is then deduplicated against existing entries: if a
2417/// workspace with the same paths already exists, the entry with the most
2418/// recent timestamp is kept.
2419pub async fn resolve_worktree_workspaces(
2420 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2421 fs: &dyn Fs,
2422) -> Vec<WorkspaceEntry> {
2423 // First pass: resolve worktree paths to main repo paths concurrently.
2424 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2425 let paths = entry.2.paths();
2426 if paths.is_empty() {
2427 return entry;
2428 }
2429
2430 // Resolve each path concurrently
2431 let resolved_paths = futures::future::join_all(
2432 paths
2433 .iter()
2434 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2435 )
2436 .await;
2437
2438 // If no paths were resolved, this entry is not a worktree — keep as-is
2439 if resolved_paths.iter().all(|r| r.is_none()) {
2440 return entry;
2441 }
2442
2443 // Build new path list, substituting resolved paths
2444 let new_paths: Vec<PathBuf> = paths
2445 .iter()
2446 .zip(resolved_paths.iter())
2447 .map(|(original, resolved)| {
2448 resolved
2449 .as_ref()
2450 .cloned()
2451 .unwrap_or_else(|| original.clone())
2452 })
2453 .collect();
2454
2455 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2456 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2457 }))
2458 .await;
2459
2460 // Second pass: deduplicate by PathList.
2461 // When two entries resolve to the same paths, keep the one with the
2462 // more recent timestamp.
2463 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2464 let mut result: Vec<WorkspaceEntry> = Vec::new();
2465
2466 for entry in resolved {
2467 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2468 if let Some(&existing_idx) = seen.get(&key) {
2469 // Keep the entry with the more recent timestamp
2470 if entry.3 > result[existing_idx].3 {
2471 result[existing_idx] = entry;
2472 }
2473 } else {
2474 seen.insert(key, result.len());
2475 result.push(entry);
2476 }
2477 }
2478
2479 result
2480}
2481
2482pub fn delete_unloaded_items(
2483 alive_items: Vec<ItemId>,
2484 workspace_id: WorkspaceId,
2485 table: &'static str,
2486 db: &ThreadSafeConnection,
2487 cx: &mut App,
2488) -> Task<Result<()>> {
2489 let db = db.clone();
2490 cx.spawn(async move |_| {
2491 let placeholders = alive_items
2492 .iter()
2493 .map(|_| "?")
2494 .collect::<Vec<&str>>()
2495 .join(", ");
2496
2497 let query = format!(
2498 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2499 );
2500
2501 db.write(move |conn| {
2502 let mut statement = Statement::prepare(conn, query)?;
2503 let mut next_index = statement.bind(&workspace_id, 1)?;
2504 for id in alive_items {
2505 next_index = statement.bind(&id, next_index)?;
2506 }
2507 statement.exec()
2508 })
2509 .await
2510 })
2511}
2512
2513#[cfg(test)]
2514mod tests {
2515 use super::*;
2516 use crate::PathList;
2517 use crate::ProjectGroupKey;
2518 use crate::{
2519 multi_workspace::MultiWorkspace,
2520 persistence::{
2521 model::{
2522 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2523 SessionWorkspace,
2524 },
2525 read_multi_workspace_state,
2526 },
2527 };
2528
2529 use gpui::AppContext as _;
2530 use pretty_assertions::assert_eq;
2531 use project::Project;
2532 use remote::SshConnectionOptions;
2533 use serde_json::json;
2534 use std::{thread, time::Duration};
2535
2536 /// Creates a unique directory in a FakeFs, returning the path.
2537 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2538 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2539 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2540 fs.insert_tree(&dir, json!({})).await;
2541 dir
2542 }
2543
2544 #[gpui::test]
2545 async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
2546 crate::tests::init_test(cx);
2547
2548 let fs = fs::FakeFs::new(cx.executor());
2549 let project1 = Project::test(fs.clone(), [], cx).await;
2550 let project2 = Project::test(fs.clone(), [], cx).await;
2551
2552 let (multi_workspace, cx) =
2553 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
2554
2555 multi_workspace.update(cx, |mw, cx| {
2556 mw.open_sidebar(cx);
2557 });
2558
2559 multi_workspace.update_in(cx, |mw, _, cx| {
2560 mw.set_random_database_id(cx);
2561 });
2562
2563 let window_id =
2564 multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());
2565
2566 // --- Add a second workspace ---
2567 let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
2568 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
2569 workspace.update(cx, |ws, _cx| ws.set_random_database_id());
2570 mw.activate(workspace.clone(), window, cx);
2571 workspace
2572 });
2573
2574 // Run background tasks so serialize has a chance to flush.
2575 cx.run_until_parked();
2576
2577 // Read back the persisted state and check that the active workspace ID was written.
2578 let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2579 let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
2580 assert_eq!(
2581 state_after_add.active_workspace_id, active_workspace2_db_id,
2582 "After adding a second workspace, the serialized active_workspace_id should match \
2583 the newly activated workspace's database id"
2584 );
2585
2586 // --- Remove the non-active workspace ---
2587 multi_workspace.update_in(cx, |mw, _window, cx| {
2588 let active = mw.workspace().clone();
2589 let ws = mw
2590 .workspaces()
2591 .find(|ws| *ws != &active)
2592 .expect("should have a non-active workspace");
2593 mw.remove([ws.clone()], |_, _, _| unreachable!(), _window, cx)
2594 .detach_and_log_err(cx);
2595 });
2596
2597 cx.run_until_parked();
2598
2599 let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
2600 let remaining_db_id =
2601 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
2602 assert_eq!(
2603 state_after_remove.active_workspace_id, remaining_db_id,
2604 "After removing a workspace, the serialized active_workspace_id should match \
2605 the remaining active workspace's database id"
2606 );
2607 }
2608
2609 #[gpui::test]
2610 async fn test_breakpoints() {
2611 zlog::init_test();
2612
2613 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2614 let id = db.next_id().await.unwrap();
2615
2616 let path = Path::new("/tmp/test.rs");
2617
2618 let breakpoint = Breakpoint {
2619 position: 123,
2620 message: None,
2621 state: BreakpointState::Enabled,
2622 condition: None,
2623 hit_condition: None,
2624 };
2625
2626 let log_breakpoint = Breakpoint {
2627 position: 456,
2628 message: Some("Test log message".into()),
2629 state: BreakpointState::Enabled,
2630 condition: None,
2631 hit_condition: None,
2632 };
2633
2634 let disable_breakpoint = Breakpoint {
2635 position: 578,
2636 message: None,
2637 state: BreakpointState::Disabled,
2638 condition: None,
2639 hit_condition: None,
2640 };
2641
2642 let condition_breakpoint = Breakpoint {
2643 position: 789,
2644 message: None,
2645 state: BreakpointState::Enabled,
2646 condition: Some("x > 5".into()),
2647 hit_condition: None,
2648 };
2649
2650 let hit_condition_breakpoint = Breakpoint {
2651 position: 999,
2652 message: None,
2653 state: BreakpointState::Enabled,
2654 condition: None,
2655 hit_condition: Some(">= 3".into()),
2656 };
2657
2658 let workspace = SerializedWorkspace {
2659 id,
2660 paths: PathList::new(&["/tmp"]),
2661 location: SerializedWorkspaceLocation::Local,
2662 center_group: Default::default(),
2663 window_bounds: Default::default(),
2664 display: Default::default(),
2665 docks: Default::default(),
2666 centered_layout: false,
2667 breakpoints: {
2668 let mut map = collections::BTreeMap::default();
2669 map.insert(
2670 Arc::from(path),
2671 vec![
2672 SourceBreakpoint {
2673 row: breakpoint.position,
2674 path: Arc::from(path),
2675 message: breakpoint.message.clone(),
2676 state: breakpoint.state,
2677 condition: breakpoint.condition.clone(),
2678 hit_condition: breakpoint.hit_condition.clone(),
2679 },
2680 SourceBreakpoint {
2681 row: log_breakpoint.position,
2682 path: Arc::from(path),
2683 message: log_breakpoint.message.clone(),
2684 state: log_breakpoint.state,
2685 condition: log_breakpoint.condition.clone(),
2686 hit_condition: log_breakpoint.hit_condition.clone(),
2687 },
2688 SourceBreakpoint {
2689 row: disable_breakpoint.position,
2690 path: Arc::from(path),
2691 message: disable_breakpoint.message.clone(),
2692 state: disable_breakpoint.state,
2693 condition: disable_breakpoint.condition.clone(),
2694 hit_condition: disable_breakpoint.hit_condition.clone(),
2695 },
2696 SourceBreakpoint {
2697 row: condition_breakpoint.position,
2698 path: Arc::from(path),
2699 message: condition_breakpoint.message.clone(),
2700 state: condition_breakpoint.state,
2701 condition: condition_breakpoint.condition.clone(),
2702 hit_condition: condition_breakpoint.hit_condition.clone(),
2703 },
2704 SourceBreakpoint {
2705 row: hit_condition_breakpoint.position,
2706 path: Arc::from(path),
2707 message: hit_condition_breakpoint.message.clone(),
2708 state: hit_condition_breakpoint.state,
2709 condition: hit_condition_breakpoint.condition.clone(),
2710 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2711 },
2712 ],
2713 );
2714 map
2715 },
2716 session_id: None,
2717 window_id: None,
2718 user_toolchains: Default::default(),
2719 };
2720
2721 db.save_workspace(workspace.clone()).await;
2722
2723 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2724 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2725
2726 assert_eq!(loaded_breakpoints.len(), 5);
2727
2728 // normal breakpoint
2729 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2730 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2731 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2732 assert_eq!(
2733 loaded_breakpoints[0].hit_condition,
2734 breakpoint.hit_condition
2735 );
2736 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2737 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2738
2739 // enabled breakpoint
2740 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2741 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2742 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2743 assert_eq!(
2744 loaded_breakpoints[1].hit_condition,
2745 log_breakpoint.hit_condition
2746 );
2747 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2748 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2749
2750 // disable breakpoint
2751 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2752 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2753 assert_eq!(
2754 loaded_breakpoints[2].condition,
2755 disable_breakpoint.condition
2756 );
2757 assert_eq!(
2758 loaded_breakpoints[2].hit_condition,
2759 disable_breakpoint.hit_condition
2760 );
2761 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2762 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2763
2764 // condition breakpoint
2765 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2766 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2767 assert_eq!(
2768 loaded_breakpoints[3].condition,
2769 condition_breakpoint.condition
2770 );
2771 assert_eq!(
2772 loaded_breakpoints[3].hit_condition,
2773 condition_breakpoint.hit_condition
2774 );
2775 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2776 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2777
2778 // hit condition breakpoint
2779 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2780 assert_eq!(
2781 loaded_breakpoints[4].message,
2782 hit_condition_breakpoint.message
2783 );
2784 assert_eq!(
2785 loaded_breakpoints[4].condition,
2786 hit_condition_breakpoint.condition
2787 );
2788 assert_eq!(
2789 loaded_breakpoints[4].hit_condition,
2790 hit_condition_breakpoint.hit_condition
2791 );
2792 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2793 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2794 }
2795
2796 #[gpui::test]
2797 async fn test_remove_last_breakpoint() {
2798 zlog::init_test();
2799
2800 let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
2801 let id = db.next_id().await.unwrap();
2802
2803 let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");
2804
2805 let breakpoint_to_remove = Breakpoint {
2806 position: 100,
2807 message: None,
2808 state: BreakpointState::Enabled,
2809 condition: None,
2810 hit_condition: None,
2811 };
2812
2813 let workspace = SerializedWorkspace {
2814 id,
2815 paths: PathList::new(&["/tmp"]),
2816 location: SerializedWorkspaceLocation::Local,
2817 center_group: Default::default(),
2818 window_bounds: Default::default(),
2819 display: Default::default(),
2820 docks: Default::default(),
2821 centered_layout: false,
2822 breakpoints: {
2823 let mut map = collections::BTreeMap::default();
2824 map.insert(
2825 Arc::from(singular_path),
2826 vec![SourceBreakpoint {
2827 row: breakpoint_to_remove.position,
2828 path: Arc::from(singular_path),
2829 message: None,
2830 state: BreakpointState::Enabled,
2831 condition: None,
2832 hit_condition: None,
2833 }],
2834 );
2835 map
2836 },
2837 session_id: None,
2838 window_id: None,
2839 user_toolchains: Default::default(),
2840 };
2841
2842 db.save_workspace(workspace.clone()).await;
2843
2844 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2845 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();
2846
2847 assert_eq!(loaded_breakpoints.len(), 1);
2848 assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
2849 assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
2850 assert_eq!(
2851 loaded_breakpoints[0].condition,
2852 breakpoint_to_remove.condition
2853 );
2854 assert_eq!(
2855 loaded_breakpoints[0].hit_condition,
2856 breakpoint_to_remove.hit_condition
2857 );
2858 assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
2859 assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));
2860
2861 let workspace_without_breakpoint = SerializedWorkspace {
2862 id,
2863 paths: PathList::new(&["/tmp"]),
2864 location: SerializedWorkspaceLocation::Local,
2865 center_group: Default::default(),
2866 window_bounds: Default::default(),
2867 display: Default::default(),
2868 docks: Default::default(),
2869 centered_layout: false,
2870 breakpoints: collections::BTreeMap::default(),
2871 session_id: None,
2872 window_id: None,
2873 user_toolchains: Default::default(),
2874 };
2875
2876 db.save_workspace(workspace_without_breakpoint.clone())
2877 .await;
2878
2879 let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
2880 let empty_breakpoints = loaded_after_remove
2881 .breakpoints
2882 .get(&Arc::from(singular_path));
2883
2884 assert!(empty_breakpoints.is_none());
2885 }
2886
    /// Checks that `next_id` inserts a placeholder workspace row immediately,
    /// so rows in other tables can reference the id via foreign keys before
    /// the workspace is ever saved.
    #[gpui::test]
    async fn test_next_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_next_id_stability").await;

        // Add a side table with a cascading foreign key to workspaces so the
        // foreign-key constraint would fail if the workspace row didn't exist.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;
                )],
                &mut |_, _, _| false,
            )
            .unwrap();
        })
        .await;

        let id = db.next_id().await.unwrap();
        // Assert the empty row got inserted
        assert_eq!(
            Some(id),
            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
            ))
            .unwrap()(id)
            .unwrap()
        );

        db.write(move |conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", id))
                .unwrap()
        })
        .await;

        // NOTE(review): this queries with the literal id 1, implicitly
        // assuming the first id handed out by `next_id` on a fresh DB is 1.
        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
2935
    /// Checks that a workspace keeps its id across path changes and repeated
    /// saves, so foreign-key references from other tables stay valid.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        // Side table with a cascading foreign key: rows here would disappear
        // if saving a workspace recreated (deleted + reinserted) its row.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                            REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Save both workspaces and attach one side-table row to each id.
        db.save_workspace(workspace_1.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Change workspace_1's paths and save repeatedly (including a
        // deliberate double-save) — the ids must not change.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        // Both side-table rows must still resolve through their original ids;
        // a cascade delete would have wiped them.
        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3026
3027 fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
3028 SerializedPaneGroup::Group {
3029 axis: SerializedAxis(axis),
3030 flexes: None,
3031 children,
3032 }
3033 }
3034
    /// Round-trips a workspace with a nested pane layout through the DB,
    /// including repeated saves of the same workspace (same item ids), and
    /// checks the deserialized workspace is identical.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // Layout under test: a horizontal split whose left side is a vertical
        // split of two panes, and whose right side is a single pane.
        // -----------------
        // | 1,2   | 5,6   |
        // | - - - |       |
        // | 3,4   |       |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Root order should not matter when looking a workspace up.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3108
3109 #[gpui::test]
3110 async fn test_workspace_assignment() {
3111 zlog::init_test();
3112
3113 let db = WorkspaceDb::open_test_db("test_basic_functionality").await;
3114
3115 let workspace_1 = SerializedWorkspace {
3116 id: WorkspaceId(1),
3117 paths: PathList::new(&["/tmp", "/tmp2"]),
3118 location: SerializedWorkspaceLocation::Local,
3119 center_group: Default::default(),
3120 window_bounds: Default::default(),
3121 breakpoints: Default::default(),
3122 display: Default::default(),
3123 docks: Default::default(),
3124 centered_layout: false,
3125 session_id: None,
3126 window_id: Some(1),
3127 user_toolchains: Default::default(),
3128 };
3129
3130 let mut workspace_2 = SerializedWorkspace {
3131 id: WorkspaceId(2),
3132 paths: PathList::new(&["/tmp"]),
3133 location: SerializedWorkspaceLocation::Local,
3134 center_group: Default::default(),
3135 window_bounds: Default::default(),
3136 display: Default::default(),
3137 docks: Default::default(),
3138 centered_layout: false,
3139 breakpoints: Default::default(),
3140 session_id: None,
3141 window_id: Some(2),
3142 user_toolchains: Default::default(),
3143 };
3144
3145 db.save_workspace(workspace_1.clone()).await;
3146 db.save_workspace(workspace_2.clone()).await;
3147
3148 // Test that paths are treated as a set
3149 assert_eq!(
3150 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3151 workspace_1
3152 );
3153 assert_eq!(
3154 db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
3155 workspace_1
3156 );
3157
3158 // Make sure that other keys work
3159 assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
3160 assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);
3161
3162 // Test 'mutate' case of updating a pre-existing id
3163 workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);
3164
3165 db.save_workspace(workspace_2.clone()).await;
3166 assert_eq!(
3167 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3168 workspace_2
3169 );
3170
3171 // Test other mechanism for mutating
3172 let mut workspace_3 = SerializedWorkspace {
3173 id: WorkspaceId(3),
3174 paths: PathList::new(&["/tmp2", "/tmp"]),
3175 location: SerializedWorkspaceLocation::Local,
3176 center_group: Default::default(),
3177 window_bounds: Default::default(),
3178 breakpoints: Default::default(),
3179 display: Default::default(),
3180 docks: Default::default(),
3181 centered_layout: false,
3182 session_id: None,
3183 window_id: Some(3),
3184 user_toolchains: Default::default(),
3185 };
3186
3187 db.save_workspace(workspace_3.clone()).await;
3188 assert_eq!(
3189 db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
3190 workspace_3
3191 );
3192
3193 // Make sure that updating paths differently also works
3194 workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
3195 db.save_workspace(workspace_3.clone()).await;
3196 assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
3197 assert_eq!(
3198 db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
3199 .unwrap(),
3200 workspace_3
3201 );
3202 }
3203
    /// End-to-end check of `session_workspaces`: only workspaces saved with
    /// the queried `session_id` are returned, ordered most-recently-saved
    /// first, and remote workspaces carry their remote connection id.
    #[gpui::test]
    async fn test_session_workspaces() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;

        // Workspaces 1 and 2 share "session-id-1"; 3 and 5 share
        // "session-id-2"; 6 uses "session-id-3"; 4 belongs to no session and
        // must never be returned below.
        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp1"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(10),
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(20),
            user_toolchains: Default::default(),
        };

        let workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp3"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(30),
            user_toolchains: Default::default(),
        };

        let workspace_4 = SerializedWorkspace {
            id: WorkspaceId(4),
            paths: PathList::new(&["/tmp4"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Workspace 5 is remote: its session entry must also expose the
        // remote connection id (checked via `locations[0].3` below).
        let connection_id = db
            .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: "my-host".into(),
                port: Some(1234),
                ..Default::default()
            }))
            .await
            .unwrap();

        let workspace_5 = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(
                db.remote_connection(connection_id).unwrap(),
            ),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(50),
            user_toolchains: Default::default(),
        };

        let workspace_6 = SerializedWorkspace {
            id: WorkspaceId(6),
            paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("session-id-3".to_owned()),
            window_id: Some(60),
            user_toolchains: Default::default(),
        };

        // Save order drives the timestamps that `session_workspaces` sorts
        // by; the sleeps guarantee distinct values at the DB's (second)
        // timestamp resolution.
        db.save_workspace(workspace_1.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_2.clone()).await;
        db.save_workspace(workspace_3.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_4.clone()).await;
        db.save_workspace(workspace_5.clone()).await;
        db.save_workspace(workspace_6.clone()).await;

        // session-id-1: workspace 2 was saved after workspace 1, so it comes
        // back first (most recent first).
        let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(2));
        assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
        assert_eq!(locations[0].2, Some(20));
        assert_eq!(locations[1].0, WorkspaceId(1));
        assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
        assert_eq!(locations[1].2, Some(10));

        // session-id-2: remote workspace 5 (newer) precedes workspace 3, and
        // exposes its connection id in the fourth tuple field.
        let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(5));
        assert_eq!(locations[0].1, PathList::default());
        assert_eq!(locations[0].2, Some(50));
        assert_eq!(locations[0].3, Some(connection_id));
        assert_eq!(locations[1].0, WorkspaceId(3));
        assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
        assert_eq!(locations[1].2, Some(30));

        // session-id-3: a single multi-root workspace; paths round-trip in
        // their original order.
        let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
        assert_eq!(locations.len(), 1);
        assert_eq!(locations[0].0, WorkspaceId(6));
        assert_eq!(
            locations[0].1,
            PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
        );
        assert_eq!(locations[0].2, Some(60));
    }
3348
3349 fn default_workspace<P: AsRef<Path>>(
3350 paths: &[P],
3351 center_group: &SerializedPaneGroup,
3352 ) -> SerializedWorkspace {
3353 SerializedWorkspace {
3354 id: WorkspaceId(4),
3355 paths: PathList::new(paths),
3356 location: SerializedWorkspaceLocation::Local,
3357 center_group: center_group.clone(),
3358 window_bounds: Default::default(),
3359 display: Default::default(),
3360 docks: Default::default(),
3361 breakpoints: Default::default(),
3362 centered_layout: false,
3363 session_id: None,
3364 window_id: None,
3365 user_toolchains: Default::default(),
3366 }
3367 }
3368
    /// Restoring a full session of local workspaces: results must come back
    /// in window-stack order (top of the stack first), with paths and window
    /// ids intact, regardless of workspace-id order.
    #[gpui::test]
    async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
        // Real directories are needed because restoration filters out
        // locations whose paths no longer exist on the (fake) filesystem.
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();

        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;

        // (workspace id, root paths, window id) triples, all in one session.
        let workspaces = [
            (1, vec![dir1.path()], 9),
            (2, vec![dir2.path()], 5),
            (3, vec![dir3.path()], 8),
            (4, vec![dir4.path()], 2),
            (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
            (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
        ]
        .into_iter()
        .map(|(id, paths, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::new(paths.as_slice()),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        // The stack lists every window top to bottom; the restored list must
        // follow exactly this order.
        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9),
            WindowId::from(3),
            WindowId::from(4), // Bottom
        ]));

        let locations = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        // Expected order is the stack order mapped back to workspaces:
        // window 2 -> ws 4, window 8 -> ws 3, 5 -> 2, 9 -> 1, 3 -> 5, 4 -> 6.
        assert_eq!(
            locations,
            [
                SessionWorkspace {
                    workspace_id: WorkspaceId(4),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path()]),
                    window_id: Some(WindowId::from(2u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(3),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir3.path()]),
                    window_id: Some(WindowId::from(8u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(2),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir2.path()]),
                    window_id: Some(WindowId::from(5u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(1),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path()]),
                    window_id: Some(WindowId::from(9u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(5),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
                    window_id: Some(WindowId::from(3u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(6),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
                    window_id: Some(WindowId::from(4u64)),
                },
            ]
        );
    }
3469
3470 #[gpui::test]
3471 async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
3472 let fs = fs::FakeFs::new(cx.executor());
3473 let db =
3474 WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
3475 .await;
3476
3477 let remote_connections = [
3478 ("host-1", "my-user-1"),
3479 ("host-2", "my-user-2"),
3480 ("host-3", "my-user-3"),
3481 ("host-4", "my-user-4"),
3482 ]
3483 .into_iter()
3484 .map(|(host, user)| async {
3485 let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
3486 host: host.into(),
3487 username: Some(user.to_string()),
3488 ..Default::default()
3489 });
3490 db.get_or_create_remote_connection(options.clone())
3491 .await
3492 .unwrap();
3493 options
3494 })
3495 .collect::<Vec<_>>();
3496
3497 let remote_connections = futures::future::join_all(remote_connections).await;
3498
3499 let workspaces = [
3500 (1, remote_connections[0].clone(), 9),
3501 (2, remote_connections[1].clone(), 5),
3502 (3, remote_connections[2].clone(), 8),
3503 (4, remote_connections[3].clone(), 2),
3504 ]
3505 .into_iter()
3506 .map(|(id, remote_connection, window_id)| SerializedWorkspace {
3507 id: WorkspaceId(id),
3508 paths: PathList::default(),
3509 location: SerializedWorkspaceLocation::Remote(remote_connection),
3510 center_group: Default::default(),
3511 window_bounds: Default::default(),
3512 display: Default::default(),
3513 docks: Default::default(),
3514 centered_layout: false,
3515 session_id: Some("one-session".to_owned()),
3516 breakpoints: Default::default(),
3517 window_id: Some(window_id),
3518 user_toolchains: Default::default(),
3519 })
3520 .collect::<Vec<_>>();
3521
3522 for workspace in workspaces.iter() {
3523 db.save_workspace(workspace.clone()).await;
3524 }
3525
3526 let stack = Some(Vec::from([
3527 WindowId::from(2), // Top
3528 WindowId::from(8),
3529 WindowId::from(5),
3530 WindowId::from(9), // Bottom
3531 ]));
3532
3533 let have = db
3534 .last_session_workspace_locations("one-session", stack, fs.as_ref())
3535 .await
3536 .unwrap();
3537 assert_eq!(have.len(), 4);
3538 assert_eq!(
3539 have[0],
3540 SessionWorkspace {
3541 workspace_id: WorkspaceId(4),
3542 location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
3543 paths: PathList::default(),
3544 window_id: Some(WindowId::from(2u64)),
3545 }
3546 );
3547 assert_eq!(
3548 have[1],
3549 SessionWorkspace {
3550 workspace_id: WorkspaceId(3),
3551 location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
3552 paths: PathList::default(),
3553 window_id: Some(WindowId::from(8u64)),
3554 }
3555 );
3556 assert_eq!(
3557 have[2],
3558 SessionWorkspace {
3559 workspace_id: WorkspaceId(2),
3560 location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
3561 paths: PathList::default(),
3562 window_id: Some(WindowId::from(5u64)),
3563 }
3564 );
3565 assert_eq!(
3566 have[3],
3567 SessionWorkspace {
3568 workspace_id: WorkspaceId(1),
3569 location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
3570 paths: PathList::default(),
3571 window_id: Some(WindowId::from(9u64)),
3572 }
3573 );
3574 }
3575
3576 #[gpui::test]
3577 async fn test_get_or_create_ssh_project() {
3578 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3579
3580 let host = "example.com".to_string();
3581 let port = Some(22_u16);
3582 let user = Some("user".to_string());
3583
3584 let connection_id = db
3585 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3586 host: host.clone().into(),
3587 port,
3588 username: user.clone(),
3589 ..Default::default()
3590 }))
3591 .await
3592 .unwrap();
3593
3594 // Test that calling the function again with the same parameters returns the same project
3595 let same_connection = db
3596 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3597 host: host.clone().into(),
3598 port,
3599 username: user.clone(),
3600 ..Default::default()
3601 }))
3602 .await
3603 .unwrap();
3604
3605 assert_eq!(connection_id, same_connection);
3606
3607 // Test with different parameters
3608 let host2 = "otherexample.com".to_string();
3609 let port2 = None;
3610 let user2 = Some("otheruser".to_string());
3611
3612 let different_connection = db
3613 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3614 host: host2.clone().into(),
3615 port: port2,
3616 username: user2.clone(),
3617 ..Default::default()
3618 }))
3619 .await
3620 .unwrap();
3621
3622 assert_ne!(connection_id, different_connection);
3623 }
3624
3625 #[gpui::test]
3626 async fn test_get_or_create_ssh_project_with_null_user() {
3627 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
3628
3629 let (host, port, user) = ("example.com".to_string(), None, None);
3630
3631 let connection_id = db
3632 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3633 host: host.clone().into(),
3634 port,
3635 username: None,
3636 ..Default::default()
3637 }))
3638 .await
3639 .unwrap();
3640
3641 let same_connection_id = db
3642 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3643 host: host.clone().into(),
3644 port,
3645 username: user.clone(),
3646 ..Default::default()
3647 }))
3648 .await
3649 .unwrap();
3650
3651 assert_eq!(connection_id, same_connection_id);
3652 }
3653
3654 #[gpui::test]
3655 async fn test_get_remote_connections() {
3656 let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;
3657
3658 let connections = [
3659 ("example.com".to_string(), None, None),
3660 (
3661 "anotherexample.com".to_string(),
3662 Some(123_u16),
3663 Some("user2".to_string()),
3664 ),
3665 ("yetanother.com".to_string(), Some(345_u16), None),
3666 ];
3667
3668 let mut ids = Vec::new();
3669 for (host, port, user) in connections.iter() {
3670 ids.push(
3671 db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
3672 SshConnectionOptions {
3673 host: host.clone().into(),
3674 port: *port,
3675 username: user.clone(),
3676 ..Default::default()
3677 },
3678 ))
3679 .await
3680 .unwrap(),
3681 );
3682 }
3683
3684 let stored_connections = db.remote_connections().unwrap();
3685 assert_eq!(
3686 stored_connections,
3687 [
3688 (
3689 ids[0],
3690 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3691 host: "example.com".into(),
3692 port: None,
3693 username: None,
3694 ..Default::default()
3695 }),
3696 ),
3697 (
3698 ids[1],
3699 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3700 host: "anotherexample.com".into(),
3701 port: Some(123),
3702 username: Some("user2".into()),
3703 ..Default::default()
3704 }),
3705 ),
3706 (
3707 ids[2],
3708 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3709 host: "yetanother.com".into(),
3710 port: Some(345),
3711 username: None,
3712 ..Default::default()
3713 }),
3714 ),
3715 ]
3716 .into_iter()
3717 .collect::<HashMap<_, _>>(),
3718 );
3719 }
3720
3721 #[gpui::test]
3722 async fn test_simple_split() {
3723 zlog::init_test();
3724
3725 let db = WorkspaceDb::open_test_db("simple_split").await;
3726
3727 // -----------------
3728 // | 1,2 | 5,6 |
3729 // | - - - | |
3730 // | 3,4 | |
3731 // -----------------
3732 let center_pane = group(
3733 Axis::Horizontal,
3734 vec![
3735 group(
3736 Axis::Vertical,
3737 vec![
3738 SerializedPaneGroup::Pane(SerializedPane::new(
3739 vec![
3740 SerializedItem::new("Terminal", 1, false, false),
3741 SerializedItem::new("Terminal", 2, true, false),
3742 ],
3743 false,
3744 0,
3745 )),
3746 SerializedPaneGroup::Pane(SerializedPane::new(
3747 vec![
3748 SerializedItem::new("Terminal", 4, false, false),
3749 SerializedItem::new("Terminal", 3, true, false),
3750 ],
3751 true,
3752 0,
3753 )),
3754 ],
3755 ),
3756 SerializedPaneGroup::Pane(SerializedPane::new(
3757 vec![
3758 SerializedItem::new("Terminal", 5, true, false),
3759 SerializedItem::new("Terminal", 6, false, false),
3760 ],
3761 false,
3762 0,
3763 )),
3764 ],
3765 );
3766
3767 let workspace = default_workspace(&["/tmp"], ¢er_pane);
3768
3769 db.save_workspace(workspace.clone()).await;
3770
3771 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
3772
3773 assert_eq!(workspace.center_group, new_workspace.center_group);
3774 }
3775
    /// Re-saving a workspace with a smaller pane tree must delete the stale
    /// panes/items from the DB: the read-back tree equals the new tree, with
    /// no leftovers from the first save.
    #[gpui::test]
    async fn test_cleanup_panes() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;

        // First save: a horizontal split whose left side is itself split,
        // six items across three panes.
        let center_pane = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 1, false, false),
                                SerializedItem::new("Terminal", 2, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 4, false, false),
                                SerializedItem::new("Terminal", 3, true, false),
                            ],
                            true,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 5, false, false),
                        SerializedItem::new("Terminal", 6, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let id = &["/tmp"];

        let mut workspace = default_workspace(id, &center_pane);

        db.save_workspace(workspace.clone()).await;

        // Second save: the layout shrinks to two panes (items 5 and 6 are
        // gone, active flags flipped). The old rows must be cleaned up.
        workspace.center_group = group(
            Axis::Vertical,
            vec![
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 1, false, false),
                        SerializedItem::new("Terminal", 2, true, false),
                    ],
                    false,
                    0,
                )),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 4, true, false),
                        SerializedItem::new("Terminal", 3, false, false),
                    ],
                    true,
                    0,
                )),
            ],
        );

        db.save_workspace(workspace.clone()).await;

        // Read-back must match the second (smaller) tree exactly.
        let new_workspace = db.workspace_for_roots(id).unwrap();

        assert_eq!(workspace.center_group, new_workspace.center_group);
    }
3851
3852 #[gpui::test]
3853 async fn test_empty_workspace_window_bounds() {
3854 zlog::init_test();
3855
3856 let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
3857 let id = db.next_id().await.unwrap();
3858
3859 // Create a workspace with empty paths (empty workspace)
3860 let empty_paths: &[&str] = &[];
3861 let display_uuid = Uuid::new_v4();
3862 let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
3863 origin: point(px(100.0), px(200.0)),
3864 size: size(px(800.0), px(600.0)),
3865 }));
3866
3867 let workspace = SerializedWorkspace {
3868 id,
3869 paths: PathList::new(empty_paths),
3870 location: SerializedWorkspaceLocation::Local,
3871 center_group: Default::default(),
3872 window_bounds: None,
3873 display: None,
3874 docks: Default::default(),
3875 breakpoints: Default::default(),
3876 centered_layout: false,
3877 session_id: None,
3878 window_id: None,
3879 user_toolchains: Default::default(),
3880 };
3881
3882 // Save the workspace (this creates the record with empty paths)
3883 db.save_workspace(workspace.clone()).await;
3884
3885 // Save window bounds separately (as the actual code does via set_window_open_status)
3886 db.set_window_open_status(id, window_bounds, display_uuid)
3887 .await
3888 .unwrap();
3889
3890 // Empty workspaces cannot be retrieved by paths (they'd all match).
3891 // They must be retrieved by workspace_id.
3892 assert!(db.workspace_for_roots(empty_paths).is_none());
3893
3894 // Retrieve using workspace_for_id instead
3895 let retrieved = db.workspace_for_id(id).unwrap();
3896
3897 // Verify window bounds were persisted
3898 assert_eq!(retrieved.id, id);
3899 assert!(retrieved.window_bounds.is_some());
3900 assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
3901 assert!(retrieved.display.is_some());
3902 assert_eq!(retrieved.display.unwrap(), display_uuid);
3903 }
3904
    /// Multiple workspaces saved under the same `window_id` (MultiWorkspace
    /// windows) must all round-trip with that window id, so the restore path
    /// can group them back into their original windows.
    #[gpui::test]
    async fn test_last_session_workspace_locations_groups_by_window_id(
        cx: &mut gpui::TestAppContext,
    ) {
        // Real directories, mirrored into the fake fs, so restoration's
        // path-existence filtering keeps every workspace.
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
        let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();

        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;
        fs.insert_tree(dir5.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
                .await;

        // Simulate two MultiWorkspace windows each containing two workspaces,
        // plus one single-workspace window:
        //   Window 10: workspace 1, workspace 2
        //   Window 20: workspace 3, workspace 4
        //   Window 30: workspace 5 (only one)
        //
        // On session restore, the caller should be able to group these by
        // window_id to reconstruct the MultiWorkspace windows.
        let workspaces_data: Vec<(i64, &Path, u64)> = vec![
            (1, dir1.path(), 10),
            (2, dir2.path(), 10),
            (3, dir3.path(), 20),
            (4, dir4.path(), 20),
            (5, dir5.path(), 30),
        ];

        for (id, dir, window_id) in &workspaces_data {
            db.save_workspace(SerializedWorkspace {
                id: WorkspaceId(*id),
                paths: PathList::new(&[*dir]),
                location: SerializedWorkspaceLocation::Local,
                center_group: Default::default(),
                window_bounds: Default::default(),
                display: Default::default(),
                docks: Default::default(),
                centered_layout: false,
                session_id: Some("test-session".to_owned()),
                breakpoints: Default::default(),
                window_id: Some(*window_id),
                user_toolchains: Default::default(),
            })
            .await;
        }

        // No window stack is passed (None): ordering is unconstrained here,
        // only the grouping matters.
        let locations = db
            .last_session_workspace_locations("test-session", None, fs.as_ref())
            .await
            .unwrap();

        // All 5 workspaces should be returned with their window_ids.
        assert_eq!(locations.len(), 5);

        // Every entry should have a window_id so the caller can group them.
        for session_workspace in &locations {
            assert!(
                session_workspace.window_id.is_some(),
                "workspace {:?} missing window_id",
                session_workspace.workspace_id
            );
        }

        // Group by window_id, simulating what the restoration code should do.
        let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
        for session_workspace in &locations {
            if let Some(window_id) = session_workspace.window_id {
                by_window
                    .entry(window_id)
                    .or_default()
                    .push(session_workspace.workspace_id);
            }
        }

        // Should produce 3 windows, not 5.
        assert_eq!(
            by_window.len(),
            3,
            "Expected 3 window groups, got {}: {:?}",
            by_window.len(),
            by_window
        );

        // Window 10 should contain workspaces 1 and 2.
        let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
        assert_eq!(window_10.len(), 2);
        assert!(window_10.contains(&WorkspaceId(1)));
        assert!(window_10.contains(&WorkspaceId(2)));

        // Window 20 should contain workspaces 3 and 4.
        let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
        assert_eq!(window_20.len(), 2);
        assert!(window_20.contains(&WorkspaceId(3)));
        assert!(window_20.contains(&WorkspaceId(4)));

        // Window 30 should contain only workspace 5.
        let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
        assert_eq!(window_30.len(), 1);
        assert!(window_30.contains(&WorkspaceId(5)));
    }
4014
    /// `read_serialized_multi_workspaces` must pair each window's session
    /// workspaces with its persisted `MultiWorkspaceState` (picking the
    /// recorded active workspace), and give window-less workspaces a default
    /// state with the first workspace active.
    #[gpui::test]
    async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
        use crate::persistence::model::MultiWorkspaceState;

        // Write multi-workspace state for two windows via the scoped KVP.
        let window_10 = WindowId::from(10u64);
        let window_20 = WindowId::from(20u64);

        let kvp = cx.update(|cx| KeyValueStore::global(cx));

        write_multi_workspace_state(
            &kvp,
            window_10,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(2)),
                project_groups: vec![],
                sidebar_open: true,
                sidebar_state: None,
            },
        )
        .await;

        write_multi_workspace_state(
            &kvp,
            window_20,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(3)),
                project_groups: vec![],
                sidebar_open: false,
                sidebar_state: None,
            },
        )
        .await;

        // Build session workspaces: two in window 10, one in window 20, one with no window.
        let session_workspaces = vec![
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/a"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/b"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/c"]),
                window_id: Some(window_20),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/d"]),
                window_id: None,
            },
        ];

        let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));

        // Should produce 3 results: window 10, window 20, and the orphan.
        // NOTE(review): this also asserts the result order (windowed groups
        // first, orphan last) — relied on by the indexed asserts below.
        assert_eq!(results.len(), 3);

        // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
        let group_10 = &results[0];
        assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
        assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
        assert_eq!(group_10.state.sidebar_open, true);

        // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
        let group_20 = &results[1];
        assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
        assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
        assert_eq!(group_20.state.sidebar_open, false);

        // Orphan: no active_workspace_id, falls back to first workspace (id 4).
        let group_none = &results[2];
        assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
        assert_eq!(group_none.state.active_workspace_id, None);
        assert_eq!(group_none.state.sidebar_open, false);
    }
4100
    /// Awaiting the task returned by `flush_serialization` alone (without
    /// `run_until_parked`) must be enough for the workspace to be persisted —
    /// this is what the quit path relies on.
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4140
    /// Creating a workspace through the MultiWorkspace API must asynchronously
    /// assign it a database id, record it as the active workspace in the
    /// multi-workspace state, and fully serialize its row (not just the bare
    /// placeholder created by `next_id`).
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4192
4193 #[gpui::test]
4194 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4195 crate::tests::init_test(cx);
4196
4197 let fs = fs::FakeFs::new(cx.executor());
4198 let dir = unique_test_dir(&fs, "remove").await;
4199 let project1 = Project::test(fs.clone(), [], cx).await;
4200 let project2 = Project::test(fs.clone(), [], cx).await;
4201
4202 let (multi_workspace, cx) =
4203 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4204
4205 multi_workspace.update(cx, |mw, cx| {
4206 mw.open_sidebar(cx);
4207 });
4208
4209 multi_workspace.update_in(cx, |mw, _, cx| {
4210 mw.set_random_database_id(cx);
4211 });
4212
4213 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4214
4215 // Get a real DB id for workspace2 so the row actually exists.
4216 let workspace2_db_id = db.next_id().await.unwrap();
4217
4218 multi_workspace.update_in(cx, |mw, window, cx| {
4219 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4220 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4221 ws.set_database_id(workspace2_db_id)
4222 });
4223 mw.add(workspace.clone(), window, cx);
4224 });
4225
4226 // Save a full workspace row to the DB directly.
4227 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4228 db.save_workspace(SerializedWorkspace {
4229 id: workspace2_db_id,
4230 paths: PathList::new(&[&dir]),
4231 location: SerializedWorkspaceLocation::Local,
4232 center_group: Default::default(),
4233 window_bounds: Default::default(),
4234 display: Default::default(),
4235 docks: Default::default(),
4236 centered_layout: false,
4237 session_id: Some(session_id.clone()),
4238 breakpoints: Default::default(),
4239 window_id: Some(99),
4240 user_toolchains: Default::default(),
4241 })
4242 .await;
4243
4244 assert!(
4245 db.workspace_for_id(workspace2_db_id).is_some(),
4246 "Workspace2 should exist in DB before removal"
4247 );
4248
4249 // Remove workspace at index 1 (the second workspace).
4250 multi_workspace.update_in(cx, |mw, window, cx| {
4251 let ws = mw.workspaces().nth(1).unwrap().clone();
4252 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4253 .detach_and_log_err(cx);
4254 });
4255
4256 cx.run_until_parked();
4257
4258 // The row should still exist so it continues to appear in recent
4259 // projects, but the session binding should be cleared so it is not
4260 // restored as part of any future session.
4261 assert!(
4262 db.workspace_for_id(workspace2_db_id).is_some(),
4263 "Removed workspace's DB row should be preserved for recent projects"
4264 );
4265
4266 let session_workspaces = db
4267 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4268 .await
4269 .unwrap();
4270 let restored_ids: Vec<WorkspaceId> = session_workspaces
4271 .iter()
4272 .map(|sw| sw.workspace_id)
4273 .collect();
4274 assert!(
4275 !restored_ids.contains(&workspace2_db_id),
4276 "Removed workspace should not appear in session restoration"
4277 );
4278 }
4279
    /// When two workspaces share a session and window id, removing one must
    /// not leave a "zombie" entry that would reopen on session restore — the
    /// removed workspace disappears from the session while the survivor stays.
    #[gpui::test]
    async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        // Real temp dirs provide unique paths; the FakeFs mirrors them so the
        // two workspaces have distinct, existing roots.
        let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;

        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let db = cx.update(|cx| WorkspaceDb::global(cx));

        // Get real DB ids so the rows actually exist.
        let ws1_id = db.next_id().await.unwrap();
        let ws2_id = db.next_id().await.unwrap();

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // Assign ws1_id to the initial workspace...
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.workspace().update(cx, |ws, _cx| {
                ws.set_database_id(ws1_id);
            });
        });

        // ...and add a second workspace carrying ws2_id.
        multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
                ws.set_database_id(ws2_id)
            });
            mw.add(workspace.clone(), window, cx);
        });

        // Both rows share the same session and window id, mimicking two
        // workspaces serialized from one window.
        let session_id = "test-zombie-session";
        let window_id_val: u64 = 42;

        db.save_workspace(SerializedWorkspace {
            id: ws1_id,
            paths: PathList::new(&[dir1.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        db.save_workspace(SerializedWorkspace {
            id: ws2_id,
            paths: PathList::new(&[dir2.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        // Remove workspace2 (index 1).
        multi_workspace.update_in(cx, |mw, window, cx| {
            let ws = mw.workspaces().nth(1).unwrap().clone();
            mw.remove([ws], |_, _, _| unreachable!(), window, cx)
                .detach_and_log_err(cx);
        });

        // Let the detached removal task (which updates the DB) complete.
        cx.run_until_parked();

        // The removed workspace should NOT appear in session restoration.
        let locations = db
            .last_session_workspace_locations(session_id, None, fs.as_ref())
            .await
            .unwrap();

        let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
        assert!(
            !restored_ids.contains(&ws2_id),
            "Removed workspace should not appear in session restoration list. Found: {:?}",
            restored_ids
        );
        assert!(
            restored_ids.contains(&ws1_id),
            "Remaining workspace should still appear in session restoration list"
        );
    }
4381
4382 #[gpui::test]
4383 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4384 crate::tests::init_test(cx);
4385
4386 let fs = fs::FakeFs::new(cx.executor());
4387 let dir = unique_test_dir(&fs, "pending-removal").await;
4388 let project1 = Project::test(fs.clone(), [], cx).await;
4389 let project2 = Project::test(fs.clone(), [], cx).await;
4390
4391 let db = cx.update(|cx| WorkspaceDb::global(cx));
4392
4393 // Get a real DB id for workspace2 so the row actually exists.
4394 let workspace2_db_id = db.next_id().await.unwrap();
4395
4396 let (multi_workspace, cx) =
4397 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4398
4399 multi_workspace.update(cx, |mw, cx| {
4400 mw.open_sidebar(cx);
4401 });
4402
4403 multi_workspace.update_in(cx, |mw, _, cx| {
4404 mw.set_random_database_id(cx);
4405 });
4406
4407 multi_workspace.update_in(cx, |mw, window, cx| {
4408 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4409 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4410 ws.set_database_id(workspace2_db_id)
4411 });
4412 mw.add(workspace.clone(), window, cx);
4413 });
4414
4415 // Save a full workspace row to the DB directly and let it settle.
4416 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4417 db.save_workspace(SerializedWorkspace {
4418 id: workspace2_db_id,
4419 paths: PathList::new(&[&dir]),
4420 location: SerializedWorkspaceLocation::Local,
4421 center_group: Default::default(),
4422 window_bounds: Default::default(),
4423 display: Default::default(),
4424 docks: Default::default(),
4425 centered_layout: false,
4426 session_id: Some(session_id.clone()),
4427 breakpoints: Default::default(),
4428 window_id: Some(88),
4429 user_toolchains: Default::default(),
4430 })
4431 .await;
4432 cx.run_until_parked();
4433
4434 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4435 multi_workspace.update_in(cx, |mw, window, cx| {
4436 let ws = mw.workspaces().nth(1).unwrap().clone();
4437 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4438 .detach_and_log_err(cx);
4439 });
4440
4441 // Simulate the quit handler pattern: collect flush tasks + pending
4442 // removal tasks and await them all.
4443 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4444 let mut tasks: Vec<Task<()>> = mw
4445 .workspaces()
4446 .map(|workspace| {
4447 workspace.update(cx, |workspace, cx| {
4448 workspace.flush_serialization(window, cx)
4449 })
4450 })
4451 .collect();
4452 let mut removal_tasks = mw.take_pending_removal_tasks();
4453 // Note: removal_tasks may be empty if the background task already
4454 // completed (take_pending_removal_tasks filters out ready tasks).
4455 tasks.append(&mut removal_tasks);
4456 tasks.push(mw.flush_serialization());
4457 tasks
4458 });
4459 futures::future::join_all(all_tasks).await;
4460
4461 // The row should still exist (for recent projects), but the session
4462 // binding should have been cleared by the pending removal task.
4463 assert!(
4464 db.workspace_for_id(workspace2_db_id).is_some(),
4465 "Workspace row should be preserved for recent projects"
4466 );
4467
4468 let session_workspaces = db
4469 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4470 .await
4471 .unwrap();
4472 let restored_ids: Vec<WorkspaceId> = session_workspaces
4473 .iter()
4474 .map(|sw| sw.workspace_id)
4475 .collect();
4476 assert!(
4477 !restored_ids.contains(&workspace2_db_id),
4478 "Pending removal task should have cleared the session binding"
4479 );
4480 }
4481
4482 #[gpui::test]
4483 async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
4484 crate::tests::init_test(cx);
4485
4486 let fs = fs::FakeFs::new(cx.executor());
4487 let project = Project::test(fs.clone(), [], cx).await;
4488
4489 let (multi_workspace, cx) =
4490 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4491
4492 multi_workspace.update_in(cx, |mw, _, cx| {
4493 mw.set_random_database_id(cx);
4494 });
4495
4496 let task =
4497 multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
4498 task.await;
4499
4500 let new_workspace_db_id =
4501 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4502 assert!(
4503 new_workspace_db_id.is_some(),
4504 "After run_until_parked, the workspace should have a database_id"
4505 );
4506
4507 let workspace_id = new_workspace_db_id.unwrap();
4508
4509 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4510
4511 assert!(
4512 db.workspace_for_id(workspace_id).is_some(),
4513 "The workspace row should exist in the DB"
4514 );
4515
4516 cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));
4517
4518 // Advance the clock past the 100ms debounce timer so the bounds
4519 // observer task fires
4520 cx.executor().advance_clock(Duration::from_millis(200));
4521 cx.run_until_parked();
4522
4523 let serialized = db
4524 .workspace_for_id(workspace_id)
4525 .expect("workspace row should still exist");
4526 assert!(
4527 serialized.window_bounds.is_some(),
4528 "The bounds observer should write bounds for the workspace's real DB ID, \
4529 even when the workspace was created via create_workspace (where the ID \
4530 is assigned asynchronously after construction)."
4531 );
4532 }
4533
4534 #[gpui::test]
4535 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4536 crate::tests::init_test(cx);
4537
4538 let fs = fs::FakeFs::new(cx.executor());
4539 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4540 fs.insert_tree(dir.path(), json!({})).await;
4541
4542 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4543
4544 let (multi_workspace, cx) =
4545 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4546
4547 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4548 let workspace_id = db.next_id().await.unwrap();
4549 multi_workspace.update_in(cx, |mw, _, cx| {
4550 mw.workspace().update(cx, |ws, _cx| {
4551 ws.set_database_id(workspace_id);
4552 });
4553 });
4554
4555 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4556 mw.workspace()
4557 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4558 });
4559 task.await;
4560
4561 let after = db
4562 .workspace_for_id(workspace_id)
4563 .expect("workspace row should exist after flush_serialization");
4564 assert!(
4565 !after.paths.is_empty(),
4566 "flush_serialization should have written paths via save_workspace"
4567 );
4568 assert!(
4569 after.window_bounds.is_some(),
4570 "flush_serialization should ensure window bounds are persisted to the DB \
4571 before the process exits."
4572 );
4573 }
4574
4575 #[gpui::test]
4576 async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
4577 let fs = fs::FakeFs::new(cx.executor());
4578
4579 // Main repo with a linked worktree entry
4580 fs.insert_tree(
4581 "/repo",
4582 json!({
4583 ".git": {
4584 "worktrees": {
4585 "feature": {
4586 "commondir": "../../",
4587 "HEAD": "ref: refs/heads/feature"
4588 }
4589 }
4590 },
4591 "src": { "main.rs": "" }
4592 }),
4593 )
4594 .await;
4595
4596 // Linked worktree checkout pointing back to /repo
4597 fs.insert_tree(
4598 "/worktree",
4599 json!({
4600 ".git": "gitdir: /repo/.git/worktrees/feature",
4601 "src": { "main.rs": "" }
4602 }),
4603 )
4604 .await;
4605
4606 // A plain non-git project
4607 fs.insert_tree(
4608 "/plain-project",
4609 json!({
4610 "src": { "main.rs": "" }
4611 }),
4612 )
4613 .await;
4614
4615 // Another normal git repo (used in mixed-path entry)
4616 fs.insert_tree(
4617 "/other-repo",
4618 json!({
4619 ".git": {},
4620 "src": { "lib.rs": "" }
4621 }),
4622 )
4623 .await;
4624
4625 let t0 = Utc::now() - chrono::Duration::hours(4);
4626 let t1 = Utc::now() - chrono::Duration::hours(3);
4627 let t2 = Utc::now() - chrono::Duration::hours(2);
4628 let t3 = Utc::now() - chrono::Duration::hours(1);
4629
4630 let workspaces = vec![
4631 // 1: Main checkout of /repo (opened earlier)
4632 (
4633 WorkspaceId(1),
4634 SerializedWorkspaceLocation::Local,
4635 PathList::new(&["/repo"]),
4636 t0,
4637 ),
4638 // 2: Linked worktree of /repo (opened more recently)
4639 // Should dedup with #1; more recent timestamp wins.
4640 (
4641 WorkspaceId(2),
4642 SerializedWorkspaceLocation::Local,
4643 PathList::new(&["/worktree"]),
4644 t1,
4645 ),
4646 // 3: Mixed-path workspace: one root is a linked worktree,
4647 // the other is a normal repo. The worktree path should be
4648 // resolved; the normal path kept as-is.
4649 (
4650 WorkspaceId(3),
4651 SerializedWorkspaceLocation::Local,
4652 PathList::new(&["/other-repo", "/worktree"]),
4653 t2,
4654 ),
4655 // 4: Non-git project — passed through unchanged.
4656 (
4657 WorkspaceId(4),
4658 SerializedWorkspaceLocation::Local,
4659 PathList::new(&["/plain-project"]),
4660 t3,
4661 ),
4662 ];
4663
4664 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4665
4666 // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
4667 assert_eq!(result.len(), 3);
4668
4669 // First entry: /repo — deduplicated from #1 and #2.
4670 // Keeps the position of #1 (first seen), but with #2's later timestamp.
4671 assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
4672 assert_eq!(result[0].3, t1);
4673
4674 // Second entry: mixed-path workspace with worktree resolved.
4675 // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
4676 assert_eq!(
4677 result[1].2.paths(),
4678 &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
4679 );
4680 assert_eq!(result[1].0, WorkspaceId(3));
4681
4682 // Third entry: non-git project, unchanged.
4683 assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
4684 assert_eq!(result[2].0, WorkspaceId(4));
4685 }
4686
4687 #[gpui::test]
4688 async fn test_resolve_worktree_workspaces_bare_repo(cx: &mut gpui::TestAppContext) {
4689 let fs = fs::FakeFs::new(cx.executor());
4690
4691 // Bare repo at /foo/.bare (commondir doesn't end with .git)
4692 fs.insert_tree(
4693 "/foo/.bare",
4694 json!({
4695 "worktrees": {
4696 "my-feature": {
4697 "commondir": "../../",
4698 "HEAD": "ref: refs/heads/my-feature"
4699 }
4700 }
4701 }),
4702 )
4703 .await;
4704
4705 // Linked worktree whose commondir resolves to a bare repo (/foo/.bare)
4706 fs.insert_tree(
4707 "/foo/my-feature",
4708 json!({
4709 ".git": "gitdir: /foo/.bare/worktrees/my-feature",
4710 "src": { "main.rs": "" }
4711 }),
4712 )
4713 .await;
4714
4715 let t0 = Utc::now();
4716
4717 let workspaces = vec![(
4718 WorkspaceId(1),
4719 SerializedWorkspaceLocation::Local,
4720 PathList::new(&["/foo/my-feature"]),
4721 t0,
4722 )];
4723
4724 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4725
4726 // The worktree path must be preserved unchanged — /foo/.bare is a bare repo
4727 // and cannot serve as a working-tree root, so resolution must return None.
4728 assert_eq!(result.len(), 1);
4729 assert_eq!(result[0].2.paths(), &[PathBuf::from("/foo/my-feature")]);
4730 }
4731
    /// End-to-end serialize → read-back → restore round-trip for a window
    /// containing a main repo, its linked git worktree, and an unrelated
    /// second project (two distinct project groups).
    #[gpui::test]
    async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
        cx: &mut gpui::TestAppContext,
    ) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());

        // Main git repo at /repo
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "HEAD": "ref: refs/heads/main",
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree-feature",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // --- Phase 1: Set up the original multi-workspace window ---

        let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
        let project_1_linked_worktree =
            Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;

        // Wait for git discovery to finish.
        cx.run_until_parked();

        // Create a second, unrelated project so we have two distinct project groups.
        fs.insert_tree(
            "/other-project",
            json!({
                ".git": { "HEAD": "ref: refs/heads/main" },
                "readme.md": ""
            }),
        )
        .await;
        let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
        cx.run_until_parked();

        // Create the MultiWorkspace with project_2, then add the main repo
        // and its linked worktree. The linked worktree is added last and
        // becomes the active workspace.
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1.clone(), window, cx);
        });

        let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
        });

        // Flush every workspace so the DB reflects the full window state.
        let tasks =
            multi_workspace.update_in(cx, |mw, window, cx| mw.flush_all_serialization(window, cx));
        cx.run_until_parked();
        for task in tasks {
            task.await;
        }
        cx.run_until_parked();

        let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
        assert!(
            active_db_id.is_some(),
            "Active workspace should have a database ID"
        );

        // --- Phase 2: Read back and verify the serialized state ---

        let session_id = multi_workspace
            .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
            .unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let session_workspaces = db
            .last_session_workspace_locations(&session_id, None, fs.as_ref())
            .await
            .expect("should load session workspaces");
        assert!(
            !session_workspaces.is_empty(),
            "Should have at least one session workspace"
        );

        let multi_workspaces =
            cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
        assert_eq!(
            multi_workspaces.len(),
            1,
            "All workspaces share one window, so there should be exactly one multi-workspace"
        );

        let serialized = &multi_workspaces[0];
        assert_eq!(
            serialized.active_workspace.workspace_id,
            active_db_id.unwrap(),
        );
        assert_eq!(serialized.state.project_groups.len(), 2,);

        // Verify the serialized project group keys round-trip back to the
        // originals. Note: /repo and /worktree-feature share one group — the
        // worktree resolves to /repo — so only two keys are expected.
        let restored_keys: Vec<ProjectGroupKey> = serialized
            .state
            .project_groups
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        let expected_keys = vec![
            ProjectGroupKey::new(None, PathList::new(&["/repo"])),
            ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
        ];
        assert_eq!(
            restored_keys, expected_keys,
            "Deserialized project group keys should match the originals"
        );

        // --- Phase 3: Restore the window and verify the result ---

        let app_state =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());

        let serialized_mw = multi_workspaces.into_iter().next().unwrap();
        let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
            .update(|_, cx| {
                cx.spawn(async move |mut cx| {
                    crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
                })
            })
            .await
            .expect("restore_multiworkspace should succeed");

        cx.run_until_parked();

        // The restored window should have the same project group keys.
        let restored_keys: Vec<ProjectGroupKey> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, _cx| mw.project_group_keys())
            .unwrap();
        assert_eq!(
            restored_keys, expected_keys,
            "Restored window should have the same project group keys as the original"
        );

        // The active workspace in the restored window should have the linked
        // worktree paths.
        let active_paths: Vec<PathBuf> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, cx| {
                mw.workspace()
                    .read(cx)
                    .root_paths(cx)
                    .into_iter()
                    .map(|p: Arc<Path>| p.to_path_buf())
                    .collect()
            })
            .unwrap();
        assert_eq!(
            active_paths,
            vec![PathBuf::from("/worktree-feature")],
            "The restored active workspace should be the linked worktree project"
        );
    }
4913
    /// Removing a project group should activate a sensible neighbor: the
    /// group below it in the sidebar if one exists, otherwise the group
    /// above, and finally an empty workspace when none remain.
    #[gpui::test]
    async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir_a = unique_test_dir(&fs, "group-a").await;
        let dir_b = unique_test_dir(&fs, "group-b").await;
        let dir_c = unique_test_dir(&fs, "group-c").await;

        let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
        let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;

        // Create a multi-workspace with project A, then add B and C.
        // project_groups stores newest first: [C, B, A].
        // Sidebar displays in the same order: C (top), B (middle), A (bottom).
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_c.clone(), window, cx)
        });
        cx.run_until_parked();

        let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));

        // Activate workspace B so removing its group exercises the fallback.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_b.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group B (the middle one). ---
        // In the sidebar [C, B, A], "below" B is A.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_b, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_a.clone()],
            "After removing the middle group, should fall back to the group below (A)"
        );

        // After removing B, keys = [A, C], sidebar = [C, A].
        // Activate workspace A (the bottom) so removing it tests the
        // "fall back upward" path.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _cx| mw.workspaces().next().unwrap().clone());
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group A (the bottom one in sidebar). ---
        // Nothing below A, so should fall back upward to C.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_a, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_c.clone()],
            "After removing the bottom group, should fall back to the group above (C)"
        );

        // --- Remove group C (the only one remaining). ---
        // Should create an empty workspace.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_c, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert!(
            active_paths.is_empty(),
            "After removing the only remaining group, should have an empty workspace"
        );
    }
5016
    /// Regression test for a crash where `find_or_create_local_workspace`
    /// returned a workspace that was about to be removed, hitting an assert
    /// in `MultiWorkspace::remove`.
    ///
    /// The scenario: two workspaces share the same root paths (e.g. due to
    /// a provisional key mismatch). When the first is removed and the
    /// fallback searches for the same paths, `workspace_for_paths` must
    /// skip the doomed workspace so the assert in `remove` is satisfied.
    #[gpui::test]
    async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir = unique_test_dir(&fs, "shared").await;

        // Two projects that open the same directory — this creates two
        // workspaces whose root_paths are identical.
        let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;

        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        cx.run_until_parked();

        // workspace_a is first in the workspaces vec.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
        assert_ne!(workspace_a, workspace_b);

        // Activate workspace_a so removing it triggers the fallback path.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // Remove workspace_a. The fallback searches for the same paths.
        // Without the `excluding` parameter, `workspace_for_paths` would
        // return workspace_a (first match) and the assert in `remove`
        // would fire. With the fix, workspace_a is skipped and
        // workspace_b is found instead.
        let path_list = PathList::new(std::slice::from_ref(&dir));
        let excluded = vec![workspace_a.clone()];
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove(
                vec![workspace_a.clone()],
                // Fallback closure: find a replacement workspace for the same
                // paths, explicitly excluding the one being removed.
                move |this, window, cx| {
                    this.find_or_create_local_workspace(path_list, None, &excluded, window, cx)
                },
                window,
                cx,
            )
            .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        // workspace_b should now be active — workspace_a was removed.
        multi_workspace.read_with(cx, |mw, _cx| {
            assert_eq!(
                mw.workspace(),
                &workspace_b,
                "fallback should have found workspace_b, not the excluded workspace_a"
            );
        });
    }
5087}