1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 bookmark_store::SerializedBookmark,
25 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
26 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
27};
28
29use language::{LanguageName, Toolchain, ToolchainScope};
30use remote::{
31 DockerConnectionOptions, RemoteConnectionIdentity, RemoteConnectionOptions,
32 SshConnectionOptions, WslConnectionOptions, remote_connection_identity,
33};
34use serde::{Deserialize, Serialize};
35use sqlez::{
36 bindable::{Bind, Column, StaticColumnCount},
37 statement::Statement,
38 thread_safe_connection::ThreadSafeConnection,
39};
40
41use ui::{App, SharedString, px};
42use util::{ResultExt, maybe, rel_path::RelPath};
43use uuid::Uuid;
44
45use crate::{
46 WorkspaceId,
47 path_list::{PathList, SerializedPathList},
48 persistence::model::RemoteConnectionKind,
49};
50
51use model::{
52 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
53 SerializedPaneGroup, SerializedWorkspace,
54};
55
56use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
57
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept a bit below SQLite's real cap so batched statements built at runtime
// never exceed the host-parameter limit.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
62
63fn parse_timestamp(text: &str) -> DateTime<Utc> {
64 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
65 .map(|naive| naive.and_utc())
66 .unwrap_or_else(|_| Utc::now())
67}
68
/// Newtype that lets a [`gpui::Axis`] round-trip through SQLite, serialized
/// as the text "Horizontal" or "Vertical".
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
// Relies on the trait's default `column_count` (a single column) —
// presumably 1; confirm against sqlez's StaticColumnCount default.
impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
72impl sqlez::bindable::Bind for SerializedAxis {
73 fn bind(
74 &self,
75 statement: &sqlez::statement::Statement,
76 start_index: i32,
77 ) -> anyhow::Result<i32> {
78 match self.0 {
79 gpui::Axis::Horizontal => "Horizontal",
80 gpui::Axis::Vertical => "Vertical",
81 }
82 .bind(statement, start_index)
83 }
84}
85
86impl sqlez::bindable::Column for SerializedAxis {
87 fn column(
88 statement: &mut sqlez::statement::Statement,
89 start_index: i32,
90 ) -> anyhow::Result<(Self, i32)> {
91 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
92 Ok((
93 match axis_text.as_str() {
94 "Horizontal" => Self(Axis::Horizontal),
95 "Vertical" => Self(Axis::Vertical),
96 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
97 },
98 next_index,
99 ))
100 })
101 }
102}
103
/// Newtype allowing a [`WindowBounds`] to round-trip through the workspace
/// database as one discriminator column plus four coordinate columns.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);
106
impl StaticColumnCount for SerializedWindowBounds {
    fn column_count() -> usize {
        // One TEXT column for the window-state kind plus four INTEGER
        // columns for x, y, width, height.
        5
    }
}
112
113impl Bind for SerializedWindowBounds {
114 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
115 match self.0 {
116 WindowBounds::Windowed(bounds) => {
117 let next_index = statement.bind(&"Windowed", start_index)?;
118 statement.bind(
119 &(
120 SerializedPixels(bounds.origin.x),
121 SerializedPixels(bounds.origin.y),
122 SerializedPixels(bounds.size.width),
123 SerializedPixels(bounds.size.height),
124 ),
125 next_index,
126 )
127 }
128 WindowBounds::Maximized(bounds) => {
129 let next_index = statement.bind(&"Maximized", start_index)?;
130 statement.bind(
131 &(
132 SerializedPixels(bounds.origin.x),
133 SerializedPixels(bounds.origin.y),
134 SerializedPixels(bounds.size.width),
135 SerializedPixels(bounds.size.height),
136 ),
137 next_index,
138 )
139 }
140 WindowBounds::Fullscreen(bounds) => {
141 let next_index = statement.bind(&"FullScreen", start_index)?;
142 statement.bind(
143 &(
144 SerializedPixels(bounds.origin.x),
145 SerializedPixels(bounds.origin.y),
146 SerializedPixels(bounds.size.width),
147 SerializedPixels(bounds.size.height),
148 ),
149 next_index,
150 )
151 }
152 }
153 }
154}
155
156impl Column for SerializedWindowBounds {
157 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
158 let (window_state, next_index) = String::column(statement, start_index)?;
159 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
160 Column::column(statement, next_index)?;
161 let bounds = Bounds {
162 origin: point(px(x as f32), px(y as f32)),
163 size: size(px(width as f32), px(height as f32)),
164 };
165
166 let status = match window_state.as_str() {
167 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
168 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
169 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
170 _ => bail!("Window State did not have a valid string"),
171 };
172
173 Ok((status, next_index + 4))
174 }
175}
176
/// KVP key under which the default (display UUID, window bounds) pair is stored.
const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
178
179pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
180 let json_str = kvp
181 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
182 .log_err()
183 .flatten()?;
184
185 let (display_uuid, persisted) =
186 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
187 Some((display_uuid, persisted.into()))
188}
189
190pub async fn write_default_window_bounds(
191 kvp: &KeyValueStore,
192 bounds: WindowBounds,
193 display_uuid: Uuid,
194) -> anyhow::Result<()> {
195 let persisted = WindowBoundsJson::from(bounds);
196 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
197 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
198 .await?;
199 Ok(())
200}
201
/// JSON-serializable mirror of [`WindowBounds`], used to persist window
/// bounds in the key-value store (see `read_default_window_bounds` /
/// `write_default_window_bounds`).
///
/// All variants carry the window rectangle, mirroring `WindowBounds` whose
/// variants each wrap a `Bounds` value. The serde representation is part of
/// the stored format — do not rename variants or fields.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
223
224impl From<WindowBounds> for WindowBoundsJson {
225 fn from(b: WindowBounds) -> Self {
226 match b {
227 WindowBounds::Windowed(bounds) => {
228 let origin = bounds.origin;
229 let size = bounds.size;
230 WindowBoundsJson::Windowed {
231 x: f32::from(origin.x).round() as i32,
232 y: f32::from(origin.y).round() as i32,
233 width: f32::from(size.width).round() as i32,
234 height: f32::from(size.height).round() as i32,
235 }
236 }
237 WindowBounds::Maximized(bounds) => {
238 let origin = bounds.origin;
239 let size = bounds.size;
240 WindowBoundsJson::Maximized {
241 x: f32::from(origin.x).round() as i32,
242 y: f32::from(origin.y).round() as i32,
243 width: f32::from(size.width).round() as i32,
244 height: f32::from(size.height).round() as i32,
245 }
246 }
247 WindowBounds::Fullscreen(bounds) => {
248 let origin = bounds.origin;
249 let size = bounds.size;
250 WindowBoundsJson::Fullscreen {
251 x: f32::from(origin.x).round() as i32,
252 y: f32::from(origin.y).round() as i32,
253 width: f32::from(size.width).round() as i32,
254 height: f32::from(size.height).round() as i32,
255 }
256 }
257 }
258 }
259}
260
261impl From<WindowBoundsJson> for WindowBounds {
262 fn from(n: WindowBoundsJson) -> Self {
263 match n {
264 WindowBoundsJson::Windowed {
265 x,
266 y,
267 width,
268 height,
269 } => WindowBounds::Windowed(Bounds {
270 origin: point(px(x as f32), px(y as f32)),
271 size: size(px(width as f32), px(height as f32)),
272 }),
273 WindowBoundsJson::Maximized {
274 x,
275 y,
276 width,
277 height,
278 } => WindowBounds::Maximized(Bounds {
279 origin: point(px(x as f32), px(y as f32)),
280 size: size(px(width as f32), px(height as f32)),
281 }),
282 WindowBoundsJson::Fullscreen {
283 x,
284 y,
285 width,
286 height,
287 } => WindowBounds::Fullscreen(Bounds {
288 origin: point(px(x as f32), px(y as f32)),
289 size: size(px(width as f32), px(height as f32)),
290 }),
291 }
292 }
293}
294
295fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
296 let kvp = KeyValueStore::global(cx);
297 kvp.scoped("multi_workspace_state")
298 .read(&window_id.as_u64().to_string())
299 .log_err()
300 .flatten()
301 .and_then(|json| serde_json::from_str(&json).ok())
302 .unwrap_or_default()
303}
304
305pub async fn write_multi_workspace_state(
306 kvp: &KeyValueStore,
307 window_id: WindowId,
308 state: model::MultiWorkspaceState,
309) {
310 if let Ok(json_str) = serde_json::to_string(&state) {
311 kvp.scoped("multi_workspace_state")
312 .write(window_id.as_u64().to_string(), json_str)
313 .await
314 .log_err();
315 }
316}
317
318pub fn read_serialized_multi_workspaces(
319 session_workspaces: Vec<model::SessionWorkspace>,
320 cx: &App,
321) -> Vec<model::SerializedMultiWorkspace> {
322 let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
323 let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();
324
325 for session_workspace in session_workspaces {
326 match session_workspace.window_id {
327 Some(window_id) => {
328 let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
329 window_groups.push(Vec::new());
330 window_groups.len() - 1
331 });
332 window_groups[group_index].push(session_workspace);
333 }
334 None => {
335 window_groups.push(vec![session_workspace]);
336 }
337 }
338 }
339
340 window_groups
341 .into_iter()
342 .filter_map(|group| {
343 let window_id = group.first().and_then(|sw| sw.window_id);
344 let state = window_id
345 .map(|wid| read_multi_workspace_state(wid, cx))
346 .unwrap_or_default();
347 let active_workspace = state
348 .active_workspace_id
349 .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
350 .or(Some(0))
351 .and_then(|index| group.into_iter().nth(index))?;
352 Some(model::SerializedMultiWorkspace {
353 active_workspace,
354 state,
355 })
356 })
357 .collect()
358}
359
/// KVP key under which the default dock layout (`DockStructure`) is stored.
const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
361
362pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
363 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
364
365 serde_json::from_str::<DockStructure>(&json_str).ok()
366}
367
368pub async fn write_default_dock_state(
369 kvp: &KeyValueStore,
370 docks: DockStructure,
371) -> anyhow::Result<()> {
372 let json_str = serde_json::to_string(&docks)?;
373 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
374 .await?;
375 Ok(())
376}
377
/// Database representation of a single bookmark within a file.
#[derive(Debug)]
pub struct Bookmark {
    // Buffer row the bookmark is anchored to.
    pub row: u32,
}
382
impl sqlez::bindable::StaticColumnCount for Bookmark {
    fn column_count() -> usize {
        // row
        1
    }
}
389
impl sqlez::bindable::Bind for Bookmark {
    /// Binds the bookmark's row number as a single integer column.
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        statement.bind(&self.row, start_index)
    }
}
399
400impl Column for Bookmark {
401 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
402 let row = statement
403 .column_int(start_index)
404 .with_context(|| format!("Failed to read bookmark at index {start_index}"))?
405 as u32;
406
407 Ok((Bookmark { row }, start_index + 1))
408 }
409}
410
/// Database representation of a single breakpoint within a file.
#[derive(Debug)]
pub struct Breakpoint {
    // Row the breakpoint is placed on.
    pub position: u32,
    // Optional log message emitted when the breakpoint is hit.
    pub message: Option<Arc<str>>,
    // Optional condition expression gating the breakpoint.
    pub condition: Option<Arc<str>>,
    // Optional hit-count condition.
    pub hit_condition: Option<Arc<str>>,
    // Enabled/disabled state.
    pub state: BreakpointState,
}
419
/// Wrapper for DB type of a breakpoint's state.
///
/// Holds the state as a `Cow` so binding can borrow from an existing
/// `Breakpoint` while column reads produce an owned value.
struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);

impl From<BreakpointState> for BreakpointStateWrapper<'static> {
    // Wraps an owned state, yielding a `'static` wrapper.
    fn from(kind: BreakpointState) -> Self {
        BreakpointStateWrapper(Cow::Owned(kind))
    }
}
428
impl StaticColumnCount for BreakpointStateWrapper<'_> {
    fn column_count() -> usize {
        // Stored as a single integer discriminant column.
        1
    }
}
434
impl Bind for BreakpointStateWrapper<'_> {
    /// Binds the state's integer discriminant (via `to_int`).
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        statement.bind(&self.0.to_int(), start_index)
    }
}
440
441impl Column for BreakpointStateWrapper<'_> {
442 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
443 let state = statement.column_int(start_index)?;
444
445 match state {
446 0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
447 1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
448 _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
449 }
450 }
451}
452
impl sqlez::bindable::StaticColumnCount for Breakpoint {
    fn column_count() -> usize {
        // Position, log message, condition message, and hit condition message,
        // plus the serialized breakpoint state.
        4 + BreakpointStateWrapper::column_count()
    }
}
459
460impl sqlez::bindable::Bind for Breakpoint {
461 fn bind(
462 &self,
463 statement: &sqlez::statement::Statement,
464 start_index: i32,
465 ) -> anyhow::Result<i32> {
466 let next_index = statement.bind(&self.position, start_index)?;
467 let next_index = statement.bind(&self.message, next_index)?;
468 let next_index = statement.bind(&self.condition, next_index)?;
469 let next_index = statement.bind(&self.hit_condition, next_index)?;
470 statement.bind(
471 &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
472 next_index,
473 )
474 }
475}
476
impl Column for Breakpoint {
    /// Reads a breakpoint back out of a row: position (INTEGER), three
    /// optional TEXT columns (log message, condition, hit condition), then
    /// the state discriminant.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let position = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
            as u32;
        // `column_int` does not yield a next index, so the following read
        // starts explicitly at start_index + 1.
        let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
        let (condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;

        Ok((
            Breakpoint {
                position,
                message: message.map(Arc::from),
                condition: condition.map(Arc::from),
                hit_condition: hit_condition.map(Arc::from),
                state: state.0.into_owned(),
            },
            next_index,
        ))
    }
}
500
/// Newtype for persisting a `gpui::Pixels` length as an integer column.
#[derive(Clone, Debug, PartialEq)]
struct SerializedPixels(gpui::Pixels);
// Relies on the trait's default `column_count` (a single column).
impl sqlez::bindable::StaticColumnCount for SerializedPixels {}
504
impl sqlez::bindable::Bind for SerializedPixels {
    /// Binds the pixel value as an `i32` column.
    ///
    /// NOTE(review): the value goes through `u32::from(Pixels)` before the
    /// cast, so negative coordinates (e.g. a window on a display left of or
    /// above the primary one) may not round-trip correctly — confirm against
    /// gpui's `From<Pixels> for u32` implementation.
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        let this: i32 = u32::from(self.0) as _;
        this.bind(statement, start_index)
    }
}
515
/// The workspace database: a thread-safe SQLite connection whose schema is
/// defined by this type's `Domain` implementation.
pub struct WorkspaceDb(ThreadSafeConnection);
517
impl Domain for WorkspaceDb {
    // Domain name recorded alongside the applied migrations.
    const NAME: &str = stringify!(WorkspaceDb);

    // Ordered schema migrations. Applied migrations are compared against the
    // recorded ones, so existing entries must stay unchanged (the single
    // sanctioned exception is handled by `should_allow_migration_change`
    // below); schema changes are appended as new entries.
    const MIGRATIONS: &[&str] = &[
        // Initial schema: workspaces, pane groups, panes, center panes, items.
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Persist per-workspace window placement.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        // Introduce remote (dev server) projects; rename workspace_location.
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        // Replace remote_projects with dev_server_projects.
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        // Persist the ordering of a workspace's local paths.
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        // Associate workspaces with an editing session.
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        // Remember which window a workspace was shown in.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        // Persist the number of pinned tabs per pane.
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        // Introduce ssh_projects and link workspaces to them.
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        // Rename ssh_projects.path to paths.
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        // Per-workspace toolchain selections.
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        // Store each toolchain's raw JSON metadata.
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        // Persist breakpoints per workspace.
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
        // Store local paths as TEXT arrays with a uniqueness index.
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        // Track enabled/disabled state per breakpoint.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        // The breakpoint kind is no longer stored.
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        // Record each toolchain's worktree-relative path.
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        // Persist breakpoint condition and hit-condition expressions.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains with relative_worktree_path in the primary key.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Normalize ssh connection info into ssh_connections, rebuild
        // workspaces around it, and dedupe workspaces sharing a location.
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into remote_connections (adds a kind
        // column) and repoint workspaces at the new table.
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        // User-selected toolchains, optionally scoped to a remote connection.
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        // ssh_connections is fully superseded by remote_connections.
        sql!(
            DROP TABLE ssh_connections;
        ),
        // Store an optional display name and container id per connection.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        // Record worktree paths the user has marked as trusted.
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Key toolchains by worktree root path instead of worktree id.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains.
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        // Record whether a container connection should use podman.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        // Persist the remote environment per connection.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
        // Persist bookmarks per workspace.
        sql!(
            CREATE TABLE bookmarks (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                row INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
1036
// Registers `WorkspaceDb` as a process-wide static database connection.
// The empty list presumably passes no additional domains — confirm against
// the `db::static_connection!` macro definition.
db::static_connection!(WorkspaceDb, []);
1038
1039impl WorkspaceDb {
    /// Returns the serialized workspace for the given local worktree roots,
    /// or `None` if no workspace for those roots is stored.
    ///
    /// NOTE(review): the previous doc claimed that an empty `worktree_roots`
    /// returns the most recent workspace, but `workspace_for_roots_internal`
    /// returns `None` for empty local roots; doc updated to match the code.
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        // Local (non-remote) lookup: no remote connection id.
        self.workspace_for_roots_internal(worktree_roots, None)
    }
1049
    /// Returns the serialized workspace for the given worktree roots opened
    /// over the given remote connection, or `None` if none is stored.
    pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_project_id: RemoteConnectionId,
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
    }
1057
    /// Shared lookup behind the local and remote `workspace_for_roots`
    /// variants: matches a workspace row on its serialized path list and
    /// optional remote connection id, then loads the associated pane layout,
    /// bookmarks, breakpoints, and toolchains.
    pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> Option<SerializedWorkspace> {
        // paths are sorted before db interactions to ensure that the order of the paths
        // doesn't affect the workspace selection for existing workspaces
        let root_paths = PathList::new(worktree_roots);

        // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
        // They should only be restored via workspace_for_id during session restoration.
        if root_paths.is_empty() && remote_connection_id.is_none() {
            return None;
        }

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (
            workspace_id,
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
        ): (
            WorkspaceId,
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
        ) = self
            // `IS` rather than `=` so a NULL remote_connection_id matches
            // rows whose remote_connection_id is NULL (local workspaces).
            .select_row_bound(sql! {
                SELECT
                    workspace_id,
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id
                FROM workspaces
                WHERE
                    paths IS ? AND
                    remote_connection_id IS ?
                LIMIT 1
            })
            .and_then(|mut prepared_statement| {
                (prepared_statement)((
                    root_paths.serialize().paths,
                    remote_connection_id.map(|id| id.0 as i32),
                ))
            })
            .context("No workspaces found")
            .warn_on_err()
            .flatten()?;

        // Re-inflate the path list from its serialized form plus ordering.
        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // Resolve the connection options only for remote workspaces; a failed
        // lookup degrades to a local location rather than aborting.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            bookmarks: self.bookmarks(workspace_id),
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1167
    /// Returns the workspace with the given ID, loading all associated data
    /// (pane layout, bookmarks, breakpoints, toolchains). Unlike the
    /// roots-based lookups, this also works for empty workspaces, whose
    /// stored path list is empty.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // The column is stored as an i32; widen back to the id newtype.
        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        // A failed connection lookup degrades to a local location.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            bookmarks: self.bookmarks(workspace_id),
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1258
1259 fn bookmarks(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SerializedBookmark>> {
1260 let bookmarks: Result<Vec<(PathBuf, Bookmark)>> = self
1261 .select_bound(sql! {
1262 SELECT path, row
1263 FROM bookmarks
1264 WHERE workspace_id = ?
1265 ORDER BY path, row
1266 })
1267 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1268
1269 match bookmarks {
1270 Ok(bookmarks) => {
1271 if bookmarks.is_empty() {
1272 log::debug!("Bookmarks are empty after querying database for them");
1273 }
1274
1275 let mut map: BTreeMap<_, Vec<_>> = BTreeMap::default();
1276
1277 for (path, bookmark) in bookmarks {
1278 let path: Arc<Path> = path.into();
1279 map.entry(path.clone())
1280 .or_default()
1281 .push(SerializedBookmark(bookmark.row))
1282 }
1283
1284 map
1285 }
1286 Err(e) => {
1287 log::error!("Failed to load bookmarks: {}", e);
1288 BTreeMap::default()
1289 }
1290 }
1291 }
1292
    /// Loads all breakpoints stored for the given workspace, grouped by file
    /// path. Query failures are logged and yield an empty map.
    fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
        let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
            .select_bound(sql! {
                SELECT path, breakpoint_location, log_message, condition, hit_condition, state
                FROM breakpoints
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));

        match breakpoints {
            Ok(bp) => {
                if bp.is_empty() {
                    log::debug!("Breakpoints are empty after querying database for them");
                }

                let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();

                // Group rows by path; each `SourceBreakpoint` also carries its
                // own path, so the Arc is cloned for the map key.
                for (path, breakpoint) in bp {
                    let path: Arc<Path> = path.into();
                    map.entry(path.clone()).or_default().push(SourceBreakpoint {
                        row: breakpoint.position,
                        path,
                        message: breakpoint.message,
                        condition: breakpoint.condition,
                        hit_condition: breakpoint.hit_condition,
                        state: breakpoint.state,
                    });
                }

                for (path, bps) in map.iter() {
                    log::info!(
                        "Got {} breakpoints from database at path: {}",
                        bps.len(),
                        path.to_string_lossy()
                    );
                }

                map
            }
            Err(msg) => {
                log::error!("Breakpoints query failed with msg: {msg}");
                Default::default()
            }
        }
    }
1338
    /// Loads user-selected toolchains applying to the given workspace (and
    /// remote connection), grouped by scope: global toolchains are stored
    /// under the sentinel workspace id 0, workspace-wide ones under the real
    /// workspace id, and subproject ones additionally carry worktree paths.
    fn user_toolchains(
        &self,
        workspace_id: WorkspaceId,
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
        type RowKind = (WorkspaceId, String, String, String, String, String, String);

        let toolchains: Vec<RowKind> = self
            .select_bound(sql! {
                SELECT workspace_id, worktree_root_path, relative_worktree_path,
                language_name, name, path, raw_json
                FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
                    workspace_id IN (0, ?2)
                )
            })
            .and_then(|mut statement| {
                (statement)((remote_connection_id.map(|id| id.0), workspace_id))
            })
            .unwrap_or_default();
        let mut ret = BTreeMap::<_, IndexSet<_>>::default();

        for (
            _workspace_id,
            worktree_root_path,
            relative_worktree_path,
            language_name,
            name,
            path,
            raw_json,
        ) in toolchains
        {
            // INTEGER's that are primary keys (like workspace ids, remote connection ids
            // and such) start at 1, so workspace id 0 can never collide with a real
            // workspace; it is used as the sentinel for globally-scoped toolchains.
            let scope = if _workspace_id == WorkspaceId(0) {
                debug_assert_eq!(worktree_root_path, String::default());
                debug_assert_eq!(relative_worktree_path, String::default());
                ToolchainScope::Global
            } else {
                debug_assert_eq!(workspace_id, _workspace_id);
                // Both path columns must be empty together (Project scope) or
                // populated together (Subproject scope).
                debug_assert_eq!(
                    worktree_root_path == String::default(),
                    relative_worktree_path == String::default()
                );

                let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
                    continue;
                };
                if worktree_root_path != String::default()
                    && relative_worktree_path != String::default()
                {
                    ToolchainScope::Subproject(
                        Arc::from(worktree_root_path.as_ref()),
                        relative_path.into(),
                    )
                } else {
                    ToolchainScope::Project
                }
            };
            // Skip rows whose serialized toolchain JSON no longer parses.
            let Ok(as_json) = serde_json::from_str(&raw_json) else {
                continue;
            };
            let toolchain = Toolchain {
                name: SharedString::from(name),
                path: SharedString::from(path),
                language_name: LanguageName::from_proto(language_name),
                as_json,
            };
            ret.entry(scope).or_default().insert(toolchain);
        }

        ret
    }
1410
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously.
    ///
    /// Runs inside a savepoint so the delete-then-reinsert of panes,
    /// bookmarks, breakpoints, and toolchains is atomic. Errors are logged,
    /// not returned.
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                // Resolve (or insert) the remote_connections row first so we
                // can store its id on the workspace row below.
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                // Bookmarks: replace wholesale (delete then re-insert).
                conn.exec_bound(
                    sql!(
                        DELETE FROM bookmarks WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old bookmarks")?;

                for (path, bookmarks) in workspace.bookmarks {
                    for bookmark in bookmarks {
                        conn.exec_bound(sql!(
                            INSERT INTO bookmarks (workspace_id, path, row)
                            VALUES (?1, ?2, ?3);
                        ))?((workspace.id, path.as_ref(), bookmark.0)).context("Inserting bookmark")?;
                    }
                }

                // Breakpoints: replace wholesale; individual insert failures
                // are logged and skipped rather than aborting the save.
                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                // Toolchains: replace this workspace's rows; global toolchains
                // (sentinel workspace id 0) are written with workspace_id 0.
                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                        toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                // `workspace.docks` expands to the nine dock columns (?5-?13)
                // via its `Bind` implementation.
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1586
    /// Returns the id of the `remote_connections` row matching `options`,
    /// inserting a new row if none exists yet.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1594
    /// Flattens `options` into the column values of the `remote_connections`
    /// table (via its identity) and delegates to
    /// [`Self::get_or_create_remote_connection_query`]. Columns that don't
    /// apply to the connection kind are stored as NULL.
    fn get_or_create_remote_connection_internal(
        this: &Connection,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        let identity = remote_connection_identity(&options);
        let kind;
        let user: Option<String>;
        let mut host = None;
        let mut port = None;
        let mut distro = None;
        let mut name = None;
        let mut container_id = None;
        let mut use_podman = None;
        let mut remote_env = None;

        match identity {
            RemoteConnectionIdentity::Ssh {
                host: identity_host,
                username,
                port: identity_port,
            } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(identity_host);
                port = identity_port;
                user = username;
            }
            RemoteConnectionIdentity::Wsl {
                distro_name,
                user: identity_user,
            } => {
                kind = RemoteConnectionKind::Wsl;
                distro = Some(distro_name);
                user = identity_user;
            }
            RemoteConnectionIdentity::Docker {
                container_id: identity_container_id,
                name: identity_name,
                remote_user,
            } => {
                kind = RemoteConnectionKind::Docker;
                container_id = Some(identity_container_id);
                name = Some(identity_name);
                user = Some(remote_user);
            }
            // Mock connections are persisted as SSH rows with synthetic
            // host/user values so tests exercise the same code path.
            #[cfg(any(test, feature = "test-support"))]
            RemoteConnectionIdentity::Mock { id } => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(format!("mock-{}", id));
                user = Some(format!("mock-user-{}", id));
            }
        }

        // Docker-only extras that are not part of the connection identity.
        if let RemoteConnectionOptions::Docker(options) = options {
            use_podman = Some(options.use_podman);
            remote_env = serde_json::to_string(&options.remote_env).ok();
        }

        Self::get_or_create_remote_connection_query(
            this,
            kind,
            host,
            port,
            user,
            distro,
            name,
            container_id,
            use_podman,
            remote_env,
        )
    }
1665
    /// Looks up a `remote_connections` row by its identity columns
    /// (kind/host/port/user/distro/name/container_id), inserting one when no
    /// match exists. `use_podman` and `remote_env` are stored on insert but
    /// deliberately not part of the match.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        // `IS` comparisons so NULL columns match NULL parameters.
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1729
    // Allocates a fresh workspace id by inserting an otherwise-default row
    // and returning its auto-assigned primary key.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1735
1736 fn recent_workspaces(
1737 &self,
1738 ) -> Result<
1739 Vec<(
1740 WorkspaceId,
1741 PathList,
1742 Option<RemoteConnectionId>,
1743 DateTime<Utc>,
1744 )>,
1745 > {
1746 Ok(self
1747 .recent_workspaces_query()?
1748 .into_iter()
1749 .map(|(id, paths, order, remote_connection_id, timestamp)| {
1750 (
1751 id,
1752 PathList::deserialize(&SerializedPathList { paths, order }),
1753 remote_connection_id.map(RemoteConnectionId),
1754 parse_timestamp(×tamp),
1755 )
1756 })
1757 .collect())
1758 }
1759
    // Raw rows backing `recent_workspaces`: (id, serialized paths, path
    // order, optional remote connection id, timestamp text), newest first.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1770
1771 fn session_workspaces(
1772 &self,
1773 session_id: String,
1774 ) -> Result<
1775 Vec<(
1776 WorkspaceId,
1777 PathList,
1778 Option<u64>,
1779 Option<RemoteConnectionId>,
1780 )>,
1781 > {
1782 Ok(self
1783 .session_workspaces_query(session_id)?
1784 .into_iter()
1785 .map(
1786 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1787 (
1788 WorkspaceId(workspace_id),
1789 PathList::deserialize(&SerializedPathList { paths, order }),
1790 window_id,
1791 remote_connection_id.map(RemoteConnectionId),
1792 )
1793 },
1794 )
1795 .collect())
1796 }
1797
    // Raw rows backing `session_workspaces`, newest first.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }
1806
    // Fetches the stored breakpoints for one file within one workspace.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1814
1815 query! {
1816 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1817 DELETE FROM breakpoints
1818 WHERE file_path = ?2
1819 }
1820 }
1821
    /// Loads every stored remote connection, keyed by id. Rows that fail to
    /// decode (unknown kind, missing required columns) are silently skipped
    /// via `filter_map`.
    fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
        Ok(self.select(sql!(
            SELECT
                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
            FROM
                remote_connections
        ))?()?
        .into_iter()
        .filter_map(
            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                Some((
                    RemoteConnectionId(id),
                    Self::remote_connection_from_row(
                        kind,
                        host,
                        port,
                        user,
                        distro,
                        container_id,
                        name,
                        use_podman,
                        remote_env,
                    )?,
                ))
            },
        )
        .collect())
    }
1850
    /// Loads a single remote connection by id, failing if the row is missing
    /// or cannot be decoded into `RemoteConnectionOptions`.
    pub(crate) fn remote_connection(
        &self,
        id: RemoteConnectionId,
    ) -> Result<RemoteConnectionOptions> {
        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
            self.select_row_bound(sql!(
                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                FROM remote_connections
                WHERE id = ?
            ))?(id.0)?
            .context("no such remote connection")?;
        Self::remote_connection_from_row(
            kind,
            host,
            port,
            user,
            distro,
            container_id,
            name,
            use_podman,
            remote_env,
        )
        .context("invalid remote_connection row")
    }
1875
1876 fn remote_connection_from_row(
1877 kind: String,
1878 host: Option<String>,
1879 port: Option<u16>,
1880 user: Option<String>,
1881 distro: Option<String>,
1882 container_id: Option<String>,
1883 name: Option<String>,
1884 use_podman: Option<bool>,
1885 remote_env: Option<String>,
1886 ) -> Option<RemoteConnectionOptions> {
1887 match RemoteConnectionKind::deserialize(&kind)? {
1888 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1889 distro_name: distro?,
1890 user: user,
1891 })),
1892 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1893 host: host?.into(),
1894 port,
1895 username: user,
1896 ..Default::default()
1897 })),
1898 RemoteConnectionKind::Docker => {
1899 let remote_env: BTreeMap<String, String> =
1900 serde_json::from_str(&remote_env?).ok()?;
1901 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1902 container_id: container_id?,
1903 name: name?,
1904 remote_user: user?,
1905 upload_binary_over_docker_exec: false,
1906 use_podman: use_podman?,
1907 remote_env,
1908 }))
1909 }
1910 }
1911 }
1912
    // Deletes one workspace row; dependent tables (panes, bookmarks,
    // breakpoints, ...) are cleaned up via their ON DELETE CASCADE foreign keys.
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1919
1920 async fn all_paths_exist_with_a_directory(paths: &[PathBuf], fs: &dyn Fs) -> bool {
1921 let mut any_dir = false;
1922 for path in paths {
1923 match fs.metadata(path).await.ok().flatten() {
1924 None => {
1925 return false;
1926 }
1927 Some(meta) => {
1928 if meta.is_dir {
1929 any_dir = true;
1930 }
1931 }
1932 }
1933 }
1934 any_dir
1935 }
1936
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist.
    pub async fn recent_workspaces_on_disk(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let mut result = Vec::new();
        let mut workspaces_to_delete = Vec::new();
        let remote_connections = self.remote_connections()?;
        let now = Utc::now();
        for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? {
            // Remote workspaces: keep if their connection row still decodes,
            // otherwise schedule for deletion. Paths are not checked on disk.
            if let Some(remote_connection_id) = remote_connection_id {
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                } else {
                    workspaces_to_delete.push(id);
                }
                continue;
            }

            // Delete the workspace if any of the paths are WSL paths. If a
            // local workspace points to WSL, attempting to read its metadata
            // will wait for the WSL VM and file server to boot up. This can
            // block for many seconds. Supported scenarios use remote
            // workspaces.
            if cfg!(windows) {
                let has_wsl_path = paths
                    .paths()
                    .iter()
                    .any(|path| util::paths::WslPath::from_path(path).is_some());
                if has_wsl_path {
                    workspaces_to_delete.push(id);
                    continue;
                }
            }

            // Local workspaces whose paths vanished are only garbage-collected
            // after a week, so a temporarily unmounted drive doesn't lose them.
            if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            } else if now - timestamp >= chrono::Duration::days(7) {
                workspaces_to_delete.push(id);
            }
        }

        futures::future::join_all(
            workspaces_to_delete
                .into_iter()
                .map(|id| self.delete_workspace_by_id(id)),
        )
        .await;
        Ok(result)
    }
2000
    /// Returns the most recently used workspace that is still valid on disk
    /// (the first entry of `recent_workspaces_on_disk`, which sorts newest
    /// first), or `None` if there is none.
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        Ok(self.recent_workspaces_on_disk(fs).await?.into_iter().next())
    }
2014
2015 // Returns the locations of the workspaces that were still opened when the last
2016 // session was closed (i.e. when Zed was quit).
2017 // If `last_session_window_order` is provided, the returned locations are ordered
2018 // according to that.
2019 pub async fn last_session_workspace_locations(
2020 &self,
2021 last_session_id: &str,
2022 last_session_window_stack: Option<Vec<WindowId>>,
2023 fs: &dyn Fs,
2024 ) -> Result<Vec<SessionWorkspace>> {
2025 let mut workspaces = Vec::new();
2026
2027 for (workspace_id, paths, window_id, remote_connection_id) in
2028 self.session_workspaces(last_session_id.to_owned())?
2029 {
2030 let window_id = window_id.map(WindowId::from);
2031
2032 if let Some(remote_connection_id) = remote_connection_id {
2033 workspaces.push(SessionWorkspace {
2034 workspace_id,
2035 location: SerializedWorkspaceLocation::Remote(
2036 self.remote_connection(remote_connection_id)?,
2037 ),
2038 paths,
2039 window_id,
2040 });
2041 } else if paths.is_empty() {
2042 // Empty workspace with items (drafts, files) - include for restoration
2043 workspaces.push(SessionWorkspace {
2044 workspace_id,
2045 location: SerializedWorkspaceLocation::Local,
2046 paths,
2047 window_id,
2048 });
2049 } else {
2050 if Self::all_paths_exist_with_a_directory(paths.paths(), fs).await {
2051 workspaces.push(SessionWorkspace {
2052 workspace_id,
2053 location: SerializedWorkspaceLocation::Local,
2054 paths,
2055 window_id,
2056 });
2057 }
2058 }
2059 }
2060
2061 if let Some(stack) = last_session_window_stack {
2062 workspaces.sort_by_key(|workspace| {
2063 workspace
2064 .window_id
2065 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
2066 .unwrap_or(usize::MAX)
2067 });
2068 }
2069
2070 Ok(workspaces)
2071 }
2072
2073 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
2074 Ok(self
2075 .get_pane_group(workspace_id, None)?
2076 .into_iter()
2077 .next()
2078 .unwrap_or_else(|| {
2079 SerializedPaneGroup::Pane(SerializedPane {
2080 active: true,
2081 children: vec![],
2082 pinned_count: 0,
2083 })
2084 }))
2085 }
2086
    /// Recursively loads the pane tree under `group_id` (the root when
    /// `None`). A single UNION query returns both child groups and child
    /// panes in position order; group rows carry NULL pane columns and vice
    /// versa, which is how the two cases are told apart below.
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
                FROM (SELECT
                        group_id,
                        axis,
                        NULL as pane_id,
                        NULL as active,
                        NULL as pinned_count,
                        position,
                        parent_group_id,
                        workspace_id,
                        flexes
                      FROM pane_groups
                     UNION
                      SELECT
                        NULL,
                        NULL,
                        center_panes.pane_id,
                        panes.active as active,
                        pinned_count,
                        position,
                        parent_group_id,
                        panes.workspace_id as workspace_id,
                        NULL
                      FROM center_panes
                      JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            // All three pane columns must be present for a pane row.
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // Group row: flexes are stored as a JSON array of f32.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2161
    /// Recursively persists a pane tree. `parent` is `None` only for the root
    /// call; child calls receive the parent group's id and the child's
    /// position within it.
    fn save_pane_group(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() {
            log::debug!("Saving a pane group for workspace {workspace_id:?}");
        }
        match pane_group {
            SerializedPaneGroup::Group {
                axis,
                children,
                flexes,
            } => {
                let (parent_id, position) = parent.unzip();

                // Flexes are serialized as a JSON array of f32.
                let flex_string = flexes
                    .as_ref()
                    .map(|flexes| serde_json::json!(flexes).to_string());

                let group_id = conn.select_row_bound::<_, i64>(sql!(
                    INSERT INTO pane_groups(
                        workspace_id,
                        parent_group_id,
                        position,
                        axis,
                        flexes
                    )
                    VALUES (?, ?, ?, ?, ?)
                    RETURNING group_id
                ))?((
                    workspace_id,
                    parent_id,
                    position,
                    *axis,
                    flex_string,
                ))?
                .context("Couldn't retrieve group_id from inserted pane_group")?;

                // Recurse into children with this group's fresh id as parent.
                for (position, group) in children.iter().enumerate() {
                    Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
                }

                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => {
                Self::save_pane(conn, workspace_id, pane, parent)?;
                Ok(())
            }
        }
    }
2214
2215 fn save_pane(
2216 conn: &Connection,
2217 workspace_id: WorkspaceId,
2218 pane: &SerializedPane,
2219 parent: Option<(GroupId, usize)>,
2220 ) -> Result<PaneId> {
2221 let pane_id = conn.select_row_bound::<_, i64>(sql!(
2222 INSERT INTO panes(workspace_id, active, pinned_count)
2223 VALUES (?, ?, ?)
2224 RETURNING pane_id
2225 ))?((workspace_id, pane.active, pane.pinned_count))?
2226 .context("Could not retrieve inserted pane_id")?;
2227
2228 let (parent_id, order) = parent.unzip();
2229 conn.exec_bound(sql!(
2230 INSERT INTO center_panes(pane_id, parent_group_id, position)
2231 VALUES (?, ?, ?)
2232 ))?((pane_id, parent_id, order))?;
2233
2234 Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;
2235
2236 Ok(pane_id)
2237 }
2238
    /// Loads the serialized items of a single pane, ordered by their stored
    /// position within the pane.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2246
2247 fn save_items(
2248 conn: &Connection,
2249 workspace_id: WorkspaceId,
2250 pane_id: PaneId,
2251 items: &[SerializedItem],
2252 ) -> Result<()> {
2253 let mut insert = conn.exec_bound(sql!(
2254 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2255 )).context("Preparing insertion")?;
2256 for (position, item) in items.iter().enumerate() {
2257 insert((workspace_id, pane_id, position, item))?;
2258 }
2259
2260 Ok(())
2261 }
2262
    // Bumps the workspace's `timestamp` so it sorts as most-recently-used.
    query! {
        pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
            UPDATE workspaces
            SET timestamp = CURRENT_TIMESTAMP
            WHERE workspace_id = ?
        }
    }

    // Persists the window's bounds/open-state and the display it is shown on.
    query! {
        pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
            UPDATE workspaces
            SET window_state = ?2,
                window_x = ?3,
                window_y = ?4,
                window_width = ?5,
                window_height = ?6,
                display = ?7
            WHERE workspace_id = ?1
        }
    }

    // Stores whether the workspace uses centered-layout mode.
    query! {
        pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
            UPDATE workspaces
            SET centered_layout = ?2
            WHERE workspace_id = ?1
        }
    }

    // Associates the workspace with a session; `None` clears the association.
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }

    // Sets both the session id and the window id in a single update.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2307
    /// Loads every toolchain recorded for `workspace_id`, returning each
    /// toolchain together with its worktree root path and the subproject path
    /// relative to that worktree.
    ///
    /// Rows whose stored JSON fails to parse or whose relative path fails to
    /// decode are silently dropped instead of failing the whole query.
    pub(crate) async fn toolchains(
        &self,
        workspace_id: WorkspaceId,
    ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
        self.write(move |this| {
            let mut select = this
                .select_bound(sql!(
                    SELECT
                        name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
                    FROM toolchains
                    WHERE workspace_id = ?
                ))
                .context("select toolchains")?;

            let toolchain: Vec<(String, String, String, String, String, String)> =
                select(workspace_id)?;

            Ok(toolchain
                .into_iter()
                .filter_map(
                    |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
                        Some((
                            Toolchain {
                                name: name.into(),
                                path: path.into(),
                                language_name: LanguageName::new(&language),
                                // Skip the row when the stored JSON is corrupt.
                                as_json: serde_json::Value::from_str(&json).ok()?,
                            },
                            Arc::from(worktree_root_path.as_ref()),
                            // Skip the row (logging the error) when the stored
                            // relative path does not decode.
                            RelPath::from_proto(&relative_worktree_path).log_err()?,
                        ))
                    },
                )
                .collect())
        })
        .await
    }
2345
    /// Upserts the toolchain chosen for a (workspace, worktree root, relative
    /// path, language) tuple. An existing row is overwritten with the new
    /// name, path and raw JSON.
    pub async fn set_toolchain(
        &self,
        workspace_id: WorkspaceId,
        worktree_root_path: Arc<Path>,
        relative_worktree_path: Arc<RelPath>,
        toolchain: Toolchain,
    ) -> Result<()> {
        log::debug!(
            "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
            toolchain.name
        );
        self.write(move |conn| {
            let mut insert = conn
                .exec_bound(sql!(
                    INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
                    ON CONFLICT DO
                    UPDATE SET
                        name = ?5,
                        path = ?6,
                        raw_json = ?7
                ))
                .context("Preparing insertion")?;

            // The ?5/?6/?7 placeholders in the UPDATE clause re-use the
            // name/path/raw_json positions of the INSERT's value list.
            insert((
                workspace_id,
                worktree_root_path.to_string_lossy().into_owned(),
                relative_worktree_path.as_unix_str(),
                toolchain.language_name.as_ref(),
                toolchain.name.as_ref(),
                toolchain.path.as_ref(),
                toolchain.as_json.to_string(),
            ))?;

            Ok(())
        }).await
    }
2382
    /// Replaces the persisted worktree-trust state with `trusted_worktrees`:
    /// the whole table is cleared, then the flattened (path, host) pairs are
    /// re-inserted in batches that stay under SQLite's host-parameter limit
    /// (3 placeholders per row, MAX_QUERY_PLACEHOLDERS total).
    pub(crate) async fn save_trusted_worktrees(
        &self,
        trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
    ) -> anyhow::Result<()> {
        use anyhow::Context as _;
        use db::sqlez::statement::Statement;
        use itertools::Itertools as _;

        self.clear_trusted_worktrees()
            .await
            .context("clearing previous trust state")?;

        // Flatten the host -> paths map into one (path, host) row per path.
        let trusted_worktrees = trusted_worktrees
            .into_iter()
            .flat_map(|(host, abs_paths)| {
                abs_paths
                    .into_iter()
                    .map(move |abs_path| (Some(abs_path), host.clone()))
            })
            .collect::<Vec<_>>();
        let mut first_worktree;
        let mut last_worktree = 0_usize;
        // Build one "(?, ?, ?)" tuple per row, then chunk the tuples so each
        // INSERT carries at most MAX_QUERY_PLACEHOLDERS bound parameters.
        // Each chunk yields (row count, comma-joined placeholder string).
        for (count, placeholders) in std::iter::once("(?, ?, ?)")
            .cycle()
            .take(trusted_worktrees.len())
            .chunks(MAX_QUERY_PLACEHOLDERS / 3)
            .into_iter()
            .map(|chunk| {
                let mut count = 0;
                let placeholders = chunk
                    .inspect(|_| {
                        count += 1;
                    })
                    .join(", ");
                (count, placeholders)
            })
            .collect::<Vec<_>>()
        {
            // Window of rows covered by this batch.
            first_worktree = last_worktree;
            last_worktree = last_worktree + count;
            let query = format!(
                r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
VALUES {placeholders};"#
            );

            let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
            self.write(move |conn| {
                let mut statement = Statement::prepare(conn, query)?;
                // Bind parameters are 1-indexed; each row binds
                // (absolute_path, user_name, host_name) in order.
                let mut next_index = 1;
                for (abs_path, host) in trusted_worktrees {
                    let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
                    next_index = statement.bind(
                        &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host
                            .as_ref()
                            .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host.as_ref().map(|host| host.host_identifier.as_str()),
                        next_index,
                    )?;
                }
                statement.exec()
            })
            .await
            .context("inserting new trusted state")?;
        }
        Ok(())
    }
2456
2457 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2458 let trusted_worktrees = self.trusted_worktrees()?;
2459 Ok(trusted_worktrees
2460 .into_iter()
2461 .filter_map(|(abs_path, user_name, host_name)| {
2462 let db_host = match (user_name, host_name) {
2463 (None, Some(host_name)) => Some(RemoteHostLocation {
2464 user_name: None,
2465 host_identifier: SharedString::new(host_name),
2466 }),
2467 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2468 user_name: Some(SharedString::new(user_name)),
2469 host_identifier: SharedString::new(host_name),
2470 }),
2471 _ => None,
2472 };
2473 Some((db_host, abs_path?))
2474 })
2475 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2476 acc.entry(remote_host)
2477 .or_insert_with(HashSet::default)
2478 .insert(abs_path);
2479 acc
2480 }))
2481 }
2482
    // Raw rows of the trust table: (absolute_path, user_name, host_name),
    // every column nullable.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }

    // Drops all persisted trust state; callers re-insert the full set afterwards.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2495}
2496
/// One known workspace: its id, where it lives (local/remote), the set of
/// root paths it was opened with, and its last-used timestamp.
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2503
2504/// Resolves workspace entries whose paths are git linked worktree checkouts
2505/// to their main repository paths.
2506///
2507/// For each workspace entry:
2508/// - If any path is a linked worktree checkout, all worktree paths in that
2509/// entry are resolved to their main repository paths, producing a new
2510/// `PathList`.
2511/// - The resolved entry is then deduplicated against existing entries: if a
2512/// workspace with the same paths already exists, the entry with the most
2513/// recent timestamp is kept.
2514pub async fn resolve_worktree_workspaces(
2515 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2516 fs: &dyn Fs,
2517) -> Vec<WorkspaceEntry> {
2518 // First pass: resolve worktree paths to main repo paths concurrently.
2519 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2520 let paths = entry.2.paths();
2521 if paths.is_empty() {
2522 return entry;
2523 }
2524
2525 // Resolve each path concurrently
2526 let resolved_paths = futures::future::join_all(
2527 paths
2528 .iter()
2529 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2530 )
2531 .await;
2532
2533 // If no paths were resolved, this entry is not a worktree — keep as-is
2534 if resolved_paths.iter().all(|r| r.is_none()) {
2535 return entry;
2536 }
2537
2538 // Build new path list, substituting resolved paths
2539 let new_paths: Vec<PathBuf> = paths
2540 .iter()
2541 .zip(resolved_paths.iter())
2542 .map(|(original, resolved)| {
2543 resolved
2544 .as_ref()
2545 .cloned()
2546 .unwrap_or_else(|| original.clone())
2547 })
2548 .collect();
2549
2550 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2551 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2552 }))
2553 .await;
2554
2555 // Second pass: deduplicate by PathList.
2556 // When two entries resolve to the same paths, keep the one with the
2557 // more recent timestamp.
2558 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2559 let mut result: Vec<WorkspaceEntry> = Vec::new();
2560
2561 for entry in resolved {
2562 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2563 if let Some(&existing_idx) = seen.get(&key) {
2564 // Keep the entry with the more recent timestamp
2565 if entry.3 > result[existing_idx].3 {
2566 result[existing_idx] = entry;
2567 }
2568 } else {
2569 seen.insert(key, result.len());
2570 result.push(entry);
2571 }
2572 }
2573
2574 result
2575}
2576
2577pub fn delete_unloaded_items(
2578 alive_items: Vec<ItemId>,
2579 workspace_id: WorkspaceId,
2580 table: &'static str,
2581 db: &ThreadSafeConnection,
2582 cx: &mut App,
2583) -> Task<Result<()>> {
2584 let db = db.clone();
2585 cx.spawn(async move |_| {
2586 let placeholders = alive_items
2587 .iter()
2588 .map(|_| "?")
2589 .collect::<Vec<&str>>()
2590 .join(", ");
2591
2592 let query = format!(
2593 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2594 );
2595
2596 db.write(move |conn| {
2597 let mut statement = Statement::prepare(conn, query)?;
2598 let mut next_index = statement.bind(&workspace_id, 1)?;
2599 for id in alive_items {
2600 next_index = statement.bind(&id, next_index)?;
2601 }
2602 statement.exec()
2603 })
2604 .await
2605 })
2606}
2607
2608#[cfg(test)]
2609mod tests {
2610 use super::*;
2611 use crate::OpenMode;
2612 use crate::PathList;
2613 use crate::ProjectGroupKey;
2614 use crate::{
2615 multi_workspace::MultiWorkspace,
2616 persistence::{
2617 model::{
2618 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2619 SessionWorkspace,
2620 },
2621 read_multi_workspace_state,
2622 },
2623 };
2624
2625 use gpui::AppContext as _;
2626 use pretty_assertions::assert_eq;
2627 use project::Project;
2628 use remote::SshConnectionOptions;
2629 use serde_json::json;
2630 use std::{thread, time::Duration};
2631
2632 /// Creates a unique directory in a FakeFs, returning the path.
2633 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2634 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2635 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2636 fs.insert_tree(&dir, json!({})).await;
2637 dir
2638 }
2639
    // Verifies that activating and removing workspaces in a MultiWorkspace
    // persists the active workspace's database id for the window.
    #[gpui::test]
    async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // Random database ids keep this test independent of others sharing
        // the global DB.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // --- Add a second workspace ---
        let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws, _cx| ws.set_random_database_id());
            mw.activate(workspace.clone(), window, cx);
            workspace
        });

        // Run background tasks so serialize has a chance to flush.
        cx.run_until_parked();

        // Read back the persisted state and check that the active workspace ID was written.
        let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
        assert_eq!(
            state_after_add.active_workspace_id, active_workspace2_db_id,
            "After adding a second workspace, the serialized active_workspace_id should match \
            the newly activated workspace's database id"
        );

        // --- Remove the non-active workspace ---
        multi_workspace.update_in(cx, |mw, _window, cx| {
            let active = mw.workspace().clone();
            let ws = mw
                .workspaces()
                .find(|ws| *ws != &active)
                .expect("should have a non-active workspace");
            mw.remove([ws.clone()], |_, _, _| unreachable!(), _window, cx)
                .detach_and_log_err(cx);
        });

        cx.run_until_parked();

        let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let remaining_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert_eq!(
            state_after_remove.active_workspace_id, remaining_db_id,
            "After removing a workspace, the serialized active_workspace_id should match \
            the remaining active workspace's database id"
        );
    }
2704
2705 #[gpui::test]
2706 async fn test_breakpoints() {
2707 zlog::init_test();
2708
2709 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2710 let id = db.next_id().await.unwrap();
2711
2712 let path = Path::new("/tmp/test.rs");
2713
2714 let breakpoint = Breakpoint {
2715 position: 123,
2716 message: None,
2717 state: BreakpointState::Enabled,
2718 condition: None,
2719 hit_condition: None,
2720 };
2721
2722 let log_breakpoint = Breakpoint {
2723 position: 456,
2724 message: Some("Test log message".into()),
2725 state: BreakpointState::Enabled,
2726 condition: None,
2727 hit_condition: None,
2728 };
2729
2730 let disable_breakpoint = Breakpoint {
2731 position: 578,
2732 message: None,
2733 state: BreakpointState::Disabled,
2734 condition: None,
2735 hit_condition: None,
2736 };
2737
2738 let condition_breakpoint = Breakpoint {
2739 position: 789,
2740 message: None,
2741 state: BreakpointState::Enabled,
2742 condition: Some("x > 5".into()),
2743 hit_condition: None,
2744 };
2745
2746 let hit_condition_breakpoint = Breakpoint {
2747 position: 999,
2748 message: None,
2749 state: BreakpointState::Enabled,
2750 condition: None,
2751 hit_condition: Some(">= 3".into()),
2752 };
2753
2754 let workspace = SerializedWorkspace {
2755 id,
2756 paths: PathList::new(&["/tmp"]),
2757 location: SerializedWorkspaceLocation::Local,
2758 center_group: Default::default(),
2759 window_bounds: Default::default(),
2760 display: Default::default(),
2761 docks: Default::default(),
2762 centered_layout: false,
2763 bookmarks: Default::default(),
2764 breakpoints: {
2765 let mut map = collections::BTreeMap::default();
2766 map.insert(
2767 Arc::from(path),
2768 vec![
2769 SourceBreakpoint {
2770 row: breakpoint.position,
2771 path: Arc::from(path),
2772 message: breakpoint.message.clone(),
2773 state: breakpoint.state,
2774 condition: breakpoint.condition.clone(),
2775 hit_condition: breakpoint.hit_condition.clone(),
2776 },
2777 SourceBreakpoint {
2778 row: log_breakpoint.position,
2779 path: Arc::from(path),
2780 message: log_breakpoint.message.clone(),
2781 state: log_breakpoint.state,
2782 condition: log_breakpoint.condition.clone(),
2783 hit_condition: log_breakpoint.hit_condition.clone(),
2784 },
2785 SourceBreakpoint {
2786 row: disable_breakpoint.position,
2787 path: Arc::from(path),
2788 message: disable_breakpoint.message.clone(),
2789 state: disable_breakpoint.state,
2790 condition: disable_breakpoint.condition.clone(),
2791 hit_condition: disable_breakpoint.hit_condition.clone(),
2792 },
2793 SourceBreakpoint {
2794 row: condition_breakpoint.position,
2795 path: Arc::from(path),
2796 message: condition_breakpoint.message.clone(),
2797 state: condition_breakpoint.state,
2798 condition: condition_breakpoint.condition.clone(),
2799 hit_condition: condition_breakpoint.hit_condition.clone(),
2800 },
2801 SourceBreakpoint {
2802 row: hit_condition_breakpoint.position,
2803 path: Arc::from(path),
2804 message: hit_condition_breakpoint.message.clone(),
2805 state: hit_condition_breakpoint.state,
2806 condition: hit_condition_breakpoint.condition.clone(),
2807 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2808 },
2809 ],
2810 );
2811 map
2812 },
2813 session_id: None,
2814 window_id: None,
2815 user_toolchains: Default::default(),
2816 };
2817
2818 db.save_workspace(workspace.clone()).await;
2819
2820 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2821 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2822
2823 assert_eq!(loaded_breakpoints.len(), 5);
2824
2825 // normal breakpoint
2826 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2827 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2828 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2829 assert_eq!(
2830 loaded_breakpoints[0].hit_condition,
2831 breakpoint.hit_condition
2832 );
2833 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2834 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2835
2836 // enabled breakpoint
2837 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2838 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2839 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2840 assert_eq!(
2841 loaded_breakpoints[1].hit_condition,
2842 log_breakpoint.hit_condition
2843 );
2844 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2845 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2846
2847 // disable breakpoint
2848 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2849 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2850 assert_eq!(
2851 loaded_breakpoints[2].condition,
2852 disable_breakpoint.condition
2853 );
2854 assert_eq!(
2855 loaded_breakpoints[2].hit_condition,
2856 disable_breakpoint.hit_condition
2857 );
2858 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2859 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2860
2861 // condition breakpoint
2862 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2863 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2864 assert_eq!(
2865 loaded_breakpoints[3].condition,
2866 condition_breakpoint.condition
2867 );
2868 assert_eq!(
2869 loaded_breakpoints[3].hit_condition,
2870 condition_breakpoint.hit_condition
2871 );
2872 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2873 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2874
2875 // hit condition breakpoint
2876 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2877 assert_eq!(
2878 loaded_breakpoints[4].message,
2879 hit_condition_breakpoint.message
2880 );
2881 assert_eq!(
2882 loaded_breakpoints[4].condition,
2883 hit_condition_breakpoint.condition
2884 );
2885 assert_eq!(
2886 loaded_breakpoints[4].hit_condition,
2887 hit_condition_breakpoint.hit_condition
2888 );
2889 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2890 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2891 }
2892
    // Verifies that re-saving a workspace with an empty breakpoint map clears
    // the previously persisted breakpoint for that file.
    #[gpui::test]
    async fn test_remove_last_breakpoint() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
        let id = db.next_id().await.unwrap();

        let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");

        let breakpoint_to_remove = Breakpoint {
            position: 100,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: {
                let mut map = collections::BTreeMap::default();
                map.insert(
                    Arc::from(singular_path),
                    vec![SourceBreakpoint {
                        row: breakpoint_to_remove.position,
                        path: Arc::from(singular_path),
                        message: None,
                        state: BreakpointState::Enabled,
                        condition: None,
                        hit_condition: None,
                    }],
                );
                map
            },
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Sanity check: the single breakpoint round-trips before removal.
        let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
        let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();

        assert_eq!(loaded_breakpoints.len(), 1);
        assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
        assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
        assert_eq!(
            loaded_breakpoints[0].condition,
            breakpoint_to_remove.condition
        );
        assert_eq!(
            loaded_breakpoints[0].hit_condition,
            breakpoint_to_remove.hit_condition
        );
        assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
        assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));

        // Save the same workspace id again, now with no breakpoints at all.
        let workspace_without_breakpoint = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: collections::BTreeMap::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_without_breakpoint.clone())
            .await;

        // The persisted breakpoint for the file must be gone.
        let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
        let empty_breakpoints = loaded_after_remove
            .breakpoints
            .get(&Arc::from(singular_path));

        assert!(empty_breakpoints.is_none());
    }
2985
    // Verifies that next_id() actually inserts a workspace row, so foreign
    // keys referencing it are valid immediately.
    #[gpui::test]
    async fn test_next_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_next_id_stability").await;

        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;
                )],
                &mut |_, _, _| false,
            )
            .unwrap();
        })
        .await;

        let id = db.next_id().await.unwrap();
        // Assert the empty row got inserted
        assert_eq!(
            Some(id),
            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
            ))
            .unwrap()(id)
            .unwrap()
        );

        db.write(move |conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", id))
                .unwrap()
        })
        .await;

        // NOTE(review): this selects with the literal id 1 rather than `id`,
        // implicitly asserting next_id() hands out 1 on a fresh DB — confirm
        // that is intended rather than querying with `id`.
        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3034
    // Verifies that rows referencing a workspace id by foreign key survive
    // re-saving that workspace, including after its path set changes.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                        REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Change workspace 1's paths and save repeatedly; the id (and thus
        // the foreign-key rows) must remain stable throughout.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3127
    // Test helper: builds a pane group with the given axis and children and
    // no explicit flex factors.
    fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
        SerializedPaneGroup::Group {
            axis: SerializedAxis(axis),
            flexes: None,
            children,
        }
    }
3135
    // Round-trips a nested pane-group layout (including duplicate saves of the
    // same workspace id) through the DB.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // Layout by terminal item id (diagram corrected to match the items
        // actually created below):
        // -----------------
        // | 5,6   | 9,10  |
        // | - - - |       |
        // | 7,8   |       |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Roots are a set: reversed order must find the same workspace.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3210
    // Verifies path-set-based workspace lookup and that re-saving a workspace
    // with new paths reassigns which workspace a path set maps to.
    #[gpui::test]
    async fn test_workspace_assignment() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_basic_functionality").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(1),
            user_toolchains: Default::default(),
        };

        let mut workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: Some(2),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_2.clone()).await;

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);

        db.save_workspace(workspace_2.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        let mut workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp2", "/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(3),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_3.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
        db.save_workspace(workspace_3.clone()).await;
        // NOTE(review): "tmp" below lacks a leading slash, so this lookup is
        // trivially None regardless of saved state — confirm "/tmp" wasn't
        // intended here.
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }
3308
    // Verifies `session_workspaces`: workspaces are grouped by their
    // `session_id`, returned newest-save-first, and remote workspaces carry
    // their connection id in the result tuple.
    //
    // Setup: workspaces 1 and 2 belong to "session-id-1"; 3 (local) and 5
    // (remote) belong to "session-id-2"; 6 to "session-id-3"; 4 has no
    // session and must not appear in any query below.
    #[gpui::test]
    async fn test_session_workspaces() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp1"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(10),
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(20),
            user_toolchains: Default::default(),
        };

        let workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp3"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(30),
            user_toolchains: Default::default(),
        };

        // No session id: this workspace must never appear in the results.
        let workspace_4 = SerializedWorkspace {
            id: WorkspaceId(4),
            paths: PathList::new(&["/tmp4"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Register an SSH connection so workspace_5 can be a remote one.
        let connection_id = db
            .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: "my-host".into(),
                port: Some(1234),
                ..Default::default()
            }))
            .await
            .unwrap();

        let workspace_5 = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(
                db.remote_connection(connection_id).unwrap(),
            ),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(50),
            user_toolchains: Default::default(),
        };

        let workspace_6 = SerializedWorkspace {
            id: WorkspaceId(6),
            paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("session-id-3".to_owned()),
            window_id: Some(60),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_2.clone()).await;
        db.save_workspace(workspace_3.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_4.clone()).await;
        db.save_workspace(workspace_5.clone()).await;
        db.save_workspace(workspace_6.clone()).await;

        // Results come back most-recently-saved first: workspace_2 was saved
        // after workspace_1 (separated by the sleep above), so it is index 0.
        let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(2));
        assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
        assert_eq!(locations[0].2, Some(20));
        assert_eq!(locations[1].0, WorkspaceId(1));
        assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
        assert_eq!(locations[1].2, Some(10));

        let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(5));
        assert_eq!(locations[0].1, PathList::default());
        assert_eq!(locations[0].2, Some(50));
        // The remote workspace carries its remote connection id (tuple slot 3).
        assert_eq!(locations[0].3, Some(connection_id));
        assert_eq!(locations[1].0, WorkspaceId(3));
        assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
        assert_eq!(locations[1].2, Some(30));

        let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
        assert_eq!(locations.len(), 1);
        assert_eq!(locations[0].0, WorkspaceId(6));
        assert_eq!(
            locations[0].1,
            PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
        );
        assert_eq!(locations[0].2, Some(60));
    }
3459
3460 fn default_workspace<P: AsRef<Path>>(
3461 paths: &[P],
3462 center_group: &SerializedPaneGroup,
3463 ) -> SerializedWorkspace {
3464 SerializedWorkspace {
3465 id: WorkspaceId(4),
3466 paths: PathList::new(paths),
3467 location: SerializedWorkspaceLocation::Local,
3468 center_group: center_group.clone(),
3469 window_bounds: Default::default(),
3470 display: Default::default(),
3471 docks: Default::default(),
3472 bookmarks: Default::default(),
3473 breakpoints: Default::default(),
3474 centered_layout: false,
3475 session_id: None,
3476 window_id: None,
3477 user_toolchains: Default::default(),
3478 }
3479 }
3480
    // Verifies that `last_session_workspace_locations` returns one entry per
    // workspace of the session, ordered to match the supplied window stack
    // (top of stack first). Stack window ids 2,8,5,9,3,4 map to workspaces
    // 4,3,2,1,5,6 via the `window_id` assigned to each below.
    #[gpui::test]
    async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
        // Real temp dirs are needed so the workspace paths exist on disk.
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();

        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;

        // (workspace id, root paths, window id) — all in the same session.
        let workspaces = [
            (1, vec![dir1.path()], 9),
            (2, vec![dir2.path()], 5),
            (3, vec![dir3.path()], 8),
            (4, vec![dir4.path()], 2),
            (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
            (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
        ]
        .into_iter()
        .map(|(id, paths, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::new(paths.as_slice()),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9),
            WindowId::from(3),
            WindowId::from(4), // Bottom
        ]));

        // Expect the results ordered by the stack: topmost window first.
        let locations = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(
            locations,
            [
                SessionWorkspace {
                    workspace_id: WorkspaceId(4),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path()]),
                    window_id: Some(WindowId::from(2u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(3),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir3.path()]),
                    window_id: Some(WindowId::from(8u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(2),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir2.path()]),
                    window_id: Some(WindowId::from(5u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(1),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path()]),
                    window_id: Some(WindowId::from(9u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(5),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
                    window_id: Some(WindowId::from(3u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(6),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
                    window_id: Some(WindowId::from(4u64)),
                },
            ]
        );
    }
3582
    // Same as `test_last_session_workspace_locations`, but for remote (SSH)
    // workspaces: connection options must round-trip through the DB and the
    // stack ordering (top first) must still hold.
    #[gpui::test]
    async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
                .await;

        // Register four SSH connections; each closure registers the options
        // in the DB and yields them back. `join_all` below awaits them all.
        let remote_connections = [
            ("host-1", "my-user-1"),
            ("host-2", "my-user-2"),
            ("host-3", "my-user-3"),
            ("host-4", "my-user-4"),
        ]
        .into_iter()
        .map(|(host, user)| async {
            let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: host.into(),
                username: Some(user.to_string()),
                ..Default::default()
            });
            db.get_or_create_remote_connection(options.clone())
                .await
                .unwrap();
            options
        })
        .collect::<Vec<_>>();

        let remote_connections = futures::future::join_all(remote_connections).await;

        // (workspace id, connection, window id) — all in the same session.
        let workspaces = [
            (1, remote_connections[0].clone(), 9),
            (2, remote_connections[1].clone(), 5),
            (3, remote_connections[2].clone(), 8),
            (4, remote_connections[3].clone(), 2),
        ]
        .into_iter()
        .map(|(id, remote_connection, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(remote_connection),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9), // Bottom
        ]));

        // Stack order 2,8,5,9 maps to workspaces 4,3,2,1.
        let have = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(have.len(), 4);
        assert_eq!(
            have[0],
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(2u64)),
            }
        );
        assert_eq!(
            have[1],
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(8u64)),
            }
        );
        assert_eq!(
            have[2],
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(5u64)),
            }
        );
        assert_eq!(
            have[3],
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(9u64)),
            }
        );
    }
3689
3690 #[gpui::test]
3691 async fn test_get_or_create_ssh_project() {
3692 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3693
3694 let host = "example.com".to_string();
3695 let port = Some(22_u16);
3696 let user = Some("user".to_string());
3697
3698 let connection_id = db
3699 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3700 host: host.clone().into(),
3701 port,
3702 username: user.clone(),
3703 ..Default::default()
3704 }))
3705 .await
3706 .unwrap();
3707
3708 // Test that calling the function again with the same parameters returns the same project
3709 let same_connection = db
3710 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3711 host: host.clone().into(),
3712 port,
3713 username: user.clone(),
3714 ..Default::default()
3715 }))
3716 .await
3717 .unwrap();
3718
3719 assert_eq!(connection_id, same_connection);
3720
3721 // Test with different parameters
3722 let host2 = "otherexample.com".to_string();
3723 let port2 = None;
3724 let user2 = Some("otheruser".to_string());
3725
3726 let different_connection = db
3727 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3728 host: host2.clone().into(),
3729 port: port2,
3730 username: user2.clone(),
3731 ..Default::default()
3732 }))
3733 .await
3734 .unwrap();
3735
3736 assert_ne!(connection_id, different_connection);
3737 }
3738
3739 #[gpui::test]
3740 async fn test_get_or_create_ssh_project_with_null_user() {
3741 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
3742
3743 let (host, port, user) = ("example.com".to_string(), None, None);
3744
3745 let connection_id = db
3746 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3747 host: host.clone().into(),
3748 port,
3749 username: None,
3750 ..Default::default()
3751 }))
3752 .await
3753 .unwrap();
3754
3755 let same_connection_id = db
3756 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3757 host: host.clone().into(),
3758 port,
3759 username: user.clone(),
3760 ..Default::default()
3761 }))
3762 .await
3763 .unwrap();
3764
3765 assert_eq!(connection_id, same_connection_id);
3766 }
3767
    // Stores several SSH connections (with and without port/username) and
    // verifies `remote_connections()` returns all of them, keyed by the ids
    // that `get_or_create_remote_connection` handed out.
    #[gpui::test]
    async fn test_get_remote_connections() {
        let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;

        // (host, port, username) triples covering all optional-field combos.
        let connections = [
            ("example.com".to_string(), None, None),
            (
                "anotherexample.com".to_string(),
                Some(123_u16),
                Some("user2".to_string()),
            ),
            ("yetanother.com".to_string(), Some(345_u16), None),
        ];

        let mut ids = Vec::new();
        for (host, port, user) in connections.iter() {
            ids.push(
                db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
                    SshConnectionOptions {
                        host: host.clone().into(),
                        port: *port,
                        username: user.clone(),
                        ..Default::default()
                    },
                ))
                .await
                .unwrap(),
            );
        }

        // Compare as a HashMap so the check is insensitive to return order.
        let stored_connections = db.remote_connections().unwrap();
        assert_eq!(
            stored_connections,
            [
                (
                    ids[0],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "example.com".into(),
                        port: None,
                        username: None,
                        ..Default::default()
                    }),
                ),
                (
                    ids[1],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "anotherexample.com".into(),
                        port: Some(123),
                        username: Some("user2".into()),
                        ..Default::default()
                    }),
                ),
                (
                    ids[2],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "yetanother.com".into(),
                        port: Some(345),
                        username: None,
                        ..Default::default()
                    }),
                ),
            ]
            .into_iter()
            .collect::<HashMap<_, _>>(),
        );
    }
3834
3835 #[gpui::test]
3836 async fn test_simple_split() {
3837 zlog::init_test();
3838
3839 let db = WorkspaceDb::open_test_db("simple_split").await;
3840
3841 // -----------------
3842 // | 1,2 | 5,6 |
3843 // | - - - | |
3844 // | 3,4 | |
3845 // -----------------
3846 let center_pane = group(
3847 Axis::Horizontal,
3848 vec![
3849 group(
3850 Axis::Vertical,
3851 vec![
3852 SerializedPaneGroup::Pane(SerializedPane::new(
3853 vec![
3854 SerializedItem::new("Terminal", 1, false, false),
3855 SerializedItem::new("Terminal", 2, true, false),
3856 ],
3857 false,
3858 0,
3859 )),
3860 SerializedPaneGroup::Pane(SerializedPane::new(
3861 vec![
3862 SerializedItem::new("Terminal", 4, false, false),
3863 SerializedItem::new("Terminal", 3, true, false),
3864 ],
3865 true,
3866 0,
3867 )),
3868 ],
3869 ),
3870 SerializedPaneGroup::Pane(SerializedPane::new(
3871 vec![
3872 SerializedItem::new("Terminal", 5, true, false),
3873 SerializedItem::new("Terminal", 6, false, false),
3874 ],
3875 false,
3876 0,
3877 )),
3878 ],
3879 );
3880
3881 let workspace = default_workspace(&["/tmp"], ¢er_pane);
3882
3883 db.save_workspace(workspace.clone()).await;
3884
3885 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
3886
3887 assert_eq!(workspace.center_group, new_workspace.center_group);
3888 }
3889
3890 #[gpui::test]
3891 async fn test_cleanup_panes() {
3892 zlog::init_test();
3893
3894 let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;
3895
3896 let center_pane = group(
3897 Axis::Horizontal,
3898 vec![
3899 group(
3900 Axis::Vertical,
3901 vec![
3902 SerializedPaneGroup::Pane(SerializedPane::new(
3903 vec![
3904 SerializedItem::new("Terminal", 1, false, false),
3905 SerializedItem::new("Terminal", 2, true, false),
3906 ],
3907 false,
3908 0,
3909 )),
3910 SerializedPaneGroup::Pane(SerializedPane::new(
3911 vec![
3912 SerializedItem::new("Terminal", 4, false, false),
3913 SerializedItem::new("Terminal", 3, true, false),
3914 ],
3915 true,
3916 0,
3917 )),
3918 ],
3919 ),
3920 SerializedPaneGroup::Pane(SerializedPane::new(
3921 vec![
3922 SerializedItem::new("Terminal", 5, false, false),
3923 SerializedItem::new("Terminal", 6, true, false),
3924 ],
3925 false,
3926 0,
3927 )),
3928 ],
3929 );
3930
3931 let id = &["/tmp"];
3932
3933 let mut workspace = default_workspace(id, ¢er_pane);
3934
3935 db.save_workspace(workspace.clone()).await;
3936
3937 workspace.center_group = group(
3938 Axis::Vertical,
3939 vec![
3940 SerializedPaneGroup::Pane(SerializedPane::new(
3941 vec![
3942 SerializedItem::new("Terminal", 1, false, false),
3943 SerializedItem::new("Terminal", 2, true, false),
3944 ],
3945 false,
3946 0,
3947 )),
3948 SerializedPaneGroup::Pane(SerializedPane::new(
3949 vec![
3950 SerializedItem::new("Terminal", 4, true, false),
3951 SerializedItem::new("Terminal", 3, false, false),
3952 ],
3953 true,
3954 0,
3955 )),
3956 ],
3957 );
3958
3959 db.save_workspace(workspace.clone()).await;
3960
3961 let new_workspace = db.workspace_for_roots(id).unwrap();
3962
3963 assert_eq!(workspace.center_group, new_workspace.center_group);
3964 }
3965
    // Window bounds for an *empty* workspace (no root paths) must persist and
    // be retrievable by workspace id — path-based lookup intentionally fails
    // for empty workspaces.
    #[gpui::test]
    async fn test_empty_workspace_window_bounds() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
        let id = db.next_id().await.unwrap();

        // Create a workspace with empty paths (empty workspace)
        let empty_paths: &[&str] = &[];
        let display_uuid = Uuid::new_v4();
        // These bounds are saved via `set_window_open_status` below, not via
        // `save_workspace` (the struct below deliberately has them as None).
        let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
            origin: point(px(100.0), px(200.0)),
            size: size(px(800.0), px(600.0)),
        }));

        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(empty_paths),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: None,
            display: None,
            docks: Default::default(),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Save the workspace (this creates the record with empty paths)
        db.save_workspace(workspace.clone()).await;

        // Save window bounds separately (as the actual code does via set_window_open_status)
        db.set_window_open_status(id, window_bounds, display_uuid)
            .await
            .unwrap();

        // Empty workspaces cannot be retrieved by paths (they'd all match).
        // They must be retrieved by workspace_id.
        assert!(db.workspace_for_roots(empty_paths).is_none());

        // Retrieve using workspace_for_id instead
        let retrieved = db.workspace_for_id(id).unwrap();

        // Verify window bounds were persisted
        assert_eq!(retrieved.id, id);
        assert!(retrieved.window_bounds.is_some());
        assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
        assert!(retrieved.display.is_some());
        assert_eq!(retrieved.display.unwrap(), display_uuid);
    }
4019
    // When several workspaces share a window (MultiWorkspace windows), the
    // session query must preserve each workspace's window_id so the caller
    // can regroup them into windows on restore.
    #[gpui::test]
    async fn test_last_session_workspace_locations_groups_by_window_id(
        cx: &mut gpui::TestAppContext,
    ) {
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
        let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();

        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;
        fs.insert_tree(dir5.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
                .await;

        // Simulate two MultiWorkspace windows each containing two workspaces,
        // plus one single-workspace window:
        //   Window 10: workspace 1, workspace 2
        //   Window 20: workspace 3, workspace 4
        //   Window 30: workspace 5 (only one)
        //
        // On session restore, the caller should be able to group these by
        // window_id to reconstruct the MultiWorkspace windows.
        let workspaces_data: Vec<(i64, &Path, u64)> = vec![
            (1, dir1.path(), 10),
            (2, dir2.path(), 10),
            (3, dir3.path(), 20),
            (4, dir4.path(), 20),
            (5, dir5.path(), 30),
        ];

        for (id, dir, window_id) in &workspaces_data {
            db.save_workspace(SerializedWorkspace {
                id: WorkspaceId(*id),
                paths: PathList::new(&[*dir]),
                location: SerializedWorkspaceLocation::Local,
                center_group: Default::default(),
                window_bounds: Default::default(),
                display: Default::default(),
                docks: Default::default(),
                centered_layout: false,
                session_id: Some("test-session".to_owned()),
                bookmarks: Default::default(),
                breakpoints: Default::default(),
                window_id: Some(*window_id),
                user_toolchains: Default::default(),
            })
            .await;
        }

        // No window stack: ordering is unspecified here, only grouping matters.
        let locations = db
            .last_session_workspace_locations("test-session", None, fs.as_ref())
            .await
            .unwrap();

        // All 5 workspaces should be returned with their window_ids.
        assert_eq!(locations.len(), 5);

        // Every entry should have a window_id so the caller can group them.
        for session_workspace in &locations {
            assert!(
                session_workspace.window_id.is_some(),
                "workspace {:?} missing window_id",
                session_workspace.workspace_id
            );
        }

        // Group by window_id, simulating what the restoration code should do.
        let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
        for session_workspace in &locations {
            if let Some(window_id) = session_workspace.window_id {
                by_window
                    .entry(window_id)
                    .or_default()
                    .push(session_workspace.workspace_id);
            }
        }

        // Should produce 3 windows, not 5.
        assert_eq!(
            by_window.len(),
            3,
            "Expected 3 window groups, got {}: {:?}",
            by_window.len(),
            by_window
        );

        // Window 10 should contain workspaces 1 and 2.
        let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
        assert_eq!(window_10.len(), 2);
        assert!(window_10.contains(&WorkspaceId(1)));
        assert!(window_10.contains(&WorkspaceId(2)));

        // Window 20 should contain workspaces 3 and 4.
        let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
        assert_eq!(window_20.len(), 2);
        assert!(window_20.contains(&WorkspaceId(3)));
        assert!(window_20.contains(&WorkspaceId(4)));

        // Window 30 should contain only workspace 5.
        let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
        assert_eq!(window_30.len(), 1);
        assert!(window_30.contains(&WorkspaceId(5)));
    }
4130
4131 #[gpui::test]
4132 async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
4133 use crate::persistence::model::MultiWorkspaceState;
4134
4135 // Write multi-workspace state for two windows via the scoped KVP.
4136 let window_10 = WindowId::from(10u64);
4137 let window_20 = WindowId::from(20u64);
4138
4139 let kvp = cx.update(|cx| KeyValueStore::global(cx));
4140
4141 write_multi_workspace_state(
4142 &kvp,
4143 window_10,
4144 MultiWorkspaceState {
4145 active_workspace_id: Some(WorkspaceId(2)),
4146 project_groups: vec![],
4147 sidebar_open: true,
4148 sidebar_state: None,
4149 },
4150 )
4151 .await;
4152
4153 write_multi_workspace_state(
4154 &kvp,
4155 window_20,
4156 MultiWorkspaceState {
4157 active_workspace_id: Some(WorkspaceId(3)),
4158 project_groups: vec![],
4159 sidebar_open: false,
4160 sidebar_state: None,
4161 },
4162 )
4163 .await;
4164
4165 // Build session workspaces: two in window 10, one in window 20, one with no window.
4166 let session_workspaces = vec![
4167 SessionWorkspace {
4168 workspace_id: WorkspaceId(1),
4169 location: SerializedWorkspaceLocation::Local,
4170 paths: PathList::new(&["/a"]),
4171 window_id: Some(window_10),
4172 },
4173 SessionWorkspace {
4174 workspace_id: WorkspaceId(2),
4175 location: SerializedWorkspaceLocation::Local,
4176 paths: PathList::new(&["/b"]),
4177 window_id: Some(window_10),
4178 },
4179 SessionWorkspace {
4180 workspace_id: WorkspaceId(3),
4181 location: SerializedWorkspaceLocation::Local,
4182 paths: PathList::new(&["/c"]),
4183 window_id: Some(window_20),
4184 },
4185 SessionWorkspace {
4186 workspace_id: WorkspaceId(4),
4187 location: SerializedWorkspaceLocation::Local,
4188 paths: PathList::new(&["/d"]),
4189 window_id: None,
4190 },
4191 ];
4192
4193 let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));
4194
4195 // Should produce 3 results: window 10, window 20, and the orphan.
4196 assert_eq!(results.len(), 3);
4197
4198 // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
4199 let group_10 = &results[0];
4200 assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
4201 assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
4202 assert_eq!(group_10.state.sidebar_open, true);
4203
4204 // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
4205 let group_20 = &results[1];
4206 assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
4207 assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
4208 assert_eq!(group_20.state.sidebar_open, false);
4209
4210 // Orphan: no active_workspace_id, falls back to first workspace (id 4).
4211 let group_none = &results[2];
4212 assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
4213 assert_eq!(group_none.state.active_workspace_id, None);
4214 assert_eq!(group_none.state.sidebar_open, false);
4215 }
4216
    // Awaiting the task returned by `flush_serialization` alone (without
    // pumping the executor) must be enough for the workspace to be persisted
    // — this is what the quit path relies on.
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4256
    // Creating a workspace through the MultiWorkspace API must asynchronously
    // assign it a database_id, record it as the active workspace in the
    // per-window state, and fully serialize it (not just the placeholder row
    // that `next_id` inserts).
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4308
4309 #[gpui::test]
4310 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4311 crate::tests::init_test(cx);
4312
4313 let fs = fs::FakeFs::new(cx.executor());
4314 let dir = unique_test_dir(&fs, "remove").await;
4315 let project1 = Project::test(fs.clone(), [], cx).await;
4316 let project2 = Project::test(fs.clone(), [], cx).await;
4317
4318 let (multi_workspace, cx) =
4319 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4320
4321 multi_workspace.update(cx, |mw, cx| {
4322 mw.open_sidebar(cx);
4323 });
4324
4325 multi_workspace.update_in(cx, |mw, _, cx| {
4326 mw.set_random_database_id(cx);
4327 });
4328
4329 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4330
4331 // Get a real DB id for workspace2 so the row actually exists.
4332 let workspace2_db_id = db.next_id().await.unwrap();
4333
4334 multi_workspace.update_in(cx, |mw, window, cx| {
4335 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4336 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4337 ws.set_database_id(workspace2_db_id)
4338 });
4339 mw.add(workspace.clone(), window, cx);
4340 });
4341
4342 // Save a full workspace row to the DB directly.
4343 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4344 db.save_workspace(SerializedWorkspace {
4345 id: workspace2_db_id,
4346 paths: PathList::new(&[&dir]),
4347 location: SerializedWorkspaceLocation::Local,
4348 center_group: Default::default(),
4349 window_bounds: Default::default(),
4350 display: Default::default(),
4351 docks: Default::default(),
4352 centered_layout: false,
4353 session_id: Some(session_id.clone()),
4354 bookmarks: Default::default(),
4355 breakpoints: Default::default(),
4356 window_id: Some(99),
4357 user_toolchains: Default::default(),
4358 })
4359 .await;
4360
4361 assert!(
4362 db.workspace_for_id(workspace2_db_id).is_some(),
4363 "Workspace2 should exist in DB before removal"
4364 );
4365
4366 // Remove workspace at index 1 (the second workspace).
4367 multi_workspace.update_in(cx, |mw, window, cx| {
4368 let ws = mw.workspaces().nth(1).unwrap().clone();
4369 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4370 .detach_and_log_err(cx);
4371 });
4372
4373 cx.run_until_parked();
4374
4375 // The row should still exist so it continues to appear in recent
4376 // projects, but the session binding should be cleared so it is not
4377 // restored as part of any future session.
4378 assert!(
4379 db.workspace_for_id(workspace2_db_id).is_some(),
4380 "Removed workspace's DB row should be preserved for recent projects"
4381 );
4382
4383 let session_workspaces = db
4384 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4385 .await
4386 .unwrap();
4387 let restored_ids: Vec<WorkspaceId> = session_workspaces
4388 .iter()
4389 .map(|sw| sw.workspace_id)
4390 .collect();
4391 assert!(
4392 !restored_ids.contains(&workspace2_db_id),
4393 "Removed workspace should not appear in session restoration"
4394 );
4395 }
4396
    /// Verifies that a removed workspace is not resurrected as a "zombie"
    /// when its session is restored: after removal, only the surviving
    /// workspace should appear in the session restoration list.
    #[gpui::test]
    async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;

        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let db = cx.update(|cx| WorkspaceDb::global(cx));

        // Get real DB ids so the rows actually exist.
        let ws1_id = db.next_id().await.unwrap();
        let ws2_id = db.next_id().await.unwrap();

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // The initial workspace (already in the window) gets ws1_id...
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.workspace().update(cx, |ws, _cx| {
                ws.set_database_id(ws1_id);
            });
        });

        // ...and a second workspace with ws2_id is added alongside it.
        multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
                ws.set_database_id(ws2_id)
            });
            mw.add(workspace.clone(), window, cx);
        });

        // NOTE(review): fixed (non-uuid) session id — assumes each test runs
        // against an isolated DB; confirm if tests ever share one.
        let session_id = "test-zombie-session";
        let window_id_val: u64 = 42;

        // Persist both workspaces under the same session and window, so both
        // start out as candidates for session restoration.
        db.save_workspace(SerializedWorkspace {
            id: ws1_id,
            paths: PathList::new(&[dir1.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        db.save_workspace(SerializedWorkspace {
            id: ws2_id,
            paths: PathList::new(&[dir2.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            bookmarks: Default::default(),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        // Remove workspace2 (index 1).
        multi_workspace.update_in(cx, |mw, window, cx| {
            let ws = mw.workspaces().nth(1).unwrap().clone();
            mw.remove([ws], |_, _, _| unreachable!(), window, cx)
                .detach_and_log_err(cx);
        });

        cx.run_until_parked();

        // The removed workspace should NOT appear in session restoration.
        let locations = db
            .last_session_workspace_locations(session_id, None, fs.as_ref())
            .await
            .unwrap();

        let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
        assert!(
            !restored_ids.contains(&ws2_id),
            "Removed workspace should not appear in session restoration list. Found: {:?}",
            restored_ids
        );
        assert!(
            restored_ids.contains(&ws1_id),
            "Remaining workspace should still appear in session restoration list"
        );
    }
4500
4501 #[gpui::test]
4502 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4503 crate::tests::init_test(cx);
4504
4505 let fs = fs::FakeFs::new(cx.executor());
4506 let dir = unique_test_dir(&fs, "pending-removal").await;
4507 let project1 = Project::test(fs.clone(), [], cx).await;
4508 let project2 = Project::test(fs.clone(), [], cx).await;
4509
4510 let db = cx.update(|cx| WorkspaceDb::global(cx));
4511
4512 // Get a real DB id for workspace2 so the row actually exists.
4513 let workspace2_db_id = db.next_id().await.unwrap();
4514
4515 let (multi_workspace, cx) =
4516 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4517
4518 multi_workspace.update(cx, |mw, cx| {
4519 mw.open_sidebar(cx);
4520 });
4521
4522 multi_workspace.update_in(cx, |mw, _, cx| {
4523 mw.set_random_database_id(cx);
4524 });
4525
4526 multi_workspace.update_in(cx, |mw, window, cx| {
4527 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4528 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4529 ws.set_database_id(workspace2_db_id)
4530 });
4531 mw.add(workspace.clone(), window, cx);
4532 });
4533
4534 // Save a full workspace row to the DB directly and let it settle.
4535 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4536 db.save_workspace(SerializedWorkspace {
4537 id: workspace2_db_id,
4538 paths: PathList::new(&[&dir]),
4539 location: SerializedWorkspaceLocation::Local,
4540 center_group: Default::default(),
4541 window_bounds: Default::default(),
4542 display: Default::default(),
4543 docks: Default::default(),
4544 centered_layout: false,
4545 session_id: Some(session_id.clone()),
4546 bookmarks: Default::default(),
4547 breakpoints: Default::default(),
4548 window_id: Some(88),
4549 user_toolchains: Default::default(),
4550 })
4551 .await;
4552 cx.run_until_parked();
4553
4554 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4555 multi_workspace.update_in(cx, |mw, window, cx| {
4556 let ws = mw.workspaces().nth(1).unwrap().clone();
4557 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4558 .detach_and_log_err(cx);
4559 });
4560
4561 // Simulate the quit handler pattern: collect flush tasks + pending
4562 // removal tasks and await them all.
4563 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4564 let mut tasks: Vec<Task<()>> = mw
4565 .workspaces()
4566 .map(|workspace| {
4567 workspace.update(cx, |workspace, cx| {
4568 workspace.flush_serialization(window, cx)
4569 })
4570 })
4571 .collect();
4572 let mut removal_tasks = mw.take_pending_removal_tasks();
4573 // Note: removal_tasks may be empty if the background task already
4574 // completed (take_pending_removal_tasks filters out ready tasks).
4575 tasks.append(&mut removal_tasks);
4576 tasks.push(mw.flush_serialization());
4577 tasks
4578 });
4579 futures::future::join_all(all_tasks).await;
4580
4581 // The row should still exist (for recent projects), but the session
4582 // binding should have been cleared by the pending removal task.
4583 assert!(
4584 db.workspace_for_id(workspace2_db_id).is_some(),
4585 "Workspace row should be preserved for recent projects"
4586 );
4587
4588 let session_workspaces = db
4589 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4590 .await
4591 .unwrap();
4592 let restored_ids: Vec<WorkspaceId> = session_workspaces
4593 .iter()
4594 .map(|sw| sw.workspace_id)
4595 .collect();
4596 assert!(
4597 !restored_ids.contains(&workspace2_db_id),
4598 "Pending removal task should have cleared the session binding"
4599 );
4600 }
4601
    /// When a workspace is created via create_workspace, its database id is
    /// assigned asynchronously after construction. The window-bounds observer
    /// must nevertheless write bounds under that real id, not a stale one.
    #[gpui::test]
    async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        // Await the creation task directly; the id assignment happens inside.
        let task =
            multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
        task.await;

        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        // NOTE(review): message mentions run_until_parked, but this test
        // awaits the creation task instead — consider rewording the message.
        assert!(
            new_workspace_db_id.is_some(),
            "After run_until_parked, the workspace should have a database_id"
        );

        let workspace_id = new_workspace_db_id.unwrap();

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        assert!(
            db.workspace_for_id(workspace_id).is_some(),
            "The workspace row should exist in the DB"
        );

        // Trigger the bounds observer with a window resize.
        cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));

        // Advance the clock past the 100ms debounce timer so the bounds
        // observer task fires
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let serialized = db
            .workspace_for_id(workspace_id)
            .expect("workspace row should still exist");
        assert!(
            serialized.window_bounds.is_some(),
            "The bounds observer should write bounds for the workspace's real DB ID, \
            even when the workspace was created via create_workspace (where the ID \
            is assigned asynchronously after construction)."
        );
    }
4653
4654 #[gpui::test]
4655 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4656 crate::tests::init_test(cx);
4657
4658 let fs = fs::FakeFs::new(cx.executor());
4659 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4660 fs.insert_tree(dir.path(), json!({})).await;
4661
4662 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4663
4664 let (multi_workspace, cx) =
4665 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4666
4667 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4668 let workspace_id = db.next_id().await.unwrap();
4669 multi_workspace.update_in(cx, |mw, _, cx| {
4670 mw.workspace().update(cx, |ws, _cx| {
4671 ws.set_database_id(workspace_id);
4672 });
4673 });
4674
4675 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4676 mw.workspace()
4677 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4678 });
4679 task.await;
4680
4681 let after = db
4682 .workspace_for_id(workspace_id)
4683 .expect("workspace row should exist after flush_serialization");
4684 assert!(
4685 !after.paths.is_empty(),
4686 "flush_serialization should have written paths via save_workspace"
4687 );
4688 assert!(
4689 after.window_bounds.is_some(),
4690 "flush_serialization should ensure window bounds are persisted to the DB \
4691 before the process exits."
4692 );
4693 }
4694
    /// Exercises resolve_worktree_workspaces: linked git worktrees resolve to
    /// their main repo (deduping with an existing entry for that repo, keeping
    /// the most recent timestamp), mixed-path workspaces have only the
    /// worktree root rewritten, and non-git projects pass through untouched.
    ///
    /// Each entry is a (WorkspaceId, location, PathList, timestamp) tuple;
    /// the `.2`/`.3` accesses below index into that tuple.
    #[gpui::test]
    async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());

        // Main repo with a linked worktree entry
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // A plain non-git project
        fs.insert_tree(
            "/plain-project",
            json!({
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Another normal git repo (used in mixed-path entry)
        fs.insert_tree(
            "/other-repo",
            json!({
                ".git": {},
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // Timestamps in increasing order of recency: t0 oldest, t3 newest.
        let t0 = Utc::now() - chrono::Duration::hours(4);
        let t1 = Utc::now() - chrono::Duration::hours(3);
        let t2 = Utc::now() - chrono::Duration::hours(2);
        let t3 = Utc::now() - chrono::Duration::hours(1);

        let workspaces = vec![
            // 1: Main checkout of /repo (opened earlier)
            (
                WorkspaceId(1),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/repo"]),
                t0,
            ),
            // 2: Linked worktree of /repo (opened more recently)
            // Should dedup with #1; more recent timestamp wins.
            (
                WorkspaceId(2),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/worktree"]),
                t1,
            ),
            // 3: Mixed-path workspace: one root is a linked worktree,
            // the other is a normal repo. The worktree path should be
            // resolved; the normal path kept as-is.
            (
                WorkspaceId(3),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/other-repo", "/worktree"]),
                t2,
            ),
            // 4: Non-git project — passed through unchanged.
            (
                WorkspaceId(4),
                SerializedWorkspaceLocation::Local,
                PathList::new(&["/plain-project"]),
                t3,
            ),
        ];

        let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;

        // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
        assert_eq!(result.len(), 3);

        // First entry: /repo — deduplicated from #1 and #2.
        // Keeps the position of #1 (first seen), but with #2's later timestamp.
        assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
        assert_eq!(result[0].3, t1);

        // Second entry: mixed-path workspace with worktree resolved.
        // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
        assert_eq!(
            result[1].2.paths(),
            &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
        );
        assert_eq!(result[1].0, WorkspaceId(3));

        // Third entry: non-git project, unchanged.
        assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
        assert_eq!(result[2].0, WorkspaceId(4));
    }
4806
4807 #[gpui::test]
4808 async fn test_resolve_worktree_workspaces_bare_repo(cx: &mut gpui::TestAppContext) {
4809 let fs = fs::FakeFs::new(cx.executor());
4810
4811 // Bare repo at /foo/.bare (commondir doesn't end with .git)
4812 fs.insert_tree(
4813 "/foo/.bare",
4814 json!({
4815 "worktrees": {
4816 "my-feature": {
4817 "commondir": "../../",
4818 "HEAD": "ref: refs/heads/my-feature"
4819 }
4820 }
4821 }),
4822 )
4823 .await;
4824
4825 // Linked worktree whose commondir resolves to a bare repo (/foo/.bare)
4826 fs.insert_tree(
4827 "/foo/my-feature",
4828 json!({
4829 ".git": "gitdir: /foo/.bare/worktrees/my-feature",
4830 "src": { "main.rs": "" }
4831 }),
4832 )
4833 .await;
4834
4835 let t0 = Utc::now();
4836
4837 let workspaces = vec![(
4838 WorkspaceId(1),
4839 SerializedWorkspaceLocation::Local,
4840 PathList::new(&["/foo/my-feature"]),
4841 t0,
4842 )];
4843
4844 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4845
4846 // The worktree path must be preserved unchanged — /foo/.bare is a bare repo
4847 // and cannot serve as a working-tree root, so resolution must return None.
4848 assert_eq!(result.len(), 1);
4849 assert_eq!(result[0].2.paths(), &[PathBuf::from("/foo/my-feature")]);
4850 }
4851
    /// End-to-end serialize → read-back → restore round-trip for a window
    /// holding multiple project groups, where the active workspace is a
    /// linked git worktree checkout of another group's repository.
    #[gpui::test]
    async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
        cx: &mut gpui::TestAppContext,
    ) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());

        // Main git repo at /repo
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "HEAD": "ref: refs/heads/main",
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree-feature",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // --- Phase 1: Set up the original multi-workspace window ---

        let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
        let project_1_linked_worktree =
            Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;

        // Wait for git discovery to finish.
        cx.run_until_parked();

        // Create a second, unrelated project so we have two distinct project groups.
        fs.insert_tree(
            "/other-project",
            json!({
                ".git": { "HEAD": "ref: refs/heads/main" },
                "readme.md": ""
            }),
        )
        .await;
        let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
        cx.run_until_parked();

        // Create the MultiWorkspace with project_2, then add the main repo
        // and its linked worktree. The linked worktree is added last and
        // becomes the active workspace.
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1.clone(), window, cx);
        });

        let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
        });

        // Flush every workspace, letting any spawned serialization work run
        // to completion (park both before and after awaiting the tasks).
        let tasks =
            multi_workspace.update_in(cx, |mw, window, cx| mw.flush_all_serialization(window, cx));
        cx.run_until_parked();
        for task in tasks {
            task.await;
        }
        cx.run_until_parked();

        let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
        assert!(
            active_db_id.is_some(),
            "Active workspace should have a database ID"
        );

        // --- Phase 2: Read back and verify the serialized state ---

        let session_id = multi_workspace
            .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
            .unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let session_workspaces = db
            .last_session_workspace_locations(&session_id, None, fs.as_ref())
            .await
            .expect("should load session workspaces");
        assert!(
            !session_workspaces.is_empty(),
            "Should have at least one session workspace"
        );

        let multi_workspaces =
            cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
        assert_eq!(
            multi_workspaces.len(),
            1,
            "All workspaces share one window, so there should be exactly one multi-workspace"
        );

        let serialized = &multi_workspaces[0];
        assert_eq!(
            serialized.active_workspace.workspace_id,
            active_db_id.unwrap(),
        );
        // Only two groups despite three projects: /worktree-feature is
        // expected to fold into /repo's group (see the expected keys below).
        assert_eq!(serialized.state.project_groups.len(), 2,);

        // Verify the serialized project group keys round-trip back to the
        // originals.
        let restored_keys: Vec<ProjectGroupKey> = serialized
            .state
            .project_groups
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        let expected_keys = vec![
            ProjectGroupKey::new(None, PathList::new(&["/repo"])),
            ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
        ];
        assert_eq!(
            restored_keys, expected_keys,
            "Deserialized project group keys should match the originals"
        );

        // --- Phase 3: Restore the window and verify the result ---

        let app_state =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());

        let serialized_mw = multi_workspaces.into_iter().next().unwrap();
        let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
            .update(|_, cx| {
                cx.spawn(async move |mut cx| {
                    crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
                })
            })
            .await
            .expect("restore_multiworkspace should succeed");

        cx.run_until_parked();

        // The restored window should have the same project group keys.
        let restored_keys: Vec<ProjectGroupKey> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, _cx| mw.project_group_keys())
            .unwrap();
        assert_eq!(
            restored_keys, expected_keys,
            "Restored window should have the same project group keys as the original"
        );

        // The active workspace in the restored window should have the linked
        // worktree paths.
        let active_paths: Vec<PathBuf> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, cx| {
                mw.workspace()
                    .read(cx)
                    .root_paths(cx)
                    .into_iter()
                    .map(|p: Arc<Path>| p.to_path_buf())
                    .collect()
            })
            .unwrap();
        assert_eq!(
            active_paths,
            vec![PathBuf::from("/worktree-feature")],
            "The restored active workspace should be the linked worktree project"
        );
    }
5033
    /// Removing a project group must hand focus to a sensible neighbor:
    /// prefer the group below in the sidebar, then the group above, and
    /// fall back to a fresh empty workspace when no groups remain.
    #[gpui::test]
    async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir_a = unique_test_dir(&fs, "group-a").await;
        let dir_b = unique_test_dir(&fs, "group-b").await;
        let dir_c = unique_test_dir(&fs, "group-c").await;

        let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
        let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;

        // Create a multi-workspace with project A, then add B and C.
        // project_groups stores newest first: [C, B, A].
        // Sidebar displays in the same order: C (top), B (middle), A (bottom).
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_c.clone(), window, cx)
        });
        cx.run_until_parked();

        let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));

        // Activate workspace B so removing its group exercises the fallback.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_b.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group B (the middle one). ---
        // In the sidebar [C, B, A], "below" B is A.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_b, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_a.clone()],
            "After removing the middle group, should fall back to the group below (A)"
        );

        // After removing B, keys = [A, C], sidebar = [C, A].
        // Activate workspace A (the bottom) so removing it tests the
        // "fall back upward" path.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _cx| mw.workspaces().next().unwrap().clone());
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group A (the bottom one in sidebar). ---
        // Nothing below A, so should fall back upward to C.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_a, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_c.clone()],
            "After removing the bottom group, should fall back to the group above (C)"
        );

        // --- Remove group C (the only one remaining). ---
        // Should create an empty workspace.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_c, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert!(
            active_paths.is_empty(),
            "After removing the only remaining group, should have an empty workspace"
        );
    }
5136
    /// Regression test for a crash where `find_or_create_local_workspace`
    /// returned a workspace that was about to be removed, hitting an assert
    /// in `MultiWorkspace::remove`.
    ///
    /// The scenario: two workspaces share the same root paths (e.g. due to
    /// a provisional key mismatch). When the first is removed and the
    /// fallback searches for the same paths, `workspace_for_paths` must
    /// skip the doomed workspace so the assert in `remove` is satisfied.
    #[gpui::test]
    async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        let fs = fs::FakeFs::new(cx.executor());
        let dir = unique_test_dir(&fs, "shared").await;

        // Two projects that open the same directory — this creates two
        // workspaces whose root_paths are identical.
        let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;

        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        cx.run_until_parked();

        // workspace_a is first in the workspaces vec.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
        assert_ne!(workspace_a, workspace_b);

        // Activate workspace_a so removing it triggers the fallback path.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // Remove workspace_a. The fallback searches for the same paths.
        // Without the `excluding` parameter, `workspace_for_paths` would
        // return workspace_a (first match) and the assert in `remove`
        // would fire. With the fix, workspace_a is skipped and
        // workspace_b is found instead.
        let path_list = PathList::new(std::slice::from_ref(&dir));
        let excluded = vec![workspace_a.clone()];
        // The closure passed to `remove` is the fallback that picks the
        // replacement workspace; it receives the doomed workspace's paths
        // and the exclusion list built above.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove(
                vec![workspace_a.clone()],
                move |this, window, cx| {
                    this.find_or_create_local_workspace(
                        path_list,
                        None,
                        &excluded,
                        None,
                        OpenMode::Activate,
                        window,
                        cx,
                    )
                },
                window,
                cx,
            )
            .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        // workspace_b should now be active — workspace_a was removed.
        multi_workspace.read_with(cx, |mw, _cx| {
            assert_eq!(
                mw.workspace(),
                &workspace_b,
                "fallback should have found workspace_b, not the excluded workspace_a"
            );
        });
    }
5215}