1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
25 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
26};
27
28use language::{LanguageName, Toolchain, ToolchainScope};
29use remote::{
30 DockerConnectionOptions, RemoteConnectionOptions, SshConnectionOptions, WslConnectionOptions,
31};
32use serde::{Deserialize, Serialize};
33use sqlez::{
34 bindable::{Bind, Column, StaticColumnCount},
35 statement::Statement,
36 thread_safe_connection::ThreadSafeConnection,
37};
38
39use ui::{App, SharedString, px};
40use util::{ResultExt, maybe, rel_path::RelPath};
41use uuid::Uuid;
42
43use crate::{
44 WorkspaceId,
45 path_list::{PathList, SerializedPathList},
46 persistence::model::RemoteConnectionKind,
47};
48
49use model::{
50 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
51 SerializedPaneGroup, SerializedWorkspace,
52};
53
54use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
55
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept a bit below the 32766 limit to leave headroom for additional bound values.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
60
61fn parse_timestamp(text: &str) -> DateTime<Utc> {
62 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
63 .map(|naive| naive.and_utc())
64 .unwrap_or_else(|_| Utc::now())
65}
66
/// Newtype over [`gpui::Axis`] so it can be persisted in SQLite as the text
/// "Horizontal" / "Vertical" (see the `Bind`/`Column` impls below).
#[derive(Copy, Clone, Debug, PartialEq)]
pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
// Relies on the trait's default column count (the axis occupies one column).
impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
70impl sqlez::bindable::Bind for SerializedAxis {
71 fn bind(
72 &self,
73 statement: &sqlez::statement::Statement,
74 start_index: i32,
75 ) -> anyhow::Result<i32> {
76 match self.0 {
77 gpui::Axis::Horizontal => "Horizontal",
78 gpui::Axis::Vertical => "Vertical",
79 }
80 .bind(statement, start_index)
81 }
82}
83
84impl sqlez::bindable::Column for SerializedAxis {
85 fn column(
86 statement: &mut sqlez::statement::Statement,
87 start_index: i32,
88 ) -> anyhow::Result<(Self, i32)> {
89 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
90 Ok((
91 match axis_text.as_str() {
92 "Horizontal" => Self(Axis::Horizontal),
93 "Vertical" => Self(Axis::Vertical),
94 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
95 },
96 next_index,
97 ))
98 })
99 }
100}
101
/// Newtype over [`WindowBounds`] for SQLite persistence; stored as a window-state
/// tag plus four pixel coordinates (see the `Bind`/`Column` impls below).
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);
104
impl StaticColumnCount for SerializedWindowBounds {
    fn column_count() -> usize {
        // One column for the window-state tag plus four for x/y/width/height.
        5
    }
}
110
111impl Bind for SerializedWindowBounds {
112 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
113 match self.0 {
114 WindowBounds::Windowed(bounds) => {
115 let next_index = statement.bind(&"Windowed", start_index)?;
116 statement.bind(
117 &(
118 SerializedPixels(bounds.origin.x),
119 SerializedPixels(bounds.origin.y),
120 SerializedPixels(bounds.size.width),
121 SerializedPixels(bounds.size.height),
122 ),
123 next_index,
124 )
125 }
126 WindowBounds::Maximized(bounds) => {
127 let next_index = statement.bind(&"Maximized", start_index)?;
128 statement.bind(
129 &(
130 SerializedPixels(bounds.origin.x),
131 SerializedPixels(bounds.origin.y),
132 SerializedPixels(bounds.size.width),
133 SerializedPixels(bounds.size.height),
134 ),
135 next_index,
136 )
137 }
138 WindowBounds::Fullscreen(bounds) => {
139 let next_index = statement.bind(&"FullScreen", start_index)?;
140 statement.bind(
141 &(
142 SerializedPixels(bounds.origin.x),
143 SerializedPixels(bounds.origin.y),
144 SerializedPixels(bounds.size.width),
145 SerializedPixels(bounds.size.height),
146 ),
147 next_index,
148 )
149 }
150 }
151 }
152}
153
154impl Column for SerializedWindowBounds {
155 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
156 let (window_state, next_index) = String::column(statement, start_index)?;
157 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
158 Column::column(statement, next_index)?;
159 let bounds = Bounds {
160 origin: point(px(x as f32), px(y as f32)),
161 size: size(px(width as f32), px(height as f32)),
162 };
163
164 let status = match window_state.as_str() {
165 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
166 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
167 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
168 _ => bail!("Window State did not have a valid string"),
169 };
170
171 Ok((status, next_index + 4))
172 }
173}
174
/// KVP key under which the default window bounds (paired with a display UUID) are stored.
const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
176
177pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
178 let json_str = kvp
179 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
180 .log_err()
181 .flatten()?;
182
183 let (display_uuid, persisted) =
184 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
185 Some((display_uuid, persisted.into()))
186}
187
188pub async fn write_default_window_bounds(
189 kvp: &KeyValueStore,
190 bounds: WindowBounds,
191 display_uuid: Uuid,
192) -> anyhow::Result<()> {
193 let persisted = WindowBoundsJson::from(bounds);
194 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
195 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
196 .await?;
197 Ok(())
198}
199
/// JSON-serializable mirror of [`WindowBounds`], used for KVP persistence.
/// Coordinates are stored as integer pixels. All three variants carry the same
/// bounds payload, mirroring the shape of `WindowBounds` itself.
///
/// NOTE(review): the variant names and field names are part of the persisted
/// format — do not rename without a migration.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
221
222impl From<WindowBounds> for WindowBoundsJson {
223 fn from(b: WindowBounds) -> Self {
224 match b {
225 WindowBounds::Windowed(bounds) => {
226 let origin = bounds.origin;
227 let size = bounds.size;
228 WindowBoundsJson::Windowed {
229 x: f32::from(origin.x).round() as i32,
230 y: f32::from(origin.y).round() as i32,
231 width: f32::from(size.width).round() as i32,
232 height: f32::from(size.height).round() as i32,
233 }
234 }
235 WindowBounds::Maximized(bounds) => {
236 let origin = bounds.origin;
237 let size = bounds.size;
238 WindowBoundsJson::Maximized {
239 x: f32::from(origin.x).round() as i32,
240 y: f32::from(origin.y).round() as i32,
241 width: f32::from(size.width).round() as i32,
242 height: f32::from(size.height).round() as i32,
243 }
244 }
245 WindowBounds::Fullscreen(bounds) => {
246 let origin = bounds.origin;
247 let size = bounds.size;
248 WindowBoundsJson::Fullscreen {
249 x: f32::from(origin.x).round() as i32,
250 y: f32::from(origin.y).round() as i32,
251 width: f32::from(size.width).round() as i32,
252 height: f32::from(size.height).round() as i32,
253 }
254 }
255 }
256 }
257}
258
259impl From<WindowBoundsJson> for WindowBounds {
260 fn from(n: WindowBoundsJson) -> Self {
261 match n {
262 WindowBoundsJson::Windowed {
263 x,
264 y,
265 width,
266 height,
267 } => WindowBounds::Windowed(Bounds {
268 origin: point(px(x as f32), px(y as f32)),
269 size: size(px(width as f32), px(height as f32)),
270 }),
271 WindowBoundsJson::Maximized {
272 x,
273 y,
274 width,
275 height,
276 } => WindowBounds::Maximized(Bounds {
277 origin: point(px(x as f32), px(y as f32)),
278 size: size(px(width as f32), px(height as f32)),
279 }),
280 WindowBoundsJson::Fullscreen {
281 x,
282 y,
283 width,
284 height,
285 } => WindowBounds::Fullscreen(Bounds {
286 origin: point(px(x as f32), px(y as f32)),
287 size: size(px(width as f32), px(height as f32)),
288 }),
289 }
290 }
291}
292
293fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
294 let kvp = KeyValueStore::global(cx);
295 kvp.scoped("multi_workspace_state")
296 .read(&window_id.as_u64().to_string())
297 .log_err()
298 .flatten()
299 .and_then(|json| serde_json::from_str(&json).ok())
300 .unwrap_or_default()
301}
302
303pub async fn write_multi_workspace_state(
304 kvp: &KeyValueStore,
305 window_id: WindowId,
306 state: model::MultiWorkspaceState,
307) {
308 if let Ok(json_str) = serde_json::to_string(&state) {
309 kvp.scoped("multi_workspace_state")
310 .write(window_id.as_u64().to_string(), json_str)
311 .await
312 .log_err();
313 }
314}
315
/// Groups the given session workspaces by the window they were open in and
/// pairs each group with that window's persisted multi-workspace state.
///
/// Workspaces without a window id each form their own single-entry group.
/// Within each group, the workspace recorded as active in the window's
/// persisted state is selected, falling back to the first workspace.
pub fn read_serialized_multi_workspaces(
    session_workspaces: Vec<model::SessionWorkspace>,
    cx: &App,
) -> Vec<model::SerializedMultiWorkspace> {
    let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
    let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();

    for session_workspace in session_workspaces {
        match session_workspace.window_id {
            Some(window_id) => {
                // All workspaces sharing a window id land in the same group,
                // preserving their original relative order.
                let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
                    window_groups.push(Vec::new());
                    window_groups.len() - 1
                });
                window_groups[group_index].push(session_workspace);
            }
            None => {
                window_groups.push(vec![session_workspace]);
            }
        }
    }

    window_groups
        .into_iter()
        .filter_map(|group| {
            // A missing window id means there is no persisted state to look up;
            // use the default state in that case.
            let window_id = group.first().and_then(|sw| sw.window_id);
            let state = window_id
                .map(|wid| read_multi_workspace_state(wid, cx))
                .unwrap_or_default();
            // Prefer the workspace marked active in the persisted state,
            // defaulting to index 0 when it is absent or no longer present.
            let active_workspace = state
                .active_workspace_id
                .and_then(|id| group.iter().position(|ws| ws.workspace_id == id))
                .or(Some(0))
                .and_then(|index| group.into_iter().nth(index))?;
            Some(model::SerializedMultiWorkspace {
                active_workspace,
                state,
            })
        })
        .collect()
}
357
/// KVP key under which the default dock layout is stored.
const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
359
360pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
361 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
362
363 serde_json::from_str::<DockStructure>(&json_str).ok()
364}
365
366pub async fn write_default_dock_state(
367 kvp: &KeyValueStore,
368 docks: DockStructure,
369) -> anyhow::Result<()> {
370 let json_str = serde_json::to_string(&docks)?;
371 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
372 .await?;
373 Ok(())
374}
375
/// A breakpoint as loaded from / stored to the workspace database.
#[derive(Debug)]
pub struct Breakpoint {
    // Position of the breakpoint within its file (presumably a row index —
    // confirm against `SourceBreakpoint`).
    pub position: u32,
    // Optional log message associated with the breakpoint (stored in the
    // `log_message` column).
    pub message: Option<Arc<str>>,
    // Optional condition expression.
    pub condition: Option<Arc<str>>,
    // Optional hit-condition expression.
    pub hit_condition: Option<Arc<str>>,
    // Enabled/disabled state.
    pub state: BreakpointState,
}
384
/// Wrapper for the DB representation of a breakpoint's enabled/disabled state.
/// Holds a `Cow` so it can borrow the state when binding and own it when read
/// back from a row.
struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);

/// Wraps an owned state; used when deserializing from the database.
impl From<BreakpointState> for BreakpointStateWrapper<'static> {
    fn from(kind: BreakpointState) -> Self {
        BreakpointStateWrapper(Cow::Owned(kind))
    }
}
393
impl StaticColumnCount for BreakpointStateWrapper<'_> {
    fn column_count() -> usize {
        // The state is stored as a single integer column.
        1
    }
}
399
impl Bind for BreakpointStateWrapper<'_> {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        // Persist the state as its integer discriminant
        // (0 = enabled, 1 = disabled, per the `Column` impl below).
        statement.bind(&self.0.to_int(), start_index)
    }
}
405
406impl Column for BreakpointStateWrapper<'_> {
407 fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
408 let state = statement.column_int(start_index)?;
409
410 match state {
411 0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
412 1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
413 _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
414 }
415 }
416}
417
impl sqlez::bindable::StaticColumnCount for Breakpoint {
    fn column_count() -> usize {
        // Position, log message, condition message, and hit condition message,
        // plus the columns of the enabled/disabled state wrapper.
        4 + BreakpointStateWrapper::column_count()
    }
}
424
425impl sqlez::bindable::Bind for Breakpoint {
426 fn bind(
427 &self,
428 statement: &sqlez::statement::Statement,
429 start_index: i32,
430 ) -> anyhow::Result<i32> {
431 let next_index = statement.bind(&self.position, start_index)?;
432 let next_index = statement.bind(&self.message, next_index)?;
433 let next_index = statement.bind(&self.condition, next_index)?;
434 let next_index = statement.bind(&self.hit_condition, next_index)?;
435 statement.bind(
436 &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
437 next_index,
438 )
439 }
440}
441
442impl Column for Breakpoint {
443 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
444 let position = statement
445 .column_int(start_index)
446 .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
447 as u32;
448 let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
449 let (condition, next_index) = Option::<String>::column(statement, next_index)?;
450 let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
451 let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;
452
453 Ok((
454 Breakpoint {
455 position,
456 message: message.map(Arc::from),
457 condition: condition.map(Arc::from),
458 hit_condition: hit_condition.map(Arc::from),
459 state: state.0.into_owned(),
460 },
461 next_index,
462 ))
463 }
464}
465
/// Newtype over [`gpui::Pixels`] that persists the value as an integer column.
#[derive(Clone, Debug, PartialEq)]
struct SerializedPixels(gpui::Pixels);
// Relies on the trait's default column count (one column).
impl sqlez::bindable::StaticColumnCount for SerializedPixels {}
469
470impl sqlez::bindable::Bind for SerializedPixels {
471 fn bind(
472 &self,
473 statement: &sqlez::statement::Statement,
474 start_index: i32,
475 ) -> anyhow::Result<i32> {
476 let this: i32 = u32::from(self.0) as _;
477 this.bind(statement, start_index)
478 }
479}
480
/// Handle to the workspace persistence database; see the [`Domain`] impl below
/// for the schema and its migrations.
pub struct WorkspaceDb(ThreadSafeConnection);
482
impl Domain for WorkspaceDb {
    const NAME: &str = stringify!(WorkspaceDb);

    /// Ordered schema migrations. Shipped entries must not be edited (the
    /// single sanctioned exception is handled by
    /// `should_allow_migration_change` below); new schema changes are appended.
    const MIGRATIONS: &[&str] = &[
        // Initial schema: workspaces, the pane tree (pane_groups / panes /
        // center_panes), and the items within each pane.
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Add window geometry and display persistence.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        // Track dev-server remote projects; rename workspace_location to local_paths.
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        // Replace remote_projects with dev_server_projects.
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        // Persist the user-chosen ordering of a workspace's local paths.
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        // Associate workspaces with the app session they belong to.
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        // Remember which window a workspace was open in.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        // Track how many tabs are pinned in each pane.
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        // Introduce SSH projects and link workspaces to them.
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        // SSH projects may reference multiple paths.
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        // Persist per-workspace toolchain selections.
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        // Store the toolchain's raw JSON metadata alongside it.
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        // Persist breakpoints per workspace and file.
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
        // Store local paths (and their order) as TEXT arrays with a uniqueness index.
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        // Persist each breakpoint's enabled/disabled state.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        // The breakpoint kind column is superseded by other columns.
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        // Scope toolchains to a path within the worktree.
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        // Persist breakpoint condition and hit-condition expressions.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains as STRICT with relative_worktree_path in the primary key.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Consolidate ssh_projects into ssh_connections and rebuild workspaces
        // around (ssh_connection_id, paths), dropping duplicate locations.
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into remote_connections (SSH, WSL, ...)
        // and rebuild workspaces around remote_connection_id.
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        // Persist user-selected toolchains, optionally per remote connection.
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        // ssh_connections was fully superseded by remote_connections above.
        sql!(
            DROP TABLE ssh_connections;
        ),
        // Store a user-facing name and container id for remote connections.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        // Record worktrees the user has marked as trusted.
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Re-key toolchains by worktree root path instead of worktree id.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains.
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        // Remember whether a container connection should use podman.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        // Persist the remote connection's environment overrides.
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
991
// Registers `WorkspaceDb` as the crate's static database connection
// (no parent domains — TODO confirm the empty list's meaning against `db::static_connection!`).
db::static_connection!(WorkspaceDb, []);
993
994impl WorkspaceDb {
    /// Returns the stored workspace for the given worktree_roots, resolved as a
    /// local (non-remote) workspace. Returns `None` when no workspace is stored
    /// for the passed roots — including when `worktree_roots` is empty, since
    /// empty workspaces are only restored by id during session restoration
    /// (see `workspace_for_roots_internal`).
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, None)
    }
1004
    /// Like `workspace_for_roots`, but matches only workspaces associated with
    /// the given remote connection.
    pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_project_id: RemoteConnectionId,
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
    }
1012
    /// Looks up a stored workspace by its (sorted) root paths and optional
    /// remote connection, and loads its window geometry, dock layout, center
    /// pane group, breakpoints, and user toolchains.
    pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> Option<SerializedWorkspace> {
        // paths are sorted before db interactions to ensure that the order of the paths
        // doesn't affect the workspace selection for existing workspaces
        let root_paths = PathList::new(worktree_roots);

        // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
        // They should only be restored via workspace_for_id during session restoration.
        if root_paths.is_empty() && remote_connection_id.is_none() {
            return None;
        }

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (
            workspace_id,
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
        ): (
            WorkspaceId,
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    workspace_id,
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id
                FROM workspaces
                WHERE
                    paths IS ? AND
                    remote_connection_id IS ?
                LIMIT 1
            })
            .and_then(|mut prepared_statement| {
                (prepared_statement)((
                    root_paths.serialize().paths,
                    remote_connection_id.map(|id| id.0 as i32),
                ))
            })
            .context("No workspaces found")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // Resolve the connection options only for remote workspaces; a lookup
        // failure is logged and treated as a local workspace.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            // session_id is intentionally cleared here; it is re-assigned when
            // the workspace is saved as part of a session.
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1121
    /// Returns the workspace with the given ID, loading all associated data.
    ///
    /// Unlike the root-based lookups, this also restores empty workspaces and
    /// reads the remote connection id from the workspace row itself.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        // The five window_* columns decode into one Option<SerializedWindowBounds>.
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        // A connection row that fails to resolve degrades the workspace to a
        // local one (logged) rather than dropping it.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1211
    /// Loads all breakpoints stored for a workspace, grouped by file path.
    /// Query failures are logged and produce an empty map rather than an error.
    fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
        let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
            .select_bound(sql! {
                SELECT path, breakpoint_location, log_message, condition, hit_condition, state
                FROM breakpoints
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));

        match breakpoints {
            Ok(bp) => {
                if bp.is_empty() {
                    log::debug!("Breakpoints are empty after querying database for them");
                }

                let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();

                // Group the flat row list by path; the Arc is shared between the
                // map key and each breakpoint's own `path` field.
                for (path, breakpoint) in bp {
                    let path: Arc<Path> = path.into();
                    map.entry(path.clone()).or_default().push(SourceBreakpoint {
                        row: breakpoint.position,
                        path,
                        message: breakpoint.message,
                        condition: breakpoint.condition,
                        hit_condition: breakpoint.hit_condition,
                        state: breakpoint.state,
                    });
                }

                for (path, bps) in map.iter() {
                    log::info!(
                        "Got {} breakpoints from database at path: {}",
                        bps.len(),
                        path.to_string_lossy()
                    );
                }

                map
            }
            Err(msg) => {
                log::error!("Breakpoints query failed with msg: {msg}");
                Default::default()
            }
        }
    }
1257
1258 fn user_toolchains(
1259 &self,
1260 workspace_id: WorkspaceId,
1261 remote_connection_id: Option<RemoteConnectionId>,
1262 ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
1263 type RowKind = (WorkspaceId, String, String, String, String, String, String);
1264
1265 let toolchains: Vec<RowKind> = self
1266 .select_bound(sql! {
1267 SELECT workspace_id, worktree_root_path, relative_worktree_path,
1268 language_name, name, path, raw_json
1269 FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
1270 workspace_id IN (0, ?2)
1271 )
1272 })
1273 .and_then(|mut statement| {
1274 (statement)((remote_connection_id.map(|id| id.0), workspace_id))
1275 })
1276 .unwrap_or_default();
1277 let mut ret = BTreeMap::<_, IndexSet<_>>::default();
1278
1279 for (
1280 _workspace_id,
1281 worktree_root_path,
1282 relative_worktree_path,
1283 language_name,
1284 name,
1285 path,
1286 raw_json,
1287 ) in toolchains
1288 {
1289 // INTEGER's that are primary keys (like workspace ids, remote connection ids and such) start at 1, so we're safe to
1290 let scope = if _workspace_id == WorkspaceId(0) {
1291 debug_assert_eq!(worktree_root_path, String::default());
1292 debug_assert_eq!(relative_worktree_path, String::default());
1293 ToolchainScope::Global
1294 } else {
1295 debug_assert_eq!(workspace_id, _workspace_id);
1296 debug_assert_eq!(
1297 worktree_root_path == String::default(),
1298 relative_worktree_path == String::default()
1299 );
1300
1301 let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
1302 continue;
1303 };
1304 if worktree_root_path != String::default()
1305 && relative_worktree_path != String::default()
1306 {
1307 ToolchainScope::Subproject(
1308 Arc::from(worktree_root_path.as_ref()),
1309 relative_path.into(),
1310 )
1311 } else {
1312 ToolchainScope::Project
1313 }
1314 };
1315 let Ok(as_json) = serde_json::from_str(&raw_json) else {
1316 continue;
1317 };
1318 let toolchain = Toolchain {
1319 name: SharedString::from(name),
1320 path: SharedString::from(path),
1321 language_name: LanguageName::from_proto(language_name),
1322 as_json,
1323 };
1324 ret.entry(scope).or_default().insert(toolchain);
1325 }
1326
1327 ret
1328 }
1329
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously.
    ///
    /// The entire write happens inside one savepoint: pane/breakpoint/toolchain
    /// rows are deleted and re-inserted, conflicting workspaces with the same
    /// paths are removed, and finally the workspace row itself is upserted.
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                // Resolve (or create) the remote connection row first so the
                // workspace row can reference its id.
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                // Re-insert the full breakpoint set; individual row failures are
                // logged and skipped so one bad row cannot abort the whole save.
                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        // Global-scope rows use the sentinel workspace id 0 and
                        // empty path columns (see user_toolchains()).
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                        toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                // `workspace.docks` binds the nine dock columns (?5..?13) as a
                // unit — presumably via DockStructure's Bind impl; keep the arg
                // and column counts in sync if either changes.
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1490
    /// Async wrapper around [`Self::get_or_create_remote_connection_internal`]
    /// that runs the lookup/insert on the database's write connection.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1498
1499 fn get_or_create_remote_connection_internal(
1500 this: &Connection,
1501 options: RemoteConnectionOptions,
1502 ) -> Result<RemoteConnectionId> {
1503 let kind;
1504 let user: Option<String>;
1505 let mut host = None;
1506 let mut port = None;
1507 let mut distro = None;
1508 let mut name = None;
1509 let mut container_id = None;
1510 let mut use_podman = None;
1511 let mut remote_env = None;
1512 match options {
1513 RemoteConnectionOptions::Ssh(options) => {
1514 kind = RemoteConnectionKind::Ssh;
1515 host = Some(options.host.to_string());
1516 port = options.port;
1517 user = options.username;
1518 }
1519 RemoteConnectionOptions::Wsl(options) => {
1520 kind = RemoteConnectionKind::Wsl;
1521 distro = Some(options.distro_name);
1522 user = options.user;
1523 }
1524 RemoteConnectionOptions::Docker(options) => {
1525 kind = RemoteConnectionKind::Docker;
1526 container_id = Some(options.container_id);
1527 name = Some(options.name);
1528 use_podman = Some(options.use_podman);
1529 user = Some(options.remote_user);
1530 remote_env = serde_json::to_string(&options.remote_env).ok();
1531 }
1532 #[cfg(any(test, feature = "test-support"))]
1533 RemoteConnectionOptions::Mock(options) => {
1534 kind = RemoteConnectionKind::Ssh;
1535 host = Some(format!("mock-{}", options.id));
1536 user = Some(format!("mock-user-{}", options.id));
1537 }
1538 }
1539 Self::get_or_create_remote_connection_query(
1540 this,
1541 kind,
1542 host,
1543 port,
1544 user,
1545 distro,
1546 name,
1547 container_id,
1548 use_podman,
1549 remote_env,
1550 )
1551 }
1552
    /// Finds a `remote_connections` row matching the given identity columns,
    /// inserting a new one when no match exists. `IS` comparisons are used so
    /// NULL columns match NULL arguments.
    ///
    /// Note: `use_podman` and `remote_env` are not part of the match key; they
    /// are only stored on insert.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1616
    // Allocates a fresh workspace id by inserting an otherwise-default row.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1622
1623 fn recent_workspaces(
1624 &self,
1625 ) -> Result<
1626 Vec<(
1627 WorkspaceId,
1628 PathList,
1629 Option<RemoteConnectionId>,
1630 DateTime<Utc>,
1631 )>,
1632 > {
1633 Ok(self
1634 .recent_workspaces_query()?
1635 .into_iter()
1636 .map(|(id, paths, order, remote_connection_id, timestamp)| {
1637 (
1638 id,
1639 PathList::deserialize(&SerializedPathList { paths, order }),
1640 remote_connection_id.map(RemoteConnectionId),
1641 parse_timestamp(×tamp),
1642 )
1643 })
1644 .collect())
1645 }
1646
    // Raw recency-ordered workspace rows; restricted to workspaces that carry
    // either a path list or a remote connection.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1657
1658 fn session_workspaces(
1659 &self,
1660 session_id: String,
1661 ) -> Result<
1662 Vec<(
1663 WorkspaceId,
1664 PathList,
1665 Option<u64>,
1666 Option<RemoteConnectionId>,
1667 )>,
1668 > {
1669 Ok(self
1670 .session_workspaces_query(session_id)?
1671 .into_iter()
1672 .map(
1673 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1674 (
1675 WorkspaceId(workspace_id),
1676 PathList::deserialize(&SerializedPathList { paths, order }),
1677 window_id,
1678 remote_connection_id.map(RemoteConnectionId),
1679 )
1680 },
1681 )
1682 .collect())
1683 }
1684
    // Raw rows for every workspace saved under a given session id,
    // most recent first.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }
1693
    // Loads the stored breakpoints for one file within a workspace.
    // NOTE(review): only `breakpoint_location` is selected, while other
    // breakpoint queries in this file read six columns into `Breakpoint` —
    // verify this matches Breakpoint's Column implementation.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1701
1702 query! {
1703 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1704 DELETE FROM breakpoints
1705 WHERE file_path = ?2
1706 }
1707 }
1708
    /// Loads every stored remote connection keyed by its row id. Rows that no
    /// longer form valid options (unknown kind, missing required columns) are
    /// silently skipped via `filter_map`.
    fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
        Ok(self.select(sql!(
            SELECT
                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
            FROM
                remote_connections
        ))?()?
        .into_iter()
        .filter_map(
            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                Some((
                    RemoteConnectionId(id),
                    Self::remote_connection_from_row(
                        kind,
                        host,
                        port,
                        user,
                        distro,
                        container_id,
                        name,
                        use_podman,
                        remote_env,
                    )?,
                ))
            },
        )
        .collect())
    }
1737
    /// Loads a single remote connection row by id and reconstructs its
    /// options; errors if the row is missing or no longer deserializable.
    pub(crate) fn remote_connection(
        &self,
        id: RemoteConnectionId,
    ) -> Result<RemoteConnectionOptions> {
        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
            self.select_row_bound(sql!(
                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                FROM remote_connections
                WHERE id = ?
            ))?(id.0)?
            .context("no such remote connection")?;
        Self::remote_connection_from_row(
            kind,
            host,
            port,
            user,
            distro,
            container_id,
            name,
            use_podman,
            remote_env,
        )
        .context("invalid remote_connection row")
    }
1762
1763 fn remote_connection_from_row(
1764 kind: String,
1765 host: Option<String>,
1766 port: Option<u16>,
1767 user: Option<String>,
1768 distro: Option<String>,
1769 container_id: Option<String>,
1770 name: Option<String>,
1771 use_podman: Option<bool>,
1772 remote_env: Option<String>,
1773 ) -> Option<RemoteConnectionOptions> {
1774 match RemoteConnectionKind::deserialize(&kind)? {
1775 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1776 distro_name: distro?,
1777 user: user,
1778 })),
1779 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1780 host: host?.into(),
1781 port,
1782 username: user,
1783 ..Default::default()
1784 })),
1785 RemoteConnectionKind::Docker => {
1786 let remote_env: BTreeMap<String, String> =
1787 serde_json::from_str(&remote_env?).ok()?;
1788 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1789 container_id: container_id?,
1790 name: name?,
1791 remote_user: user?,
1792 upload_binary_over_docker_exec: false,
1793 use_podman: use_podman?,
1794 remote_env,
1795 }))
1796 }
1797 }
1798 }
1799
    // Deletes a single workspace row; dependent rows (panes, breakpoints, …)
    // are keyed by workspace_id in their own tables.
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1806
1807 async fn all_paths_exist_with_a_directory(
1808 paths: &[PathBuf],
1809 fs: &dyn Fs,
1810 timestamp: Option<DateTime<Utc>>,
1811 ) -> bool {
1812 let mut any_dir = false;
1813 for path in paths {
1814 match fs.metadata(path).await.ok().flatten() {
1815 None => {
1816 return timestamp.is_some_and(|t| Utc::now() - t < chrono::Duration::days(7));
1817 }
1818 Some(meta) => {
1819 if meta.is_dir {
1820 any_dir = true;
1821 }
1822 }
1823 }
1824 }
1825 any_dir
1826 }
1827
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist.
    pub async fn recent_workspaces_on_disk(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let mut result = Vec::new();
        let mut delete_tasks = Vec::new();
        let remote_connections = self.remote_connections()?;

        for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? {
            if let Some(remote_connection_id) = remote_connection_id {
                // Remote workspaces stay valid only while their connection row
                // still deserializes; otherwise they are garbage collected.
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                } else {
                    delete_tasks.push(self.delete_workspace_by_id(id));
                }
                continue;
            }

            let has_wsl_path = if cfg!(windows) {
                paths
                    .paths()
                    .iter()
                    .any(|path| util::paths::WslPath::from_path(path).is_some())
            } else {
                false
            };

            // Delete the workspace if any of the paths are WSL paths.
            // If a local workspace points to WSL, this check will cause us to wait for the
            // WSL VM and file server to boot up. This can block for many seconds.
            // Supported scenarios use remote workspaces.
            if !has_wsl_path
                && Self::all_paths_exist_with_a_directory(paths.paths(), fs, Some(timestamp)).await
            {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            } else {
                delete_tasks.push(self.delete_workspace_by_id(id));
            }
        }

        // The deletions are independent queries; run them all before returning.
        futures::future::join_all(delete_tasks).await;
        Ok(result)
    }
1885
    /// Returns the most recently used workspace that is still valid on disk,
    /// or `None` when no valid recent workspace remains. Shares the garbage
    /// collection behavior of [`Self::recent_workspaces_on_disk`].
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        Ok(self.recent_workspaces_on_disk(fs).await?.into_iter().next())
    }
1899
1900 // Returns the locations of the workspaces that were still opened when the last
1901 // session was closed (i.e. when Zed was quit).
1902 // If `last_session_window_order` is provided, the returned locations are ordered
1903 // according to that.
1904 pub async fn last_session_workspace_locations(
1905 &self,
1906 last_session_id: &str,
1907 last_session_window_stack: Option<Vec<WindowId>>,
1908 fs: &dyn Fs,
1909 ) -> Result<Vec<SessionWorkspace>> {
1910 let mut workspaces = Vec::new();
1911
1912 for (workspace_id, paths, window_id, remote_connection_id) in
1913 self.session_workspaces(last_session_id.to_owned())?
1914 {
1915 let window_id = window_id.map(WindowId::from);
1916
1917 if let Some(remote_connection_id) = remote_connection_id {
1918 workspaces.push(SessionWorkspace {
1919 workspace_id,
1920 location: SerializedWorkspaceLocation::Remote(
1921 self.remote_connection(remote_connection_id)?,
1922 ),
1923 paths,
1924 window_id,
1925 });
1926 } else if paths.is_empty() {
1927 // Empty workspace with items (drafts, files) - include for restoration
1928 workspaces.push(SessionWorkspace {
1929 workspace_id,
1930 location: SerializedWorkspaceLocation::Local,
1931 paths,
1932 window_id,
1933 });
1934 } else {
1935 if Self::all_paths_exist_with_a_directory(paths.paths(), fs, None).await {
1936 workspaces.push(SessionWorkspace {
1937 workspace_id,
1938 location: SerializedWorkspaceLocation::Local,
1939 paths,
1940 window_id,
1941 });
1942 }
1943 }
1944 }
1945
1946 if let Some(stack) = last_session_window_stack {
1947 workspaces.sort_by_key(|workspace| {
1948 workspace
1949 .window_id
1950 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
1951 .unwrap_or(usize::MAX)
1952 });
1953 }
1954
1955 Ok(workspaces)
1956 }
1957
1958 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
1959 Ok(self
1960 .get_pane_group(workspace_id, None)?
1961 .into_iter()
1962 .next()
1963 .unwrap_or_else(|| {
1964 SerializedPaneGroup::Pane(SerializedPane {
1965 active: true,
1966 children: vec![],
1967 pinned_count: 0,
1968 })
1969 }))
1970 }
1971
    /// Recursively loads the pane tree rooted at `group_id` (`None` = the
    /// workspace's top level). Groups and leaf panes live in different tables,
    /// so a UNION yields one position-ordered child list per parent.
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        // Each row is either a group (id + axis + flexes) or a pane
        // (id + active + pinned_count); the other half of the row is NULL.
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
                FROM (SELECT
                        group_id,
                        axis,
                        NULL as pane_id,
                        NULL as active,
                        NULL as pinned_count,
                        position,
                        parent_group_id,
                        workspace_id,
                        flexes
                      FROM pane_groups
                    UNION
                      SELECT
                        NULL,
                        NULL,
                        center_panes.pane_id,
                        panes.active as active,
                        pinned_count,
                        position,
                        parent_group_id,
                        panes.workspace_id as workspace_id,
                        NULL
                      FROM center_panes
                      JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // `flexes` is a JSON-encoded Vec<f32> of the children's sizes.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2046
    /// Recursively persists a pane-group tree. `parent` is `None` for the
    /// root; each child is stored with its parent's group id and its position
    /// within that parent.
    fn save_pane_group(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() {
            log::debug!("Saving a pane group for workspace {workspace_id:?}");
        }
        match pane_group {
            SerializedPaneGroup::Group {
                axis,
                children,
                flexes,
            } => {
                let (parent_id, position) = parent.unzip();

                // Child flex sizes are stored as one JSON array string.
                let flex_string = flexes
                    .as_ref()
                    .map(|flexes| serde_json::json!(flexes).to_string());

                let group_id = conn.select_row_bound::<_, i64>(sql!(
                    INSERT INTO pane_groups(
                        workspace_id,
                        parent_group_id,
                        position,
                        axis,
                        flexes
                    )
                    VALUES (?, ?, ?, ?, ?)
                    RETURNING group_id
                ))?((
                    workspace_id,
                    parent_id,
                    position,
                    *axis,
                    flex_string,
                ))?
                .context("Couldn't retrieve group_id from inserted pane_group")?;

                for (position, group) in children.iter().enumerate() {
                    Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
                }

                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => {
                Self::save_pane(conn, workspace_id, pane, parent)?;
                Ok(())
            }
        }
    }
2099
    /// Inserts a pane row plus its `center_panes` placement entry, then saves
    /// the pane's items. Returns the newly assigned pane id.
    fn save_pane(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<PaneId> {
        let pane_id = conn.select_row_bound::<_, i64>(sql!(
            INSERT INTO panes(workspace_id, active, pinned_count)
            VALUES (?, ?, ?)
            RETURNING pane_id
        ))?((workspace_id, pane.active, pane.pinned_count))?
        .context("Could not retrieve inserted pane_id")?;

        // A NULL parent marks a pane sitting at the root of the center area.
        let (parent_id, order) = parent.unzip();
        conn.exec_bound(sql!(
            INSERT INTO center_panes(pane_id, parent_group_id, position)
            VALUES (?, ?, ?)
        ))?((pane_id, parent_id, order))?;

        Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;

        Ok(pane_id)
    }
2123
    /// Loads a pane's serialized items in their stored position order.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2131
2132 fn save_items(
2133 conn: &Connection,
2134 workspace_id: WorkspaceId,
2135 pane_id: PaneId,
2136 items: &[SerializedItem],
2137 ) -> Result<()> {
2138 let mut insert = conn.exec_bound(sql!(
2139 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2140 )).context("Preparing insertion")?;
2141 for (position, item) in items.iter().enumerate() {
2142 insert((workspace_id, pane_id, position, item))?;
2143 }
2144
2145 Ok(())
2146 }
2147
    // Touches a workspace's timestamp, moving it to the front of the recency
    // ordering used by recent_workspaces().
    query! {
        pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
            UPDATE workspaces
            SET timestamp = CURRENT_TIMESTAMP
            WHERE workspace_id = ?
        }
    }

    // Records the window's restored state, position, size, and display for a
    // workspace. `bounds` expands to the five window_* placeholders (?2..?6);
    // `display` binds ?7.
    query! {
        pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
            UPDATE workspaces
            SET window_state = ?2,
                window_x = ?3,
                window_y = ?4,
                window_width = ?5,
                window_height = ?6,
                display = ?7
            WHERE workspace_id = ?1
        }
    }

    // Persists whether the workspace uses the centered layout.
    query! {
        pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
            UPDATE workspaces
            SET centered_layout = ?2
            WHERE workspace_id = ?1
        }
    }

    // Associates a workspace with a session (or clears it via None).
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }

    // Binds a workspace to both a session id and a window id in one update.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2192
    // NOTE(review): this only reads, yet runs via self.write — presumably to
    // serialize with concurrent toolchain writes; confirm before changing.
    /// Loads every toolchain recorded for a workspace together with the
    /// worktree root and worktree-relative path it applies to. Rows whose
    /// JSON or relative path fail to parse are skipped.
    pub(crate) async fn toolchains(
        &self,
        workspace_id: WorkspaceId,
    ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
        self.write(move |this| {
            let mut select = this
                .select_bound(sql!(
                    SELECT
                        name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
                    FROM toolchains
                    WHERE workspace_id = ?
                ))
                .context("select toolchains")?;

            let toolchain: Vec<(String, String, String, String, String, String)> =
                select(workspace_id)?;

            Ok(toolchain
                .into_iter()
                .filter_map(
                    |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
                        Some((
                            Toolchain {
                                name: name.into(),
                                path: path.into(),
                                language_name: LanguageName::new(&language),
                                as_json: serde_json::Value::from_str(&json).ok()?,
                            },
                            Arc::from(worktree_root_path.as_ref()),
                            RelPath::from_proto(&relative_worktree_path).log_err()?,
                        ))
                    },
                )
                .collect())
        })
        .await
    }
2230
    /// Upserts the toolchain chosen for the given
    /// (workspace, worktree root, relative path, language) key.
    ///
    /// On conflict only `name`, `path` and `raw_json` are updated (?5–?7),
    /// leaving the identifying columns untouched.
    pub async fn set_toolchain(
        &self,
        workspace_id: WorkspaceId,
        worktree_root_path: Arc<Path>,
        relative_worktree_path: Arc<RelPath>,
        toolchain: Toolchain,
    ) -> Result<()> {
        log::debug!(
            "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
            toolchain.name
        );
        self.write(move |conn| {
            let mut insert = conn
                .exec_bound(sql!(
                    INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
                    ON CONFLICT DO
                    UPDATE SET
                        name = ?5,
                        path = ?6,
                        raw_json = ?7
                ))
                .context("Preparing insertion")?;

            insert((
                workspace_id,
                // Stored lossily as a string; matches how `toolchains` reads
                // the column back.
                worktree_root_path.to_string_lossy().into_owned(),
                relative_worktree_path.as_unix_str(),
                toolchain.language_name.as_ref(),
                toolchain.name.as_ref(),
                toolchain.path.as_ref(),
                toolchain.as_json.to_string(),
            ))?;

            Ok(())
        }).await
    }
2267
    /// Replaces the persisted worktree-trust state with `trusted_worktrees`.
    ///
    /// The previous table contents are cleared first, then the flattened
    /// (path, host) pairs are inserted in batches of at most
    /// `MAX_QUERY_PLACEHOLDERS / 3` rows (each row binds three host
    /// parameters), staying under SQLite's per-statement parameter limit.
    pub(crate) async fn save_trusted_worktrees(
        &self,
        trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
    ) -> anyhow::Result<()> {
        use anyhow::Context as _;
        use db::sqlez::statement::Statement;
        use itertools::Itertools as _;

        self.clear_trusted_worktrees()
            .await
            .context("clearing previous trust state")?;

        // Flatten host -> {paths} into one (path, host) pair per row. A
        // `None` host means the path is trusted locally.
        let trusted_worktrees = trusted_worktrees
            .into_iter()
            .flat_map(|(host, abs_paths)| {
                abs_paths
                    .into_iter()
                    .map(move |abs_path| (Some(abs_path), host.clone()))
            })
            .collect::<Vec<_>>();
        // [first_worktree, last_worktree) tracks the slice of rows covered
        // by the current batch's placeholder string.
        let mut first_worktree;
        let mut last_worktree = 0_usize;
        for (count, placeholders) in std::iter::once("(?, ?, ?)")
            .cycle()
            .take(trusted_worktrees.len())
            .chunks(MAX_QUERY_PLACEHOLDERS / 3)
            .into_iter()
            .map(|chunk| {
                // Count rows while joining "(?, ?, ?)" groups into one
                // VALUES list for this batch.
                let mut count = 0;
                let placeholders = chunk
                    .inspect(|_| {
                        count += 1;
                    })
                    .join(", ");
                (count, placeholders)
            })
            .collect::<Vec<_>>()
        {
            first_worktree = last_worktree;
            last_worktree = last_worktree + count;
            let query = format!(
                r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
VALUES {placeholders};"#
            );

            let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
            self.write(move |conn| {
                let mut statement = Statement::prepare(conn, query)?;
                // Placeholder indices are 1-based; each row advances the
                // index by three binds (path, user name, host name).
                let mut next_index = 1;
                for (abs_path, host) in trusted_worktrees {
                    let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
                    next_index = statement.bind(
                        &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
                        next_index,
                    )?;
                    // NULL when the host has no user name or the row is a
                    // local trust entry.
                    next_index = statement.bind(
                        &host
                            .as_ref()
                            .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host.as_ref().map(|host| host.host_identifier.as_str()),
                        next_index,
                    )?;
                }
                statement.exec()
            })
            .await
            .context("inserting new trusted state")?;
        }
        Ok(())
    }
2341
2342 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2343 let trusted_worktrees = self.trusted_worktrees()?;
2344 Ok(trusted_worktrees
2345 .into_iter()
2346 .filter_map(|(abs_path, user_name, host_name)| {
2347 let db_host = match (user_name, host_name) {
2348 (None, Some(host_name)) => Some(RemoteHostLocation {
2349 user_name: None,
2350 host_identifier: SharedString::new(host_name),
2351 }),
2352 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2353 user_name: Some(SharedString::new(user_name)),
2354 host_identifier: SharedString::new(host_name),
2355 }),
2356 _ => None,
2357 };
2358 Some((db_host, abs_path?))
2359 })
2360 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2361 acc.entry(remote_host)
2362 .or_insert_with(HashSet::default)
2363 .insert(abs_path);
2364 acc
2365 }))
2366 }
2367
    // Returns every persisted trust row as raw
    // (absolute_path, user_name, host_name) columns; any column may be NULL.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }

    // Wipes the trust table; `save_trusted_worktrees` calls this before
    // re-inserting the full state.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2380}
2381
/// A stored workspace record: its id, location (local or remote), the list
/// of root paths, and its last-updated timestamp.
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2388
2389/// Resolves workspace entries whose paths are git linked worktree checkouts
2390/// to their main repository paths.
2391///
2392/// For each workspace entry:
2393/// - If any path is a linked worktree checkout, all worktree paths in that
2394/// entry are resolved to their main repository paths, producing a new
2395/// `PathList`.
2396/// - The resolved entry is then deduplicated against existing entries: if a
2397/// workspace with the same paths already exists, the entry with the most
2398/// recent timestamp is kept.
2399pub async fn resolve_worktree_workspaces(
2400 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2401 fs: &dyn Fs,
2402) -> Vec<WorkspaceEntry> {
2403 // First pass: resolve worktree paths to main repo paths concurrently.
2404 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2405 let paths = entry.2.paths();
2406 if paths.is_empty() {
2407 return entry;
2408 }
2409
2410 // Resolve each path concurrently
2411 let resolved_paths = futures::future::join_all(
2412 paths
2413 .iter()
2414 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2415 )
2416 .await;
2417
2418 // If no paths were resolved, this entry is not a worktree — keep as-is
2419 if resolved_paths.iter().all(|r| r.is_none()) {
2420 return entry;
2421 }
2422
2423 // Build new path list, substituting resolved paths
2424 let new_paths: Vec<PathBuf> = paths
2425 .iter()
2426 .zip(resolved_paths.iter())
2427 .map(|(original, resolved)| {
2428 resolved
2429 .as_ref()
2430 .cloned()
2431 .unwrap_or_else(|| original.clone())
2432 })
2433 .collect();
2434
2435 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2436 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2437 }))
2438 .await;
2439
2440 // Second pass: deduplicate by PathList.
2441 // When two entries resolve to the same paths, keep the one with the
2442 // more recent timestamp.
2443 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2444 let mut result: Vec<WorkspaceEntry> = Vec::new();
2445
2446 for entry in resolved {
2447 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2448 if let Some(&existing_idx) = seen.get(&key) {
2449 // Keep the entry with the more recent timestamp
2450 if entry.3 > result[existing_idx].3 {
2451 result[existing_idx] = entry;
2452 }
2453 } else {
2454 seen.insert(key, result.len());
2455 result.push(entry);
2456 }
2457 }
2458
2459 result
2460}
2461
/// Deletes all rows in `table` for `workspace_id` whose `item_id` is not in
/// `alive_items`, i.e. garbage-collects serialized state for items that are
/// no longer open.
///
/// An empty `alive_items` is valid: SQLite permits an empty `IN ()` list
/// (it evaluates to false), so `NOT IN ()` matches — and deletes — every
/// row belonging to the workspace.
pub fn delete_unloaded_items(
    alive_items: Vec<ItemId>,
    workspace_id: WorkspaceId,
    table: &'static str,
    db: &ThreadSafeConnection,
    cx: &mut App,
) -> Task<Result<()>> {
    let db = db.clone();
    cx.spawn(async move |_| {
        // One positional placeholder per surviving item id.
        let placeholders = alive_items
            .iter()
            .map(|_| "?")
            .collect::<Vec<&str>>()
            .join(", ");

        // `table` is a compile-time &'static str chosen by callers, not
        // user input, so formatting it into the SQL is safe here.
        let query = format!(
            "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
        );

        db.write(move |conn| {
            let mut statement = Statement::prepare(conn, query)?;
            // Bind slot 1 = workspace_id, then the item ids in order.
            let mut next_index = statement.bind(&workspace_id, 1)?;
            for id in alive_items {
                next_index = statement.bind(&id, next_index)?;
            }
            statement.exec()
        })
        .await
    })
}
2492
2493#[cfg(test)]
2494mod tests {
2495 use super::*;
2496 use crate::PathList;
2497 use crate::{
2498 multi_workspace::MultiWorkspace,
2499 persistence::{
2500 model::{
2501 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
2502 SessionWorkspace,
2503 },
2504 read_multi_workspace_state,
2505 },
2506 };
2507 use feature_flags::FeatureFlagAppExt;
2508 use gpui::AppContext as _;
2509 use pretty_assertions::assert_eq;
2510 use project::{Project, ProjectGroupKey};
2511 use remote::SshConnectionOptions;
2512 use serde_json::json;
2513 use std::{thread, time::Duration};
2514
2515 /// Creates a unique directory in a FakeFs, returning the path.
2516 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2517 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2518 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2519 fs.insert_tree(&dir, json!({})).await;
2520 dir
2521 }
2522
    // Verifies that MultiWorkspace re-serializes its state when workspaces
    // are added and removed, so the persisted active_workspace_id always
    // tracks the currently active workspace.
    #[gpui::test]
    async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        // NOTE(review): the multi-workspace sidebar appears to be
        // staff-gated; the flag is set before exercising it — confirm.
        cx.update(|cx| {
            cx.set_staff(true);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // Random database ids isolate this test from others that share the
        // global test database.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // --- Add a second workspace ---
        let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws, _cx| ws.set_random_database_id());
            mw.activate(workspace.clone(), window, cx);
            workspace
        });

        // Run background tasks so serialize has a chance to flush.
        cx.run_until_parked();

        // Read back the persisted state and check that the active workspace ID was written.
        let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
        assert_eq!(
            state_after_add.active_workspace_id, active_workspace2_db_id,
            "After adding a second workspace, the serialized active_workspace_id should match \
            the newly activated workspace's database id"
        );

        // --- Remove the first workspace (index 0, which is not the active one) ---
        multi_workspace.update_in(cx, |mw, window, cx| {
            let ws = mw.workspaces().nth(0).unwrap().clone();
            mw.remove([ws], |_, _, _| unreachable!(), window, cx)
                .detach_and_log_err(cx);
        });

        cx.run_until_parked();

        let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let remaining_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert_eq!(
            state_after_remove.active_workspace_id, remaining_db_id,
            "After removing a workspace, the serialized active_workspace_id should match \
            the remaining active workspace's database id"
        );
    }
2587
2588 #[gpui::test]
2589 async fn test_breakpoints() {
2590 zlog::init_test();
2591
2592 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2593 let id = db.next_id().await.unwrap();
2594
2595 let path = Path::new("/tmp/test.rs");
2596
2597 let breakpoint = Breakpoint {
2598 position: 123,
2599 message: None,
2600 state: BreakpointState::Enabled,
2601 condition: None,
2602 hit_condition: None,
2603 };
2604
2605 let log_breakpoint = Breakpoint {
2606 position: 456,
2607 message: Some("Test log message".into()),
2608 state: BreakpointState::Enabled,
2609 condition: None,
2610 hit_condition: None,
2611 };
2612
2613 let disable_breakpoint = Breakpoint {
2614 position: 578,
2615 message: None,
2616 state: BreakpointState::Disabled,
2617 condition: None,
2618 hit_condition: None,
2619 };
2620
2621 let condition_breakpoint = Breakpoint {
2622 position: 789,
2623 message: None,
2624 state: BreakpointState::Enabled,
2625 condition: Some("x > 5".into()),
2626 hit_condition: None,
2627 };
2628
2629 let hit_condition_breakpoint = Breakpoint {
2630 position: 999,
2631 message: None,
2632 state: BreakpointState::Enabled,
2633 condition: None,
2634 hit_condition: Some(">= 3".into()),
2635 };
2636
2637 let workspace = SerializedWorkspace {
2638 id,
2639 paths: PathList::new(&["/tmp"]),
2640 location: SerializedWorkspaceLocation::Local,
2641 center_group: Default::default(),
2642 window_bounds: Default::default(),
2643 display: Default::default(),
2644 docks: Default::default(),
2645 centered_layout: false,
2646 breakpoints: {
2647 let mut map = collections::BTreeMap::default();
2648 map.insert(
2649 Arc::from(path),
2650 vec![
2651 SourceBreakpoint {
2652 row: breakpoint.position,
2653 path: Arc::from(path),
2654 message: breakpoint.message.clone(),
2655 state: breakpoint.state,
2656 condition: breakpoint.condition.clone(),
2657 hit_condition: breakpoint.hit_condition.clone(),
2658 },
2659 SourceBreakpoint {
2660 row: log_breakpoint.position,
2661 path: Arc::from(path),
2662 message: log_breakpoint.message.clone(),
2663 state: log_breakpoint.state,
2664 condition: log_breakpoint.condition.clone(),
2665 hit_condition: log_breakpoint.hit_condition.clone(),
2666 },
2667 SourceBreakpoint {
2668 row: disable_breakpoint.position,
2669 path: Arc::from(path),
2670 message: disable_breakpoint.message.clone(),
2671 state: disable_breakpoint.state,
2672 condition: disable_breakpoint.condition.clone(),
2673 hit_condition: disable_breakpoint.hit_condition.clone(),
2674 },
2675 SourceBreakpoint {
2676 row: condition_breakpoint.position,
2677 path: Arc::from(path),
2678 message: condition_breakpoint.message.clone(),
2679 state: condition_breakpoint.state,
2680 condition: condition_breakpoint.condition.clone(),
2681 hit_condition: condition_breakpoint.hit_condition.clone(),
2682 },
2683 SourceBreakpoint {
2684 row: hit_condition_breakpoint.position,
2685 path: Arc::from(path),
2686 message: hit_condition_breakpoint.message.clone(),
2687 state: hit_condition_breakpoint.state,
2688 condition: hit_condition_breakpoint.condition.clone(),
2689 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2690 },
2691 ],
2692 );
2693 map
2694 },
2695 session_id: None,
2696 window_id: None,
2697 user_toolchains: Default::default(),
2698 };
2699
2700 db.save_workspace(workspace.clone()).await;
2701
2702 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2703 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2704
2705 assert_eq!(loaded_breakpoints.len(), 5);
2706
2707 // normal breakpoint
2708 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2709 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2710 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2711 assert_eq!(
2712 loaded_breakpoints[0].hit_condition,
2713 breakpoint.hit_condition
2714 );
2715 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2716 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2717
2718 // enabled breakpoint
2719 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2720 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2721 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2722 assert_eq!(
2723 loaded_breakpoints[1].hit_condition,
2724 log_breakpoint.hit_condition
2725 );
2726 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2727 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2728
2729 // disable breakpoint
2730 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2731 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2732 assert_eq!(
2733 loaded_breakpoints[2].condition,
2734 disable_breakpoint.condition
2735 );
2736 assert_eq!(
2737 loaded_breakpoints[2].hit_condition,
2738 disable_breakpoint.hit_condition
2739 );
2740 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2741 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2742
2743 // condition breakpoint
2744 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2745 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2746 assert_eq!(
2747 loaded_breakpoints[3].condition,
2748 condition_breakpoint.condition
2749 );
2750 assert_eq!(
2751 loaded_breakpoints[3].hit_condition,
2752 condition_breakpoint.hit_condition
2753 );
2754 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2755 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2756
2757 // hit condition breakpoint
2758 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2759 assert_eq!(
2760 loaded_breakpoints[4].message,
2761 hit_condition_breakpoint.message
2762 );
2763 assert_eq!(
2764 loaded_breakpoints[4].condition,
2765 hit_condition_breakpoint.condition
2766 );
2767 assert_eq!(
2768 loaded_breakpoints[4].hit_condition,
2769 hit_condition_breakpoint.hit_condition
2770 );
2771 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2772 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2773 }
2774
2775 #[gpui::test]
2776 async fn test_remove_last_breakpoint() {
2777 zlog::init_test();
2778
2779 let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
2780 let id = db.next_id().await.unwrap();
2781
2782 let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");
2783
2784 let breakpoint_to_remove = Breakpoint {
2785 position: 100,
2786 message: None,
2787 state: BreakpointState::Enabled,
2788 condition: None,
2789 hit_condition: None,
2790 };
2791
2792 let workspace = SerializedWorkspace {
2793 id,
2794 paths: PathList::new(&["/tmp"]),
2795 location: SerializedWorkspaceLocation::Local,
2796 center_group: Default::default(),
2797 window_bounds: Default::default(),
2798 display: Default::default(),
2799 docks: Default::default(),
2800 centered_layout: false,
2801 breakpoints: {
2802 let mut map = collections::BTreeMap::default();
2803 map.insert(
2804 Arc::from(singular_path),
2805 vec![SourceBreakpoint {
2806 row: breakpoint_to_remove.position,
2807 path: Arc::from(singular_path),
2808 message: None,
2809 state: BreakpointState::Enabled,
2810 condition: None,
2811 hit_condition: None,
2812 }],
2813 );
2814 map
2815 },
2816 session_id: None,
2817 window_id: None,
2818 user_toolchains: Default::default(),
2819 };
2820
2821 db.save_workspace(workspace.clone()).await;
2822
2823 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2824 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();
2825
2826 assert_eq!(loaded_breakpoints.len(), 1);
2827 assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
2828 assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
2829 assert_eq!(
2830 loaded_breakpoints[0].condition,
2831 breakpoint_to_remove.condition
2832 );
2833 assert_eq!(
2834 loaded_breakpoints[0].hit_condition,
2835 breakpoint_to_remove.hit_condition
2836 );
2837 assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
2838 assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));
2839
2840 let workspace_without_breakpoint = SerializedWorkspace {
2841 id,
2842 paths: PathList::new(&["/tmp"]),
2843 location: SerializedWorkspaceLocation::Local,
2844 center_group: Default::default(),
2845 window_bounds: Default::default(),
2846 display: Default::default(),
2847 docks: Default::default(),
2848 centered_layout: false,
2849 breakpoints: collections::BTreeMap::default(),
2850 session_id: None,
2851 window_id: None,
2852 user_toolchains: Default::default(),
2853 };
2854
2855 db.save_workspace(workspace_without_breakpoint.clone())
2856 .await;
2857
2858 let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
2859 let empty_breakpoints = loaded_after_remove
2860 .breakpoints
2861 .get(&Arc::from(singular_path));
2862
2863 assert!(empty_breakpoints.is_none());
2864 }
2865
2866 #[gpui::test]
2867 async fn test_next_id_stability() {
2868 zlog::init_test();
2869
2870 let db = WorkspaceDb::open_test_db("test_next_id_stability").await;
2871
2872 db.write(|conn| {
2873 conn.migrate(
2874 "test_table",
2875 &[sql!(
2876 CREATE TABLE test_table(
2877 text TEXT,
2878 workspace_id INTEGER,
2879 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
2880 ON DELETE CASCADE
2881 ) STRICT;
2882 )],
2883 &mut |_, _, _| false,
2884 )
2885 .unwrap();
2886 })
2887 .await;
2888
2889 let id = db.next_id().await.unwrap();
2890 // Assert the empty row got inserted
2891 assert_eq!(
2892 Some(id),
2893 db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
2894 SELECT workspace_id FROM workspaces WHERE workspace_id = ?
2895 ))
2896 .unwrap()(id)
2897 .unwrap()
2898 );
2899
2900 db.write(move |conn| {
2901 conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
2902 .unwrap()(("test-text-1", id))
2903 .unwrap()
2904 })
2905 .await;
2906
2907 let test_text_1 = db
2908 .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
2909 .unwrap()(1)
2910 .unwrap()
2911 .unwrap();
2912 assert_eq!(test_text_1, "test-text-1");
2913 }
2914
    // Verifies that re-saving a workspace (even with changed paths) keeps
    // its workspace_id stable: rows in a side table with ON DELETE CASCADE
    // must survive, which they would not if save_workspace recreated the
    // workspace row.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        // Side table whose rows disappear if the referenced workspace row is
        // ever deleted.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                            REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Interleave saves and FK-row inserts so each insert targets an
        // existing workspace row.
        db.save_workspace(workspace_1.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Change workspace_1's paths and save repeatedly; ids must stay put.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        // Both FK rows must still resolve to their original workspace ids.
        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
3005
    // Convenience constructor for a SerializedPaneGroup::Group with the
    // given axis and children, and no stored flex sizes.
    fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
        SerializedPaneGroup::Group {
            axis: SerializedAxis(axis),
            flexes: None,
            children,
        }
    }
3013
    // Round-trips a workspace with a nested pane-group tree and checks that
    // the full structure is restored, that the root-path set is order
    // independent, and that repeated saves of the same workspace (duplicate
    // ids) don't corrupt it.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // -----------------
        // | 1,2   | 5,6   |
        // | - - - |       |
        // | 3,4   |       |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Lookup with the roots in reversed order must still match.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3087
    // Verifies workspace lookup semantics: root-path sets are unordered,
    // distinct sets map to distinct workspaces, and re-saving (same id or a
    // new id over the same paths) reassigns the path set.
    #[gpui::test]
    async fn test_workspace_assignment() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_basic_functionality").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(1),
            user_toolchains: Default::default(),
        };

        let mut workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: Some(2),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_2.clone()).await;

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);

        // workspace_2 now claims the path set previously owned by
        // workspace_1.
        db.save_workspace(workspace_2.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        let mut workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp2", "/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(3),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_3.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
        db.save_workspace(workspace_3.clone()).await;
        // NOTE(review): "tmp" here has no leading slash — this set matches
        // nothing either way; confirm the missing slash is intentional.
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }
3182
3183 #[gpui::test]
3184 async fn test_session_workspaces() {
3185 zlog::init_test();
3186
3187 let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;
3188
3189 let workspace_1 = SerializedWorkspace {
3190 id: WorkspaceId(1),
3191 paths: PathList::new(&["/tmp1"]),
3192 location: SerializedWorkspaceLocation::Local,
3193 center_group: Default::default(),
3194 window_bounds: Default::default(),
3195 display: Default::default(),
3196 docks: Default::default(),
3197 centered_layout: false,
3198 breakpoints: Default::default(),
3199 session_id: Some("session-id-1".to_owned()),
3200 window_id: Some(10),
3201 user_toolchains: Default::default(),
3202 };
3203
3204 let workspace_2 = SerializedWorkspace {
3205 id: WorkspaceId(2),
3206 paths: PathList::new(&["/tmp2"]),
3207 location: SerializedWorkspaceLocation::Local,
3208 center_group: Default::default(),
3209 window_bounds: Default::default(),
3210 display: Default::default(),
3211 docks: Default::default(),
3212 centered_layout: false,
3213 breakpoints: Default::default(),
3214 session_id: Some("session-id-1".to_owned()),
3215 window_id: Some(20),
3216 user_toolchains: Default::default(),
3217 };
3218
3219 let workspace_3 = SerializedWorkspace {
3220 id: WorkspaceId(3),
3221 paths: PathList::new(&["/tmp3"]),
3222 location: SerializedWorkspaceLocation::Local,
3223 center_group: Default::default(),
3224 window_bounds: Default::default(),
3225 display: Default::default(),
3226 docks: Default::default(),
3227 centered_layout: false,
3228 breakpoints: Default::default(),
3229 session_id: Some("session-id-2".to_owned()),
3230 window_id: Some(30),
3231 user_toolchains: Default::default(),
3232 };
3233
3234 let workspace_4 = SerializedWorkspace {
3235 id: WorkspaceId(4),
3236 paths: PathList::new(&["/tmp4"]),
3237 location: SerializedWorkspaceLocation::Local,
3238 center_group: Default::default(),
3239 window_bounds: Default::default(),
3240 display: Default::default(),
3241 docks: Default::default(),
3242 centered_layout: false,
3243 breakpoints: Default::default(),
3244 session_id: None,
3245 window_id: None,
3246 user_toolchains: Default::default(),
3247 };
3248
3249 let connection_id = db
3250 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3251 host: "my-host".into(),
3252 port: Some(1234),
3253 ..Default::default()
3254 }))
3255 .await
3256 .unwrap();
3257
3258 let workspace_5 = SerializedWorkspace {
3259 id: WorkspaceId(5),
3260 paths: PathList::default(),
3261 location: SerializedWorkspaceLocation::Remote(
3262 db.remote_connection(connection_id).unwrap(),
3263 ),
3264 center_group: Default::default(),
3265 window_bounds: Default::default(),
3266 display: Default::default(),
3267 docks: Default::default(),
3268 centered_layout: false,
3269 breakpoints: Default::default(),
3270 session_id: Some("session-id-2".to_owned()),
3271 window_id: Some(50),
3272 user_toolchains: Default::default(),
3273 };
3274
3275 let workspace_6 = SerializedWorkspace {
3276 id: WorkspaceId(6),
3277 paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
3278 location: SerializedWorkspaceLocation::Local,
3279 center_group: Default::default(),
3280 window_bounds: Default::default(),
3281 breakpoints: Default::default(),
3282 display: Default::default(),
3283 docks: Default::default(),
3284 centered_layout: false,
3285 session_id: Some("session-id-3".to_owned()),
3286 window_id: Some(60),
3287 user_toolchains: Default::default(),
3288 };
3289
3290 db.save_workspace(workspace_1.clone()).await;
3291 thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
3292 db.save_workspace(workspace_2.clone()).await;
3293 db.save_workspace(workspace_3.clone()).await;
3294 thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
3295 db.save_workspace(workspace_4.clone()).await;
3296 db.save_workspace(workspace_5.clone()).await;
3297 db.save_workspace(workspace_6.clone()).await;
3298
3299 let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
3300 assert_eq!(locations.len(), 2);
3301 assert_eq!(locations[0].0, WorkspaceId(2));
3302 assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
3303 assert_eq!(locations[0].2, Some(20));
3304 assert_eq!(locations[1].0, WorkspaceId(1));
3305 assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
3306 assert_eq!(locations[1].2, Some(10));
3307
3308 let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
3309 assert_eq!(locations.len(), 2);
3310 assert_eq!(locations[0].0, WorkspaceId(5));
3311 assert_eq!(locations[0].1, PathList::default());
3312 assert_eq!(locations[0].2, Some(50));
3313 assert_eq!(locations[0].3, Some(connection_id));
3314 assert_eq!(locations[1].0, WorkspaceId(3));
3315 assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
3316 assert_eq!(locations[1].2, Some(30));
3317
3318 let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
3319 assert_eq!(locations.len(), 1);
3320 assert_eq!(locations[0].0, WorkspaceId(6));
3321 assert_eq!(
3322 locations[0].1,
3323 PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
3324 );
3325 assert_eq!(locations[0].2, Some(60));
3326 }
3327
3328 fn default_workspace<P: AsRef<Path>>(
3329 paths: &[P],
3330 center_group: &SerializedPaneGroup,
3331 ) -> SerializedWorkspace {
3332 SerializedWorkspace {
3333 id: WorkspaceId(4),
3334 paths: PathList::new(paths),
3335 location: SerializedWorkspaceLocation::Local,
3336 center_group: center_group.clone(),
3337 window_bounds: Default::default(),
3338 display: Default::default(),
3339 docks: Default::default(),
3340 breakpoints: Default::default(),
3341 centered_layout: false,
3342 session_id: None,
3343 window_id: None,
3344 user_toolchains: Default::default(),
3345 }
3346 }
3347
3348 #[gpui::test]
3349 async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
3350 let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
3351 let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
3352 let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
3353 let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
3354
3355 let fs = fs::FakeFs::new(cx.executor());
3356 fs.insert_tree(dir1.path(), json!({})).await;
3357 fs.insert_tree(dir2.path(), json!({})).await;
3358 fs.insert_tree(dir3.path(), json!({})).await;
3359 fs.insert_tree(dir4.path(), json!({})).await;
3360
3361 let db =
3362 WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;
3363
3364 let workspaces = [
3365 (1, vec![dir1.path()], 9),
3366 (2, vec![dir2.path()], 5),
3367 (3, vec![dir3.path()], 8),
3368 (4, vec![dir4.path()], 2),
3369 (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
3370 (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
3371 ]
3372 .into_iter()
3373 .map(|(id, paths, window_id)| SerializedWorkspace {
3374 id: WorkspaceId(id),
3375 paths: PathList::new(paths.as_slice()),
3376 location: SerializedWorkspaceLocation::Local,
3377 center_group: Default::default(),
3378 window_bounds: Default::default(),
3379 display: Default::default(),
3380 docks: Default::default(),
3381 centered_layout: false,
3382 session_id: Some("one-session".to_owned()),
3383 breakpoints: Default::default(),
3384 window_id: Some(window_id),
3385 user_toolchains: Default::default(),
3386 })
3387 .collect::<Vec<_>>();
3388
3389 for workspace in workspaces.iter() {
3390 db.save_workspace(workspace.clone()).await;
3391 }
3392
3393 let stack = Some(Vec::from([
3394 WindowId::from(2), // Top
3395 WindowId::from(8),
3396 WindowId::from(5),
3397 WindowId::from(9),
3398 WindowId::from(3),
3399 WindowId::from(4), // Bottom
3400 ]));
3401
3402 let locations = db
3403 .last_session_workspace_locations("one-session", stack, fs.as_ref())
3404 .await
3405 .unwrap();
3406 assert_eq!(
3407 locations,
3408 [
3409 SessionWorkspace {
3410 workspace_id: WorkspaceId(4),
3411 location: SerializedWorkspaceLocation::Local,
3412 paths: PathList::new(&[dir4.path()]),
3413 window_id: Some(WindowId::from(2u64)),
3414 },
3415 SessionWorkspace {
3416 workspace_id: WorkspaceId(3),
3417 location: SerializedWorkspaceLocation::Local,
3418 paths: PathList::new(&[dir3.path()]),
3419 window_id: Some(WindowId::from(8u64)),
3420 },
3421 SessionWorkspace {
3422 workspace_id: WorkspaceId(2),
3423 location: SerializedWorkspaceLocation::Local,
3424 paths: PathList::new(&[dir2.path()]),
3425 window_id: Some(WindowId::from(5u64)),
3426 },
3427 SessionWorkspace {
3428 workspace_id: WorkspaceId(1),
3429 location: SerializedWorkspaceLocation::Local,
3430 paths: PathList::new(&[dir1.path()]),
3431 window_id: Some(WindowId::from(9u64)),
3432 },
3433 SessionWorkspace {
3434 workspace_id: WorkspaceId(5),
3435 location: SerializedWorkspaceLocation::Local,
3436 paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
3437 window_id: Some(WindowId::from(3u64)),
3438 },
3439 SessionWorkspace {
3440 workspace_id: WorkspaceId(6),
3441 location: SerializedWorkspaceLocation::Local,
3442 paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
3443 window_id: Some(WindowId::from(4u64)),
3444 },
3445 ]
3446 );
3447 }
3448
3449 #[gpui::test]
3450 async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
3451 let fs = fs::FakeFs::new(cx.executor());
3452 let db =
3453 WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
3454 .await;
3455
3456 let remote_connections = [
3457 ("host-1", "my-user-1"),
3458 ("host-2", "my-user-2"),
3459 ("host-3", "my-user-3"),
3460 ("host-4", "my-user-4"),
3461 ]
3462 .into_iter()
3463 .map(|(host, user)| async {
3464 let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
3465 host: host.into(),
3466 username: Some(user.to_string()),
3467 ..Default::default()
3468 });
3469 db.get_or_create_remote_connection(options.clone())
3470 .await
3471 .unwrap();
3472 options
3473 })
3474 .collect::<Vec<_>>();
3475
3476 let remote_connections = futures::future::join_all(remote_connections).await;
3477
3478 let workspaces = [
3479 (1, remote_connections[0].clone(), 9),
3480 (2, remote_connections[1].clone(), 5),
3481 (3, remote_connections[2].clone(), 8),
3482 (4, remote_connections[3].clone(), 2),
3483 ]
3484 .into_iter()
3485 .map(|(id, remote_connection, window_id)| SerializedWorkspace {
3486 id: WorkspaceId(id),
3487 paths: PathList::default(),
3488 location: SerializedWorkspaceLocation::Remote(remote_connection),
3489 center_group: Default::default(),
3490 window_bounds: Default::default(),
3491 display: Default::default(),
3492 docks: Default::default(),
3493 centered_layout: false,
3494 session_id: Some("one-session".to_owned()),
3495 breakpoints: Default::default(),
3496 window_id: Some(window_id),
3497 user_toolchains: Default::default(),
3498 })
3499 .collect::<Vec<_>>();
3500
3501 for workspace in workspaces.iter() {
3502 db.save_workspace(workspace.clone()).await;
3503 }
3504
3505 let stack = Some(Vec::from([
3506 WindowId::from(2), // Top
3507 WindowId::from(8),
3508 WindowId::from(5),
3509 WindowId::from(9), // Bottom
3510 ]));
3511
3512 let have = db
3513 .last_session_workspace_locations("one-session", stack, fs.as_ref())
3514 .await
3515 .unwrap();
3516 assert_eq!(have.len(), 4);
3517 assert_eq!(
3518 have[0],
3519 SessionWorkspace {
3520 workspace_id: WorkspaceId(4),
3521 location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
3522 paths: PathList::default(),
3523 window_id: Some(WindowId::from(2u64)),
3524 }
3525 );
3526 assert_eq!(
3527 have[1],
3528 SessionWorkspace {
3529 workspace_id: WorkspaceId(3),
3530 location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
3531 paths: PathList::default(),
3532 window_id: Some(WindowId::from(8u64)),
3533 }
3534 );
3535 assert_eq!(
3536 have[2],
3537 SessionWorkspace {
3538 workspace_id: WorkspaceId(2),
3539 location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
3540 paths: PathList::default(),
3541 window_id: Some(WindowId::from(5u64)),
3542 }
3543 );
3544 assert_eq!(
3545 have[3],
3546 SessionWorkspace {
3547 workspace_id: WorkspaceId(1),
3548 location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
3549 paths: PathList::default(),
3550 window_id: Some(WindowId::from(9u64)),
3551 }
3552 );
3553 }
3554
3555 #[gpui::test]
3556 async fn test_get_or_create_ssh_project() {
3557 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3558
3559 let host = "example.com".to_string();
3560 let port = Some(22_u16);
3561 let user = Some("user".to_string());
3562
3563 let connection_id = db
3564 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3565 host: host.clone().into(),
3566 port,
3567 username: user.clone(),
3568 ..Default::default()
3569 }))
3570 .await
3571 .unwrap();
3572
3573 // Test that calling the function again with the same parameters returns the same project
3574 let same_connection = db
3575 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3576 host: host.clone().into(),
3577 port,
3578 username: user.clone(),
3579 ..Default::default()
3580 }))
3581 .await
3582 .unwrap();
3583
3584 assert_eq!(connection_id, same_connection);
3585
3586 // Test with different parameters
3587 let host2 = "otherexample.com".to_string();
3588 let port2 = None;
3589 let user2 = Some("otheruser".to_string());
3590
3591 let different_connection = db
3592 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3593 host: host2.clone().into(),
3594 port: port2,
3595 username: user2.clone(),
3596 ..Default::default()
3597 }))
3598 .await
3599 .unwrap();
3600
3601 assert_ne!(connection_id, different_connection);
3602 }
3603
3604 #[gpui::test]
3605 async fn test_get_or_create_ssh_project_with_null_user() {
3606 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
3607
3608 let (host, port, user) = ("example.com".to_string(), None, None);
3609
3610 let connection_id = db
3611 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3612 host: host.clone().into(),
3613 port,
3614 username: None,
3615 ..Default::default()
3616 }))
3617 .await
3618 .unwrap();
3619
3620 let same_connection_id = db
3621 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3622 host: host.clone().into(),
3623 port,
3624 username: user.clone(),
3625 ..Default::default()
3626 }))
3627 .await
3628 .unwrap();
3629
3630 assert_eq!(connection_id, same_connection_id);
3631 }
3632
3633 #[gpui::test]
3634 async fn test_get_remote_connections() {
3635 let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;
3636
3637 let connections = [
3638 ("example.com".to_string(), None, None),
3639 (
3640 "anotherexample.com".to_string(),
3641 Some(123_u16),
3642 Some("user2".to_string()),
3643 ),
3644 ("yetanother.com".to_string(), Some(345_u16), None),
3645 ];
3646
3647 let mut ids = Vec::new();
3648 for (host, port, user) in connections.iter() {
3649 ids.push(
3650 db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
3651 SshConnectionOptions {
3652 host: host.clone().into(),
3653 port: *port,
3654 username: user.clone(),
3655 ..Default::default()
3656 },
3657 ))
3658 .await
3659 .unwrap(),
3660 );
3661 }
3662
3663 let stored_connections = db.remote_connections().unwrap();
3664 assert_eq!(
3665 stored_connections,
3666 [
3667 (
3668 ids[0],
3669 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3670 host: "example.com".into(),
3671 port: None,
3672 username: None,
3673 ..Default::default()
3674 }),
3675 ),
3676 (
3677 ids[1],
3678 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3679 host: "anotherexample.com".into(),
3680 port: Some(123),
3681 username: Some("user2".into()),
3682 ..Default::default()
3683 }),
3684 ),
3685 (
3686 ids[2],
3687 RemoteConnectionOptions::Ssh(SshConnectionOptions {
3688 host: "yetanother.com".into(),
3689 port: Some(345),
3690 username: None,
3691 ..Default::default()
3692 }),
3693 ),
3694 ]
3695 .into_iter()
3696 .collect::<HashMap<_, _>>(),
3697 );
3698 }
3699
3700 #[gpui::test]
3701 async fn test_simple_split() {
3702 zlog::init_test();
3703
3704 let db = WorkspaceDb::open_test_db("simple_split").await;
3705
3706 // -----------------
3707 // | 1,2 | 5,6 |
3708 // | - - - | |
3709 // | 3,4 | |
3710 // -----------------
3711 let center_pane = group(
3712 Axis::Horizontal,
3713 vec![
3714 group(
3715 Axis::Vertical,
3716 vec![
3717 SerializedPaneGroup::Pane(SerializedPane::new(
3718 vec![
3719 SerializedItem::new("Terminal", 1, false, false),
3720 SerializedItem::new("Terminal", 2, true, false),
3721 ],
3722 false,
3723 0,
3724 )),
3725 SerializedPaneGroup::Pane(SerializedPane::new(
3726 vec![
3727 SerializedItem::new("Terminal", 4, false, false),
3728 SerializedItem::new("Terminal", 3, true, false),
3729 ],
3730 true,
3731 0,
3732 )),
3733 ],
3734 ),
3735 SerializedPaneGroup::Pane(SerializedPane::new(
3736 vec![
3737 SerializedItem::new("Terminal", 5, true, false),
3738 SerializedItem::new("Terminal", 6, false, false),
3739 ],
3740 false,
3741 0,
3742 )),
3743 ],
3744 );
3745
3746 let workspace = default_workspace(&["/tmp"], ¢er_pane);
3747
3748 db.save_workspace(workspace.clone()).await;
3749
3750 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
3751
3752 assert_eq!(workspace.center_group, new_workspace.center_group);
3753 }
3754
3755 #[gpui::test]
3756 async fn test_cleanup_panes() {
3757 zlog::init_test();
3758
3759 let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;
3760
3761 let center_pane = group(
3762 Axis::Horizontal,
3763 vec![
3764 group(
3765 Axis::Vertical,
3766 vec![
3767 SerializedPaneGroup::Pane(SerializedPane::new(
3768 vec![
3769 SerializedItem::new("Terminal", 1, false, false),
3770 SerializedItem::new("Terminal", 2, true, false),
3771 ],
3772 false,
3773 0,
3774 )),
3775 SerializedPaneGroup::Pane(SerializedPane::new(
3776 vec![
3777 SerializedItem::new("Terminal", 4, false, false),
3778 SerializedItem::new("Terminal", 3, true, false),
3779 ],
3780 true,
3781 0,
3782 )),
3783 ],
3784 ),
3785 SerializedPaneGroup::Pane(SerializedPane::new(
3786 vec![
3787 SerializedItem::new("Terminal", 5, false, false),
3788 SerializedItem::new("Terminal", 6, true, false),
3789 ],
3790 false,
3791 0,
3792 )),
3793 ],
3794 );
3795
3796 let id = &["/tmp"];
3797
3798 let mut workspace = default_workspace(id, ¢er_pane);
3799
3800 db.save_workspace(workspace.clone()).await;
3801
3802 workspace.center_group = group(
3803 Axis::Vertical,
3804 vec![
3805 SerializedPaneGroup::Pane(SerializedPane::new(
3806 vec![
3807 SerializedItem::new("Terminal", 1, false, false),
3808 SerializedItem::new("Terminal", 2, true, false),
3809 ],
3810 false,
3811 0,
3812 )),
3813 SerializedPaneGroup::Pane(SerializedPane::new(
3814 vec![
3815 SerializedItem::new("Terminal", 4, true, false),
3816 SerializedItem::new("Terminal", 3, false, false),
3817 ],
3818 true,
3819 0,
3820 )),
3821 ],
3822 );
3823
3824 db.save_workspace(workspace.clone()).await;
3825
3826 let new_workspace = db.workspace_for_roots(id).unwrap();
3827
3828 assert_eq!(workspace.center_group, new_workspace.center_group);
3829 }
3830
3831 #[gpui::test]
3832 async fn test_empty_workspace_window_bounds() {
3833 zlog::init_test();
3834
3835 let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
3836 let id = db.next_id().await.unwrap();
3837
3838 // Create a workspace with empty paths (empty workspace)
3839 let empty_paths: &[&str] = &[];
3840 let display_uuid = Uuid::new_v4();
3841 let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
3842 origin: point(px(100.0), px(200.0)),
3843 size: size(px(800.0), px(600.0)),
3844 }));
3845
3846 let workspace = SerializedWorkspace {
3847 id,
3848 paths: PathList::new(empty_paths),
3849 location: SerializedWorkspaceLocation::Local,
3850 center_group: Default::default(),
3851 window_bounds: None,
3852 display: None,
3853 docks: Default::default(),
3854 breakpoints: Default::default(),
3855 centered_layout: false,
3856 session_id: None,
3857 window_id: None,
3858 user_toolchains: Default::default(),
3859 };
3860
3861 // Save the workspace (this creates the record with empty paths)
3862 db.save_workspace(workspace.clone()).await;
3863
3864 // Save window bounds separately (as the actual code does via set_window_open_status)
3865 db.set_window_open_status(id, window_bounds, display_uuid)
3866 .await
3867 .unwrap();
3868
3869 // Empty workspaces cannot be retrieved by paths (they'd all match).
3870 // They must be retrieved by workspace_id.
3871 assert!(db.workspace_for_roots(empty_paths).is_none());
3872
3873 // Retrieve using workspace_for_id instead
3874 let retrieved = db.workspace_for_id(id).unwrap();
3875
3876 // Verify window bounds were persisted
3877 assert_eq!(retrieved.id, id);
3878 assert!(retrieved.window_bounds.is_some());
3879 assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
3880 assert!(retrieved.display.is_some());
3881 assert_eq!(retrieved.display.unwrap(), display_uuid);
3882 }
3883
3884 #[gpui::test]
3885 async fn test_last_session_workspace_locations_groups_by_window_id(
3886 cx: &mut gpui::TestAppContext,
3887 ) {
3888 let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
3889 let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
3890 let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
3891 let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
3892 let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();
3893
3894 let fs = fs::FakeFs::new(cx.executor());
3895 fs.insert_tree(dir1.path(), json!({})).await;
3896 fs.insert_tree(dir2.path(), json!({})).await;
3897 fs.insert_tree(dir3.path(), json!({})).await;
3898 fs.insert_tree(dir4.path(), json!({})).await;
3899 fs.insert_tree(dir5.path(), json!({})).await;
3900
3901 let db =
3902 WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
3903 .await;
3904
3905 // Simulate two MultiWorkspace windows each containing two workspaces,
3906 // plus one single-workspace window:
3907 // Window 10: workspace 1, workspace 2
3908 // Window 20: workspace 3, workspace 4
3909 // Window 30: workspace 5 (only one)
3910 //
3911 // On session restore, the caller should be able to group these by
3912 // window_id to reconstruct the MultiWorkspace windows.
3913 let workspaces_data: Vec<(i64, &Path, u64)> = vec![
3914 (1, dir1.path(), 10),
3915 (2, dir2.path(), 10),
3916 (3, dir3.path(), 20),
3917 (4, dir4.path(), 20),
3918 (5, dir5.path(), 30),
3919 ];
3920
3921 for (id, dir, window_id) in &workspaces_data {
3922 db.save_workspace(SerializedWorkspace {
3923 id: WorkspaceId(*id),
3924 paths: PathList::new(&[*dir]),
3925 location: SerializedWorkspaceLocation::Local,
3926 center_group: Default::default(),
3927 window_bounds: Default::default(),
3928 display: Default::default(),
3929 docks: Default::default(),
3930 centered_layout: false,
3931 session_id: Some("test-session".to_owned()),
3932 breakpoints: Default::default(),
3933 window_id: Some(*window_id),
3934 user_toolchains: Default::default(),
3935 })
3936 .await;
3937 }
3938
3939 let locations = db
3940 .last_session_workspace_locations("test-session", None, fs.as_ref())
3941 .await
3942 .unwrap();
3943
3944 // All 5 workspaces should be returned with their window_ids.
3945 assert_eq!(locations.len(), 5);
3946
3947 // Every entry should have a window_id so the caller can group them.
3948 for session_workspace in &locations {
3949 assert!(
3950 session_workspace.window_id.is_some(),
3951 "workspace {:?} missing window_id",
3952 session_workspace.workspace_id
3953 );
3954 }
3955
3956 // Group by window_id, simulating what the restoration code should do.
3957 let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
3958 for session_workspace in &locations {
3959 if let Some(window_id) = session_workspace.window_id {
3960 by_window
3961 .entry(window_id)
3962 .or_default()
3963 .push(session_workspace.workspace_id);
3964 }
3965 }
3966
3967 // Should produce 3 windows, not 5.
3968 assert_eq!(
3969 by_window.len(),
3970 3,
3971 "Expected 3 window groups, got {}: {:?}",
3972 by_window.len(),
3973 by_window
3974 );
3975
3976 // Window 10 should contain workspaces 1 and 2.
3977 let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
3978 assert_eq!(window_10.len(), 2);
3979 assert!(window_10.contains(&WorkspaceId(1)));
3980 assert!(window_10.contains(&WorkspaceId(2)));
3981
3982 // Window 20 should contain workspaces 3 and 4.
3983 let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
3984 assert_eq!(window_20.len(), 2);
3985 assert!(window_20.contains(&WorkspaceId(3)));
3986 assert!(window_20.contains(&WorkspaceId(4)));
3987
3988 // Window 30 should contain only workspace 5.
3989 let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
3990 assert_eq!(window_30.len(), 1);
3991 assert!(window_30.contains(&WorkspaceId(5)));
3992 }
3993
3994 #[gpui::test]
3995 async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
3996 use crate::persistence::model::MultiWorkspaceState;
3997
3998 // Write multi-workspace state for two windows via the scoped KVP.
3999 let window_10 = WindowId::from(10u64);
4000 let window_20 = WindowId::from(20u64);
4001
4002 let kvp = cx.update(|cx| KeyValueStore::global(cx));
4003
4004 write_multi_workspace_state(
4005 &kvp,
4006 window_10,
4007 MultiWorkspaceState {
4008 active_workspace_id: Some(WorkspaceId(2)),
4009 project_group_keys: vec![],
4010 sidebar_open: true,
4011 sidebar_state: None,
4012 },
4013 )
4014 .await;
4015
4016 write_multi_workspace_state(
4017 &kvp,
4018 window_20,
4019 MultiWorkspaceState {
4020 active_workspace_id: Some(WorkspaceId(3)),
4021 project_group_keys: vec![],
4022 sidebar_open: false,
4023 sidebar_state: None,
4024 },
4025 )
4026 .await;
4027
4028 // Build session workspaces: two in window 10, one in window 20, one with no window.
4029 let session_workspaces = vec![
4030 SessionWorkspace {
4031 workspace_id: WorkspaceId(1),
4032 location: SerializedWorkspaceLocation::Local,
4033 paths: PathList::new(&["/a"]),
4034 window_id: Some(window_10),
4035 },
4036 SessionWorkspace {
4037 workspace_id: WorkspaceId(2),
4038 location: SerializedWorkspaceLocation::Local,
4039 paths: PathList::new(&["/b"]),
4040 window_id: Some(window_10),
4041 },
4042 SessionWorkspace {
4043 workspace_id: WorkspaceId(3),
4044 location: SerializedWorkspaceLocation::Local,
4045 paths: PathList::new(&["/c"]),
4046 window_id: Some(window_20),
4047 },
4048 SessionWorkspace {
4049 workspace_id: WorkspaceId(4),
4050 location: SerializedWorkspaceLocation::Local,
4051 paths: PathList::new(&["/d"]),
4052 window_id: None,
4053 },
4054 ];
4055
4056 let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));
4057
4058 // Should produce 3 results: window 10, window 20, and the orphan.
4059 assert_eq!(results.len(), 3);
4060
4061 // Window 10: active_workspace_id = 2 picks workspace 2 (paths /b), sidebar open.
4062 let group_10 = &results[0];
4063 assert_eq!(group_10.active_workspace.workspace_id, WorkspaceId(2));
4064 assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
4065 assert_eq!(group_10.state.sidebar_open, true);
4066
4067 // Window 20: active_workspace_id = 3 picks workspace 3 (paths /c), sidebar closed.
4068 let group_20 = &results[1];
4069 assert_eq!(group_20.active_workspace.workspace_id, WorkspaceId(3));
4070 assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
4071 assert_eq!(group_20.state.sidebar_open, false);
4072
4073 // Orphan: no active_workspace_id, falls back to first workspace (id 4).
4074 let group_none = &results[2];
4075 assert_eq!(group_none.active_workspace.workspace_id, WorkspaceId(4));
4076 assert_eq!(group_none.state.active_workspace_id, None);
4077 assert_eq!(group_none.state.sidebar_open, false);
4078 }
4079
    /// Verifies that awaiting the task returned by `flush_serialization` is,
    /// by itself, enough to guarantee the workspace row exists in the DB —
    /// deliberately no `run_until_parked` between awaiting the task and
    /// reading the row back.
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        // NOTE(review): staff flag presumably gates the multi-workspace code
        // paths exercised here — confirm against MultiWorkspace::test_new.
        cx.update(|cx| {
            cx.set_staff(true);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4123
    /// A workspace created after startup gets its database id asynchronously
    /// via `next_id()`. This test checks that the id is assigned, that the
    /// multi-workspace state records the new workspace as active, and that a
    /// full row — not just the bare row created by `next_id` — is persisted.
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        // NOTE(review): staff flag presumably gates the multi-workspace code
        // paths exercised here — confirm against MultiWorkspace::test_new.
        cx.update(|cx| {
            cx.set_staff(true);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4179
4180 #[gpui::test]
4181 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4182 crate::tests::init_test(cx);
4183
4184 cx.update(|cx| {
4185 cx.set_staff(true);
4186 });
4187
4188 let fs = fs::FakeFs::new(cx.executor());
4189 let dir = unique_test_dir(&fs, "remove").await;
4190 let project1 = Project::test(fs.clone(), [], cx).await;
4191 let project2 = Project::test(fs.clone(), [], cx).await;
4192
4193 let (multi_workspace, cx) =
4194 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4195
4196 multi_workspace.update(cx, |mw, cx| {
4197 mw.open_sidebar(cx);
4198 });
4199
4200 multi_workspace.update_in(cx, |mw, _, cx| {
4201 mw.set_random_database_id(cx);
4202 });
4203
4204 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4205
4206 // Get a real DB id for workspace2 so the row actually exists.
4207 let workspace2_db_id = db.next_id().await.unwrap();
4208
4209 multi_workspace.update_in(cx, |mw, window, cx| {
4210 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4211 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4212 ws.set_database_id(workspace2_db_id)
4213 });
4214 mw.add(workspace.clone(), window, cx);
4215 });
4216
4217 // Save a full workspace row to the DB directly.
4218 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4219 db.save_workspace(SerializedWorkspace {
4220 id: workspace2_db_id,
4221 paths: PathList::new(&[&dir]),
4222 location: SerializedWorkspaceLocation::Local,
4223 center_group: Default::default(),
4224 window_bounds: Default::default(),
4225 display: Default::default(),
4226 docks: Default::default(),
4227 centered_layout: false,
4228 session_id: Some(session_id.clone()),
4229 breakpoints: Default::default(),
4230 window_id: Some(99),
4231 user_toolchains: Default::default(),
4232 })
4233 .await;
4234
4235 assert!(
4236 db.workspace_for_id(workspace2_db_id).is_some(),
4237 "Workspace2 should exist in DB before removal"
4238 );
4239
4240 // Remove workspace at index 1 (the second workspace).
4241 multi_workspace.update_in(cx, |mw, window, cx| {
4242 let ws = mw.workspaces().nth(1).unwrap().clone();
4243 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4244 .detach_and_log_err(cx);
4245 });
4246
4247 cx.run_until_parked();
4248
4249 // The row should still exist so it continues to appear in recent
4250 // projects, but the session binding should be cleared so it is not
4251 // restored as part of any future session.
4252 assert!(
4253 db.workspace_for_id(workspace2_db_id).is_some(),
4254 "Removed workspace's DB row should be preserved for recent projects"
4255 );
4256
4257 let session_workspaces = db
4258 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4259 .await
4260 .unwrap();
4261 let restored_ids: Vec<WorkspaceId> = session_workspaces
4262 .iter()
4263 .map(|sw| sw.workspace_id)
4264 .collect();
4265 assert!(
4266 !restored_ids.contains(&workspace2_db_id),
4267 "Removed workspace should not appear in session restoration"
4268 );
4269 }
4270
    #[gpui::test]
    /// Two workspaces share one window and one session. After removing the
    /// second, a session restore must bring back only the survivor — the
    /// removed workspace must not reappear as a "zombie".
    async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);

        cx.update(|cx| {
            cx.set_staff(true);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;

        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let db = cx.update(|cx| WorkspaceDb::global(cx));

        // Get real DB ids so the rows actually exist.
        let ws1_id = db.next_id().await.unwrap();
        let ws2_id = db.next_id().await.unwrap();

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        // Wire workspace1 (the initial workspace) to its DB id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.workspace().update(cx, |ws, _cx| {
                ws.set_database_id(ws1_id);
            });
        });

        // Add workspace2 to the same window with its own DB id.
        multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
                ws.set_database_id(ws2_id)
            });
            mw.add(workspace.clone(), window, cx);
        });

        // Both rows are bound to the same session and window so they would
        // both be candidates for restoration.
        let session_id = "test-zombie-session";
        let window_id_val: u64 = 42;

        db.save_workspace(SerializedWorkspace {
            id: ws1_id,
            paths: PathList::new(&[dir1.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        db.save_workspace(SerializedWorkspace {
            id: ws2_id,
            paths: PathList::new(&[dir2.path()]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some(session_id.to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id_val),
            user_toolchains: Default::default(),
        })
        .await;

        // Remove workspace2 (index 1).
        multi_workspace.update_in(cx, |mw, window, cx| {
            let ws = mw.workspaces().nth(1).unwrap().clone();
            mw.remove([ws], |_, _, _| unreachable!(), window, cx)
                .detach_and_log_err(cx);
        });

        cx.run_until_parked();

        // The removed workspace should NOT appear in session restoration.
        let locations = db
            .last_session_workspace_locations(session_id, None, fs.as_ref())
            .await
            .unwrap();

        let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
        assert!(
            !restored_ids.contains(&ws2_id),
            "Removed workspace should not appear in session restoration list. Found: {:?}",
            restored_ids
        );
        assert!(
            restored_ids.contains(&ws1_id),
            "Remaining workspace should still appear in session restoration list"
        );
    }
4376
4377 #[gpui::test]
4378 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4379 crate::tests::init_test(cx);
4380
4381 cx.update(|cx| {
4382 cx.set_staff(true);
4383 });
4384
4385 let fs = fs::FakeFs::new(cx.executor());
4386 let dir = unique_test_dir(&fs, "pending-removal").await;
4387 let project1 = Project::test(fs.clone(), [], cx).await;
4388 let project2 = Project::test(fs.clone(), [], cx).await;
4389
4390 let db = cx.update(|cx| WorkspaceDb::global(cx));
4391
4392 // Get a real DB id for workspace2 so the row actually exists.
4393 let workspace2_db_id = db.next_id().await.unwrap();
4394
4395 let (multi_workspace, cx) =
4396 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4397
4398 multi_workspace.update(cx, |mw, cx| {
4399 mw.open_sidebar(cx);
4400 });
4401
4402 multi_workspace.update_in(cx, |mw, _, cx| {
4403 mw.set_random_database_id(cx);
4404 });
4405
4406 multi_workspace.update_in(cx, |mw, window, cx| {
4407 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4408 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4409 ws.set_database_id(workspace2_db_id)
4410 });
4411 mw.add(workspace.clone(), window, cx);
4412 });
4413
4414 // Save a full workspace row to the DB directly and let it settle.
4415 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4416 db.save_workspace(SerializedWorkspace {
4417 id: workspace2_db_id,
4418 paths: PathList::new(&[&dir]),
4419 location: SerializedWorkspaceLocation::Local,
4420 center_group: Default::default(),
4421 window_bounds: Default::default(),
4422 display: Default::default(),
4423 docks: Default::default(),
4424 centered_layout: false,
4425 session_id: Some(session_id.clone()),
4426 breakpoints: Default::default(),
4427 window_id: Some(88),
4428 user_toolchains: Default::default(),
4429 })
4430 .await;
4431 cx.run_until_parked();
4432
4433 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4434 multi_workspace.update_in(cx, |mw, window, cx| {
4435 let ws = mw.workspaces().nth(1).unwrap().clone();
4436 mw.remove([ws], |_, _, _| unreachable!(), window, cx)
4437 .detach_and_log_err(cx);
4438 });
4439
4440 // Simulate the quit handler pattern: collect flush tasks + pending
4441 // removal tasks and await them all.
4442 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4443 let mut tasks: Vec<Task<()>> = mw
4444 .workspaces()
4445 .map(|workspace| {
4446 workspace.update(cx, |workspace, cx| {
4447 workspace.flush_serialization(window, cx)
4448 })
4449 })
4450 .collect();
4451 let mut removal_tasks = mw.take_pending_removal_tasks();
4452 // Note: removal_tasks may be empty if the background task already
4453 // completed (take_pending_removal_tasks filters out ready tasks).
4454 tasks.append(&mut removal_tasks);
4455 tasks.push(mw.flush_serialization());
4456 tasks
4457 });
4458 futures::future::join_all(all_tasks).await;
4459
4460 // The row should still exist (for recent projects), but the session
4461 // binding should have been cleared by the pending removal task.
4462 assert!(
4463 db.workspace_for_id(workspace2_db_id).is_some(),
4464 "Workspace row should be preserved for recent projects"
4465 );
4466
4467 let session_workspaces = db
4468 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4469 .await
4470 .unwrap();
4471 let restored_ids: Vec<WorkspaceId> = session_workspaces
4472 .iter()
4473 .map(|sw| sw.workspace_id)
4474 .collect();
4475 assert!(
4476 !restored_ids.contains(&workspace2_db_id),
4477 "Pending removal task should have cleared the session binding"
4478 );
4479 }
4480
4481 #[gpui::test]
4482 async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
4483 crate::tests::init_test(cx);
4484
4485 cx.update(|cx| {
4486 cx.set_staff(true);
4487 });
4488
4489 let fs = fs::FakeFs::new(cx.executor());
4490 let project = Project::test(fs.clone(), [], cx).await;
4491
4492 let (multi_workspace, cx) =
4493 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4494
4495 multi_workspace.update_in(cx, |mw, _, cx| {
4496 mw.set_random_database_id(cx);
4497 });
4498
4499 let task =
4500 multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
4501 task.await;
4502
4503 let new_workspace_db_id =
4504 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4505 assert!(
4506 new_workspace_db_id.is_some(),
4507 "After run_until_parked, the workspace should have a database_id"
4508 );
4509
4510 let workspace_id = new_workspace_db_id.unwrap();
4511
4512 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4513
4514 assert!(
4515 db.workspace_for_id(workspace_id).is_some(),
4516 "The workspace row should exist in the DB"
4517 );
4518
4519 cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));
4520
4521 // Advance the clock past the 100ms debounce timer so the bounds
4522 // observer task fires
4523 cx.executor().advance_clock(Duration::from_millis(200));
4524 cx.run_until_parked();
4525
4526 let serialized = db
4527 .workspace_for_id(workspace_id)
4528 .expect("workspace row should still exist");
4529 assert!(
4530 serialized.window_bounds.is_some(),
4531 "The bounds observer should write bounds for the workspace's real DB ID, \
4532 even when the workspace was created via create_workspace (where the ID \
4533 is assigned asynchronously after construction)."
4534 );
4535 }
4536
4537 #[gpui::test]
4538 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4539 crate::tests::init_test(cx);
4540
4541 cx.update(|cx| {
4542 cx.set_staff(true);
4543 });
4544
4545 let fs = fs::FakeFs::new(cx.executor());
4546 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4547 fs.insert_tree(dir.path(), json!({})).await;
4548
4549 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4550
4551 let (multi_workspace, cx) =
4552 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4553
4554 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4555 let workspace_id = db.next_id().await.unwrap();
4556 multi_workspace.update_in(cx, |mw, _, cx| {
4557 mw.workspace().update(cx, |ws, _cx| {
4558 ws.set_database_id(workspace_id);
4559 });
4560 });
4561
4562 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4563 mw.workspace()
4564 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4565 });
4566 task.await;
4567
4568 let after = db
4569 .workspace_for_id(workspace_id)
4570 .expect("workspace row should exist after flush_serialization");
4571 assert!(
4572 !after.paths.is_empty(),
4573 "flush_serialization should have written paths via save_workspace"
4574 );
4575 assert!(
4576 after.window_bounds.is_some(),
4577 "flush_serialization should ensure window bounds are persisted to the DB \
4578 before the process exits."
4579 );
4580 }
4581
4582 #[gpui::test]
4583 async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
4584 let fs = fs::FakeFs::new(cx.executor());
4585
4586 // Main repo with a linked worktree entry
4587 fs.insert_tree(
4588 "/repo",
4589 json!({
4590 ".git": {
4591 "worktrees": {
4592 "feature": {
4593 "commondir": "../../",
4594 "HEAD": "ref: refs/heads/feature"
4595 }
4596 }
4597 },
4598 "src": { "main.rs": "" }
4599 }),
4600 )
4601 .await;
4602
4603 // Linked worktree checkout pointing back to /repo
4604 fs.insert_tree(
4605 "/worktree",
4606 json!({
4607 ".git": "gitdir: /repo/.git/worktrees/feature",
4608 "src": { "main.rs": "" }
4609 }),
4610 )
4611 .await;
4612
4613 // A plain non-git project
4614 fs.insert_tree(
4615 "/plain-project",
4616 json!({
4617 "src": { "main.rs": "" }
4618 }),
4619 )
4620 .await;
4621
4622 // Another normal git repo (used in mixed-path entry)
4623 fs.insert_tree(
4624 "/other-repo",
4625 json!({
4626 ".git": {},
4627 "src": { "lib.rs": "" }
4628 }),
4629 )
4630 .await;
4631
4632 let t0 = Utc::now() - chrono::Duration::hours(4);
4633 let t1 = Utc::now() - chrono::Duration::hours(3);
4634 let t2 = Utc::now() - chrono::Duration::hours(2);
4635 let t3 = Utc::now() - chrono::Duration::hours(1);
4636
4637 let workspaces = vec![
4638 // 1: Main checkout of /repo (opened earlier)
4639 (
4640 WorkspaceId(1),
4641 SerializedWorkspaceLocation::Local,
4642 PathList::new(&["/repo"]),
4643 t0,
4644 ),
4645 // 2: Linked worktree of /repo (opened more recently)
4646 // Should dedup with #1; more recent timestamp wins.
4647 (
4648 WorkspaceId(2),
4649 SerializedWorkspaceLocation::Local,
4650 PathList::new(&["/worktree"]),
4651 t1,
4652 ),
4653 // 3: Mixed-path workspace: one root is a linked worktree,
4654 // the other is a normal repo. The worktree path should be
4655 // resolved; the normal path kept as-is.
4656 (
4657 WorkspaceId(3),
4658 SerializedWorkspaceLocation::Local,
4659 PathList::new(&["/other-repo", "/worktree"]),
4660 t2,
4661 ),
4662 // 4: Non-git project — passed through unchanged.
4663 (
4664 WorkspaceId(4),
4665 SerializedWorkspaceLocation::Local,
4666 PathList::new(&["/plain-project"]),
4667 t3,
4668 ),
4669 ];
4670
4671 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4672
4673 // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
4674 assert_eq!(result.len(), 3);
4675
4676 // First entry: /repo — deduplicated from #1 and #2.
4677 // Keeps the position of #1 (first seen), but with #2's later timestamp.
4678 assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
4679 assert_eq!(result[0].3, t1);
4680
4681 // Second entry: mixed-path workspace with worktree resolved.
4682 // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
4683 assert_eq!(
4684 result[1].2.paths(),
4685 &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
4686 );
4687 assert_eq!(result[1].0, WorkspaceId(3));
4688
4689 // Third entry: non-git project, unchanged.
4690 assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
4691 assert_eq!(result[2].0, WorkspaceId(4));
4692 }
4693
    #[gpui::test]
    /// End-to-end round trip: serialize a multi-workspace window containing a
    /// main repo, its linked git worktree, and an unrelated project; then
    /// restore it and verify the project groups and the active workspace
    /// (the linked worktree) come back intact.
    async fn test_restore_window_with_linked_worktree_and_multiple_project_groups(
        cx: &mut gpui::TestAppContext,
    ) {
        crate::tests::init_test(cx);

        cx.update(|cx| {
            cx.set_staff(true);
        });

        let fs = fs::FakeFs::new(cx.executor());

        // Main git repo at /repo
        fs.insert_tree(
            "/repo",
            json!({
                ".git": {
                    "HEAD": "ref: refs/heads/main",
                    "worktrees": {
                        "feature": {
                            "commondir": "../../",
                            "HEAD": "ref: refs/heads/feature"
                        }
                    }
                },
                "src": { "main.rs": "" }
            }),
        )
        .await;

        // Linked worktree checkout pointing back to /repo
        fs.insert_tree(
            "/worktree-feature",
            json!({
                ".git": "gitdir: /repo/.git/worktrees/feature",
                "src": { "lib.rs": "" }
            }),
        )
        .await;

        // --- Phase 1: Set up the original multi-workspace window ---

        let project_1 = Project::test(fs.clone(), ["/repo".as_ref()], cx).await;
        let project_1_linked_worktree =
            Project::test(fs.clone(), ["/worktree-feature".as_ref()], cx).await;

        // Wait for git discovery to finish.
        cx.run_until_parked();

        // Create a second, unrelated project so we have two distinct project groups.
        fs.insert_tree(
            "/other-project",
            json!({
                ".git": { "HEAD": "ref: refs/heads/main" },
                "readme.md": ""
            }),
        )
        .await;
        let project_2 = Project::test(fs.clone(), ["/other-project".as_ref()], cx).await;
        cx.run_until_parked();

        // Create the MultiWorkspace with project_2, then add the main repo
        // and its linked worktree. The linked worktree is added last and
        // becomes the active workspace.
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_2.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| {
            mw.open_sidebar(cx);
        });

        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1.clone(), window, cx);
        });

        let workspace_worktree = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_1_linked_worktree.clone(), window, cx)
        });

        // Assign database IDs and set up session bindings so serialization
        // writes real rows.
        multi_workspace.update_in(cx, |mw, _, cx| {
            for workspace in mw.workspaces() {
                workspace.update(cx, |ws, _cx| {
                    ws.set_random_database_id();
                });
            }
        });

        // Flush serialization for each individual workspace (writes to SQLite)
        // and for the MultiWorkspace (writes to KVP).
        let tasks = multi_workspace.update_in(cx, |mw, window, cx| {
            let session_id = mw.workspace().read(cx).session_id();
            let window_id_u64 = window.window_handle().window_id().as_u64();

            let mut tasks: Vec<Task<()>> = Vec::new();
            for workspace in mw.workspaces() {
                tasks.push(workspace.update(cx, |ws, cx| ws.flush_serialization(window, cx)));
                // Bind each workspace row to this session + window so
                // last_session_workspace_locations will find it later.
                if let Some(db_id) = workspace.read(cx).database_id() {
                    let db = WorkspaceDb::global(cx);
                    let session_id = session_id.clone();
                    tasks.push(cx.background_spawn(async move {
                        db.set_session_binding(db_id, session_id, Some(window_id_u64))
                            .await
                            .log_err();
                    }));
                }
            }
            mw.serialize(cx);
            tasks
        });
        cx.run_until_parked();
        for task in tasks {
            task.await;
        }
        cx.run_until_parked();

        let active_db_id = workspace_worktree.read_with(cx, |ws, _| ws.database_id());
        assert!(
            active_db_id.is_some(),
            "Active workspace should have a database ID"
        );

        // --- Phase 2: Read back and verify the serialized state ---

        let session_id = multi_workspace
            .read_with(cx, |mw, cx| mw.workspace().read(cx).session_id())
            .unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let session_workspaces = db
            .last_session_workspace_locations(&session_id, None, fs.as_ref())
            .await
            .expect("should load session workspaces");
        assert!(
            !session_workspaces.is_empty(),
            "Should have at least one session workspace"
        );

        let multi_workspaces =
            cx.update(|_, cx| read_serialized_multi_workspaces(session_workspaces, cx));
        assert_eq!(
            multi_workspaces.len(),
            1,
            "All workspaces share one window, so there should be exactly one multi-workspace"
        );

        let serialized = &multi_workspaces[0];
        assert_eq!(
            serialized.active_workspace.workspace_id,
            active_db_id.unwrap(),
        );
        assert_eq!(serialized.state.project_group_keys.len(), 2,);

        // Verify the serialized project group keys round-trip back to the
        // originals. Note /worktree-feature resolves to the /repo group,
        // so only two groups exist despite three workspaces.
        let restored_keys: Vec<ProjectGroupKey> = serialized
            .state
            .project_group_keys
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        let expected_keys = vec![
            ProjectGroupKey::new(None, PathList::new(&["/repo"])),
            ProjectGroupKey::new(None, PathList::new(&["/other-project"])),
        ];
        assert_eq!(
            restored_keys, expected_keys,
            "Deserialized project group keys should match the originals"
        );

        // --- Phase 3: Restore the window and verify the result ---

        let app_state =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).app_state().clone());

        let serialized_mw = multi_workspaces.into_iter().next().unwrap();
        let restored_handle: gpui::WindowHandle<MultiWorkspace> = cx
            .update(|_, cx| {
                cx.spawn(async move |mut cx| {
                    crate::restore_multiworkspace(serialized_mw, app_state, &mut cx).await
                })
            })
            .await
            .expect("restore_multiworkspace should succeed");

        cx.run_until_parked();

        // The restored window should have the same project group keys.
        let restored_keys: Vec<ProjectGroupKey> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, _cx| {
                mw.project_group_keys().cloned().collect()
            })
            .unwrap();
        assert_eq!(
            restored_keys, expected_keys,
            "Restored window should have the same project group keys as the original"
        );

        // The active workspace in the restored window should have the linked
        // worktree paths.
        let active_paths: Vec<PathBuf> = restored_handle
            .read_with(cx, |mw: &MultiWorkspace, cx| {
                mw.workspace()
                    .read(cx)
                    .root_paths(cx)
                    .into_iter()
                    .map(|p: Arc<Path>| p.to_path_buf())
                    .collect()
            })
            .unwrap();
        assert_eq!(
            active_paths,
            vec![PathBuf::from("/worktree-feature")],
            "The restored active workspace should be the linked worktree project"
        );
    }
4911
    #[gpui::test]
    /// Removing a project group should activate a neighbor: first the group
    /// below it in the sidebar, then (when nothing is below) the group above,
    /// and finally an empty workspace when no groups remain.
    async fn test_remove_project_group_falls_back_to_neighbor(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);
        cx.update(|cx| {
            cx.set_staff(true);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let dir_a = unique_test_dir(&fs, "group-a").await;
        let dir_b = unique_test_dir(&fs, "group-b").await;
        let dir_c = unique_test_dir(&fs, "group-c").await;

        let project_a = Project::test(fs.clone(), [dir_a.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir_b.as_path()], cx).await;
        let project_c = Project::test(fs.clone(), [dir_c.as_path()], cx).await;

        // Create a multi-workspace with project A, then add B and C.
        // project_group_keys stores newest first: [C, B, A].
        // Sidebar displays in the same order: C (top), B (middle), A (bottom).
        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        let _workspace_c = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_c.clone(), window, cx)
        });
        cx.run_until_parked();

        let key_a = project_a.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_b = project_b.read_with(cx, |p, cx| p.project_group_key(cx));
        let key_c = project_c.read_with(cx, |p, cx| p.project_group_key(cx));

        // Activate workspace B so removing its group exercises the fallback.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_b.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group B (the middle one). ---
        // In the sidebar [C, B, A], "below" B is A.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_b, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_a.clone()],
            "After removing the middle group, should fall back to the group below (A)"
        );

        // After removing B, keys = [A, C], sidebar = [C, A].
        // Activate workspace A (the bottom) so removing it tests the
        // "fall back upward" path.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // --- Remove group A (the bottom one in sidebar). ---
        // Nothing below A, so should fall back upward to C.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_a, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert_eq!(
            active_paths
                .iter()
                .map(|p| p.to_path_buf())
                .collect::<Vec<_>>(),
            vec![dir_c.clone()],
            "After removing the bottom group, should fall back to the group above (C)"
        );

        // --- Remove group C (the only one remaining). ---
        // Should create an empty workspace.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove_project_group(&key_c, window, cx)
                .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        let active_paths =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).root_paths(cx));
        assert!(
            active_paths.is_empty(),
            "After removing the only remaining group, should have an empty workspace"
        );
    }
5018
    /// Regression test for a crash where `find_or_create_local_workspace`
    /// returned a workspace that was about to be removed, hitting an assert
    /// in `MultiWorkspace::remove`.
    ///
    /// The scenario: two workspaces share the same root paths (e.g. due to
    /// a provisional key mismatch). When the first is removed and the
    /// fallback searches for the same paths, `workspace_for_paths` must
    /// skip the doomed workspace so the assert in `remove` is satisfied.
    #[gpui::test]
    async fn test_remove_fallback_skips_excluded_workspaces(cx: &mut gpui::TestAppContext) {
        crate::tests::init_test(cx);
        cx.update(|cx| {
            cx.set_staff(true);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let dir = unique_test_dir(&fs, "shared").await;

        // Two projects that open the same directory — this creates two
        // workspaces whose root_paths are identical.
        let project_a = Project::test(fs.clone(), [dir.as_path()], cx).await;
        let project_b = Project::test(fs.clone(), [dir.as_path()], cx).await;

        let (multi_workspace, cx) = cx
            .add_window_view(|window, cx| MultiWorkspace::test_new(project_a.clone(), window, cx));

        multi_workspace.update(cx, |mw, cx| mw.open_sidebar(cx));

        let workspace_b = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.test_add_workspace(project_b.clone(), window, cx)
        });
        cx.run_until_parked();

        // workspace_a is first in the workspaces vec.
        let workspace_a =
            multi_workspace.read_with(cx, |mw, _| mw.workspaces().next().cloned().unwrap());
        assert_ne!(workspace_a, workspace_b);

        // Activate workspace_a so removing it triggers the fallback path.
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.activate(workspace_a.clone(), window, cx);
        });
        cx.run_until_parked();

        // Remove workspace_a. The fallback searches for the same paths.
        // Without the `excluding` parameter, `workspace_for_paths` would
        // return workspace_a (first match) and the assert in `remove`
        // would fire. With the fix, workspace_a is skipped and
        // workspace_b is found instead.
        let path_list = PathList::new(std::slice::from_ref(&dir));
        let excluded = vec![workspace_a.clone()];
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.remove(
                vec![workspace_a.clone()],
                move |this, window, cx| {
                    this.find_or_create_local_workspace(path_list, &excluded, window, cx)
                },
                window,
                cx,
            )
            .detach_and_log_err(cx);
        });
        cx.run_until_parked();

        // workspace_b should now be active — workspace_a was removed.
        multi_workspace.read_with(cx, |mw, _cx| {
            assert_eq!(
                mw.workspace(),
                &workspace_b,
                "fallback should have found workspace_b, not the excluded workspace_a"
            );
        });
    }
5093}