1pub mod model;
2
3use std::{
4 borrow::Cow,
5 collections::BTreeMap,
6 path::{Path, PathBuf},
7 str::FromStr,
8 sync::Arc,
9};
10
11use chrono::{DateTime, NaiveDateTime, Utc};
12use fs::Fs;
13
14use anyhow::{Context as _, Result, bail};
15use collections::{HashMap, HashSet, IndexSet};
16use db::{
17 kvp::KeyValueStore,
18 query,
19 sqlez::{connection::Connection, domain::Domain},
20 sqlez_macros::sql,
21};
22use gpui::{Axis, Bounds, Task, WindowBounds, WindowId, point, size};
23use project::{
24 debugger::breakpoint_store::{BreakpointState, SourceBreakpoint},
25 trusted_worktrees::{DbTrustedPaths, RemoteHostLocation},
26};
27
28use language::{LanguageName, Toolchain, ToolchainScope};
29use remote::{
30 DockerConnectionOptions, RemoteConnectionOptions, SshConnectionOptions, WslConnectionOptions,
31};
32use serde::{Deserialize, Serialize};
33use sqlez::{
34 bindable::{Bind, Column, StaticColumnCount},
35 statement::Statement,
36 thread_safe_connection::ThreadSafeConnection,
37};
38
39use ui::{App, SharedString, px};
40use util::{ResultExt, maybe, rel_path::RelPath};
41use uuid::Uuid;
42
43use crate::{
44 WorkspaceId,
45 path_list::{PathList, SerializedPathList},
46 persistence::model::RemoteConnectionKind,
47};
48
49use model::{
50 GroupId, ItemId, PaneId, RemoteConnectionId, SerializedItem, SerializedPane,
51 SerializedPaneGroup, SerializedWorkspace,
52};
53
54use self::model::{DockStructure, SerializedWorkspaceLocation, SessionWorkspace};
55
// https://www.sqlite.org/limits.html
// > <..> the maximum value of a host parameter number is SQLITE_MAX_VARIABLE_NUMBER,
// > which defaults to <..> 32766 for SQLite versions after 3.32.0.
// Kept slightly below the SQLite default so batched statements built by this
// module never exceed the host-parameter limit.
const MAX_QUERY_PLACEHOLDERS: usize = 32000;
60
61fn parse_timestamp(text: &str) -> DateTime<Utc> {
62 NaiveDateTime::parse_from_str(text, "%Y-%m-%d %H:%M:%S")
63 .map(|naive| naive.and_utc())
64 .unwrap_or_else(|_| Utc::now())
65}
66
67#[derive(Copy, Clone, Debug, PartialEq)]
68pub(crate) struct SerializedAxis(pub(crate) gpui::Axis);
69impl sqlez::bindable::StaticColumnCount for SerializedAxis {}
70impl sqlez::bindable::Bind for SerializedAxis {
71 fn bind(
72 &self,
73 statement: &sqlez::statement::Statement,
74 start_index: i32,
75 ) -> anyhow::Result<i32> {
76 match self.0 {
77 gpui::Axis::Horizontal => "Horizontal",
78 gpui::Axis::Vertical => "Vertical",
79 }
80 .bind(statement, start_index)
81 }
82}
83
84impl sqlez::bindable::Column for SerializedAxis {
85 fn column(
86 statement: &mut sqlez::statement::Statement,
87 start_index: i32,
88 ) -> anyhow::Result<(Self, i32)> {
89 String::column(statement, start_index).and_then(|(axis_text, next_index)| {
90 Ok((
91 match axis_text.as_str() {
92 "Horizontal" => Self(Axis::Horizontal),
93 "Vertical" => Self(Axis::Vertical),
94 _ => anyhow::bail!("Stored serialized item kind is incorrect"),
95 },
96 next_index,
97 ))
98 })
99 }
100}
101
/// Newtype over [`WindowBounds`] so it can be stored in / read from SQLite.
#[derive(Copy, Clone, Debug, PartialEq, Default)]
pub(crate) struct SerializedWindowBounds(pub(crate) WindowBounds);

impl StaticColumnCount for SerializedWindowBounds {
    fn column_count() -> usize {
        // One discriminant string ("Windowed"/"Maximized"/"FullScreen")
        // followed by x, y, width, height.
        5
    }
}
110
111impl Bind for SerializedWindowBounds {
112 fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
113 match self.0 {
114 WindowBounds::Windowed(bounds) => {
115 let next_index = statement.bind(&"Windowed", start_index)?;
116 statement.bind(
117 &(
118 SerializedPixels(bounds.origin.x),
119 SerializedPixels(bounds.origin.y),
120 SerializedPixels(bounds.size.width),
121 SerializedPixels(bounds.size.height),
122 ),
123 next_index,
124 )
125 }
126 WindowBounds::Maximized(bounds) => {
127 let next_index = statement.bind(&"Maximized", start_index)?;
128 statement.bind(
129 &(
130 SerializedPixels(bounds.origin.x),
131 SerializedPixels(bounds.origin.y),
132 SerializedPixels(bounds.size.width),
133 SerializedPixels(bounds.size.height),
134 ),
135 next_index,
136 )
137 }
138 WindowBounds::Fullscreen(bounds) => {
139 let next_index = statement.bind(&"FullScreen", start_index)?;
140 statement.bind(
141 &(
142 SerializedPixels(bounds.origin.x),
143 SerializedPixels(bounds.origin.y),
144 SerializedPixels(bounds.size.width),
145 SerializedPixels(bounds.size.height),
146 ),
147 next_index,
148 )
149 }
150 }
151 }
152}
153
154impl Column for SerializedWindowBounds {
155 fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
156 let (window_state, next_index) = String::column(statement, start_index)?;
157 let ((x, y, width, height), _): ((i32, i32, i32, i32), _) =
158 Column::column(statement, next_index)?;
159 let bounds = Bounds {
160 origin: point(px(x as f32), px(y as f32)),
161 size: size(px(width as f32), px(height as f32)),
162 };
163
164 let status = match window_state.as_str() {
165 "Windowed" | "Fixed" => SerializedWindowBounds(WindowBounds::Windowed(bounds)),
166 "Maximized" => SerializedWindowBounds(WindowBounds::Maximized(bounds)),
167 "FullScreen" => SerializedWindowBounds(WindowBounds::Fullscreen(bounds)),
168 _ => bail!("Window State did not have a valid string"),
169 };
170
171 Ok((status, next_index + 4))
172 }
173}
174
/// KVP-store key under which the fallback bounds for newly created windows are persisted.
const DEFAULT_WINDOW_BOUNDS_KEY: &str = "default_window_bounds";
176
177pub fn read_default_window_bounds(kvp: &KeyValueStore) -> Option<(Uuid, WindowBounds)> {
178 let json_str = kvp
179 .read_kvp(DEFAULT_WINDOW_BOUNDS_KEY)
180 .log_err()
181 .flatten()?;
182
183 let (display_uuid, persisted) =
184 serde_json::from_str::<(Uuid, WindowBoundsJson)>(&json_str).ok()?;
185 Some((display_uuid, persisted.into()))
186}
187
188pub async fn write_default_window_bounds(
189 kvp: &KeyValueStore,
190 bounds: WindowBounds,
191 display_uuid: Uuid,
192) -> anyhow::Result<()> {
193 let persisted = WindowBoundsJson::from(bounds);
194 let json_str = serde_json::to_string(&(display_uuid, persisted))?;
195 kvp.write_kvp(DEFAULT_WINDOW_BOUNDS_KEY.to_string(), json_str)
196 .await?;
197 Ok(())
198}
199
/// JSON-persisted mirror of [`WindowBounds`]. All variants carry the window's
/// last rectangle in whole pixels so it can be restored even after leaving the
/// maximized/fullscreen state.
///
/// NOTE: this shape is written to the KVP store — changing variant or field
/// names breaks deserialization of previously persisted values.
#[derive(Serialize, Deserialize)]
pub enum WindowBoundsJson {
    Windowed {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Maximized {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
    Fullscreen {
        x: i32,
        y: i32,
        width: i32,
        height: i32,
    },
}
221
222impl From<WindowBounds> for WindowBoundsJson {
223 fn from(b: WindowBounds) -> Self {
224 match b {
225 WindowBounds::Windowed(bounds) => {
226 let origin = bounds.origin;
227 let size = bounds.size;
228 WindowBoundsJson::Windowed {
229 x: f32::from(origin.x).round() as i32,
230 y: f32::from(origin.y).round() as i32,
231 width: f32::from(size.width).round() as i32,
232 height: f32::from(size.height).round() as i32,
233 }
234 }
235 WindowBounds::Maximized(bounds) => {
236 let origin = bounds.origin;
237 let size = bounds.size;
238 WindowBoundsJson::Maximized {
239 x: f32::from(origin.x).round() as i32,
240 y: f32::from(origin.y).round() as i32,
241 width: f32::from(size.width).round() as i32,
242 height: f32::from(size.height).round() as i32,
243 }
244 }
245 WindowBounds::Fullscreen(bounds) => {
246 let origin = bounds.origin;
247 let size = bounds.size;
248 WindowBoundsJson::Fullscreen {
249 x: f32::from(origin.x).round() as i32,
250 y: f32::from(origin.y).round() as i32,
251 width: f32::from(size.width).round() as i32,
252 height: f32::from(size.height).round() as i32,
253 }
254 }
255 }
256 }
257}
258
259impl From<WindowBoundsJson> for WindowBounds {
260 fn from(n: WindowBoundsJson) -> Self {
261 match n {
262 WindowBoundsJson::Windowed {
263 x,
264 y,
265 width,
266 height,
267 } => WindowBounds::Windowed(Bounds {
268 origin: point(px(x as f32), px(y as f32)),
269 size: size(px(width as f32), px(height as f32)),
270 }),
271 WindowBoundsJson::Maximized {
272 x,
273 y,
274 width,
275 height,
276 } => WindowBounds::Maximized(Bounds {
277 origin: point(px(x as f32), px(y as f32)),
278 size: size(px(width as f32), px(height as f32)),
279 }),
280 WindowBoundsJson::Fullscreen {
281 x,
282 y,
283 width,
284 height,
285 } => WindowBounds::Fullscreen(Bounds {
286 origin: point(px(x as f32), px(y as f32)),
287 size: size(px(width as f32), px(height as f32)),
288 }),
289 }
290 }
291}
292
293fn read_multi_workspace_state(window_id: WindowId, cx: &App) -> model::MultiWorkspaceState {
294 let kvp = KeyValueStore::global(cx);
295 kvp.scoped("multi_workspace_state")
296 .read(&window_id.as_u64().to_string())
297 .log_err()
298 .flatten()
299 .and_then(|json| serde_json::from_str(&json).ok())
300 .unwrap_or_default()
301}
302
303pub async fn write_multi_workspace_state(
304 kvp: &KeyValueStore,
305 window_id: WindowId,
306 state: model::MultiWorkspaceState,
307) {
308 if let Ok(json_str) = serde_json::to_string(&state) {
309 kvp.scoped("multi_workspace_state")
310 .write(window_id.as_u64().to_string(), json_str)
311 .await
312 .log_err();
313 }
314}
315
316pub fn read_serialized_multi_workspaces(
317 session_workspaces: Vec<model::SessionWorkspace>,
318 cx: &App,
319) -> Vec<model::SerializedMultiWorkspace> {
320 let mut window_groups: Vec<Vec<model::SessionWorkspace>> = Vec::new();
321 let mut window_id_to_group: HashMap<WindowId, usize> = HashMap::default();
322
323 for session_workspace in session_workspaces {
324 match session_workspace.window_id {
325 Some(window_id) => {
326 let group_index = *window_id_to_group.entry(window_id).or_insert_with(|| {
327 window_groups.push(Vec::new());
328 window_groups.len() - 1
329 });
330 window_groups[group_index].push(session_workspace);
331 }
332 None => {
333 window_groups.push(vec![session_workspace]);
334 }
335 }
336 }
337
338 window_groups
339 .into_iter()
340 .map(|group| {
341 let window_id = group.first().and_then(|sw| sw.window_id);
342 let state = window_id
343 .map(|wid| read_multi_workspace_state(wid, cx))
344 .unwrap_or_default();
345 model::SerializedMultiWorkspace {
346 workspaces: group,
347 state,
348 }
349 })
350 .collect()
351}
352
/// KVP-store key under which the fallback dock layout for new workspaces is persisted.
const DEFAULT_DOCK_STATE_KEY: &str = "default_dock_state";
354
355pub fn read_default_dock_state(kvp: &KeyValueStore) -> Option<DockStructure> {
356 let json_str = kvp.read_kvp(DEFAULT_DOCK_STATE_KEY).log_err().flatten()?;
357
358 serde_json::from_str::<DockStructure>(&json_str).ok()
359}
360
361pub async fn write_default_dock_state(
362 kvp: &KeyValueStore,
363 docks: DockStructure,
364) -> anyhow::Result<()> {
365 let json_str = serde_json::to_string(&docks)?;
366 kvp.write_kvp(DEFAULT_DOCK_STATE_KEY.to_string(), json_str)
367 .await?;
368 Ok(())
369}
370
/// A breakpoint as stored in / read from the `breakpoints` table.
#[derive(Debug)]
pub struct Breakpoint {
    /// Location within the file (stored in `breakpoint_location`; whether this
    /// is 0- or 1-based is determined by callers — not visible here).
    pub position: u32,
    /// Optional log message emitted when the breakpoint is hit.
    pub message: Option<Arc<str>>,
    /// Optional condition expression gating the breakpoint.
    pub condition: Option<Arc<str>>,
    /// Optional hit-count condition.
    pub hit_condition: Option<Arc<str>>,
    /// Enabled/disabled state.
    pub state: BreakpointState,
}
379
/// Wrapper for DB type of a breakpoint
struct BreakpointStateWrapper<'a>(Cow<'a, BreakpointState>);

impl From<BreakpointState> for BreakpointStateWrapper<'static> {
    fn from(kind: BreakpointState) -> Self {
        BreakpointStateWrapper(Cow::Owned(kind))
    }
}

impl StaticColumnCount for BreakpointStateWrapper<'_> {
    fn column_count() -> usize {
        // A single integer column holding the state discriminant.
        1
    }
}

impl Bind for BreakpointStateWrapper<'_> {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        // Persist via the state's integer discriminant (`to_int`).
        statement.bind(&self.0.to_int(), start_index)
    }
}
400
impl Column for BreakpointStateWrapper<'_> {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let state = statement.column_int(start_index)?;

        // These discriminants must stay in sync with `BreakpointState::to_int`
        // (used by the `Bind` impl above).
        match state {
            0 => Ok((BreakpointState::Enabled.into(), start_index + 1)),
            1 => Ok((BreakpointState::Disabled.into(), start_index + 1)),
            _ => anyhow::bail!("Invalid BreakpointState discriminant {state}"),
        }
    }
}
412
impl sqlez::bindable::StaticColumnCount for Breakpoint {
    fn column_count() -> usize {
        // Position, log message, condition message, and hit condition message
        4 + BreakpointStateWrapper::column_count()
    }
}

impl sqlez::bindable::Bind for Breakpoint {
    /// Binds the breakpoint's columns in the fixed order expected by
    /// `Column for Breakpoint`: position, message, condition, hit_condition,
    /// then the state discriminant.
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        let next_index = statement.bind(&self.position, start_index)?;
        let next_index = statement.bind(&self.message, next_index)?;
        let next_index = statement.bind(&self.condition, next_index)?;
        let next_index = statement.bind(&self.hit_condition, next_index)?;
        // Borrow the state rather than cloning it into the wrapper.
        statement.bind(
            &BreakpointStateWrapper(Cow::Borrowed(&self.state)),
            next_index,
        )
    }
}
436
impl Column for Breakpoint {
    /// Reads the columns written by the `Bind` impl above, in the same order.
    /// Note the position is read directly via `column_int` (no index advance),
    /// so the message read starts explicitly at `start_index + 1`.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let position = statement
            .column_int(start_index)
            .with_context(|| format!("Failed to read BreakPoint at index {start_index}"))?
            as u32;
        let (message, next_index) = Option::<String>::column(statement, start_index + 1)?;
        let (condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (hit_condition, next_index) = Option::<String>::column(statement, next_index)?;
        let (state, next_index) = BreakpointStateWrapper::column(statement, next_index)?;

        Ok((
            Breakpoint {
                position,
                message: message.map(Arc::from),
                condition: condition.map(Arc::from),
                hit_condition: hit_condition.map(Arc::from),
                state: state.0.into_owned(),
            },
            next_index,
        ))
    }
}
460
/// Pixel value wrapper that is persisted as a single SQL INTEGER column.
#[derive(Clone, Debug, PartialEq)]
struct SerializedPixels(gpui::Pixels);
impl sqlez::bindable::StaticColumnCount for SerializedPixels {}

impl sqlez::bindable::Bind for SerializedPixels {
    fn bind(
        &self,
        statement: &sqlez::statement::Statement,
        start_index: i32,
    ) -> anyhow::Result<i32> {
        // Converts to whole pixels via gpui's `u32: From<Pixels>` impl, then
        // narrows to i32 for SQLite (rounding semantics are defined by gpui).
        let this: i32 = u32::from(self.0) as _;
        this.bind(statement, start_index)
    }
}
475
/// Handle to the workspace persistence database (pane layout, docks,
/// breakpoints, toolchains, remote connections).
pub struct WorkspaceDb(ThreadSafeConnection);
477
// Schema owner for the workspace database. `MIGRATIONS` is append-only:
// shipped migration text must never change (comments *inside* `sql!` blocks
// are part of the recorded migration text, so new commentary belongs out here,
// between entries). The single historical exception is handled by
// `should_allow_migration_change` below.
impl Domain for WorkspaceDb {
    const NAME: &str = stringify!(WorkspaceDb);

    const MIGRATIONS: &[&str] = &[
        // Initial schema: workspaces plus the pane tree (pane_groups, panes,
        // center_panes) and the items within each pane.
        sql!(
            CREATE TABLE workspaces(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                FOREIGN KEY(dock_pane) REFERENCES panes(pane_id)
            ) STRICT;

            CREATE TABLE pane_groups(
                group_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                parent_group_id INTEGER, // NULL indicates that this is a root node
                position INTEGER, // NULL indicates that this is a root node
                axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal'
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE panes(
                pane_id INTEGER PRIMARY KEY,
                workspace_id INTEGER NOT NULL,
                active INTEGER NOT NULL, // Boolean
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            ) STRICT;

            CREATE TABLE center_panes(
                pane_id INTEGER PRIMARY KEY,
                parent_group_id INTEGER, // NULL means that this is a root pane
                position INTEGER, // NULL means that this is a root pane
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
            ) STRICT;

            CREATE TABLE items(
                item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique
                workspace_id INTEGER NOT NULL,
                pane_id INTEGER NOT NULL,
                kind TEXT NOT NULL,
                position INTEGER NOT NULL,
                active INTEGER NOT NULL,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE,
                FOREIGN KEY(pane_id) REFERENCES panes(pane_id)
                ON DELETE CASCADE,
                PRIMARY KEY(item_id, workspace_id)
            ) STRICT;
        ),
        // Persist window placement per workspace.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_state TEXT;
            ALTER TABLE workspaces ADD COLUMN window_x REAL;
            ALTER TABLE workspaces ADD COLUMN window_y REAL;
            ALTER TABLE workspaces ADD COLUMN window_width REAL;
            ALTER TABLE workspaces ADD COLUMN window_height REAL;
            ALTER TABLE workspaces ADD COLUMN display BLOB;
        ),
        // Drop foreign key constraint from workspaces.dock_pane to panes table.
        sql!(
            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                workspace_location BLOB UNIQUE,
                dock_visible INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                dock_anchor TEXT, // Deprecated. Preserving so users can downgrade Zed.
                dock_pane INTEGER, // Deprecated. Preserving so users can downgrade Zed.
                left_sidebar_open INTEGER, // Boolean
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB
            ) STRICT;
            INSERT INTO workspaces_2 SELECT * FROM workspaces;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;
        ),
        // Add panels related information
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN left_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN right_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_active_panel TEXT;
            ALTER TABLE workspaces ADD COLUMN bottom_dock_visible INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_active_panel TEXT;
        ),
        // Add panel zoom persistence
        sql!(
            ALTER TABLE workspaces ADD COLUMN left_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN right_dock_zoom INTEGER; //bool
            ALTER TABLE workspaces ADD COLUMN bottom_dock_zoom INTEGER; //bool
        ),
        // Add pane group flex data
        sql!(
            ALTER TABLE pane_groups ADD COLUMN flexes TEXT;
        ),
        // Add fullscreen field to workspace
        // Deprecated, `WindowBounds` holds the fullscreen state now.
        // Preserving so users can downgrade Zed.
        sql!(
            ALTER TABLE workspaces ADD COLUMN fullscreen INTEGER; //bool
        ),
        // Add preview field to items
        sql!(
            ALTER TABLE items ADD COLUMN preview INTEGER; //bool
        ),
        // Add centered_layout field to workspace
        sql!(
            ALTER TABLE workspaces ADD COLUMN centered_layout INTEGER; //bool
        ),
        // Introduce (since-removed) remote projects; rename workspace_location.
        sql!(
            CREATE TABLE remote_projects (
                remote_project_id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces ADD COLUMN remote_project_id INTEGER;
            ALTER TABLE workspaces RENAME COLUMN workspace_location TO local_paths;
        ),
        // Replace remote_projects with dev_server_projects.
        sql!(
            DROP TABLE remote_projects;
            CREATE TABLE dev_server_projects (
                id INTEGER NOT NULL UNIQUE,
                path TEXT,
                dev_server_name TEXT
            );
            ALTER TABLE workspaces DROP COLUMN remote_project_id;
            ALTER TABLE workspaces ADD COLUMN dev_server_project_id INTEGER;
        ),
        // Remember the user-chosen ordering of local paths.
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_order BLOB;
        ),
        // Track which app session a workspace belonged to.
        sql!(
            ALTER TABLE workspaces ADD COLUMN session_id TEXT DEFAULT NULL;
        ),
        // Track which window a workspace was open in.
        sql!(
            ALTER TABLE workspaces ADD COLUMN window_id INTEGER DEFAULT NULL;
        ),
        // Persist the number of pinned tabs per pane.
        sql!(
            ALTER TABLE panes ADD COLUMN pinned_count INTEGER DEFAULT 0;
        ),
        // Introduce (since-migrated) ssh_projects.
        sql!(
            CREATE TABLE ssh_projects (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                path TEXT NOT NULL,
                user TEXT
            );
            ALTER TABLE workspaces ADD COLUMN ssh_project_id INTEGER REFERENCES ssh_projects(id) ON DELETE CASCADE;
        ),
        sql!(
            ALTER TABLE ssh_projects RENAME COLUMN path TO paths;
        ),
        // Persist the active toolchain per workspace/worktree/language.
        sql!(
            CREATE TABLE toolchains (
                workspace_id INTEGER,
                worktree_id INTEGER,
                language_name TEXT NOT NULL,
                name TEXT NOT NULL,
                path TEXT NOT NULL,
                PRIMARY KEY (workspace_id, worktree_id, language_name)
            );
        ),
        sql!(
            ALTER TABLE toolchains ADD COLUMN raw_json TEXT DEFAULT "{}";
        ),
        // Persist breakpoints per workspace and file path.
        sql!(
            CREATE TABLE breakpoints (
                workspace_id INTEGER NOT NULL,
                path TEXT NOT NULL,
                breakpoint_location INTEGER NOT NULL,
                kind INTEGER NOT NULL,
                log_message TEXT,
                FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                ON DELETE CASCADE
                ON UPDATE CASCADE
            );
        ),
        // Store local paths as text arrays (since-superseded representation).
        sql!(
            ALTER TABLE workspaces ADD COLUMN local_paths_array TEXT;
            CREATE UNIQUE INDEX local_paths_array_uq ON workspaces(local_paths_array);
            ALTER TABLE workspaces ADD COLUMN local_paths_order_array TEXT;
        ),
        // Add an enabled/disabled state to breakpoints.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN state INTEGER DEFAULT(0) NOT NULL
        ),
        sql!(
            ALTER TABLE breakpoints DROP COLUMN kind
        ),
        sql!(ALTER TABLE toolchains ADD COLUMN relative_worktree_path TEXT DEFAULT "" NOT NULL),
        // Add condition and hit-condition expressions to breakpoints.
        sql!(
            ALTER TABLE breakpoints ADD COLUMN condition TEXT;
            ALTER TABLE breakpoints ADD COLUMN hit_condition TEXT;
        ),
        // Rebuild toolchains with relative_worktree_path in the primary key.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_id INTEGER,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_id, language_name, relative_worktree_path)) STRICT;
            INSERT INTO toolchains2
            SELECT * FROM toolchains;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Normalize ssh_projects into ssh_connections and fold the project
        // paths directly into workspaces, deduplicating by location.
        sql!(
            CREATE TABLE ssh_connections (
                id INTEGER PRIMARY KEY,
                host TEXT NOT NULL,
                port INTEGER,
                user TEXT
            );

            INSERT INTO ssh_connections (host, port, user)
            SELECT DISTINCT host, port, user
            FROM ssh_projects;

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                ssh_connection_id INTEGER REFERENCES ssh_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT
            INTO workspaces_2
            SELECT
                workspaces.workspace_id,
                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ssh_projects.paths
                    ELSE
                        CASE
                            WHEN workspaces.local_paths_array IS NULL OR workspaces.local_paths_array = "" THEN
                                NULL
                            ELSE
                                replace(workspaces.local_paths_array, ',', CHAR(10))
                        END
                END as paths,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN ""
                    ELSE workspaces.local_paths_order_array
                END as paths_order,

                CASE
                    WHEN ssh_projects.id IS NOT NULL THEN (
                        SELECT ssh_connections.id
                        FROM ssh_connections
                        WHERE
                            ssh_connections.host IS ssh_projects.host AND
                            ssh_connections.port IS ssh_projects.port AND
                            ssh_connections.user IS ssh_projects.user
                    )
                    ELSE NULL
                END as ssh_connection_id,

                workspaces.timestamp,
                workspaces.window_state,
                workspaces.window_x,
                workspaces.window_y,
                workspaces.window_width,
                workspaces.window_height,
                workspaces.display,
                workspaces.left_dock_visible,
                workspaces.left_dock_active_panel,
                workspaces.right_dock_visible,
                workspaces.right_dock_active_panel,
                workspaces.bottom_dock_visible,
                workspaces.bottom_dock_active_panel,
                workspaces.left_dock_zoom,
                workspaces.right_dock_zoom,
                workspaces.bottom_dock_zoom,
                workspaces.fullscreen,
                workspaces.centered_layout,
                workspaces.session_id,
                workspaces.window_id
            FROM
                workspaces LEFT JOIN
                ssh_projects ON
                workspaces.ssh_project_id = ssh_projects.id;

            DELETE FROM workspaces_2
            WHERE workspace_id NOT IN (
                SELECT MAX(workspace_id)
                FROM workspaces_2
                GROUP BY ssh_connection_id, paths
            );

            DROP TABLE ssh_projects;
            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(ssh_connection_id, paths);
        ),
        // Fix any data from when workspaces.paths were briefly encoded as JSON arrays
        sql!(
            UPDATE workspaces
            SET paths = CASE
                WHEN substr(paths, 1, 2) = '[' || '"' AND substr(paths, -2, 2) = '"' || ']' THEN
                    replace(
                        substr(paths, 3, length(paths) - 4),
                        '"' || ',' || '"',
                        CHAR(10)
                    )
                ELSE
                    replace(paths, ',', CHAR(10))
            END
            WHERE paths IS NOT NULL
        ),
        // Generalize ssh_connections into kind-tagged remote_connections
        // (ssh / wsl / docker, etc.).
        sql!(
            CREATE TABLE remote_connections(
                id INTEGER PRIMARY KEY,
                kind TEXT NOT NULL,
                host TEXT,
                port INTEGER,
                user TEXT,
                distro TEXT
            );

            CREATE TABLE workspaces_2(
                workspace_id INTEGER PRIMARY KEY,
                paths TEXT,
                paths_order TEXT,
                remote_connection_id INTEGER REFERENCES remote_connections(id),
                timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL,
                window_state TEXT,
                window_x REAL,
                window_y REAL,
                window_width REAL,
                window_height REAL,
                display BLOB,
                left_dock_visible INTEGER,
                left_dock_active_panel TEXT,
                right_dock_visible INTEGER,
                right_dock_active_panel TEXT,
                bottom_dock_visible INTEGER,
                bottom_dock_active_panel TEXT,
                left_dock_zoom INTEGER,
                right_dock_zoom INTEGER,
                bottom_dock_zoom INTEGER,
                fullscreen INTEGER,
                centered_layout INTEGER,
                session_id TEXT,
                window_id INTEGER
            ) STRICT;

            INSERT INTO remote_connections
            SELECT
                id,
                "ssh" as kind,
                host,
                port,
                user,
                NULL as distro
            FROM ssh_connections;

            INSERT
            INTO workspaces_2
            SELECT
                workspace_id,
                paths,
                paths_order,
                ssh_connection_id as remote_connection_id,
                timestamp,
                window_state,
                window_x,
                window_y,
                window_width,
                window_height,
                display,
                left_dock_visible,
                left_dock_active_panel,
                right_dock_visible,
                right_dock_active_panel,
                bottom_dock_visible,
                bottom_dock_active_panel,
                left_dock_zoom,
                right_dock_zoom,
                bottom_dock_zoom,
                fullscreen,
                centered_layout,
                session_id,
                window_id
            FROM
                workspaces;

            DROP TABLE workspaces;
            ALTER TABLE workspaces_2 RENAME TO workspaces;

            CREATE UNIQUE INDEX ix_workspaces_location ON workspaces(remote_connection_id, paths);
        ),
        // User-selected toolchains, keyed by the full toolchain identity.
        sql!(CREATE TABLE user_toolchains (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_id INTEGER NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_id, relative_worktree_path, language_name, name, path, raw_json)
        ) STRICT;),
        sql!(
            DROP TABLE ssh_connections;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN name TEXT;
            ALTER TABLE remote_connections ADD COLUMN container_id TEXT;
        ),
        // Record which worktrees (local or remote) the user has trusted.
        sql!(
            CREATE TABLE IF NOT EXISTS trusted_worktrees (
                trust_id INTEGER PRIMARY KEY AUTOINCREMENT,
                absolute_path TEXT,
                user_name TEXT,
                host_name TEXT
            ) STRICT;
        ),
        // Re-key toolchains by worktree root path instead of worktree id.
        sql!(CREATE TABLE toolchains2 (
            workspace_id INTEGER,
            worktree_root_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            PRIMARY KEY (workspace_id, worktree_root_path, language_name, relative_worktree_path)) STRICT;
            INSERT OR REPLACE INTO toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT toolchains.workspace_id, paths, language_name, name, path, raw_json, relative_worktree_path FROM toolchains INNER JOIN workspaces ON toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE toolchains;
            ALTER TABLE toolchains2 RENAME TO toolchains;
        ),
        // Same re-keying for user_toolchains.
        sql!(CREATE TABLE user_toolchains2 (
            remote_connection_id INTEGER,
            workspace_id INTEGER NOT NULL,
            worktree_root_path TEXT NOT NULL,
            relative_worktree_path TEXT NOT NULL,
            language_name TEXT NOT NULL,
            name TEXT NOT NULL,
            path TEXT NOT NULL,
            raw_json TEXT NOT NULL,

            PRIMARY KEY (workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json)) STRICT;
            INSERT OR REPLACE INTO user_toolchains2
            // The `instr(paths, '\n') = 0` part allows us to find all
            // workspaces that have a single worktree, as `\n` is used as a
            // separator when serializing the workspace paths, so if no `\n` is
            // found, we know we have a single worktree.
            SELECT user_toolchains.remote_connection_id, user_toolchains.workspace_id, paths, relative_worktree_path, language_name, name, path, raw_json FROM user_toolchains INNER JOIN workspaces ON user_toolchains.workspace_id = workspaces.workspace_id AND instr(paths, '\n') = 0;
            DROP TABLE user_toolchains;
            ALTER TABLE user_toolchains2 RENAME TO user_toolchains;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN use_podman BOOLEAN;
        ),
        sql!(
            ALTER TABLE remote_connections ADD COLUMN remote_env TEXT;
        ),
    ];

    // Allow recovering from bad migration that was initially shipped to nightly
    // when introducing the ssh_connections table.
    fn should_allow_migration_change(_index: usize, old: &str, new: &str) -> bool {
        old.starts_with("CREATE TABLE ssh_connections")
            && new.starts_with("CREATE TABLE ssh_connections")
    }
}
986
// Registers `WorkspaceDb` as a lazily-initialized global database connection.
db::static_connection!(WorkspaceDb, []);
988
989impl WorkspaceDb {
    /// Returns a serialized workspace for the given worktree_roots, or `None`
    /// if no workspace is stored for them. An empty `worktree_roots` slice
    /// never matches a local workspace (all empty workspaces have empty
    /// stored paths and are only restored by id during session restoration) —
    /// see `workspace_for_roots_internal`.
    pub(crate) fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, None)
    }
999
    /// Like `workspace_for_roots`, but scoped to a specific remote connection.
    pub(crate) fn remote_workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_project_id: RemoteConnectionId,
    ) -> Option<SerializedWorkspace> {
        self.workspace_for_roots_internal(worktree_roots, Some(remote_project_id))
    }
1007
    /// Shared implementation behind the local and remote lookups: finds the
    /// workspace whose stored path list matches `worktree_roots` (and, when
    /// given, `remote_connection_id`), then loads its pane layout, docks,
    /// breakpoints, and user toolchains.
    pub(crate) fn workspace_for_roots_internal<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> Option<SerializedWorkspace> {
        // paths are sorted before db interactions to ensure that the order of the paths
        // doesn't affect the workspace selection for existing workspaces
        let root_paths = PathList::new(worktree_roots);

        // Empty workspaces cannot be matched by paths (all empty workspaces have paths = "").
        // They should only be restored via workspace_for_id during session restoration.
        if root_paths.is_empty() && remote_connection_id.is_none() {
            return None;
        }

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (
            workspace_id,
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
        ): (
            WorkspaceId,
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    workspace_id,
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id
                FROM workspaces
                WHERE
                    paths IS ? AND
                    remote_connection_id IS ?
                LIMIT 1
            })
            .and_then(|mut prepared_statement| {
                (prepared_statement)((
                    root_paths.serialize().paths,
                    remote_connection_id.map(|id| id.0 as i32),
                ))
            })
            .context("No workspaces found")
            .warn_on_err()
            .flatten()?;

        // Rebuild the path list (including display order) from the stored form.
        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // A missing remote connection row is logged but not fatal: the
        // workspace is then treated as local below.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            // Session id is not restored through this lookup path.
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1116
    /// Returns the workspace with the given ID, loading all associated data
    /// (pane layout, breakpoints, toolchains, window geometry). Unlike
    /// [`Self::workspace_for_roots`], this can restore empty (path-less)
    /// workspaces, e.g. during session restoration.
    pub(crate) fn workspace_for_id(
        &self,
        workspace_id: WorkspaceId,
    ) -> Option<SerializedWorkspace> {
        let (
            paths,
            paths_order,
            window_bounds,
            display,
            centered_layout,
            docks,
            window_id,
            remote_connection_id,
        ): (
            String,
            String,
            Option<SerializedWindowBounds>,
            Option<Uuid>,
            Option<bool>,
            DockStructure,
            Option<u64>,
            Option<i32>,
        ) = self
            .select_row_bound(sql! {
                SELECT
                    paths,
                    paths_order,
                    window_state,
                    window_x,
                    window_y,
                    window_width,
                    window_height,
                    display,
                    centered_layout,
                    left_dock_visible,
                    left_dock_active_panel,
                    left_dock_zoom,
                    right_dock_visible,
                    right_dock_active_panel,
                    right_dock_zoom,
                    bottom_dock_visible,
                    bottom_dock_active_panel,
                    bottom_dock_zoom,
                    window_id,
                    remote_connection_id
                FROM workspaces
                WHERE workspace_id = ?
            })
            .and_then(|mut prepared_statement| (prepared_statement)(workspace_id))
            .context("No workspace found for id")
            .warn_on_err()
            .flatten()?;

        let paths = PathList::deserialize(&SerializedPathList {
            paths,
            order: paths_order,
        });

        // The column is read back as i32; widen to the u64-based id newtype.
        let remote_connection_id = remote_connection_id.map(|id| RemoteConnectionId(id as u64));
        // A failed connection lookup is logged and the workspace degrades to Local.
        let remote_connection_options = if let Some(remote_connection_id) = remote_connection_id {
            self.remote_connection(remote_connection_id)
                .context("Get remote connection")
                .log_err()
        } else {
            None
        };

        Some(SerializedWorkspace {
            id: workspace_id,
            location: match remote_connection_options {
                Some(options) => SerializedWorkspaceLocation::Remote(options),
                None => SerializedWorkspaceLocation::Local,
            },
            paths,
            center_group: self
                .get_center_pane_group(workspace_id)
                .context("Getting center group")
                .log_err()?,
            window_bounds,
            centered_layout: centered_layout.unwrap_or(false),
            display,
            docks,
            // session_id is managed separately via set_session_id / set_session_binding.
            session_id: None,
            breakpoints: self.breakpoints(workspace_id),
            window_id,
            user_toolchains: self.user_toolchains(workspace_id, remote_connection_id),
        })
    }
1206
1207 fn breakpoints(&self, workspace_id: WorkspaceId) -> BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> {
1208 let breakpoints: Result<Vec<(PathBuf, Breakpoint)>> = self
1209 .select_bound(sql! {
1210 SELECT path, breakpoint_location, log_message, condition, hit_condition, state
1211 FROM breakpoints
1212 WHERE workspace_id = ?
1213 })
1214 .and_then(|mut prepared_statement| (prepared_statement)(workspace_id));
1215
1216 match breakpoints {
1217 Ok(bp) => {
1218 if bp.is_empty() {
1219 log::debug!("Breakpoints are empty after querying database for them");
1220 }
1221
1222 let mut map: BTreeMap<Arc<Path>, Vec<SourceBreakpoint>> = Default::default();
1223
1224 for (path, breakpoint) in bp {
1225 let path: Arc<Path> = path.into();
1226 map.entry(path.clone()).or_default().push(SourceBreakpoint {
1227 row: breakpoint.position,
1228 path,
1229 message: breakpoint.message,
1230 condition: breakpoint.condition,
1231 hit_condition: breakpoint.hit_condition,
1232 state: breakpoint.state,
1233 });
1234 }
1235
1236 for (path, bps) in map.iter() {
1237 log::info!(
1238 "Got {} breakpoints from database at path: {}",
1239 bps.len(),
1240 path.to_string_lossy()
1241 );
1242 }
1243
1244 map
1245 }
1246 Err(msg) => {
1247 log::error!("Breakpoints query failed with msg: {msg}");
1248 Default::default()
1249 }
1250 }
1251 }
1252
    /// Loads user-selected toolchains visible to the given workspace: rows stored
    /// under the sentinel workspace id 0 (globally scoped) plus rows stored under
    /// `workspace_id`, filtered by remote connection. Unparsable rows are skipped.
    fn user_toolchains(
        &self,
        workspace_id: WorkspaceId,
        remote_connection_id: Option<RemoteConnectionId>,
    ) -> BTreeMap<ToolchainScope, IndexSet<Toolchain>> {
        type RowKind = (WorkspaceId, String, String, String, String, String, String);

        let toolchains: Vec<RowKind> = self
            .select_bound(sql! {
                SELECT workspace_id, worktree_root_path, relative_worktree_path,
                language_name, name, path, raw_json
                FROM user_toolchains WHERE remote_connection_id IS ?1 AND (
                    workspace_id IN (0, ?2)
                )
            })
            .and_then(|mut statement| {
                (statement)((remote_connection_id.map(|id| id.0), workspace_id))
            })
            // Query failure degrades to "no toolchains".
            .unwrap_or_default();
        let mut ret = BTreeMap::<_, IndexSet<_>>::default();

        for (
            _workspace_id,
            worktree_root_path,
            relative_worktree_path,
            language_name,
            name,
            path,
            raw_json,
        ) in toolchains
        {
            // INTEGER's that are primary keys (like workspace ids, remote connection ids
            // and such) start at 1, so workspace_id 0 is safe to use as the sentinel
            // marking globally-scoped toolchains (see the `workspace_id IN (0, ?2)` filter).
            let scope = if _workspace_id == WorkspaceId(0) {
                debug_assert_eq!(worktree_root_path, String::default());
                debug_assert_eq!(relative_worktree_path, String::default());
                ToolchainScope::Global
            } else {
                debug_assert_eq!(workspace_id, _workspace_id);
                // Asserts the two path columns are either both empty or both non-empty.
                debug_assert_eq!(
                    worktree_root_path == String::default(),
                    relative_worktree_path == String::default()
                );

                let Some(relative_path) = RelPath::unix(&relative_worktree_path).log_err() else {
                    continue;
                };
                // Both path columns populated => toolchain is scoped to a subproject;
                // otherwise it applies to the whole project.
                if worktree_root_path != String::default()
                    && relative_worktree_path != String::default()
                {
                    ToolchainScope::Subproject(
                        Arc::from(worktree_root_path.as_ref()),
                        relative_path.into(),
                    )
                } else {
                    ToolchainScope::Project
                }
            };
            // Skip rows whose stored JSON payload no longer parses.
            let Ok(as_json) = serde_json::from_str(&raw_json) else {
                continue;
            };
            let toolchain = Toolchain {
                name: SharedString::from(name),
                path: SharedString::from(path),
                language_name: LanguageName::from_proto(language_name),
                as_json,
            };
            ret.entry(scope).or_default().insert(toolchain);
        }

        ret
    }
1324
    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously. Runs inside a savepoint so a failure in
    /// any step rolls back the whole update; errors are logged, not returned.
    pub(crate) async fn save_workspace(&self, workspace: SerializedWorkspace) {
        let paths = workspace.paths.serialize();
        log::debug!("Saving workspace at location: {:?}", workspace.location);
        self.write(move |conn| {
            conn.with_savepoint("update_worktrees", || {
                // Remote workspaces need their remote_connections row to exist first
                // so the workspace row can reference its id.
                let remote_connection_id = match workspace.location.clone() {
                    SerializedWorkspaceLocation::Local => None,
                    SerializedWorkspaceLocation::Remote(connection_options) => {
                        Some(Self::get_or_create_remote_connection_internal(
                            conn,
                            connection_options
                        )?.0)
                    }
                };

                // Clear out panes and pane_groups
                conn.exec_bound(sql!(
                    DELETE FROM pane_groups WHERE workspace_id = ?1;
                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                .context("Clearing old panes")?;

                // Breakpoints are rewritten wholesale: delete-then-insert below.
                conn.exec_bound(
                    sql!(
                        DELETE FROM breakpoints WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old breakpoints")?;

                for (path, breakpoints) in workspace.breakpoints {
                    for bp in breakpoints {
                        let state = BreakpointStateWrapper::from(bp.state);
                        // A single failed insert is logged and skipped, not fatal.
                        match conn.exec_bound(sql!(
                            INSERT INTO breakpoints (workspace_id, path, breakpoint_location, log_message, condition, hit_condition, state)
                            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7);))?

                        ((
                            workspace.id,
                            path.as_ref(),
                            bp.row,
                            bp.message,
                            bp.condition,
                            bp.hit_condition,
                            state,
                        )) {
                            Ok(_) => {
                                log::debug!("Stored breakpoint at row: {} in path: {}", bp.row, path.to_string_lossy())
                            }
                            Err(err) => {
                                log::error!("{err}");
                                continue;
                            }
                        }
                    }
                }

                // User toolchains are likewise rewritten wholesale for this workspace.
                conn.exec_bound(
                    sql!(
                        DELETE FROM user_toolchains WHERE workspace_id = ?1;
                    )
                )?(workspace.id).context("Clearing old user toolchains")?;

                for (scope, toolchains) in workspace.user_toolchains {
                    for toolchain in toolchains {
                        let query = sql!(INSERT OR REPLACE INTO user_toolchains(remote_connection_id, workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8));
                        // Global scope is stored under the sentinel workspace id 0 with
                        // empty path columns (mirrors the decoding in `user_toolchains`).
                        let (workspace_id, worktree_root_path, relative_worktree_path) = match scope {
                            ToolchainScope::Subproject(ref worktree_root_path, ref path) => (Some(workspace.id), Some(worktree_root_path.to_string_lossy().into_owned()), Some(path.as_unix_str().to_owned())),
                            ToolchainScope::Project => (Some(workspace.id), None, None),
                            ToolchainScope::Global => (None, None, None),
                        };
                        let args = (remote_connection_id, workspace_id.unwrap_or(WorkspaceId(0)), worktree_root_path.unwrap_or_default(), relative_worktree_path.unwrap_or_default(),
                        toolchain.language_name.as_ref().to_owned(), toolchain.name.to_string(), toolchain.path.to_string(), toolchain.as_json.to_string());
                        if let Err(err) = conn.exec_bound(query)?(args) {
                            log::error!("{err}");
                            continue;
                        }
                    }
                }

                // Clear out old workspaces with the same paths.
                // Skip this for empty workspaces - they are identified by workspace_id, not paths.
                // Multiple empty workspaces with different content should coexist.
                if !paths.paths.is_empty() {
                    conn.exec_bound(sql!(
                        DELETE
                        FROM workspaces
                        WHERE
                            workspace_id != ?1 AND
                            paths IS ?2 AND
                            remote_connection_id IS ?3
                    ))?((
                        workspace.id,
                        paths.paths.clone(),
                        remote_connection_id,
                    ))
                    .context("clearing out old locations")?;
                }

                // Upsert
                let query = sql!(
                    INSERT INTO workspaces(
                        workspace_id,
                        paths,
                        paths_order,
                        remote_connection_id,
                        left_dock_visible,
                        left_dock_active_panel,
                        left_dock_zoom,
                        right_dock_visible,
                        right_dock_active_panel,
                        right_dock_zoom,
                        bottom_dock_visible,
                        bottom_dock_active_panel,
                        bottom_dock_zoom,
                        session_id,
                        window_id,
                        timestamp
                    )
                    VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, CURRENT_TIMESTAMP)
                    ON CONFLICT DO
                    UPDATE SET
                        paths = ?2,
                        paths_order = ?3,
                        remote_connection_id = ?4,
                        left_dock_visible = ?5,
                        left_dock_active_panel = ?6,
                        left_dock_zoom = ?7,
                        right_dock_visible = ?8,
                        right_dock_active_panel = ?9,
                        right_dock_zoom = ?10,
                        bottom_dock_visible = ?11,
                        bottom_dock_active_panel = ?12,
                        bottom_dock_zoom = ?13,
                        session_id = ?14,
                        window_id = ?15,
                        timestamp = CURRENT_TIMESTAMP
                );
                let mut prepared_query = conn.exec_bound(query)?;
                // `workspace.docks` expands to the nine dock columns (?5..?13) via its
                // Bind impl; the tuple is bound in declaration order.
                let args = (
                    workspace.id,
                    paths.paths.clone(),
                    paths.order.clone(),
                    remote_connection_id,
                    workspace.docks,
                    workspace.session_id,
                    workspace.window_id,
                );

                prepared_query(args).context("Updating workspace")?;

                // Save center pane group
                Self::save_pane_group(conn, workspace.id, &workspace.center_group, None)
                    .context("save pane group in save workspace")?;

                Ok(())
            })
            .log_err();
        })
        .await;
    }
1485
    /// Returns the id of the `remote_connections` row matching `options`,
    /// inserting a new row when none exists yet. Runs on the DB write thread.
    pub(crate) async fn get_or_create_remote_connection(
        &self,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        self.write(move |conn| Self::get_or_create_remote_connection_internal(conn, options))
            .await
    }
1493
    /// Flattens the `RemoteConnectionOptions` enum into the nullable column set
    /// of the `remote_connections` table, then delegates to the lookup/insert
    /// query. Columns not used by a given kind stay `None`.
    fn get_or_create_remote_connection_internal(
        this: &Connection,
        options: RemoteConnectionOptions,
    ) -> Result<RemoteConnectionId> {
        let kind;
        let user: Option<String>;
        let mut host = None;
        let mut port = None;
        let mut distro = None;
        let mut name = None;
        let mut container_id = None;
        let mut use_podman = None;
        let mut remote_env = None;
        match options {
            RemoteConnectionOptions::Ssh(options) => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(options.host.to_string());
                port = options.port;
                user = options.username;
            }
            RemoteConnectionOptions::Wsl(options) => {
                kind = RemoteConnectionKind::Wsl;
                distro = Some(options.distro_name);
                user = options.user;
            }
            RemoteConnectionOptions::Docker(options) => {
                kind = RemoteConnectionKind::Docker;
                container_id = Some(options.container_id);
                name = Some(options.name);
                use_podman = Some(options.use_podman);
                user = Some(options.remote_user);
                // The container environment map is persisted as a JSON string.
                remote_env = serde_json::to_string(&options.remote_env).ok();
            }
            // Mock connections are persisted as SSH rows with synthetic identifiers.
            #[cfg(any(test, feature = "test-support"))]
            RemoteConnectionOptions::Mock(options) => {
                kind = RemoteConnectionKind::Ssh;
                host = Some(format!("mock-{}", options.id));
                user = Some(format!("mock-user-{}", options.id));
            }
        }
        Self::get_or_create_remote_connection_query(
            this,
            kind,
            host,
            port,
            user,
            distro,
            name,
            container_id,
            use_podman,
            remote_env,
        )
    }
1547
    /// Looks up an existing `remote_connections` row matching every identity
    /// column (using SQL `IS` so NULL columns compare equal) and returns its id;
    /// otherwise inserts a new row and returns the generated id.
    fn get_or_create_remote_connection_query(
        this: &Connection,
        kind: RemoteConnectionKind,
        host: Option<String>,
        port: Option<u16>,
        user: Option<String>,
        distro: Option<String>,
        name: Option<String>,
        container_id: Option<String>,
        use_podman: Option<bool>,
        remote_env: Option<String>,
    ) -> Result<RemoteConnectionId> {
        // Note: use_podman and remote_env are not part of the identity match;
        // only the columns below distinguish connections.
        if let Some(id) = this.select_row_bound(sql!(
            SELECT id
            FROM remote_connections
            WHERE
                kind IS ? AND
                host IS ? AND
                port IS ? AND
                user IS ? AND
                distro IS ? AND
                name IS ? AND
                container_id IS ?
            LIMIT 1
        ))?((
            kind.serialize(),
            host.clone(),
            port,
            user.clone(),
            distro.clone(),
            name.clone(),
            container_id.clone(),
        ))? {
            Ok(RemoteConnectionId(id))
        } else {
            let id = this.select_row_bound(sql!(
                INSERT INTO remote_connections (
                    kind,
                    host,
                    port,
                    user,
                    distro,
                    name,
                    container_id,
                    use_podman,
                    remote_env
                ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)
                RETURNING id
            ))?((
                kind.serialize(),
                host,
                port,
                user,
                distro,
                name,
                container_id,
                use_podman,
                remote_env,
            ))?
            .context("failed to insert remote project")?;
            Ok(RemoteConnectionId(id))
        }
    }
1611
    // Allocates a fresh workspace id by inserting an otherwise-default row;
    // the row's contents are filled in later by `save_workspace`'s upsert.
    query! {
        pub async fn next_id() -> Result<WorkspaceId> {
            INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id
        }
    }
1617
1618 fn recent_workspaces(
1619 &self,
1620 ) -> Result<
1621 Vec<(
1622 WorkspaceId,
1623 PathList,
1624 Option<RemoteConnectionId>,
1625 DateTime<Utc>,
1626 )>,
1627 > {
1628 Ok(self
1629 .recent_workspaces_query()?
1630 .into_iter()
1631 .map(|(id, paths, order, remote_connection_id, timestamp)| {
1632 (
1633 id,
1634 PathList::deserialize(&SerializedPathList { paths, order }),
1635 remote_connection_id.map(RemoteConnectionId),
1636 parse_timestamp(×tamp),
1637 )
1638 })
1639 .collect())
1640 }
1641
    // Raw rows behind `recent_workspaces`: every workspace that has either local
    // paths or a remote connection attached, newest first.
    query! {
        fn recent_workspaces_query() -> Result<Vec<(WorkspaceId, String, String, Option<u64>, String)>> {
            SELECT workspace_id, paths, paths_order, remote_connection_id, timestamp
            FROM workspaces
            WHERE
                paths IS NOT NULL OR
                remote_connection_id IS NOT NULL
            ORDER BY timestamp DESC
        }
    }
1652
1653 fn session_workspaces(
1654 &self,
1655 session_id: String,
1656 ) -> Result<
1657 Vec<(
1658 WorkspaceId,
1659 PathList,
1660 Option<u64>,
1661 Option<RemoteConnectionId>,
1662 )>,
1663 > {
1664 Ok(self
1665 .session_workspaces_query(session_id)?
1666 .into_iter()
1667 .map(
1668 |(workspace_id, paths, order, window_id, remote_connection_id)| {
1669 (
1670 WorkspaceId(workspace_id),
1671 PathList::deserialize(&SerializedPathList { paths, order }),
1672 window_id,
1673 remote_connection_id.map(RemoteConnectionId),
1674 )
1675 },
1676 )
1677 .collect())
1678 }
1679
    // Raw rows behind `session_workspaces`: workspaces tagged with the given
    // session id, newest first.
    query! {
        fn session_workspaces_query(session_id: String) -> Result<Vec<(i64, String, String, Option<u64>, Option<u64>)>> {
            SELECT workspace_id, paths, paths_order, window_id, remote_connection_id
            FROM workspaces
            WHERE session_id = ?1
            ORDER BY timestamp DESC
        }
    }
1688
    // Loads the serialized breakpoints stored for one file within one workspace.
    query! {
        pub fn breakpoints_for_file(workspace_id: WorkspaceId, file_path: &Path) -> Result<Vec<Breakpoint>> {
            SELECT breakpoint_location
            FROM breakpoints
            WHERE workspace_id= ?1 AND path = ?2
        }
    }
1696
1697 query! {
1698 pub fn clear_breakpoints(file_path: &Path) -> Result<()> {
1699 DELETE FROM breakpoints
1700 WHERE file_path = ?2
1701 }
1702 }
1703
    /// Loads every stored remote connection, keyed by row id. Rows that cannot
    /// be decoded (unknown kind, missing required column) are silently skipped
    /// by `remote_connection_from_row` returning `None`.
    fn remote_connections(&self) -> Result<HashMap<RemoteConnectionId, RemoteConnectionOptions>> {
        Ok(self.select(sql!(
            SELECT
                id, kind, host, port, user, distro, container_id, name, use_podman, remote_env
            FROM
                remote_connections
        ))?()?
        .into_iter()
        .filter_map(
            |(id, kind, host, port, user, distro, container_id, name, use_podman, remote_env)| {
                Some((
                    RemoteConnectionId(id),
                    Self::remote_connection_from_row(
                        kind,
                        host,
                        port,
                        user,
                        distro,
                        container_id,
                        name,
                        use_podman,
                        remote_env,
                    )?,
                ))
            },
        )
        .collect())
    }
1732
    /// Loads and decodes a single `remote_connections` row by id. Errors if the
    /// row does not exist or cannot be decoded into connection options.
    pub(crate) fn remote_connection(
        &self,
        id: RemoteConnectionId,
    ) -> Result<RemoteConnectionOptions> {
        let (kind, host, port, user, distro, container_id, name, use_podman, remote_env) =
            self.select_row_bound(sql!(
                SELECT kind, host, port, user, distro, container_id, name, use_podman, remote_env
                FROM remote_connections
                WHERE id = ?
            ))?(id.0)?
            .context("no such remote connection")?;
        Self::remote_connection_from_row(
            kind,
            host,
            port,
            user,
            distro,
            container_id,
            name,
            use_podman,
            remote_env,
        )
        .context("invalid remote_connection row")
    }
1757
1758 fn remote_connection_from_row(
1759 kind: String,
1760 host: Option<String>,
1761 port: Option<u16>,
1762 user: Option<String>,
1763 distro: Option<String>,
1764 container_id: Option<String>,
1765 name: Option<String>,
1766 use_podman: Option<bool>,
1767 remote_env: Option<String>,
1768 ) -> Option<RemoteConnectionOptions> {
1769 match RemoteConnectionKind::deserialize(&kind)? {
1770 RemoteConnectionKind::Wsl => Some(RemoteConnectionOptions::Wsl(WslConnectionOptions {
1771 distro_name: distro?,
1772 user: user,
1773 })),
1774 RemoteConnectionKind::Ssh => Some(RemoteConnectionOptions::Ssh(SshConnectionOptions {
1775 host: host?.into(),
1776 port,
1777 username: user,
1778 ..Default::default()
1779 })),
1780 RemoteConnectionKind::Docker => {
1781 let remote_env: BTreeMap<String, String> =
1782 serde_json::from_str(&remote_env?).ok()?;
1783 Some(RemoteConnectionOptions::Docker(DockerConnectionOptions {
1784 container_id: container_id?,
1785 name: name?,
1786 remote_user: user?,
1787 upload_binary_over_docker_exec: false,
1788 use_podman: use_podman?,
1789 remote_env,
1790 }))
1791 }
1792 }
1793 }
1794
    // Deletes a workspace row by id; associated rows in other tables are not
    // touched here (NOTE(review): presumably handled by FK cascades or callers —
    // confirm against the schema).
    query! {
        pub async fn delete_workspace_by_id(id: WorkspaceId) -> Result<()> {
            DELETE FROM workspaces
            WHERE workspace_id IS ?
        }
    }
1801
1802 async fn all_paths_exist_with_a_directory(
1803 paths: &[PathBuf],
1804 fs: &dyn Fs,
1805 timestamp: Option<DateTime<Utc>>,
1806 ) -> bool {
1807 let mut any_dir = false;
1808 for path in paths {
1809 match fs.metadata(path).await.ok().flatten() {
1810 None => {
1811 return timestamp.is_some_and(|t| Utc::now() - t < chrono::Duration::days(7));
1812 }
1813 Some(meta) => {
1814 if meta.is_dir {
1815 any_dir = true;
1816 }
1817 }
1818 }
1819 }
1820 any_dir
1821 }
1822
    // Returns the recent locations which are still valid on disk and deletes ones which no longer
    // exist. Remote workspaces are kept when their connection row still exists;
    // local workspaces are kept when their paths still exist (with a 7-day grace
    // period for missing paths — see `all_paths_exist_with_a_directory`).
    pub async fn recent_workspaces_on_disk(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Vec<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        let mut result = Vec::new();
        let mut delete_tasks = Vec::new();
        let remote_connections = self.remote_connections()?;

        for (id, paths, remote_connection_id, timestamp) in self.recent_workspaces()? {
            if let Some(remote_connection_id) = remote_connection_id {
                if let Some(connection_options) = remote_connections.get(&remote_connection_id) {
                    result.push((
                        id,
                        SerializedWorkspaceLocation::Remote(connection_options.clone()),
                        paths,
                        timestamp,
                    ));
                } else {
                    // Orphaned reference to a deleted/undecodable connection row.
                    delete_tasks.push(self.delete_workspace_by_id(id));
                }
                continue;
            }

            let has_wsl_path = if cfg!(windows) {
                paths
                    .paths()
                    .iter()
                    .any(|path| util::paths::WslPath::from_path(path).is_some())
            } else {
                false
            };

            // Delete the workspace if any of the paths are WSL paths.
            // If a local workspace points to WSL, this check will cause us to wait for the
            // WSL VM and file server to boot up. This can block for many seconds.
            // Supported scenarios use remote workspaces.
            if !has_wsl_path
                && Self::all_paths_exist_with_a_directory(paths.paths(), fs, Some(timestamp)).await
            {
                result.push((id, SerializedWorkspaceLocation::Local, paths, timestamp));
            } else {
                delete_tasks.push(self.delete_workspace_by_id(id));
            }
        }

        // Run all garbage-collecting deletes concurrently before returning.
        futures::future::join_all(delete_tasks).await;
        Ok(result)
    }
1880
    /// Returns the most recently used workspace that is still valid on disk
    /// (first entry of `recent_workspaces_on_disk`, which is ordered newest
    /// first), or `None` if no valid workspace remains.
    pub async fn last_workspace(
        &self,
        fs: &dyn Fs,
    ) -> Result<
        Option<(
            WorkspaceId,
            SerializedWorkspaceLocation,
            PathList,
            DateTime<Utc>,
        )>,
    > {
        Ok(self.recent_workspaces_on_disk(fs).await?.into_iter().next())
    }
1894
1895 // Returns the locations of the workspaces that were still opened when the last
1896 // session was closed (i.e. when Zed was quit).
1897 // If `last_session_window_order` is provided, the returned locations are ordered
1898 // according to that.
1899 pub async fn last_session_workspace_locations(
1900 &self,
1901 last_session_id: &str,
1902 last_session_window_stack: Option<Vec<WindowId>>,
1903 fs: &dyn Fs,
1904 ) -> Result<Vec<SessionWorkspace>> {
1905 let mut workspaces = Vec::new();
1906
1907 for (workspace_id, paths, window_id, remote_connection_id) in
1908 self.session_workspaces(last_session_id.to_owned())?
1909 {
1910 let window_id = window_id.map(WindowId::from);
1911
1912 if let Some(remote_connection_id) = remote_connection_id {
1913 workspaces.push(SessionWorkspace {
1914 workspace_id,
1915 location: SerializedWorkspaceLocation::Remote(
1916 self.remote_connection(remote_connection_id)?,
1917 ),
1918 paths,
1919 window_id,
1920 });
1921 } else if paths.is_empty() {
1922 // Empty workspace with items (drafts, files) - include for restoration
1923 workspaces.push(SessionWorkspace {
1924 workspace_id,
1925 location: SerializedWorkspaceLocation::Local,
1926 paths,
1927 window_id,
1928 });
1929 } else {
1930 if Self::all_paths_exist_with_a_directory(paths.paths(), fs, None).await {
1931 workspaces.push(SessionWorkspace {
1932 workspace_id,
1933 location: SerializedWorkspaceLocation::Local,
1934 paths,
1935 window_id,
1936 });
1937 }
1938 }
1939 }
1940
1941 if let Some(stack) = last_session_window_stack {
1942 workspaces.sort_by_key(|workspace| {
1943 workspace
1944 .window_id
1945 .and_then(|id| stack.iter().position(|&order_id| order_id == id))
1946 .unwrap_or(usize::MAX)
1947 });
1948 }
1949
1950 Ok(workspaces)
1951 }
1952
1953 fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
1954 Ok(self
1955 .get_pane_group(workspace_id, None)?
1956 .into_iter()
1957 .next()
1958 .unwrap_or_else(|| {
1959 SerializedPaneGroup::Pane(SerializedPane {
1960 active: true,
1961 children: vec![],
1962 pinned_count: 0,
1963 })
1964 }))
1965 }
1966
    /// Recursively loads the pane-group tree below `group_id` (`None` selects the
    /// workspace's top-level group). Groups and leaf panes live in separate
    /// tables, so a UNION query interleaves both row shapes, ordered by position
    /// within the parent. Childless groups and item-less panes are filtered out.
    fn get_pane_group(
        &self,
        workspace_id: WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        type GroupKey = (Option<GroupId>, WorkspaceId);
        // Either a group row (group_id + axis + flexes) or a pane row
        // (pane_id + active + pinned_count); the unused columns are NULL.
        type GroupOrPane = (
            Option<GroupId>,
            Option<SerializedAxis>,
            Option<PaneId>,
            Option<bool>,
            Option<usize>,
            Option<String>,
        );
        self.select_bound::<GroupKey, GroupOrPane>(sql!(
            SELECT group_id, axis, pane_id, active, pinned_count, flexes
                FROM (SELECT
                        group_id,
                        axis,
                        NULL as pane_id,
                        NULL as active,
                        NULL as pinned_count,
                        position,
                        parent_group_id,
                        workspace_id,
                        flexes
                    FROM pane_groups
                UNION
                    SELECT
                        NULL,
                        NULL,
                        center_panes.pane_id,
                        panes.active as active,
                        pinned_count,
                        position,
                        parent_group_id,
                        panes.workspace_id as workspace_id,
                        NULL
                    FROM center_panes
                    JOIN panes ON center_panes.pane_id = panes.pane_id)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
        ))?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id, active, pinned_count, flexes)| {
            // A row is a pane only when all three pane columns are present.
            let maybe_pane = maybe!({ Some((pane_id?, active?, pinned_count?)) });
            if let Some((group_id, axis)) = group_id.zip(axis) {
                // flexes is a JSON-encoded Vec<f32> of the group's split ratios.
                let flexes = flexes
                    .map(|flexes: String| serde_json::from_str::<Vec<f32>>(&flexes))
                    .transpose()?;

                Ok(SerializedPaneGroup::Group {
                    axis,
                    // Recurse to load this group's children.
                    children: self.get_pane_group(workspace_id, Some(group_id))?,
                    flexes,
                })
            } else if let Some((pane_id, active, pinned_count)) = maybe_pane {
                Ok(SerializedPaneGroup::Pane(SerializedPane::new(
                    self.get_items(pane_id)?,
                    active,
                    pinned_count,
                )))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        // Filter out panes and pane groups which don't have any children or items
        .filter(|pane_group| match pane_group {
            Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(),
            Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(),
            _ => true,
        })
        .collect::<Result<_>>()
    }
2041
    /// Recursively persists a pane-group tree. `parent` carries the enclosing
    /// group's id and this node's position within it; `None` marks the root.
    fn save_pane_group(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() {
            log::debug!("Saving a pane group for workspace {workspace_id:?}");
        }
        match pane_group {
            SerializedPaneGroup::Group {
                axis,
                children,
                flexes,
            } => {
                let (parent_id, position) = parent.unzip();

                // Split ratios are stored as a JSON array string.
                let flex_string = flexes
                    .as_ref()
                    .map(|flexes| serde_json::json!(flexes).to_string());

                let group_id = conn.select_row_bound::<_, i64>(sql!(
                    INSERT INTO pane_groups(
                        workspace_id,
                        parent_group_id,
                        position,
                        axis,
                        flexes
                    )
                    VALUES (?, ?, ?, ?, ?)
                    RETURNING group_id
                ))?((
                    workspace_id,
                    parent_id,
                    position,
                    *axis,
                    flex_string,
                ))?
                .context("Couldn't retrieve group_id from inserted pane_group")?;

                // Children record their index as their position under this group.
                for (position, group) in children.iter().enumerate() {
                    Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))?
                }

                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => {
                Self::save_pane(conn, workspace_id, pane, parent)?;
                Ok(())
            }
        }
    }
2094
    /// Persists one leaf pane: inserts the pane row, links it into the center
    /// pane tree under `parent`, then saves its items. Returns the new pane id.
    fn save_pane(
        conn: &Connection,
        workspace_id: WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<PaneId> {
        let pane_id = conn.select_row_bound::<_, i64>(sql!(
            INSERT INTO panes(workspace_id, active, pinned_count)
            VALUES (?, ?, ?)
            RETURNING pane_id
        ))?((workspace_id, pane.active, pane.pinned_count))?
        .context("Could not retrieve inserted pane_id")?;

        // Root panes have a NULL parent_group_id/position.
        let (parent_id, order) = parent.unzip();
        conn.exec_bound(sql!(
            INSERT INTO center_panes(pane_id, parent_group_id, position)
            VALUES (?, ?, ?)
        ))?((pane_id, parent_id, order))?;

        Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;

        Ok(pane_id)
    }
2118
    /// Loads the serialized items of a single pane, in display order.
    fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        self.select_bound(sql!(
            SELECT kind, item_id, active, preview FROM items
            WHERE pane_id = ?
            ORDER BY position
        ))?(pane_id)
    }
2126
2127 fn save_items(
2128 conn: &Connection,
2129 workspace_id: WorkspaceId,
2130 pane_id: PaneId,
2131 items: &[SerializedItem],
2132 ) -> Result<()> {
2133 let mut insert = conn.exec_bound(sql!(
2134 INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active, preview) VALUES (?, ?, ?, ?, ?, ?, ?)
2135 )).context("Preparing insertion")?;
2136 for (position, item) in items.iter().enumerate() {
2137 insert((workspace_id, pane_id, position, item))?;
2138 }
2139
2140 Ok(())
2141 }
2142
    // Bumps a workspace's last-used timestamp (affects recency ordering).
    query! {
        pub async fn update_timestamp(workspace_id: WorkspaceId) -> Result<()> {
            UPDATE workspaces
            SET timestamp = CURRENT_TIMESTAMP
            WHERE workspace_id = ?
        }
    }
2150
    // Persists a window's bounds and display; `bounds` expands to the five
    // window_* columns (?2..?6) via SerializedWindowBounds' Bind impl.
    query! {
        pub(crate) async fn set_window_open_status(workspace_id: WorkspaceId, bounds: SerializedWindowBounds, display: Uuid) -> Result<()> {
            UPDATE workspaces
            SET window_state = ?2,
                window_x = ?3,
                window_y = ?4,
                window_width = ?5,
                window_height = ?6,
                display = ?7
            WHERE workspace_id = ?1
        }
    }
2163
    // Persists whether the workspace uses the centered layout mode.
    query! {
        pub(crate) async fn set_centered_layout(workspace_id: WorkspaceId, centered_layout: bool) -> Result<()> {
            UPDATE workspaces
            SET centered_layout = ?2
            WHERE workspace_id = ?1
        }
    }
2171
    // Tags a workspace with the current session id (NULL detaches it from any session).
    query! {
        pub(crate) async fn set_session_id(workspace_id: WorkspaceId, session_id: Option<String>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2
            WHERE workspace_id = ?1
        }
    }
2179
    // Tags a workspace with both its session id and window id, used later by
    // `session_workspaces` to restore window ordering.
    query! {
        pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
            UPDATE workspaces
            SET session_id = ?2, window_id = ?3
            WHERE workspace_id = ?1
        }
    }
2187
    /// Loads every toolchain recorded for the given workspace (from the
    /// `toolchains` table, distinct from `user_toolchains`), together with the
    /// worktree root and worktree-relative path each applies to. Rows whose JSON
    /// payload or relative path fail to decode are skipped.
    pub(crate) async fn toolchains(
        &self,
        workspace_id: WorkspaceId,
    ) -> Result<Vec<(Toolchain, Arc<Path>, Arc<RelPath>)>> {
        self.write(move |this| {
            let mut select = this
                .select_bound(sql!(
                    SELECT
                        name, path, worktree_root_path, relative_worktree_path, language_name, raw_json
                    FROM toolchains
                    WHERE workspace_id = ?
                ))
                .context("select toolchains")?;

            let toolchain: Vec<(String, String, String, String, String, String)> =
                select(workspace_id)?;

            Ok(toolchain
                .into_iter()
                .filter_map(
                    |(name, path, worktree_root_path, relative_worktree_path, language, json)| {
                        Some((
                            Toolchain {
                                name: name.into(),
                                path: path.into(),
                                language_name: LanguageName::new(&language),
                                as_json: serde_json::Value::from_str(&json).ok()?,
                            },
                            Arc::from(worktree_root_path.as_ref()),
                            RelPath::from_proto(&relative_worktree_path).log_err()?,
                        ))
                    },
                )
                .collect())
        })
        .await
    }
2225
2226 pub async fn set_toolchain(
2227 &self,
2228 workspace_id: WorkspaceId,
2229 worktree_root_path: Arc<Path>,
2230 relative_worktree_path: Arc<RelPath>,
2231 toolchain: Toolchain,
2232 ) -> Result<()> {
2233 log::debug!(
2234 "Setting toolchain for workspace, worktree: {worktree_root_path:?}, relative path: {relative_worktree_path:?}, toolchain: {}",
2235 toolchain.name
2236 );
2237 self.write(move |conn| {
2238 let mut insert = conn
2239 .exec_bound(sql!(
2240 INSERT INTO toolchains(workspace_id, worktree_root_path, relative_worktree_path, language_name, name, path, raw_json) VALUES (?, ?, ?, ?, ?, ?, ?)
2241 ON CONFLICT DO
2242 UPDATE SET
2243 name = ?5,
2244 path = ?6,
2245 raw_json = ?7
2246 ))
2247 .context("Preparing insertion")?;
2248
2249 insert((
2250 workspace_id,
2251 worktree_root_path.to_string_lossy().into_owned(),
2252 relative_worktree_path.as_unix_str(),
2253 toolchain.language_name.as_ref(),
2254 toolchain.name.as_ref(),
2255 toolchain.path.as_ref(),
2256 toolchain.as_json.to_string(),
2257 ))?;
2258
2259 Ok(())
2260 }).await
2261 }
2262
    /// Replaces the entire persisted worktree-trust state with the given map
    /// of host (`None` = local) to trusted absolute paths. Inserts are chunked
    /// so no single statement exceeds SQLite's host-parameter limit.
    pub(crate) async fn save_trusted_worktrees(
        &self,
        trusted_worktrees: HashMap<Option<RemoteHostLocation>, HashSet<PathBuf>>,
    ) -> anyhow::Result<()> {
        use anyhow::Context as _;
        use db::sqlez::statement::Statement;
        use itertools::Itertools as _;

        // Full rewrite: drop all existing rows first, then re-insert.
        self.clear_trusted_worktrees()
            .await
            .context("clearing previous trust state")?;

        // Flatten the map into one (path, host) tuple per trusted path.
        // NOTE(review): paths are wrapped in `Some` so they bind like the
        // nullable column they target — confirm why a bare PathBuf isn't used.
        let trusted_worktrees = trusted_worktrees
            .into_iter()
            .flat_map(|(host, abs_paths)| {
                abs_paths
                    .into_iter()
                    .map(move |abs_path| (Some(abs_path), host.clone()))
            })
            .collect::<Vec<_>>();
        // Each row consumes 3 placeholders, so rows are chunked into groups of
        // MAX_QUERY_PLACEHOLDERS / 3 per INSERT statement.
        let mut first_worktree;
        let mut last_worktree = 0_usize;
        for (count, placeholders) in std::iter::once("(?, ?, ?)")
            .cycle()
            .take(trusted_worktrees.len())
            .chunks(MAX_QUERY_PLACEHOLDERS / 3)
            .into_iter()
            .map(|chunk| {
                // Count the rows in this chunk while joining its "(?, ?, ?)"
                // groups into one VALUES placeholder list.
                let mut count = 0;
                let placeholders = chunk
                    .inspect(|_| {
                        count += 1;
                    })
                    .join(", ");
                (count, placeholders)
            })
            .collect::<Vec<_>>()
        {
            // [first_worktree..last_worktree) is the slice of flattened rows
            // covered by this chunk's placeholder list.
            first_worktree = last_worktree;
            last_worktree = last_worktree + count;
            let query = format!(
                r#"INSERT INTO trusted_worktrees(absolute_path, user_name, host_name)
VALUES {placeholders};"#
            );

            let trusted_worktrees = trusted_worktrees[first_worktree..last_worktree].to_vec();
            self.write(move |conn| {
                let mut statement = Statement::prepare(conn, query)?;
                let mut next_index = 1;
                // Bind (absolute_path, user_name, host_name) triples in row
                // order; `bind` returns the next free placeholder index.
                for (abs_path, host) in trusted_worktrees {
                    let abs_path = abs_path.as_ref().map(|abs_path| abs_path.to_string_lossy());
                    next_index = statement.bind(
                        &abs_path.as_ref().map(|abs_path| abs_path.as_ref()),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host
                            .as_ref()
                            .and_then(|host| Some(host.user_name.as_ref()?.as_str())),
                        next_index,
                    )?;
                    next_index = statement.bind(
                        &host.as_ref().map(|host| host.host_identifier.as_str()),
                        next_index,
                    )?;
                }
                statement.exec()
            })
            .await
            .context("inserting new trusted state")?;
        }
        Ok(())
    }
2336
2337 pub fn fetch_trusted_worktrees(&self) -> Result<DbTrustedPaths> {
2338 let trusted_worktrees = self.trusted_worktrees()?;
2339 Ok(trusted_worktrees
2340 .into_iter()
2341 .filter_map(|(abs_path, user_name, host_name)| {
2342 let db_host = match (user_name, host_name) {
2343 (None, Some(host_name)) => Some(RemoteHostLocation {
2344 user_name: None,
2345 host_identifier: SharedString::new(host_name),
2346 }),
2347 (Some(user_name), Some(host_name)) => Some(RemoteHostLocation {
2348 user_name: Some(SharedString::new(user_name)),
2349 host_identifier: SharedString::new(host_name),
2350 }),
2351 _ => None,
2352 };
2353 Some((db_host, abs_path?))
2354 })
2355 .fold(HashMap::default(), |mut acc, (remote_host, abs_path)| {
2356 acc.entry(remote_host)
2357 .or_insert_with(HashSet::default)
2358 .insert(abs_path);
2359 acc
2360 }))
2361 }
2362
    // Raw dump of the trusted_worktrees table as
    // (absolute_path, user_name, host_name) rows; every column is nullable.
    // Interpreted by `fetch_trusted_worktrees`.
    query! {
        fn trusted_worktrees() -> Result<Vec<(Option<PathBuf>, Option<String>, Option<String>)>> {
            SELECT absolute_path, user_name, host_name
            FROM trusted_worktrees
        }
    }
2369
    // Wipes all persisted worktree-trust rows; used by
    // `save_trusted_worktrees` before re-inserting the full trust state.
    query! {
        pub async fn clear_trusted_worktrees() -> Result<()> {
            DELETE FROM trusted_worktrees
        }
    }
2375}
2376
/// A workspace row as consumed by `resolve_worktree_workspaces`:
/// (id, location, root paths, last-used timestamp).
type WorkspaceEntry = (
    WorkspaceId,
    SerializedWorkspaceLocation,
    PathList,
    DateTime<Utc>,
);
2383
2384/// Resolves workspace entries whose paths are git linked worktree checkouts
2385/// to their main repository paths.
2386///
2387/// For each workspace entry:
2388/// - If any path is a linked worktree checkout, all worktree paths in that
2389/// entry are resolved to their main repository paths, producing a new
2390/// `PathList`.
2391/// - The resolved entry is then deduplicated against existing entries: if a
2392/// workspace with the same paths already exists, the entry with the most
2393/// recent timestamp is kept.
2394pub async fn resolve_worktree_workspaces(
2395 workspaces: impl IntoIterator<Item = WorkspaceEntry>,
2396 fs: &dyn Fs,
2397) -> Vec<WorkspaceEntry> {
2398 // First pass: resolve worktree paths to main repo paths concurrently.
2399 let resolved = futures::future::join_all(workspaces.into_iter().map(|entry| async move {
2400 let paths = entry.2.paths();
2401 if paths.is_empty() {
2402 return entry;
2403 }
2404
2405 // Resolve each path concurrently
2406 let resolved_paths = futures::future::join_all(
2407 paths
2408 .iter()
2409 .map(|path| project::git_store::resolve_git_worktree_to_main_repo(fs, path)),
2410 )
2411 .await;
2412
2413 // If no paths were resolved, this entry is not a worktree — keep as-is
2414 if resolved_paths.iter().all(|r| r.is_none()) {
2415 return entry;
2416 }
2417
2418 // Build new path list, substituting resolved paths
2419 let new_paths: Vec<PathBuf> = paths
2420 .iter()
2421 .zip(resolved_paths.iter())
2422 .map(|(original, resolved)| {
2423 resolved
2424 .as_ref()
2425 .cloned()
2426 .unwrap_or_else(|| original.clone())
2427 })
2428 .collect();
2429
2430 let new_path_refs: Vec<&Path> = new_paths.iter().map(|p| p.as_path()).collect();
2431 (entry.0, entry.1, PathList::new(&new_path_refs), entry.3)
2432 }))
2433 .await;
2434
2435 // Second pass: deduplicate by PathList.
2436 // When two entries resolve to the same paths, keep the one with the
2437 // more recent timestamp.
2438 let mut seen: collections::HashMap<Vec<PathBuf>, usize> = collections::HashMap::default();
2439 let mut result: Vec<WorkspaceEntry> = Vec::new();
2440
2441 for entry in resolved {
2442 let key: Vec<PathBuf> = entry.2.paths().to_vec();
2443 if let Some(&existing_idx) = seen.get(&key) {
2444 // Keep the entry with the more recent timestamp
2445 if entry.3 > result[existing_idx].3 {
2446 result[existing_idx] = entry;
2447 }
2448 } else {
2449 seen.insert(key, result.len());
2450 result.push(entry);
2451 }
2452 }
2453
2454 result
2455}
2456
2457pub fn delete_unloaded_items(
2458 alive_items: Vec<ItemId>,
2459 workspace_id: WorkspaceId,
2460 table: &'static str,
2461 db: &ThreadSafeConnection,
2462 cx: &mut App,
2463) -> Task<Result<()>> {
2464 let db = db.clone();
2465 cx.spawn(async move |_| {
2466 let placeholders = alive_items
2467 .iter()
2468 .map(|_| "?")
2469 .collect::<Vec<&str>>()
2470 .join(", ");
2471
2472 let query = format!(
2473 "DELETE FROM {table} WHERE workspace_id = ? AND item_id NOT IN ({placeholders})"
2474 );
2475
2476 db.write(move |conn| {
2477 let mut statement = Statement::prepare(conn, query)?;
2478 let mut next_index = statement.bind(&workspace_id, 1)?;
2479 for id in alive_items {
2480 next_index = statement.bind(&id, next_index)?;
2481 }
2482 statement.exec()
2483 })
2484 .await
2485 })
2486}
2487
2488#[cfg(test)]
2489mod tests {
2490 use super::*;
2491 use crate::persistence::model::{
2492 SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, SessionWorkspace,
2493 };
2494 use gpui;
2495 use pretty_assertions::assert_eq;
2496 use remote::SshConnectionOptions;
2497 use serde_json::json;
2498 use std::{thread, time::Duration};
2499
2500 /// Creates a unique directory in a FakeFs, returning the path.
2501 /// Uses a UUID suffix to avoid collisions with other tests sharing the global DB.
2502 async fn unique_test_dir(fs: &fs::FakeFs, prefix: &str) -> PathBuf {
2503 let dir = PathBuf::from(format!("/test-dirs/{}-{}", prefix, uuid::Uuid::new_v4()));
2504 fs.insert_tree(&dir, json!({})).await;
2505 dir
2506 }
2507
    // Verifies that activating and removing workspaces inside a MultiWorkspace
    // window re-serializes its state, so the persisted active_workspace_id
    // always tracks the currently active workspace.
    #[gpui::test]
    async fn test_multi_workspace_serializes_on_add_and_remove(cx: &mut gpui::TestAppContext) {
        use crate::multi_workspace::MultiWorkspace;
        use crate::persistence::read_multi_workspace_state;
        use feature_flags::FeatureFlagAppExt;
        use gpui::AppContext as _;
        use project::Project;

        crate::tests::init_test(cx);

        // The multi-workspace UI is behind the agent-v2 staff feature flag.
        cx.update(|cx| {
            cx.set_staff(true);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project1 = Project::test(fs.clone(), [], cx).await;
        let project2 = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));

        // Random DB ids avoid clashes with other tests sharing the global DB.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // --- Add a second workspace ---
        let workspace2 = multi_workspace.update_in(cx, |mw, window, cx| {
            let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
            workspace.update(cx, |ws, _cx| ws.set_random_database_id());
            mw.activate(workspace.clone(), window, cx);
            workspace
        });

        // Run background tasks so serialize has a chance to flush.
        cx.run_until_parked();

        // Read back the persisted state and check that the active workspace ID was written.
        let state_after_add = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let active_workspace2_db_id = workspace2.read_with(cx, |ws, _| ws.database_id());
        assert_eq!(
            state_after_add.active_workspace_id, active_workspace2_db_id,
            "After adding a second workspace, the serialized active_workspace_id should match \
             the newly activated workspace's database id"
        );

        // --- Remove the second workspace (index 1) ---
        multi_workspace.update_in(cx, |mw, window, cx| {
            let ws = mw.workspaces()[1].clone();
            mw.remove(&ws, window, cx);
        });

        cx.run_until_parked();

        // Removal should fall back to the remaining workspace and persist it.
        let state_after_remove = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        let remaining_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert_eq!(
            state_after_remove.active_workspace_id, remaining_db_id,
            "After removing a workspace, the serialized active_workspace_id should match \
             the remaining active workspace's database id"
        );
    }
2574
2575 #[gpui::test]
2576 async fn test_breakpoints() {
2577 zlog::init_test();
2578
2579 let db = WorkspaceDb::open_test_db("test_breakpoints").await;
2580 let id = db.next_id().await.unwrap();
2581
2582 let path = Path::new("/tmp/test.rs");
2583
2584 let breakpoint = Breakpoint {
2585 position: 123,
2586 message: None,
2587 state: BreakpointState::Enabled,
2588 condition: None,
2589 hit_condition: None,
2590 };
2591
2592 let log_breakpoint = Breakpoint {
2593 position: 456,
2594 message: Some("Test log message".into()),
2595 state: BreakpointState::Enabled,
2596 condition: None,
2597 hit_condition: None,
2598 };
2599
2600 let disable_breakpoint = Breakpoint {
2601 position: 578,
2602 message: None,
2603 state: BreakpointState::Disabled,
2604 condition: None,
2605 hit_condition: None,
2606 };
2607
2608 let condition_breakpoint = Breakpoint {
2609 position: 789,
2610 message: None,
2611 state: BreakpointState::Enabled,
2612 condition: Some("x > 5".into()),
2613 hit_condition: None,
2614 };
2615
2616 let hit_condition_breakpoint = Breakpoint {
2617 position: 999,
2618 message: None,
2619 state: BreakpointState::Enabled,
2620 condition: None,
2621 hit_condition: Some(">= 3".into()),
2622 };
2623
2624 let workspace = SerializedWorkspace {
2625 id,
2626 paths: PathList::new(&["/tmp"]),
2627 location: SerializedWorkspaceLocation::Local,
2628 center_group: Default::default(),
2629 window_bounds: Default::default(),
2630 display: Default::default(),
2631 docks: Default::default(),
2632 centered_layout: false,
2633 breakpoints: {
2634 let mut map = collections::BTreeMap::default();
2635 map.insert(
2636 Arc::from(path),
2637 vec![
2638 SourceBreakpoint {
2639 row: breakpoint.position,
2640 path: Arc::from(path),
2641 message: breakpoint.message.clone(),
2642 state: breakpoint.state,
2643 condition: breakpoint.condition.clone(),
2644 hit_condition: breakpoint.hit_condition.clone(),
2645 },
2646 SourceBreakpoint {
2647 row: log_breakpoint.position,
2648 path: Arc::from(path),
2649 message: log_breakpoint.message.clone(),
2650 state: log_breakpoint.state,
2651 condition: log_breakpoint.condition.clone(),
2652 hit_condition: log_breakpoint.hit_condition.clone(),
2653 },
2654 SourceBreakpoint {
2655 row: disable_breakpoint.position,
2656 path: Arc::from(path),
2657 message: disable_breakpoint.message.clone(),
2658 state: disable_breakpoint.state,
2659 condition: disable_breakpoint.condition.clone(),
2660 hit_condition: disable_breakpoint.hit_condition.clone(),
2661 },
2662 SourceBreakpoint {
2663 row: condition_breakpoint.position,
2664 path: Arc::from(path),
2665 message: condition_breakpoint.message.clone(),
2666 state: condition_breakpoint.state,
2667 condition: condition_breakpoint.condition.clone(),
2668 hit_condition: condition_breakpoint.hit_condition.clone(),
2669 },
2670 SourceBreakpoint {
2671 row: hit_condition_breakpoint.position,
2672 path: Arc::from(path),
2673 message: hit_condition_breakpoint.message.clone(),
2674 state: hit_condition_breakpoint.state,
2675 condition: hit_condition_breakpoint.condition.clone(),
2676 hit_condition: hit_condition_breakpoint.hit_condition.clone(),
2677 },
2678 ],
2679 );
2680 map
2681 },
2682 session_id: None,
2683 window_id: None,
2684 user_toolchains: Default::default(),
2685 };
2686
2687 db.save_workspace(workspace.clone()).await;
2688
2689 let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
2690 let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(path)).unwrap();
2691
2692 assert_eq!(loaded_breakpoints.len(), 5);
2693
2694 // normal breakpoint
2695 assert_eq!(loaded_breakpoints[0].row, breakpoint.position);
2696 assert_eq!(loaded_breakpoints[0].message, breakpoint.message);
2697 assert_eq!(loaded_breakpoints[0].condition, breakpoint.condition);
2698 assert_eq!(
2699 loaded_breakpoints[0].hit_condition,
2700 breakpoint.hit_condition
2701 );
2702 assert_eq!(loaded_breakpoints[0].state, breakpoint.state);
2703 assert_eq!(loaded_breakpoints[0].path, Arc::from(path));
2704
2705 // enabled breakpoint
2706 assert_eq!(loaded_breakpoints[1].row, log_breakpoint.position);
2707 assert_eq!(loaded_breakpoints[1].message, log_breakpoint.message);
2708 assert_eq!(loaded_breakpoints[1].condition, log_breakpoint.condition);
2709 assert_eq!(
2710 loaded_breakpoints[1].hit_condition,
2711 log_breakpoint.hit_condition
2712 );
2713 assert_eq!(loaded_breakpoints[1].state, log_breakpoint.state);
2714 assert_eq!(loaded_breakpoints[1].path, Arc::from(path));
2715
2716 // disable breakpoint
2717 assert_eq!(loaded_breakpoints[2].row, disable_breakpoint.position);
2718 assert_eq!(loaded_breakpoints[2].message, disable_breakpoint.message);
2719 assert_eq!(
2720 loaded_breakpoints[2].condition,
2721 disable_breakpoint.condition
2722 );
2723 assert_eq!(
2724 loaded_breakpoints[2].hit_condition,
2725 disable_breakpoint.hit_condition
2726 );
2727 assert_eq!(loaded_breakpoints[2].state, disable_breakpoint.state);
2728 assert_eq!(loaded_breakpoints[2].path, Arc::from(path));
2729
2730 // condition breakpoint
2731 assert_eq!(loaded_breakpoints[3].row, condition_breakpoint.position);
2732 assert_eq!(loaded_breakpoints[3].message, condition_breakpoint.message);
2733 assert_eq!(
2734 loaded_breakpoints[3].condition,
2735 condition_breakpoint.condition
2736 );
2737 assert_eq!(
2738 loaded_breakpoints[3].hit_condition,
2739 condition_breakpoint.hit_condition
2740 );
2741 assert_eq!(loaded_breakpoints[3].state, condition_breakpoint.state);
2742 assert_eq!(loaded_breakpoints[3].path, Arc::from(path));
2743
2744 // hit condition breakpoint
2745 assert_eq!(loaded_breakpoints[4].row, hit_condition_breakpoint.position);
2746 assert_eq!(
2747 loaded_breakpoints[4].message,
2748 hit_condition_breakpoint.message
2749 );
2750 assert_eq!(
2751 loaded_breakpoints[4].condition,
2752 hit_condition_breakpoint.condition
2753 );
2754 assert_eq!(
2755 loaded_breakpoints[4].hit_condition,
2756 hit_condition_breakpoint.hit_condition
2757 );
2758 assert_eq!(loaded_breakpoints[4].state, hit_condition_breakpoint.state);
2759 assert_eq!(loaded_breakpoints[4].path, Arc::from(path));
2760 }
2761
    // Verifies that re-saving a workspace with an empty breakpoint map removes
    // a previously persisted breakpoint, i.e. saving is a full overwrite
    // rather than a merge.
    #[gpui::test]
    async fn test_remove_last_breakpoint() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_remove_last_breakpoint").await;
        let id = db.next_id().await.unwrap();

        let singular_path = Path::new("/tmp/test_remove_last_breakpoint.rs");

        let breakpoint_to_remove = Breakpoint {
            position: 100,
            message: None,
            state: BreakpointState::Enabled,
            condition: None,
            hit_condition: None,
        };

        // First save: the workspace carries exactly one breakpoint.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: {
                let mut map = collections::BTreeMap::default();
                map.insert(
                    Arc::from(singular_path),
                    vec![SourceBreakpoint {
                        row: breakpoint_to_remove.position,
                        path: Arc::from(singular_path),
                        message: None,
                        state: BreakpointState::Enabled,
                        condition: None,
                        hit_condition: None,
                    }],
                );
                map
            },
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Sanity-check the breakpoint round-trips before removal.
        let loaded = db.workspace_for_roots(&["/tmp"]).unwrap();
        let loaded_breakpoints = loaded.breakpoints.get(&Arc::from(singular_path)).unwrap();

        assert_eq!(loaded_breakpoints.len(), 1);
        assert_eq!(loaded_breakpoints[0].row, breakpoint_to_remove.position);
        assert_eq!(loaded_breakpoints[0].message, breakpoint_to_remove.message);
        assert_eq!(
            loaded_breakpoints[0].condition,
            breakpoint_to_remove.condition
        );
        assert_eq!(
            loaded_breakpoints[0].hit_condition,
            breakpoint_to_remove.hit_condition
        );
        assert_eq!(loaded_breakpoints[0].state, breakpoint_to_remove.state);
        assert_eq!(loaded_breakpoints[0].path, Arc::from(singular_path));

        // Second save: same workspace id, but an empty breakpoint map.
        let workspace_without_breakpoint = SerializedWorkspace {
            id,
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: collections::BTreeMap::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_without_breakpoint.clone())
            .await;

        // The path must no longer appear in the loaded breakpoint map at all.
        let loaded_after_remove = db.workspace_for_roots(&["/tmp"]).unwrap();
        let empty_breakpoints = loaded_after_remove
            .breakpoints
            .get(&Arc::from(singular_path));

        assert!(empty_breakpoints.is_none());
    }
2852
    // Verifies that next_id() actually inserts a workspace row (so that
    // foreign keys referencing it are valid immediately), by pointing a child
    // table at the freshly allocated id.
    #[gpui::test]
    async fn test_next_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_next_id_stability").await;

        // Child table whose rows are cascade-deleted with their workspace.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;
                )],
                &mut |_, _, _| false,
            )
            .unwrap();
        })
        .await;

        let id = db.next_id().await.unwrap();
        // Assert the empty row got inserted
        assert_eq!(
            Some(id),
            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
            ))
            .unwrap()(id)
            .unwrap()
        );

        db.write(move |conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", id))
                .unwrap()
        })
        .await;

        // NOTE(review): binds the literal 1 rather than `id`, relying on a
        // fresh test DB allocating 1 as the first workspace id — confirm.
        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
2901
    // Verifies that a workspace keeps its id across path changes and repeated
    // saves: rows in a child table keyed by workspace_id must still resolve
    // after the workspaces' paths are rewritten and re-saved.
    #[gpui::test]
    async fn test_workspace_id_stability() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_workspace_id_stability").await;

        // Child table with a cascade-delete FK onto workspaces.
        db.write(|conn| {
            conn.migrate(
                "test_table",
                &[sql!(
                    CREATE TABLE test_table(
                        text TEXT,
                        workspace_id INTEGER,
                        FOREIGN KEY(workspace_id)
                            REFERENCES workspaces(workspace_id)
                        ON DELETE CASCADE
                    ) STRICT;)],
                &mut |_, _, _| false,
            )
        })
        .await
        .unwrap();

        let mut workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;

        // Attach a child row to each workspace id.
        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-1", 1))
                .unwrap();
        })
        .await;

        db.save_workspace(workspace_2.clone()).await;

        db.write(|conn| {
            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                .unwrap()(("test-text-2", 2))
                .unwrap();
        })
        .await;

        // Change workspace_1's paths, then re-save both workspaces (including
        // a deliberate duplicate save) — the child rows must survive.
        workspace_1.paths = PathList::new(&["/tmp", "/tmp3"]);
        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_1).await;
        db.save_workspace(workspace_2).await;

        let test_text_2 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(2)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_2, "test-text-2");

        let test_text_1 = db
            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
            .unwrap()(1)
            .unwrap()
            .unwrap();
        assert_eq!(test_text_1, "test-text-1");
    }
2992
2993 fn group(axis: Axis, children: Vec<SerializedPaneGroup>) -> SerializedPaneGroup {
2994 SerializedPaneGroup::Group {
2995 axis: SerializedAxis(axis),
2996 flexes: None,
2997 children,
2998 }
2999 }
3000
    // Round-trips a workspace with a nested pane layout (a vertical split
    // inside a horizontal split) and verifies that repeated saves with the
    // same item/group ids do not corrupt the stored layout.
    #[gpui::test]
    async fn test_full_workspace_serialization() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_full_workspace_serialization").await;

        // -----------------
        // | 1,2   | 5,6   |
        // | - - - |       |
        // | 3,4   |       |
        // -----------------
        let center_group = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 5, false, false),
                                SerializedItem::new("Terminal", 6, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 7, true, false),
                                SerializedItem::new("Terminal", 8, false, false),
                            ],
                            false,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 9, false, false),
                        SerializedItem::new("Terminal", 10, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let workspace = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group,
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(999),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace.clone()).await;

        // Lookup is order-insensitive: query with the paths reversed.
        let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());

        // Test guaranteed duplicate IDs
        db.save_workspace(workspace.clone()).await;
        db.save_workspace(workspace.clone()).await;

        let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace, round_trip_workspace.unwrap());
    }
3074
    // Verifies how path sets map to workspaces: lookups are order-insensitive,
    // and re-saving a workspace with a new path set reassigns that set to it.
    #[gpui::test]
    async fn test_workspace_assignment() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_basic_functionality").await;

        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp", "/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(1),
            user_toolchains: Default::default(),
        };

        let mut workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: Some(2),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        db.save_workspace(workspace_2.clone()).await;

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        workspace_2.paths = PathList::new(&["/tmp", "/tmp2"]);

        // workspace_2 now claims the path set previously owned by workspace_1.
        db.save_workspace(workspace_2.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        let mut workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp2", "/tmp"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: Some(3),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_3.clone()).await;
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        workspace_3.paths = PathList::new(&["/tmp3", "/tmp4", "/tmp2"]);
        db.save_workspace(workspace_3.clone()).await;
        // NOTE(review): "tmp" below has no leading slash — possibly a typo for
        // "/tmp"; either way no workspace matches this set, so None holds.
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }
3169
3170 #[gpui::test]
    /// Checks that `session_workspaces` returns exactly the workspaces saved
    /// under the queried session id, ordered most-recently-saved first, and
    /// that window ids and — for remote workspaces — the remote connection id
    /// round-trip through the database.
    async fn test_session_workspaces() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_serializing_workspaces_session_id").await;

        // First of two workspaces in "session-id-1".
        let workspace_1 = SerializedWorkspace {
            id: WorkspaceId(1),
            paths: PathList::new(&["/tmp1"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(10),
            user_toolchains: Default::default(),
        };

        // Second workspace in "session-id-1"; saved later than workspace_1,
        // so it must sort before it in the query result.
        let workspace_2 = SerializedWorkspace {
            id: WorkspaceId(2),
            paths: PathList::new(&["/tmp2"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-1".to_owned()),
            window_id: Some(20),
            user_toolchains: Default::default(),
        };

        // First of two workspaces in "session-id-2".
        let workspace_3 = SerializedWorkspace {
            id: WorkspaceId(3),
            paths: PathList::new(&["/tmp3"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(30),
            user_toolchains: Default::default(),
        };

        // No session id at all: must never appear in any session query.
        let workspace_4 = SerializedWorkspace {
            id: WorkspaceId(4),
            paths: PathList::new(&["/tmp4"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // SSH connection backing the remote workspace below.
        let connection_id = db
            .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: "my-host".into(),
                port: Some(1234),
                ..Default::default()
            }))
            .await
            .unwrap();

        // Remote workspace in "session-id-2"; has no local paths, but must
        // carry its connection id through the query result.
        let workspace_5 = SerializedWorkspace {
            id: WorkspaceId(5),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(
                db.remote_connection(connection_id).unwrap(),
            ),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            breakpoints: Default::default(),
            session_id: Some("session-id-2".to_owned()),
            window_id: Some(50),
            user_toolchains: Default::default(),
        };

        // Multi-root workspace: the root path order must round-trip as given.
        let workspace_6 = SerializedWorkspace {
            id: WorkspaceId(6),
            paths: PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            breakpoints: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("session-id-3".to_owned()),
            window_id: Some(60),
            user_toolchains: Default::default(),
        };

        db.save_workspace(workspace_1.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_2.clone()).await;
        db.save_workspace(workspace_3.clone()).await;
        thread::sleep(Duration::from_millis(1000)); // Force timestamps to increment
        db.save_workspace(workspace_4.clone()).await;
        db.save_workspace(workspace_5.clone()).await;
        db.save_workspace(workspace_6.clone()).await;

        // Later timestamp sorts first: workspace_2 before workspace_1.
        let locations = db.session_workspaces("session-id-1".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(2));
        assert_eq!(locations[0].1, PathList::new(&["/tmp2"]));
        assert_eq!(locations[0].2, Some(20));
        assert_eq!(locations[1].0, WorkspaceId(1));
        assert_eq!(locations[1].1, PathList::new(&["/tmp1"]));
        assert_eq!(locations[1].2, Some(10));

        // The remote workspace's connection id comes back in the fourth
        // tuple slot; workspace_4 (no session) is absent.
        let locations = db.session_workspaces("session-id-2".to_owned()).unwrap();
        assert_eq!(locations.len(), 2);
        assert_eq!(locations[0].0, WorkspaceId(5));
        assert_eq!(locations[0].1, PathList::default());
        assert_eq!(locations[0].2, Some(50));
        assert_eq!(locations[0].3, Some(connection_id));
        assert_eq!(locations[1].0, WorkspaceId(3));
        assert_eq!(locations[1].1, PathList::new(&["/tmp3"]));
        assert_eq!(locations[1].2, Some(30));

        let locations = db.session_workspaces("session-id-3".to_owned()).unwrap();
        assert_eq!(locations.len(), 1);
        assert_eq!(locations[0].0, WorkspaceId(6));
        assert_eq!(
            locations[0].1,
            PathList::new(&["/tmp6c", "/tmp6b", "/tmp6a"]),
        );
        assert_eq!(locations[0].2, Some(60));
    }
3314
3315 fn default_workspace<P: AsRef<Path>>(
3316 paths: &[P],
3317 center_group: &SerializedPaneGroup,
3318 ) -> SerializedWorkspace {
3319 SerializedWorkspace {
3320 id: WorkspaceId(4),
3321 paths: PathList::new(paths),
3322 location: SerializedWorkspaceLocation::Local,
3323 center_group: center_group.clone(),
3324 window_bounds: Default::default(),
3325 display: Default::default(),
3326 docks: Default::default(),
3327 breakpoints: Default::default(),
3328 centered_layout: false,
3329 session_id: None,
3330 window_id: None,
3331 user_toolchains: Default::default(),
3332 }
3333 }
3334
    /// Checks that `last_session_workspace_locations` returns one entry per
    /// workspace in the session, ordered to match the provided window stack
    /// (top of the stack first), with paths and window ids intact.
    #[gpui::test]
    async fn test_last_session_workspace_locations(cx: &mut gpui::TestAppContext) {
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();

        // NOTE(review): the fs handle is passed to
        // last_session_workspace_locations below; presumably roots missing
        // from the filesystem are filtered out of the result — confirm.
        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces").await;

        // (workspace id, root paths, window id) — all in the same session.
        let workspaces = [
            (1, vec![dir1.path()], 9),
            (2, vec![dir2.path()], 5),
            (3, vec![dir3.path()], 8),
            (4, vec![dir4.path()], 2),
            (5, vec![dir1.path(), dir2.path(), dir3.path()], 3),
            (6, vec![dir4.path(), dir3.path(), dir2.path()], 4),
        ]
        .into_iter()
        .map(|(id, paths, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::new(paths.as_slice()),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        // Window stacking order the caller observed at quit time.
        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9),
            WindowId::from(3),
            WindowId::from(4), // Bottom
        ]));

        let locations = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        // Result order must follow the stack: window 2 (top) … window 4 (bottom).
        assert_eq!(
            locations,
            [
                SessionWorkspace {
                    workspace_id: WorkspaceId(4),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path()]),
                    window_id: Some(WindowId::from(2u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(3),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir3.path()]),
                    window_id: Some(WindowId::from(8u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(2),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir2.path()]),
                    window_id: Some(WindowId::from(5u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(1),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path()]),
                    window_id: Some(WindowId::from(9u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(5),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir1.path(), dir2.path(), dir3.path()]),
                    window_id: Some(WindowId::from(3u64)),
                },
                SessionWorkspace {
                    workspace_id: WorkspaceId(6),
                    location: SerializedWorkspaceLocation::Local,
                    paths: PathList::new(&[dir4.path(), dir3.path(), dir2.path()]),
                    window_id: Some(WindowId::from(4u64)),
                },
            ]
        );
    }
3435
    /// Same as `test_last_session_workspace_locations`, but for remote (SSH)
    /// workspaces: the remote connection options must round-trip and the
    /// result order must follow the window stack.
    #[gpui::test]
    async fn test_last_session_workspace_locations_remote(cx: &mut gpui::TestAppContext) {
        let fs = fs::FakeFs::new(cx.executor());
        let db =
            WorkspaceDb::open_test_db("test_serializing_workspaces_last_session_workspaces_remote")
                .await;

        // Register four distinct SSH connections and keep their options.
        let remote_connections = [
            ("host-1", "my-user-1"),
            ("host-2", "my-user-2"),
            ("host-3", "my-user-3"),
            ("host-4", "my-user-4"),
        ]
        .into_iter()
        .map(|(host, user)| async {
            let options = RemoteConnectionOptions::Ssh(SshConnectionOptions {
                host: host.into(),
                username: Some(user.to_string()),
                ..Default::default()
            });
            db.get_or_create_remote_connection(options.clone())
                .await
                .unwrap();
            options
        })
        .collect::<Vec<_>>();

        let remote_connections = futures::future::join_all(remote_connections).await;

        // (workspace id, remote connection, window id) — one session.
        let workspaces = [
            (1, remote_connections[0].clone(), 9),
            (2, remote_connections[1].clone(), 5),
            (3, remote_connections[2].clone(), 8),
            (4, remote_connections[3].clone(), 2),
        ]
        .into_iter()
        .map(|(id, remote_connection, window_id)| SerializedWorkspace {
            id: WorkspaceId(id),
            paths: PathList::default(),
            location: SerializedWorkspaceLocation::Remote(remote_connection),
            center_group: Default::default(),
            window_bounds: Default::default(),
            display: Default::default(),
            docks: Default::default(),
            centered_layout: false,
            session_id: Some("one-session".to_owned()),
            breakpoints: Default::default(),
            window_id: Some(window_id),
            user_toolchains: Default::default(),
        })
        .collect::<Vec<_>>();

        for workspace in workspaces.iter() {
            db.save_workspace(workspace.clone()).await;
        }

        let stack = Some(Vec::from([
            WindowId::from(2), // Top
            WindowId::from(8),
            WindowId::from(5),
            WindowId::from(9), // Bottom
        ]));

        let have = db
            .last_session_workspace_locations("one-session", stack, fs.as_ref())
            .await
            .unwrap();
        assert_eq!(have.len(), 4);
        // Stack top (window 2) maps to workspace 4, and so on down the stack.
        assert_eq!(
            have[0],
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Remote(remote_connections[3].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(2u64)),
            }
        );
        assert_eq!(
            have[1],
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Remote(remote_connections[2].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(8u64)),
            }
        );
        assert_eq!(
            have[2],
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Remote(remote_connections[1].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(5u64)),
            }
        );
        assert_eq!(
            have[3],
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Remote(remote_connections[0].clone()),
                paths: PathList::default(),
                window_id: Some(WindowId::from(9u64)),
            }
        );
    }
3541
3542 #[gpui::test]
3543 async fn test_get_or_create_ssh_project() {
3544 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project").await;
3545
3546 let host = "example.com".to_string();
3547 let port = Some(22_u16);
3548 let user = Some("user".to_string());
3549
3550 let connection_id = db
3551 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3552 host: host.clone().into(),
3553 port,
3554 username: user.clone(),
3555 ..Default::default()
3556 }))
3557 .await
3558 .unwrap();
3559
3560 // Test that calling the function again with the same parameters returns the same project
3561 let same_connection = db
3562 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3563 host: host.clone().into(),
3564 port,
3565 username: user.clone(),
3566 ..Default::default()
3567 }))
3568 .await
3569 .unwrap();
3570
3571 assert_eq!(connection_id, same_connection);
3572
3573 // Test with different parameters
3574 let host2 = "otherexample.com".to_string();
3575 let port2 = None;
3576 let user2 = Some("otheruser".to_string());
3577
3578 let different_connection = db
3579 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3580 host: host2.clone().into(),
3581 port: port2,
3582 username: user2.clone(),
3583 ..Default::default()
3584 }))
3585 .await
3586 .unwrap();
3587
3588 assert_ne!(connection_id, different_connection);
3589 }
3590
3591 #[gpui::test]
3592 async fn test_get_or_create_ssh_project_with_null_user() {
3593 let db = WorkspaceDb::open_test_db("test_get_or_create_ssh_project_with_null_user").await;
3594
3595 let (host, port, user) = ("example.com".to_string(), None, None);
3596
3597 let connection_id = db
3598 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3599 host: host.clone().into(),
3600 port,
3601 username: None,
3602 ..Default::default()
3603 }))
3604 .await
3605 .unwrap();
3606
3607 let same_connection_id = db
3608 .get_or_create_remote_connection(RemoteConnectionOptions::Ssh(SshConnectionOptions {
3609 host: host.clone().into(),
3610 port,
3611 username: user.clone(),
3612 ..Default::default()
3613 }))
3614 .await
3615 .unwrap();
3616
3617 assert_eq!(connection_id, same_connection_id);
3618 }
3619
    /// Checks that `remote_connections` returns every stored connection,
    /// keyed by the id that `get_or_create_remote_connection` handed out,
    /// with host/port/username round-tripped exactly (including `None`s).
    #[gpui::test]
    async fn test_get_remote_connections() {
        let db = WorkspaceDb::open_test_db("test_get_remote_connections").await;

        // (host, port, username) triples covering all optional-field combos.
        let connections = [
            ("example.com".to_string(), None, None),
            (
                "anotherexample.com".to_string(),
                Some(123_u16),
                Some("user2".to_string()),
            ),
            ("yetanother.com".to_string(), Some(345_u16), None),
        ];

        let mut ids = Vec::new();
        for (host, port, user) in connections.iter() {
            ids.push(
                db.get_or_create_remote_connection(RemoteConnectionOptions::Ssh(
                    SshConnectionOptions {
                        host: host.clone().into(),
                        port: *port,
                        username: user.clone(),
                        ..Default::default()
                    },
                ))
                .await
                .unwrap(),
            );
        }

        // Compare as a map: ids must pair with the options they were created
        // from, independent of iteration order.
        let stored_connections = db.remote_connections().unwrap();
        assert_eq!(
            stored_connections,
            [
                (
                    ids[0],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "example.com".into(),
                        port: None,
                        username: None,
                        ..Default::default()
                    }),
                ),
                (
                    ids[1],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "anotherexample.com".into(),
                        port: Some(123),
                        username: Some("user2".into()),
                        ..Default::default()
                    }),
                ),
                (
                    ids[2],
                    RemoteConnectionOptions::Ssh(SshConnectionOptions {
                        host: "yetanother.com".into(),
                        port: Some(345),
                        username: None,
                        ..Default::default()
                    }),
                ),
            ]
            .into_iter()
            .collect::<HashMap<_, _>>(),
        );
    }
3686
3687 #[gpui::test]
3688 async fn test_simple_split() {
3689 zlog::init_test();
3690
3691 let db = WorkspaceDb::open_test_db("simple_split").await;
3692
3693 // -----------------
3694 // | 1,2 | 5,6 |
3695 // | - - - | |
3696 // | 3,4 | |
3697 // -----------------
3698 let center_pane = group(
3699 Axis::Horizontal,
3700 vec![
3701 group(
3702 Axis::Vertical,
3703 vec![
3704 SerializedPaneGroup::Pane(SerializedPane::new(
3705 vec![
3706 SerializedItem::new("Terminal", 1, false, false),
3707 SerializedItem::new("Terminal", 2, true, false),
3708 ],
3709 false,
3710 0,
3711 )),
3712 SerializedPaneGroup::Pane(SerializedPane::new(
3713 vec![
3714 SerializedItem::new("Terminal", 4, false, false),
3715 SerializedItem::new("Terminal", 3, true, false),
3716 ],
3717 true,
3718 0,
3719 )),
3720 ],
3721 ),
3722 SerializedPaneGroup::Pane(SerializedPane::new(
3723 vec![
3724 SerializedItem::new("Terminal", 5, true, false),
3725 SerializedItem::new("Terminal", 6, false, false),
3726 ],
3727 false,
3728 0,
3729 )),
3730 ],
3731 );
3732
3733 let workspace = default_workspace(&["/tmp"], ¢er_pane);
3734
3735 db.save_workspace(workspace.clone()).await;
3736
3737 let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();
3738
3739 assert_eq!(workspace.center_group, new_workspace.center_group);
3740 }
3741
    /// Checks that re-saving a workspace with a different (smaller) pane
    /// layout fully replaces the previously stored panes and items rather
    /// than merging with them — reloading must yield only the new layout.
    #[gpui::test]
    async fn test_cleanup_panes() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_cleanup_panes").await;

        // Initial layout: horizontal split with a nested vertical split.
        let center_pane = group(
            Axis::Horizontal,
            vec![
                group(
                    Axis::Vertical,
                    vec![
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 1, false, false),
                                SerializedItem::new("Terminal", 2, true, false),
                            ],
                            false,
                            0,
                        )),
                        SerializedPaneGroup::Pane(SerializedPane::new(
                            vec![
                                SerializedItem::new("Terminal", 4, false, false),
                                SerializedItem::new("Terminal", 3, true, false),
                            ],
                            true,
                            0,
                        )),
                    ],
                ),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 5, false, false),
                        SerializedItem::new("Terminal", 6, true, false),
                    ],
                    false,
                    0,
                )),
            ],
        );

        let id = &["/tmp"];

        let mut workspace = default_workspace(id, &center_pane);

        db.save_workspace(workspace.clone()).await;

        // Replace the layout with a simpler vertical split that drops the
        // third pane (items 5/6) entirely.
        workspace.center_group = group(
            Axis::Vertical,
            vec![
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 1, false, false),
                        SerializedItem::new("Terminal", 2, true, false),
                    ],
                    false,
                    0,
                )),
                SerializedPaneGroup::Pane(SerializedPane::new(
                    vec![
                        SerializedItem::new("Terminal", 4, true, false),
                        SerializedItem::new("Terminal", 3, false, false),
                    ],
                    true,
                    0,
                )),
            ],
        );

        db.save_workspace(workspace.clone()).await;

        // Reloading must return exactly the second layout — no stale panes.
        let new_workspace = db.workspace_for_roots(id).unwrap();

        assert_eq!(workspace.center_group, new_workspace.center_group);
    }
3817
    /// Checks that window bounds and display saved via
    /// `set_window_open_status` are retrievable for an empty (no-paths)
    /// workspace through `workspace_for_id` — path-based lookup cannot work
    /// for such workspaces.
    #[gpui::test]
    async fn test_empty_workspace_window_bounds() {
        zlog::init_test();

        let db = WorkspaceDb::open_test_db("test_empty_workspace_window_bounds").await;
        let id = db.next_id().await.unwrap();

        // Create a workspace with empty paths (empty workspace)
        let empty_paths: &[&str] = &[];
        let display_uuid = Uuid::new_v4();
        let window_bounds = SerializedWindowBounds(WindowBounds::Windowed(Bounds {
            origin: point(px(100.0), px(200.0)),
            size: size(px(800.0), px(600.0)),
        }));

        // Note: bounds/display are deliberately None here; they are written
        // through set_window_open_status below, mirroring production flow.
        let workspace = SerializedWorkspace {
            id,
            paths: PathList::new(empty_paths),
            location: SerializedWorkspaceLocation::Local,
            center_group: Default::default(),
            window_bounds: None,
            display: None,
            docks: Default::default(),
            breakpoints: Default::default(),
            centered_layout: false,
            session_id: None,
            window_id: None,
            user_toolchains: Default::default(),
        };

        // Save the workspace (this creates the record with empty paths)
        db.save_workspace(workspace.clone()).await;

        // Save window bounds separately (as the actual code does via set_window_open_status)
        db.set_window_open_status(id, window_bounds, display_uuid)
            .await
            .unwrap();

        // Empty workspaces cannot be retrieved by paths (they'd all match).
        // They must be retrieved by workspace_id.
        assert!(db.workspace_for_roots(empty_paths).is_none());

        // Retrieve using workspace_for_id instead
        let retrieved = db.workspace_for_id(id).unwrap();

        // Verify window bounds were persisted
        assert_eq!(retrieved.id, id);
        assert!(retrieved.window_bounds.is_some());
        assert_eq!(retrieved.window_bounds.unwrap().0, window_bounds.0);
        assert!(retrieved.display.is_some());
        assert_eq!(retrieved.display.unwrap(), display_uuid);
    }
3870
    /// Checks that every workspace returned by
    /// `last_session_workspace_locations` carries its window id, so callers
    /// can group co-windowed workspaces back into MultiWorkspace windows.
    #[gpui::test]
    async fn test_last_session_workspace_locations_groups_by_window_id(
        cx: &mut gpui::TestAppContext,
    ) {
        let dir1 = tempfile::TempDir::with_prefix("dir1").unwrap();
        let dir2 = tempfile::TempDir::with_prefix("dir2").unwrap();
        let dir3 = tempfile::TempDir::with_prefix("dir3").unwrap();
        let dir4 = tempfile::TempDir::with_prefix("dir4").unwrap();
        let dir5 = tempfile::TempDir::with_prefix("dir5").unwrap();

        let fs = fs::FakeFs::new(cx.executor());
        fs.insert_tree(dir1.path(), json!({})).await;
        fs.insert_tree(dir2.path(), json!({})).await;
        fs.insert_tree(dir3.path(), json!({})).await;
        fs.insert_tree(dir4.path(), json!({})).await;
        fs.insert_tree(dir5.path(), json!({})).await;

        let db =
            WorkspaceDb::open_test_db("test_last_session_workspace_locations_groups_by_window_id")
                .await;

        // Simulate two MultiWorkspace windows each containing two workspaces,
        // plus one single-workspace window:
        // Window 10: workspace 1, workspace 2
        // Window 20: workspace 3, workspace 4
        // Window 30: workspace 5 (only one)
        //
        // On session restore, the caller should be able to group these by
        // window_id to reconstruct the MultiWorkspace windows.
        let workspaces_data: Vec<(i64, &Path, u64)> = vec![
            (1, dir1.path(), 10),
            (2, dir2.path(), 10),
            (3, dir3.path(), 20),
            (4, dir4.path(), 20),
            (5, dir5.path(), 30),
        ];

        for (id, dir, window_id) in &workspaces_data {
            db.save_workspace(SerializedWorkspace {
                id: WorkspaceId(*id),
                paths: PathList::new(&[*dir]),
                location: SerializedWorkspaceLocation::Local,
                center_group: Default::default(),
                window_bounds: Default::default(),
                display: Default::default(),
                docks: Default::default(),
                centered_layout: false,
                session_id: Some("test-session".to_owned()),
                breakpoints: Default::default(),
                window_id: Some(*window_id),
                user_toolchains: Default::default(),
            })
            .await;
        }

        // No window stack this time — only grouping is under test.
        let locations = db
            .last_session_workspace_locations("test-session", None, fs.as_ref())
            .await
            .unwrap();

        // All 5 workspaces should be returned with their window_ids.
        assert_eq!(locations.len(), 5);

        // Every entry should have a window_id so the caller can group them.
        for session_workspace in &locations {
            assert!(
                session_workspace.window_id.is_some(),
                "workspace {:?} missing window_id",
                session_workspace.workspace_id
            );
        }

        // Group by window_id, simulating what the restoration code should do.
        let mut by_window: HashMap<WindowId, Vec<WorkspaceId>> = HashMap::default();
        for session_workspace in &locations {
            if let Some(window_id) = session_workspace.window_id {
                by_window
                    .entry(window_id)
                    .or_default()
                    .push(session_workspace.workspace_id);
            }
        }

        // Should produce 3 windows, not 5.
        assert_eq!(
            by_window.len(),
            3,
            "Expected 3 window groups, got {}: {:?}",
            by_window.len(),
            by_window
        );

        // Window 10 should contain workspaces 1 and 2.
        let window_10 = by_window.get(&WindowId::from(10u64)).unwrap();
        assert_eq!(window_10.len(), 2);
        assert!(window_10.contains(&WorkspaceId(1)));
        assert!(window_10.contains(&WorkspaceId(2)));

        // Window 20 should contain workspaces 3 and 4.
        let window_20 = by_window.get(&WindowId::from(20u64)).unwrap();
        assert_eq!(window_20.len(), 2);
        assert!(window_20.contains(&WorkspaceId(3)));
        assert!(window_20.contains(&WorkspaceId(4)));

        // Window 30 should contain only workspace 5.
        let window_30 = by_window.get(&WindowId::from(30u64)).unwrap();
        assert_eq!(window_30.len(), 1);
        assert!(window_30.contains(&WorkspaceId(5)));
    }
3980
    /// Checks that `read_serialized_multi_workspaces` groups session
    /// workspaces by window id, attaches each window's persisted
    /// `MultiWorkspaceState`, and gives workspaces with no window id a
    /// default state of their own.
    #[gpui::test]
    async fn test_read_serialized_multi_workspaces_with_state(cx: &mut gpui::TestAppContext) {
        use crate::persistence::model::MultiWorkspaceState;

        // Write multi-workspace state for two windows via the scoped KVP.
        let window_10 = WindowId::from(10u64);
        let window_20 = WindowId::from(20u64);

        let kvp = cx.update(|cx| KeyValueStore::global(cx));

        write_multi_workspace_state(
            &kvp,
            window_10,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(2)),
                sidebar_open: true,
                sidebar_state: None,
            },
        )
        .await;

        write_multi_workspace_state(
            &kvp,
            window_20,
            MultiWorkspaceState {
                active_workspace_id: Some(WorkspaceId(3)),
                sidebar_open: false,
                sidebar_state: None,
            },
        )
        .await;

        // Build session workspaces: two in window 10, one in window 20, one with no window.
        let session_workspaces = vec![
            SessionWorkspace {
                workspace_id: WorkspaceId(1),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/a"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(2),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/b"]),
                window_id: Some(window_10),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(3),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/c"]),
                window_id: Some(window_20),
            },
            SessionWorkspace {
                workspace_id: WorkspaceId(4),
                location: SerializedWorkspaceLocation::Local,
                paths: PathList::new(&["/d"]),
                window_id: None,
            },
        ];

        let results = cx.update(|cx| read_serialized_multi_workspaces(session_workspaces, cx));

        // Should produce 3 groups: window 10, window 20, and the orphan.
        assert_eq!(results.len(), 3);

        // Window 10 group: 2 workspaces, active_workspace_id = 2, sidebar open.
        let group_10 = &results[0];
        assert_eq!(group_10.workspaces.len(), 2);
        assert_eq!(group_10.state.active_workspace_id, Some(WorkspaceId(2)));
        assert_eq!(group_10.state.sidebar_open, true);

        // Window 20 group: 1 workspace, active_workspace_id = 3, sidebar closed.
        let group_20 = &results[1];
        assert_eq!(group_20.workspaces.len(), 1);
        assert_eq!(group_20.state.active_workspace_id, Some(WorkspaceId(3)));
        assert_eq!(group_20.state.sidebar_open, false);

        // Orphan group: no window_id, so state is default.
        let group_none = &results[2];
        assert_eq!(group_none.workspaces.len(), 1);
        assert_eq!(group_none.state.active_workspace_id, None);
        assert_eq!(group_none.state.sidebar_open, false);
    }
4064
    /// Checks that awaiting the task returned by `flush_serialization` is by
    /// itself sufficient for the workspace row to be persisted — no extra
    /// `run_until_parked` is needed, which matters for the quit path.
    #[gpui::test]
    async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
        use crate::multi_workspace::MultiWorkspace;
        use feature_flags::FeatureFlagAppExt;

        use project::Project;

        crate::tests::init_test(cx);

        // MultiWorkspace is gated behind the agent-v2 feature flag.
        cx.update(|cx| {
            cx.set_staff(true);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());

        let db = cx.update(|_, cx| WorkspaceDb::global(cx));

        // Assign a database_id so serialization will actually persist.
        let workspace_id = db.next_id().await.unwrap();
        workspace.update(cx, |ws, _cx| {
            ws.set_database_id(workspace_id);
        });

        // Mutate some workspace state.
        db.set_centered_layout(workspace_id, true).await.unwrap();

        // Call flush_serialization and await the returned task directly
        // (without run_until_parked — the point is that awaiting the task
        // alone is sufficient).
        let task = multi_workspace.update_in(cx, |mw, window, cx| {
            mw.workspace()
                .update(cx, |ws, cx| ws.flush_serialization(window, cx))
        });
        task.await;

        // Read the workspace back from the DB and verify serialization happened.
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "flush_serialization should have persisted the workspace to DB"
        );
    }
4114
    /// Checks that creating a workspace through the MultiWorkspace API
    /// assigns it a database id, records it as the active workspace in the
    /// persisted multi-workspace state, and fully serializes its row (not
    /// just the bare row created by `next_id`).
    #[gpui::test]
    async fn test_create_workspace_serialization(cx: &mut gpui::TestAppContext) {
        use crate::multi_workspace::MultiWorkspace;
        use crate::persistence::read_multi_workspace_state;
        use feature_flags::FeatureFlagAppExt;

        use project::Project;

        crate::tests::init_test(cx);

        // MultiWorkspace is gated behind the agent-v2 feature flag.
        cx.update(|cx| {
            cx.set_staff(true);
            cx.update_flags(true, vec!["agent-v2".to_string()]);
        });

        let fs = fs::FakeFs::new(cx.executor());
        let project = Project::test(fs.clone(), [], cx).await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));

        // Give the first workspace a database_id.
        multi_workspace.update_in(cx, |mw, _, cx| {
            mw.set_random_database_id(cx);
        });

        let window_id =
            multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());

        // Create a new workspace via the MultiWorkspace API (triggers next_id()).
        multi_workspace.update_in(cx, |mw, window, cx| {
            mw.create_test_workspace(window, cx).detach();
        });

        // Let the async next_id() and re-serialization tasks complete.
        cx.run_until_parked();

        // The new workspace should now have a database_id.
        let new_workspace_db_id =
            multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
        assert!(
            new_workspace_db_id.is_some(),
            "New workspace should have a database_id after run_until_parked"
        );

        // The multi-workspace state should record it as the active workspace.
        let state = cx.update(|_, cx| read_multi_workspace_state(window_id, cx));
        assert_eq!(
            state.active_workspace_id, new_workspace_db_id,
            "Serialized active_workspace_id should match the new workspace's database_id"
        );

        // The individual workspace row should exist with real data
        // (not just the bare DEFAULT VALUES row from next_id).
        let workspace_id = new_workspace_db_id.unwrap();
        let db = cx.update(|_, cx| WorkspaceDb::global(cx));
        let serialized = db.workspace_for_id(workspace_id);
        assert!(
            serialized.is_some(),
            "Newly created workspace should be fully serialized in the DB after database_id assignment"
        );
    }
4177
4178 #[gpui::test]
4179 async fn test_remove_workspace_clears_session_binding(cx: &mut gpui::TestAppContext) {
4180 use crate::multi_workspace::MultiWorkspace;
4181 use feature_flags::FeatureFlagAppExt;
4182 use gpui::AppContext as _;
4183 use project::Project;
4184
4185 crate::tests::init_test(cx);
4186
4187 cx.update(|cx| {
4188 cx.set_staff(true);
4189 cx.update_flags(true, vec!["agent-v2".to_string()]);
4190 });
4191
4192 let fs = fs::FakeFs::new(cx.executor());
4193 let dir = unique_test_dir(&fs, "remove").await;
4194 let project1 = Project::test(fs.clone(), [], cx).await;
4195 let project2 = Project::test(fs.clone(), [], cx).await;
4196
4197 let (multi_workspace, cx) =
4198 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4199
4200 multi_workspace.update_in(cx, |mw, _, cx| {
4201 mw.set_random_database_id(cx);
4202 });
4203
4204 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4205
4206 // Get a real DB id for workspace2 so the row actually exists.
4207 let workspace2_db_id = db.next_id().await.unwrap();
4208
4209 multi_workspace.update_in(cx, |mw, window, cx| {
4210 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4211 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4212 ws.set_database_id(workspace2_db_id)
4213 });
4214 mw.activate(workspace.clone(), window, cx);
4215 });
4216
4217 // Save a full workspace row to the DB directly.
4218 let session_id = format!("remove-test-session-{}", Uuid::new_v4());
4219 db.save_workspace(SerializedWorkspace {
4220 id: workspace2_db_id,
4221 paths: PathList::new(&[&dir]),
4222 location: SerializedWorkspaceLocation::Local,
4223 center_group: Default::default(),
4224 window_bounds: Default::default(),
4225 display: Default::default(),
4226 docks: Default::default(),
4227 centered_layout: false,
4228 session_id: Some(session_id.clone()),
4229 breakpoints: Default::default(),
4230 window_id: Some(99),
4231 user_toolchains: Default::default(),
4232 })
4233 .await;
4234
4235 assert!(
4236 db.workspace_for_id(workspace2_db_id).is_some(),
4237 "Workspace2 should exist in DB before removal"
4238 );
4239
4240 // Remove workspace at index 1 (the second workspace).
4241 multi_workspace.update_in(cx, |mw, window, cx| {
4242 let ws = mw.workspaces()[1].clone();
4243 mw.remove(&ws, window, cx);
4244 });
4245
4246 cx.run_until_parked();
4247
4248 // The row should still exist so it continues to appear in recent
4249 // projects, but the session binding should be cleared so it is not
4250 // restored as part of any future session.
4251 assert!(
4252 db.workspace_for_id(workspace2_db_id).is_some(),
4253 "Removed workspace's DB row should be preserved for recent projects"
4254 );
4255
4256 let session_workspaces = db
4257 .last_session_workspace_locations("remove-test-session", None, fs.as_ref())
4258 .await
4259 .unwrap();
4260 let restored_ids: Vec<WorkspaceId> = session_workspaces
4261 .iter()
4262 .map(|sw| sw.workspace_id)
4263 .collect();
4264 assert!(
4265 !restored_ids.contains(&workspace2_db_id),
4266 "Removed workspace should not appear in session restoration"
4267 );
4268 }
4269
4270 #[gpui::test]
4271 async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
4272 use crate::multi_workspace::MultiWorkspace;
4273 use feature_flags::FeatureFlagAppExt;
4274 use gpui::AppContext as _;
4275 use project::Project;
4276
4277 crate::tests::init_test(cx);
4278
4279 cx.update(|cx| {
4280 cx.set_staff(true);
4281 cx.update_flags(true, vec!["agent-v2".to_string()]);
4282 });
4283
4284 let fs = fs::FakeFs::new(cx.executor());
4285 let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
4286 let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
4287 fs.insert_tree(dir1.path(), json!({})).await;
4288 fs.insert_tree(dir2.path(), json!({})).await;
4289
4290 let project1 = Project::test(fs.clone(), [], cx).await;
4291 let project2 = Project::test(fs.clone(), [], cx).await;
4292
4293 let db = cx.update(|cx| WorkspaceDb::global(cx));
4294
4295 // Get real DB ids so the rows actually exist.
4296 let ws1_id = db.next_id().await.unwrap();
4297 let ws2_id = db.next_id().await.unwrap();
4298
4299 let (multi_workspace, cx) =
4300 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4301
4302 multi_workspace.update_in(cx, |mw, _, cx| {
4303 mw.workspace().update(cx, |ws, _cx| {
4304 ws.set_database_id(ws1_id);
4305 });
4306 });
4307
4308 multi_workspace.update_in(cx, |mw, window, cx| {
4309 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4310 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4311 ws.set_database_id(ws2_id)
4312 });
4313 mw.activate(workspace.clone(), window, cx);
4314 });
4315
4316 let session_id = "test-zombie-session";
4317 let window_id_val: u64 = 42;
4318
4319 db.save_workspace(SerializedWorkspace {
4320 id: ws1_id,
4321 paths: PathList::new(&[dir1.path()]),
4322 location: SerializedWorkspaceLocation::Local,
4323 center_group: Default::default(),
4324 window_bounds: Default::default(),
4325 display: Default::default(),
4326 docks: Default::default(),
4327 centered_layout: false,
4328 session_id: Some(session_id.to_owned()),
4329 breakpoints: Default::default(),
4330 window_id: Some(window_id_val),
4331 user_toolchains: Default::default(),
4332 })
4333 .await;
4334
4335 db.save_workspace(SerializedWorkspace {
4336 id: ws2_id,
4337 paths: PathList::new(&[dir2.path()]),
4338 location: SerializedWorkspaceLocation::Local,
4339 center_group: Default::default(),
4340 window_bounds: Default::default(),
4341 display: Default::default(),
4342 docks: Default::default(),
4343 centered_layout: false,
4344 session_id: Some(session_id.to_owned()),
4345 breakpoints: Default::default(),
4346 window_id: Some(window_id_val),
4347 user_toolchains: Default::default(),
4348 })
4349 .await;
4350
4351 // Remove workspace2 (index 1).
4352 multi_workspace.update_in(cx, |mw, window, cx| {
4353 let ws = mw.workspaces()[1].clone();
4354 mw.remove(&ws, window, cx);
4355 });
4356
4357 cx.run_until_parked();
4358
4359 // The removed workspace should NOT appear in session restoration.
4360 let locations = db
4361 .last_session_workspace_locations(session_id, None, fs.as_ref())
4362 .await
4363 .unwrap();
4364
4365 let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
4366 assert!(
4367 !restored_ids.contains(&ws2_id),
4368 "Removed workspace should not appear in session restoration list. Found: {:?}",
4369 restored_ids
4370 );
4371 assert!(
4372 restored_ids.contains(&ws1_id),
4373 "Remaining workspace should still appear in session restoration list"
4374 );
4375 }
4376
4377 #[gpui::test]
4378 async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
4379 use crate::multi_workspace::MultiWorkspace;
4380 use feature_flags::FeatureFlagAppExt;
4381 use gpui::AppContext as _;
4382 use project::Project;
4383
4384 crate::tests::init_test(cx);
4385
4386 cx.update(|cx| {
4387 cx.set_staff(true);
4388 cx.update_flags(true, vec!["agent-v2".to_string()]);
4389 });
4390
4391 let fs = fs::FakeFs::new(cx.executor());
4392 let dir = unique_test_dir(&fs, "pending-removal").await;
4393 let project1 = Project::test(fs.clone(), [], cx).await;
4394 let project2 = Project::test(fs.clone(), [], cx).await;
4395
4396 let db = cx.update(|cx| WorkspaceDb::global(cx));
4397
4398 // Get a real DB id for workspace2 so the row actually exists.
4399 let workspace2_db_id = db.next_id().await.unwrap();
4400
4401 let (multi_workspace, cx) =
4402 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
4403
4404 multi_workspace.update_in(cx, |mw, _, cx| {
4405 mw.set_random_database_id(cx);
4406 });
4407
4408 multi_workspace.update_in(cx, |mw, window, cx| {
4409 let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
4410 workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
4411 ws.set_database_id(workspace2_db_id)
4412 });
4413 mw.activate(workspace.clone(), window, cx);
4414 });
4415
4416 // Save a full workspace row to the DB directly and let it settle.
4417 let session_id = format!("pending-removal-session-{}", Uuid::new_v4());
4418 db.save_workspace(SerializedWorkspace {
4419 id: workspace2_db_id,
4420 paths: PathList::new(&[&dir]),
4421 location: SerializedWorkspaceLocation::Local,
4422 center_group: Default::default(),
4423 window_bounds: Default::default(),
4424 display: Default::default(),
4425 docks: Default::default(),
4426 centered_layout: false,
4427 session_id: Some(session_id.clone()),
4428 breakpoints: Default::default(),
4429 window_id: Some(88),
4430 user_toolchains: Default::default(),
4431 })
4432 .await;
4433 cx.run_until_parked();
4434
4435 // Remove workspace2 — this pushes a task to pending_removal_tasks.
4436 multi_workspace.update_in(cx, |mw, window, cx| {
4437 let ws = mw.workspaces()[1].clone();
4438 mw.remove(&ws, window, cx);
4439 });
4440
4441 // Simulate the quit handler pattern: collect flush tasks + pending
4442 // removal tasks and await them all.
4443 let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
4444 let mut tasks: Vec<Task<()>> = mw
4445 .workspaces()
4446 .iter()
4447 .map(|workspace| {
4448 workspace.update(cx, |workspace, cx| {
4449 workspace.flush_serialization(window, cx)
4450 })
4451 })
4452 .collect();
4453 let mut removal_tasks = mw.take_pending_removal_tasks();
4454 // Note: removal_tasks may be empty if the background task already
4455 // completed (take_pending_removal_tasks filters out ready tasks).
4456 tasks.append(&mut removal_tasks);
4457 tasks.push(mw.flush_serialization());
4458 tasks
4459 });
4460 futures::future::join_all(all_tasks).await;
4461
4462 // The row should still exist (for recent projects), but the session
4463 // binding should have been cleared by the pending removal task.
4464 assert!(
4465 db.workspace_for_id(workspace2_db_id).is_some(),
4466 "Workspace row should be preserved for recent projects"
4467 );
4468
4469 let session_workspaces = db
4470 .last_session_workspace_locations("pending-removal-session", None, fs.as_ref())
4471 .await
4472 .unwrap();
4473 let restored_ids: Vec<WorkspaceId> = session_workspaces
4474 .iter()
4475 .map(|sw| sw.workspace_id)
4476 .collect();
4477 assert!(
4478 !restored_ids.contains(&workspace2_db_id),
4479 "Pending removal task should have cleared the session binding"
4480 );
4481 }
4482
4483 #[gpui::test]
4484 async fn test_create_workspace_bounds_observer_uses_fresh_id(cx: &mut gpui::TestAppContext) {
4485 use crate::multi_workspace::MultiWorkspace;
4486 use feature_flags::FeatureFlagAppExt;
4487 use project::Project;
4488
4489 crate::tests::init_test(cx);
4490
4491 cx.update(|cx| {
4492 cx.set_staff(true);
4493 cx.update_flags(true, vec!["agent-v2".to_string()]);
4494 });
4495
4496 let fs = fs::FakeFs::new(cx.executor());
4497 let project = Project::test(fs.clone(), [], cx).await;
4498
4499 let (multi_workspace, cx) =
4500 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4501
4502 multi_workspace.update_in(cx, |mw, _, cx| {
4503 mw.set_random_database_id(cx);
4504 });
4505
4506 let task =
4507 multi_workspace.update_in(cx, |mw, window, cx| mw.create_test_workspace(window, cx));
4508 task.await;
4509
4510 let new_workspace_db_id =
4511 multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
4512 assert!(
4513 new_workspace_db_id.is_some(),
4514 "After run_until_parked, the workspace should have a database_id"
4515 );
4516
4517 let workspace_id = new_workspace_db_id.unwrap();
4518
4519 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4520
4521 assert!(
4522 db.workspace_for_id(workspace_id).is_some(),
4523 "The workspace row should exist in the DB"
4524 );
4525
4526 cx.simulate_resize(gpui::size(px(1024.0), px(768.0)));
4527
4528 // Advance the clock past the 100ms debounce timer so the bounds
4529 // observer task fires
4530 cx.executor().advance_clock(Duration::from_millis(200));
4531 cx.run_until_parked();
4532
4533 let serialized = db
4534 .workspace_for_id(workspace_id)
4535 .expect("workspace row should still exist");
4536 assert!(
4537 serialized.window_bounds.is_some(),
4538 "The bounds observer should write bounds for the workspace's real DB ID, \
4539 even when the workspace was created via create_workspace (where the ID \
4540 is assigned asynchronously after construction)."
4541 );
4542 }
4543
4544 #[gpui::test]
4545 async fn test_flush_serialization_writes_bounds(cx: &mut gpui::TestAppContext) {
4546 use crate::multi_workspace::MultiWorkspace;
4547 use feature_flags::FeatureFlagAppExt;
4548 use project::Project;
4549
4550 crate::tests::init_test(cx);
4551
4552 cx.update(|cx| {
4553 cx.set_staff(true);
4554 cx.update_flags(true, vec!["agent-v2".to_string()]);
4555 });
4556
4557 let fs = fs::FakeFs::new(cx.executor());
4558 let dir = tempfile::TempDir::with_prefix("flush_bounds_test").unwrap();
4559 fs.insert_tree(dir.path(), json!({})).await;
4560
4561 let project = Project::test(fs.clone(), [dir.path()], cx).await;
4562
4563 let (multi_workspace, cx) =
4564 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
4565
4566 let db = cx.update(|_, cx| WorkspaceDb::global(cx));
4567 let workspace_id = db.next_id().await.unwrap();
4568 multi_workspace.update_in(cx, |mw, _, cx| {
4569 mw.workspace().update(cx, |ws, _cx| {
4570 ws.set_database_id(workspace_id);
4571 });
4572 });
4573
4574 let task = multi_workspace.update_in(cx, |mw, window, cx| {
4575 mw.workspace()
4576 .update(cx, |ws, cx| ws.flush_serialization(window, cx))
4577 });
4578 task.await;
4579
4580 let after = db
4581 .workspace_for_id(workspace_id)
4582 .expect("workspace row should exist after flush_serialization");
4583 assert!(
4584 !after.paths.is_empty(),
4585 "flush_serialization should have written paths via save_workspace"
4586 );
4587 assert!(
4588 after.window_bounds.is_some(),
4589 "flush_serialization should ensure window bounds are persisted to the DB \
4590 before the process exits."
4591 );
4592 }
4593
4594 #[gpui::test]
4595 async fn test_resolve_worktree_workspaces(cx: &mut gpui::TestAppContext) {
4596 let fs = fs::FakeFs::new(cx.executor());
4597
4598 // Main repo with a linked worktree entry
4599 fs.insert_tree(
4600 "/repo",
4601 json!({
4602 ".git": {
4603 "worktrees": {
4604 "feature": {
4605 "commondir": "../../",
4606 "HEAD": "ref: refs/heads/feature"
4607 }
4608 }
4609 },
4610 "src": { "main.rs": "" }
4611 }),
4612 )
4613 .await;
4614
4615 // Linked worktree checkout pointing back to /repo
4616 fs.insert_tree(
4617 "/worktree",
4618 json!({
4619 ".git": "gitdir: /repo/.git/worktrees/feature",
4620 "src": { "main.rs": "" }
4621 }),
4622 )
4623 .await;
4624
4625 // A plain non-git project
4626 fs.insert_tree(
4627 "/plain-project",
4628 json!({
4629 "src": { "main.rs": "" }
4630 }),
4631 )
4632 .await;
4633
4634 // Another normal git repo (used in mixed-path entry)
4635 fs.insert_tree(
4636 "/other-repo",
4637 json!({
4638 ".git": {},
4639 "src": { "lib.rs": "" }
4640 }),
4641 )
4642 .await;
4643
4644 let t0 = Utc::now() - chrono::Duration::hours(4);
4645 let t1 = Utc::now() - chrono::Duration::hours(3);
4646 let t2 = Utc::now() - chrono::Duration::hours(2);
4647 let t3 = Utc::now() - chrono::Duration::hours(1);
4648
4649 let workspaces = vec![
4650 // 1: Main checkout of /repo (opened earlier)
4651 (
4652 WorkspaceId(1),
4653 SerializedWorkspaceLocation::Local,
4654 PathList::new(&["/repo"]),
4655 t0,
4656 ),
4657 // 2: Linked worktree of /repo (opened more recently)
4658 // Should dedup with #1; more recent timestamp wins.
4659 (
4660 WorkspaceId(2),
4661 SerializedWorkspaceLocation::Local,
4662 PathList::new(&["/worktree"]),
4663 t1,
4664 ),
4665 // 3: Mixed-path workspace: one root is a linked worktree,
4666 // the other is a normal repo. The worktree path should be
4667 // resolved; the normal path kept as-is.
4668 (
4669 WorkspaceId(3),
4670 SerializedWorkspaceLocation::Local,
4671 PathList::new(&["/other-repo", "/worktree"]),
4672 t2,
4673 ),
4674 // 4: Non-git project — passed through unchanged.
4675 (
4676 WorkspaceId(4),
4677 SerializedWorkspaceLocation::Local,
4678 PathList::new(&["/plain-project"]),
4679 t3,
4680 ),
4681 ];
4682
4683 let result = resolve_worktree_workspaces(workspaces, fs.as_ref()).await;
4684
4685 // Should have 3 entries: #1 and #2 deduped into one, plus #3 and #4.
4686 assert_eq!(result.len(), 3);
4687
4688 // First entry: /repo — deduplicated from #1 and #2.
4689 // Keeps the position of #1 (first seen), but with #2's later timestamp.
4690 assert_eq!(result[0].2.paths(), &[PathBuf::from("/repo")]);
4691 assert_eq!(result[0].3, t1);
4692
4693 // Second entry: mixed-path workspace with worktree resolved.
4694 // /worktree → /repo, so paths become [/other-repo, /repo] (sorted).
4695 assert_eq!(
4696 result[1].2.paths(),
4697 &[PathBuf::from("/other-repo"), PathBuf::from("/repo")]
4698 );
4699 assert_eq!(result[1].0, WorkspaceId(3));
4700
4701 // Third entry: non-git project, unchanged.
4702 assert_eq!(result[2].2.paths(), &[PathBuf::from("/plain-project")]);
4703 assert_eq!(result[2].0, WorkspaceId(4));
4704 }
4705}