@@ -6,13 +6,16 @@ use gpui::{
actions, deferred, px,
};
use project::Project;
+use std::future::Future;
use std::path::PathBuf;
use ui::prelude::*;
+use util::ResultExt;
const SIDEBAR_RESIZE_HANDLE_SIZE: Pixels = px(6.0);
use crate::{
- DockPosition, Item, ModalView, Panel, Workspace, WorkspaceId, client_side_decorations,
+ DockPosition, Item, ModalView, Panel, Toast, Workspace, WorkspaceId, client_side_decorations,
+ notifications::NotificationId,
};
actions!(
@@ -99,10 +102,26 @@ pub struct MultiWorkspace {
sidebar: Option<Box<dyn SidebarHandle>>,
sidebar_open: bool,
_sidebar_subscription: Option<Subscription>,
+ pending_removal_tasks: Vec<Task<()>>,
+ _serialize_task: Option<Task<()>>,
+ _create_task: Option<Task<()>>,
+ _subscriptions: Vec<Subscription>,
}
impl MultiWorkspace {
- pub fn new(workspace: Entity<Workspace>, window: &mut Window, _cx: &mut Context<Self>) -> Self {
+ pub fn new(workspace: Entity<Workspace>, window: &mut Window, cx: &mut Context<Self>) -> Self {
+ let release_subscription = cx.on_release(|this: &mut MultiWorkspace, _cx| {
+ if let Some(task) = this._serialize_task.take() {
+ task.detach();
+ }
+ if let Some(task) = this._create_task.take() {
+ task.detach();
+ }
+ for task in std::mem::take(&mut this.pending_removal_tasks) {
+ task.detach();
+ }
+ });
+ let quit_subscription = cx.on_app_quit(Self::app_will_quit);
Self {
window_id: window.window_handle().window_id(),
workspaces: vec![workspace],
@@ -110,6 +129,10 @@ impl MultiWorkspace {
sidebar: None,
sidebar_open: false,
_sidebar_subscription: None,
+ pending_removal_tasks: Vec::new(),
+ _serialize_task: None,
+ _create_task: None,
+ _subscriptions: vec![release_subscription, quit_subscription],
}
}
@@ -238,14 +261,24 @@ impl MultiWorkspace {
return;
}
- let index = self.add_workspace(workspace, cx);
- if self.active_workspace_index != index {
- self.active_workspace_index = index;
+ let old_index = self.active_workspace_index;
+ let new_index = self.set_active_workspace(workspace, cx);
+ if old_index != new_index {
self.serialize(cx);
- cx.notify();
}
}
+ fn set_active_workspace(
+ &mut self,
+ workspace: Entity<Workspace>,
+ cx: &mut Context<Self>,
+ ) -> usize {
+ let index = self.add_workspace(workspace, cx);
+ self.active_workspace_index = index;
+ cx.notify();
+ index
+ }
+
/// Adds a workspace to this window without changing which workspace is active.
/// Returns the index of the workspace (existing or newly inserted).
pub fn add_workspace(&mut self, workspace: Entity<Workspace>, cx: &mut Context<Self>) -> usize {
@@ -292,16 +325,37 @@ impl MultiWorkspace {
}
}
- fn serialize(&self, cx: &mut App) {
+ fn serialize(&mut self, cx: &mut App) {
let window_id = self.window_id;
let state = crate::persistence::model::MultiWorkspaceState {
active_workspace_id: self.workspace().read(cx).database_id(),
sidebar_open: self.sidebar_open,
};
- cx.background_spawn(async move {
+ self._serialize_task = Some(cx.background_spawn(async move {
crate::persistence::write_multi_workspace_state(window_id, state).await;
- })
- .detach();
+ }));
+ }
+
+ /// Returns the in-flight serialization task (if any) so the caller can
+ /// await it. Used by the quit handler to ensure pending DB writes
+ /// complete before the process exits.
+ pub fn flush_serialization(&mut self) -> Task<()> {
+ self._serialize_task.take().unwrap_or(Task::ready(()))
+ }
+
+ fn app_will_quit(&mut self, _cx: &mut Context<Self>) -> impl Future<Output = ()> + use<> {
+ let mut tasks: Vec<Task<()>> = Vec::new();
+ if let Some(task) = self._serialize_task.take() {
+ tasks.push(task);
+ }
+ if let Some(task) = self._create_task.take() {
+ tasks.push(task);
+ }
+ tasks.extend(std::mem::take(&mut self.pending_removal_tasks));
+
+ async move {
+ futures::future::join_all(tasks).await;
+ }
}
fn focus_active_workspace(&self, window: &mut Window, cx: &mut App) {
@@ -397,6 +451,19 @@ impl MultiWorkspace {
self.workspace().read(cx).database_id()
}
+ pub fn take_pending_removal_tasks(&mut self) -> Vec<Task<()>> {
+ let mut tasks: Vec<Task<()>> = std::mem::take(&mut self.pending_removal_tasks)
+ .into_iter()
+ .filter(|task| !task.is_ready())
+ .collect();
+ if let Some(task) = self._create_task.take() {
+ if !task.is_ready() {
+ tasks.push(task);
+ }
+ }
+ tasks
+ }
+
#[cfg(any(test, feature = "test-support"))]
pub fn set_random_database_id(&mut self, cx: &mut Context<Self>) {
self.workspace().update(cx, |workspace, _cx| {
@@ -438,8 +505,57 @@ impl MultiWorkspace {
cx,
);
let new_workspace = cx.new(|cx| Workspace::new(None, project, app_state, window, cx));
- self.activate(new_workspace, cx);
+ self.set_active_workspace(new_workspace.clone(), cx);
self.focus_active_workspace(window, cx);
+
+ let weak_workspace = new_workspace.downgrade();
+ self._create_task = Some(cx.spawn_in(window, async move |this, cx| {
+ let result = crate::persistence::DB.next_id().await;
+ this.update_in(cx, |this, window, cx| match result {
+ Ok(workspace_id) => {
+ if let Some(workspace) = weak_workspace.upgrade() {
+ let session_id = workspace.read(cx).session_id();
+ let window_id = window.window_handle().window_id().as_u64();
+ workspace.update(cx, |workspace, _cx| {
+ workspace.set_database_id(workspace_id);
+ });
+ cx.background_spawn(async move {
+ crate::persistence::DB
+ .set_session_binding(workspace_id, session_id, Some(window_id))
+ .await
+ .log_err();
+ })
+ .detach();
+ } else {
+ cx.background_spawn(async move {
+ crate::persistence::DB
+ .delete_workspace_by_id(workspace_id)
+ .await
+ .log_err();
+ })
+ .detach();
+ }
+ this.serialize(cx);
+ }
+ Err(error) => {
+ log::error!("Failed to create workspace: {error:#}");
+ if let Some(index) = weak_workspace
+ .upgrade()
+ .and_then(|w| this.workspaces.iter().position(|ws| *ws == w))
+ {
+ this.remove_workspace(index, window, cx);
+ }
+ this.workspace().update(cx, |workspace, cx| {
+ let id = NotificationId::unique::<MultiWorkspace>();
+ workspace.show_toast(
+ Toast::new(id, format!("Failed to create workspace: {error}")),
+ cx,
+ );
+ });
+ }
+ })
+ .log_err();
+ }));
}
pub fn remove_workspace(&mut self, index: usize, window: &mut Window, cx: &mut Context<Self>) {
@@ -447,7 +563,7 @@ impl MultiWorkspace {
return;
}
- self.workspaces.remove(index);
+ let removed_workspace = self.workspaces.remove(index);
if self.active_workspace_index >= self.workspaces.len() {
self.active_workspace_index = self.workspaces.len() - 1;
@@ -455,8 +571,19 @@ impl MultiWorkspace {
self.active_workspace_index -= 1;
}
- self.focus_active_workspace(window, cx);
+ if let Some(workspace_id) = removed_workspace.read(cx).database_id() {
+ self.pending_removal_tasks.retain(|task| !task.is_ready());
+ self.pending_removal_tasks
+ .push(cx.background_spawn(async move {
+ crate::persistence::DB
+ .delete_workspace_by_id(workspace_id)
+ .await
+ .log_err();
+ }));
+ }
+
self.serialize(cx);
+ self.focus_active_workspace(window, cx);
cx.notify();
}
@@ -2153,6 +2153,14 @@ impl WorkspaceDb {
}
}
+ query! {
+ pub(crate) async fn set_session_binding(workspace_id: WorkspaceId, session_id: Option<String>, window_id: Option<u64>) -> Result<()> {
+ UPDATE workspaces
+ SET session_id = ?2, window_id = ?3
+ WHERE workspace_id = ?1
+ }
+ }
+
pub(crate) async fn toolchains(
&self,
workspace_id: WorkspaceId,
@@ -3934,4 +3942,421 @@ mod tests {
assert_eq!(group_none.state.active_workspace_id, None);
assert_eq!(group_none.state.sidebar_open, false);
}
+
+ #[gpui::test]
+ async fn test_flush_serialization_completes_before_quit(cx: &mut gpui::TestAppContext) {
+ use crate::multi_workspace::MultiWorkspace;
+ use feature_flags::FeatureFlagAppExt;
+
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let project = Project::test(fs.clone(), [], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+
+ let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
+
+ // Assign a database_id so serialization will actually persist.
+ let workspace_id = DB.next_id().await.unwrap();
+ workspace.update(cx, |ws, _cx| {
+ ws.set_database_id(workspace_id);
+ });
+
+ // Mutate some workspace state.
+ DB.set_centered_layout(workspace_id, true).await.unwrap();
+
+ // Call flush_serialization and await the returned task directly
+ // (without run_until_parked — the point is that awaiting the task
+ // alone is sufficient).
+ let task = multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.workspace()
+ .update(cx, |ws, cx| ws.flush_serialization(window, cx))
+ });
+ task.await;
+
+ // Read the workspace back from the DB and verify serialization happened.
+ let serialized = DB.workspace_for_id(workspace_id);
+ assert!(
+ serialized.is_some(),
+ "flush_serialization should have persisted the workspace to DB"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_create_workspace_serializes_active_workspace_id_after_db_id_assigned(
+ cx: &mut gpui::TestAppContext,
+ ) {
+ use crate::multi_workspace::MultiWorkspace;
+ use crate::persistence::read_multi_workspace_state;
+ use feature_flags::FeatureFlagAppExt;
+
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let project = Project::test(fs.clone(), [], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+
+ // Give the first workspace a database_id.
+ multi_workspace.update_in(cx, |mw, _, cx| {
+ mw.set_random_database_id(cx);
+ });
+
+ let window_id =
+ multi_workspace.update_in(cx, |_, window, _cx| window.window_handle().window_id());
+
+ // Create a new workspace via the MultiWorkspace API (triggers next_id()).
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.create_workspace(window, cx);
+ });
+
+ // Let the async next_id() and re-serialization tasks complete.
+ cx.run_until_parked();
+
+ // Read back the multi-workspace state.
+ let state = read_multi_workspace_state(window_id);
+
+ // The new workspace should now have a database_id, and the multi-workspace
+ // state should record it as the active workspace.
+ let new_workspace_db_id =
+ multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
+ assert!(
+ new_workspace_db_id.is_some(),
+ "New workspace should have a database_id after run_until_parked"
+ );
+ assert_eq!(
+ state.active_workspace_id, new_workspace_db_id,
+ "Serialized active_workspace_id should match the new workspace's database_id"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_create_workspace_individual_serialization(cx: &mut gpui::TestAppContext) {
+ use crate::multi_workspace::MultiWorkspace;
+ use feature_flags::FeatureFlagAppExt;
+
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let project = Project::test(fs.clone(), [], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
+
+ multi_workspace.update_in(cx, |mw, _, cx| {
+ mw.set_random_database_id(cx);
+ });
+
+ // Create a new workspace.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.create_workspace(window, cx);
+ });
+
+ cx.run_until_parked();
+
+ // Get the new workspace's database_id.
+ let new_db_id =
+ multi_workspace.read_with(cx, |mw, cx| mw.workspace().read(cx).database_id());
+ assert!(
+ new_db_id.is_some(),
+ "New workspace should have a database_id"
+ );
+
+ let workspace_id = new_db_id.unwrap();
+
+ // The workspace should have been serialized to the DB with real data
+ // (not just the bare DEFAULT VALUES row from next_id).
+ let serialized = DB.workspace_for_id(workspace_id);
+ assert!(
+ serialized.is_some(),
+ "Newly created workspace should be fully serialized in the DB after database_id assignment"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_remove_workspace_deletes_db_row(cx: &mut gpui::TestAppContext) {
+ use crate::multi_workspace::MultiWorkspace;
+ use feature_flags::FeatureFlagAppExt;
+ use gpui::AppContext as _;
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let project1 = Project::test(fs.clone(), [], cx).await;
+ let project2 = Project::test(fs.clone(), [], cx).await;
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
+
+ multi_workspace.update_in(cx, |mw, _, cx| {
+ mw.set_random_database_id(cx);
+ });
+
+ // Get a real DB id for workspace2 so the row actually exists.
+ let workspace2_db_id = DB.next_id().await.unwrap();
+
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
+ workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
+ ws.set_database_id(workspace2_db_id)
+ });
+ mw.activate(workspace.clone(), cx);
+ });
+
+ // Save a full workspace row to the DB directly.
+ DB.save_workspace(SerializedWorkspace {
+ id: workspace2_db_id,
+ paths: PathList::new(&["/tmp/remove_test"]),
+ location: SerializedWorkspaceLocation::Local,
+ center_group: Default::default(),
+ window_bounds: Default::default(),
+ display: Default::default(),
+ docks: Default::default(),
+ centered_layout: false,
+ session_id: Some("remove-test-session".to_owned()),
+ breakpoints: Default::default(),
+ window_id: Some(99),
+ user_toolchains: Default::default(),
+ })
+ .await;
+
+ assert!(
+ DB.workspace_for_id(workspace2_db_id).is_some(),
+ "Workspace2 should exist in DB before removal"
+ );
+
+ // Remove workspace at index 1 (the second workspace).
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.remove_workspace(1, window, cx);
+ });
+
+ cx.run_until_parked();
+
+ // The row should be deleted, not just have session_id cleared.
+ assert!(
+ DB.workspace_for_id(workspace2_db_id).is_none(),
+ "Removed workspace's DB row should be deleted entirely"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_remove_workspace_not_restored_as_zombie(cx: &mut gpui::TestAppContext) {
+ use crate::multi_workspace::MultiWorkspace;
+ use feature_flags::FeatureFlagAppExt;
+ use gpui::AppContext as _;
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let dir1 = tempfile::TempDir::with_prefix("zombie_test1").unwrap();
+ let dir2 = tempfile::TempDir::with_prefix("zombie_test2").unwrap();
+ fs.insert_tree(dir1.path(), json!({})).await;
+ fs.insert_tree(dir2.path(), json!({})).await;
+
+ let project1 = Project::test(fs.clone(), [], cx).await;
+ let project2 = Project::test(fs.clone(), [], cx).await;
+
+ // Get real DB ids so the rows actually exist.
+ let ws1_id = DB.next_id().await.unwrap();
+ let ws2_id = DB.next_id().await.unwrap();
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
+
+ multi_workspace.update_in(cx, |mw, _, cx| {
+ mw.workspace().update(cx, |ws, _cx| {
+ ws.set_database_id(ws1_id);
+ });
+ });
+
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
+ workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
+ ws.set_database_id(ws2_id)
+ });
+ mw.activate(workspace.clone(), cx);
+ });
+
+ let session_id = "test-zombie-session";
+ let window_id_val: u64 = 42;
+
+ DB.save_workspace(SerializedWorkspace {
+ id: ws1_id,
+ paths: PathList::new(&[dir1.path()]),
+ location: SerializedWorkspaceLocation::Local,
+ center_group: Default::default(),
+ window_bounds: Default::default(),
+ display: Default::default(),
+ docks: Default::default(),
+ centered_layout: false,
+ session_id: Some(session_id.to_owned()),
+ breakpoints: Default::default(),
+ window_id: Some(window_id_val),
+ user_toolchains: Default::default(),
+ })
+ .await;
+
+ DB.save_workspace(SerializedWorkspace {
+ id: ws2_id,
+ paths: PathList::new(&[dir2.path()]),
+ location: SerializedWorkspaceLocation::Local,
+ center_group: Default::default(),
+ window_bounds: Default::default(),
+ display: Default::default(),
+ docks: Default::default(),
+ centered_layout: false,
+ session_id: Some(session_id.to_owned()),
+ breakpoints: Default::default(),
+ window_id: Some(window_id_val),
+ user_toolchains: Default::default(),
+ })
+ .await;
+
+ // Remove workspace2 (index 1).
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.remove_workspace(1, window, cx);
+ });
+
+ cx.run_until_parked();
+
+ // The removed workspace should NOT appear in session restoration.
+ let locations = DB
+ .last_session_workspace_locations(session_id, None, fs.as_ref())
+ .await
+ .unwrap();
+
+ let restored_ids: Vec<WorkspaceId> = locations.iter().map(|sw| sw.workspace_id).collect();
+ assert!(
+ !restored_ids.contains(&ws2_id),
+ "Removed workspace should not appear in session restoration list. Found: {:?}",
+ restored_ids
+ );
+ assert!(
+ restored_ids.contains(&ws1_id),
+ "Remaining workspace should still appear in session restoration list"
+ );
+ }
+
+ #[gpui::test]
+ async fn test_pending_removal_tasks_drained_on_flush(cx: &mut gpui::TestAppContext) {
+ use crate::multi_workspace::MultiWorkspace;
+ use feature_flags::FeatureFlagAppExt;
+ use gpui::AppContext as _;
+ use project::Project;
+
+ crate::tests::init_test(cx);
+
+ cx.update(|cx| {
+ cx.set_staff(true);
+ cx.update_flags(true, vec!["agent-v2".to_string()]);
+ });
+
+ let fs = fs::FakeFs::new(cx.executor());
+ let project1 = Project::test(fs.clone(), [], cx).await;
+ let project2 = Project::test(fs.clone(), [], cx).await;
+
+ // Get a real DB id for workspace2 so the row actually exists.
+ let workspace2_db_id = DB.next_id().await.unwrap();
+
+ let (multi_workspace, cx) =
+ cx.add_window_view(|window, cx| MultiWorkspace::test_new(project1.clone(), window, cx));
+
+ multi_workspace.update_in(cx, |mw, _, cx| {
+ mw.set_random_database_id(cx);
+ });
+
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ let workspace = cx.new(|cx| crate::Workspace::test_new(project2.clone(), window, cx));
+ workspace.update(cx, |ws: &mut crate::Workspace, _cx| {
+ ws.set_database_id(workspace2_db_id)
+ });
+ mw.activate(workspace.clone(), cx);
+ });
+
+ // Save a full workspace row to the DB directly and let it settle.
+ DB.save_workspace(SerializedWorkspace {
+ id: workspace2_db_id,
+ paths: PathList::new(&["/tmp/pending_removal_test"]),
+ location: SerializedWorkspaceLocation::Local,
+ center_group: Default::default(),
+ window_bounds: Default::default(),
+ display: Default::default(),
+ docks: Default::default(),
+ centered_layout: false,
+ session_id: Some("pending-removal-session".to_owned()),
+ breakpoints: Default::default(),
+ window_id: Some(88),
+ user_toolchains: Default::default(),
+ })
+ .await;
+ cx.run_until_parked();
+
+ // Remove workspace2 — this pushes a task to pending_removal_tasks.
+ multi_workspace.update_in(cx, |mw, window, cx| {
+ mw.remove_workspace(1, window, cx);
+ });
+
+ // Simulate the quit handler pattern: collect flush tasks + pending
+ // removal tasks and await them all.
+ let all_tasks = multi_workspace.update_in(cx, |mw, window, cx| {
+ let mut tasks: Vec<Task<()>> = mw
+ .workspaces()
+ .iter()
+ .map(|workspace| {
+ workspace.update(cx, |workspace, cx| {
+ workspace.flush_serialization(window, cx)
+ })
+ })
+ .collect();
+ let mut removal_tasks = mw.take_pending_removal_tasks();
+ // Note: removal_tasks may be empty if the background task already
+ // completed (take_pending_removal_tasks filters out ready tasks).
+ tasks.append(&mut removal_tasks);
+ tasks.push(mw.flush_serialization());
+ tasks
+ });
+ futures::future::join_all(all_tasks).await;
+
+ // After awaiting, the DB row should be deleted.
+ assert!(
+ DB.workspace_for_id(workspace2_db_id).is_none(),
+ "Pending removal task should have deleted the workspace row when awaited"
+ );
+ }
}
@@ -1244,6 +1244,7 @@ pub struct Workspace {
_apply_leader_updates: Task<Result<()>>,
_observe_current_user: Task<Result<()>>,
_schedule_serialize_workspace: Option<Task<()>>,
+ _serialize_workspace_task: Option<Task<()>>,
_schedule_serialize_ssh_paths: Option<Task<()>>,
pane_history_timestamp: Arc<AtomicUsize>,
bounds: Bounds<Pixels>,
@@ -1667,6 +1668,7 @@ impl Workspace {
_observe_current_user,
_apply_leader_updates,
_schedule_serialize_workspace: None,
+ _serialize_workspace_task: None,
_schedule_serialize_ssh_paths: None,
leader_updates_tx,
_subscriptions: subscriptions,
@@ -5823,10 +5825,24 @@ impl Workspace {
self.database_id
}
+ pub(crate) fn set_database_id(&mut self, id: WorkspaceId) {
+ self.database_id = Some(id);
+ }
+
pub fn session_id(&self) -> Option<String> {
self.session_id.clone()
}
+    /// Bypass the serialization throttle (`SERIALIZATION_THROTTLE_TIME`, currently 200ms) and write workspace state to
+ /// the DB immediately. Returns a task the caller can await to ensure the
+ /// write completes. Used by the quit handler so the most recent state
+ /// isn't lost to a pending throttle timer when the process exits.
+ pub fn flush_serialization(&mut self, window: &mut Window, cx: &mut App) -> Task<()> {
+ self._schedule_serialize_workspace.take();
+ self._serialize_workspace_task.take();
+ self.serialize_workspace_internal(window, cx)
+ }
+
pub fn root_paths(&self, cx: &App) -> Vec<Arc<Path>> {
let project = self.project().read(cx);
project
@@ -5883,7 +5899,8 @@ impl Workspace {
.timer(SERIALIZATION_THROTTLE_TIME)
.await;
this.update_in(cx, |this, window, cx| {
- this.serialize_workspace_internal(window, cx).detach();
+ this._serialize_workspace_task =
+ Some(this.serialize_workspace_internal(window, cx));
this._schedule_serialize_workspace.take();
})
.log_err();
@@ -7906,11 +7923,16 @@ pub async fn last_session_workspace_locations(
.log_err()
}
+pub struct MultiWorkspaceRestoreResult {
+ pub window_handle: WindowHandle<MultiWorkspace>,
+ pub errors: Vec<anyhow::Error>,
+}
+
pub async fn restore_multiworkspace(
multi_workspace: SerializedMultiWorkspace,
app_state: Arc<AppState>,
cx: &mut AsyncApp,
-) -> anyhow::Result<WindowHandle<MultiWorkspace>> {
+) -> anyhow::Result<MultiWorkspaceRestoreResult> {
let SerializedMultiWorkspace { workspaces, state } = multi_workspace;
let mut group_iter = workspaces.into_iter();
let first = group_iter
@@ -7936,8 +7958,10 @@ pub async fn restore_multiworkspace(
window
};
+ let mut errors = Vec::new();
+
for session_workspace in group_iter {
- if session_workspace.paths.is_empty() {
+ let error = if session_workspace.paths.is_empty() {
cx.update(|cx| {
open_workspace_by_id(
session_workspace.workspace_id,
@@ -7946,7 +7970,8 @@ pub async fn restore_multiworkspace(
cx,
)
})
- .await?;
+ .await
+ .err()
} else {
cx.update(|cx| {
Workspace::new_local(
@@ -7958,7 +7983,12 @@ pub async fn restore_multiworkspace(
cx,
)
})
- .await?;
+ .await
+ .err()
+ };
+
+ if let Some(error) = error {
+ errors.push(error);
}
}
@@ -8000,7 +8030,10 @@ pub async fn restore_multiworkspace(
})
.ok();
- Ok(window_handle)
+ Ok(MultiWorkspaceRestoreResult {
+ window_handle,
+ errors,
+ })
}
actions!(