From 46f1d5f5c24448abb2dc149689f389b4378858f8 Mon Sep 17 00:00:00 2001 From: Julia Date: Thu, 1 Dec 2022 00:29:58 -0500 Subject: [PATCH 01/86] Avoid moving tab when leader item updates --- crates/workspace/src/workspace.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7082b61949fd468b9e84795aa39ac135c992ecb8..25fa3654d71c10b5738b39b4a119bcf13a2e6a25 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2676,7 +2676,12 @@ impl Workspace { } for (pane, item) in items_to_add { - Pane::add_item(self, &pane, item.boxed_clone(), false, false, None, cx); + if let Some(index) = pane.update(cx, |pane, _| pane.index_for_item(item.as_ref())) { + pane.update(cx, |pane, cx| pane.activate_item(index, false, false, cx)); + } else { + Pane::add_item(self, &pane, item.boxed_clone(), false, false, None, cx); + } + if pane == self.active_pane { pane.update(cx, |pane, cx| pane.focus_active_item(cx)); } From 239a04ea5bfc5c318881a7b8c33e7e211af13aa4 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 00:31:16 -0500 Subject: [PATCH 02/86] Add test that should have exercised tab reordering while following Except it doesn't, it passes both with and without the prior commit. 
Investigate further --- crates/collab/src/integration_tests.rs | 121 +++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 3e0b2171a87d19a9f55ffb8c9c4bfc00fb63af02..7115ed6c60ce40ad2c004e47214e54c677a45f0c 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -4953,6 +4953,127 @@ async fn test_following( ); } +#[gpui::test] +async fn test_following_tab_order( + deterministic: Arc, + cx_a: &mut TestAppContext, + cx_b: &mut TestAppContext, +) { + cx_a.update(editor::init); + cx_b.update(editor::init); + + let mut server = TestServer::start(cx_a.background()).await; + let client_a = server.create_client(cx_a, "user_a").await; + let client_b = server.create_client(cx_b, "user_b").await; + server + .create_room(&mut [(&client_a, cx_a), (&client_b, cx_b)]) + .await; + let active_call_a = cx_a.read(ActiveCall::global); + let active_call_b = cx_b.read(ActiveCall::global); + + client_a + .fs + .insert_tree( + "/a", + json!({ + "1.txt": "one", + "2.txt": "two", + "3.txt": "three", + }), + ) + .await; + let (project_a, worktree_id) = client_a.build_local_project("/a", cx_a).await; + active_call_a + .update(cx_a, |call, cx| call.set_location(Some(&project_a), cx)) + .await + .unwrap(); + + let project_id = active_call_a + .update(cx_a, |call, cx| call.share_project(project_a.clone(), cx)) + .await + .unwrap(); + let project_b = client_b.build_remote_project(project_id, cx_b).await; + active_call_b + .update(cx_b, |call, cx| call.set_location(Some(&project_b), cx)) + .await + .unwrap(); + + let workspace_a = client_a.build_workspace(&project_a, cx_a); + let pane_a = workspace_a.read_with(cx_a, |workspace, _| workspace.active_pane().clone()); + + let workspace_b = client_b.build_workspace(&project_b, cx_b); + let pane_b = workspace_b.read_with(cx_b, |workspace, _| workspace.active_pane().clone()); + + let client_b_id = 
project_a.read_with(cx_a, |project, _| { + project.collaborators().values().next().unwrap().peer_id + }); + + //Open 1, 3 in that order on client A + workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), None, true, cx) + }) + .await + .unwrap(); + workspace_a + .update(cx_a, |workspace, cx| { + workspace.open_path((worktree_id, "3.txt"), None, true, cx) + }) + .await + .unwrap(); + + let pane_paths = |pane: &ViewHandle, cx: &mut TestAppContext| { + pane.update(cx, |pane, cx| { + pane.items() + .map(|item| { + item.project_path(cx) + .unwrap() + .path + .to_str() + .unwrap() + .to_owned() + }) + .collect::>() + }) + }; + + //Verify that the tabs opened in the order we expect + assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt"]); + + //Open just 2 on client B + workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), None, true, cx) + }) + .await + .unwrap(); + + //Follow client B as client A + workspace_a + .update(cx_a, |workspace, cx| { + workspace + .toggle_follow(&ToggleFollow(client_b_id), cx) + .unwrap() + }) + .await + .unwrap(); + + // Verify that newly opened followed file is at the end + assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); + + //Open just 1 on client B + workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "1.txt"), None, true, cx) + }) + .await + .unwrap(); + assert_eq!(&pane_paths(&pane_b, cx_b), &["2.txt", "1.txt"]); + + // Verify that following into 1 did not reorder + assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); +} + #[gpui::test(iterations = 10)] async fn test_peers_following_each_other(cx_a: &mut TestAppContext, cx_b: &mut TestAppContext) { cx_a.foreground().forbid_parking(); From 4bc1d775358f8af756740c4ef5d250ceee560cb2 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 16:09:37 -0500 Subject: [PATCH 03/86] Fix tab following order test to wait for file open to propagate Now 
it can actually repro the original bug Co-Authored-By: Max Brunsfeld --- crates/collab/src/integration_tests.rs | 18 ++++++++++-------- crates/editor/src/items.rs | 6 ++++-- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 7115ed6c60ce40ad2c004e47214e54c677a45f0c..0daa3b69f6f7f2564616449bb9b1f586be4f5651 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -5040,14 +5040,6 @@ async fn test_following_tab_order( //Verify that the tabs opened in the order we expect assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt"]); - //Open just 2 on client B - workspace_b - .update(cx_b, |workspace, cx| { - workspace.open_path((worktree_id, "2.txt"), None, true, cx) - }) - .await - .unwrap(); - //Follow client B as client A workspace_a .update(cx_a, |workspace, cx| { @@ -5058,6 +5050,15 @@ async fn test_following_tab_order( .await .unwrap(); + //Open just 2 on client B + workspace_b + .update(cx_b, |workspace, cx| { + workspace.open_path((worktree_id, "2.txt"), None, true, cx) + }) + .await + .unwrap(); + deterministic.run_until_parked(); + // Verify that newly opened followed file is at the end assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); @@ -5069,6 +5070,7 @@ async fn test_following_tab_order( .await .unwrap(); assert_eq!(&pane_paths(&pane_b, cx_b), &["2.txt", "1.txt"]); + deterministic.run_until_parked(); // Verify that following into 1 did not reorder assert_eq!(&pane_paths(&pane_a, cx_a), &["1.txt", "3.txt", "2.txt"]); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 0cc8575e99156d85366cc3aaec442913b36dd6a4..ccabe81de6cf96cff98b92e52127128b4011ca1b 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -55,9 +55,11 @@ impl FollowableItem for Editor { let buffer = buffer.await?; let editor = pane .read_with(&cx, |pane, cx| { - 
pane.items_of_type::().find(|editor| { + let existing = pane.items_of_type::().find(|editor| { editor.read(cx).buffer.read(cx).as_singleton().as_ref() == Some(&buffer) - }) + }); + dbg!(&existing); + existing }) .unwrap_or_else(|| { pane.update(&mut cx, |_, cx| { From 57e10b7dd56c18caaf91a176a86481e3ee4c4571 Mon Sep 17 00:00:00 2001 From: Julia Date: Fri, 2 Dec 2022 16:42:49 -0500 Subject: [PATCH 04/86] Cleanup dbg --- crates/editor/src/items.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index ccabe81de6cf96cff98b92e52127128b4011ca1b..0cc8575e99156d85366cc3aaec442913b36dd6a4 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -55,11 +55,9 @@ impl FollowableItem for Editor { let buffer = buffer.await?; let editor = pane .read_with(&cx, |pane, cx| { - let existing = pane.items_of_type::().find(|editor| { + pane.items_of_type::().find(|editor| { editor.read(cx).buffer.read(cx).as_singleton().as_ref() == Some(&buffer) - }); - dbg!(&existing); - existing + }) }) .unwrap_or_else(|| { pane.update(&mut cx, |_, cx| { From 72c1ee904b7335ede76b421a33edb70c07342b2a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 09:33:16 -0700 Subject: [PATCH 05/86] Fix rebase - Broken tab --- crates/db/src/items.rs | 38 +++++++++++++++++++++++++++++--------- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index ed4a4f85e31ed869980496fd8aa20980faf1847d..87edbd2c00f93d157358caa0ed6f331310b7edc0 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -148,18 +148,38 @@ impl Db { let tx = lock.transaction()?; - // When working with transactions in rusqlite, need to make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. 
- let result = { - let mut editors_stmt = tx.prepare_cached( - r#" + // When working with transactions in rusqlite, need to make this kind of scope + // To make the borrow stuff work correctly. Don't know why, rust is wild. + let result = { + let mut read_editors = tx + .prepare_cached( + r#" SELECT items.id, item_path.path FROM items LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = ?; - "#, - )?; + ON items.id = item_path.item_id + WHERE items.kind = "Editor"; + "#r, + )? + .query_map([], |row| { + let buf: Vec = row.get(2)?; + let path: PathBuf = OsStr::from_bytes(&buf).into(); + + Ok(SerializedItem::Editor(id, path)) + })?; + + let mut read_stmt = tx.prepare_cached( + " + SELECT items.id, items.kind, item_path.path, item_query.query + FROM items + LEFT JOIN item_path + ON items.id = item_path.item_id + LEFT JOIN item_query + ON items.id = item_query.item_id + WHERE + ORDER BY items.id; + ", + )?; let editors_iter = editors_stmt.query_map( [SerializedItemKind::Editor.to_string()], From 60ebe33518df5540f98af8e4019c1a72056e0c03 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 11:36:01 -0700 Subject: [PATCH 06/86] Rebase fix - Reworking approach to sql for take --- crates/db/src/db.rs | 1 + crates/db/src/items.rs | 38 +++++++++---------------------------- crates/db/src/migrations.rs | 4 ++-- 3 files changed, 12 insertions(+), 31 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 2949acdb8372c37373d470708b20a92430108e4e..6f1ac7f59f69fcc64c826d8eed6c19624f04fe27 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,3 +1,4 @@ +mod items; mod kvp; mod migrations; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 87edbd2c00f93d157358caa0ed6f331310b7edc0..ed4a4f85e31ed869980496fd8aa20980faf1847d 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -148,38 +148,18 @@ impl Db { let tx = lock.transaction()?; - // When working with transactions in rusqlite, need to 
make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. - let result = { - let mut read_editors = tx - .prepare_cached( - r#" + // When working with transactions in rusqlite, need to make this kind of scope + // To make the borrow stuff work correctly. Don't know why, rust is wild. + let result = { + let mut editors_stmt = tx.prepare_cached( + r#" SELECT items.id, item_path.path FROM items LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = "Editor"; - "#r, - )? - .query_map([], |row| { - let buf: Vec = row.get(2)?; - let path: PathBuf = OsStr::from_bytes(&buf).into(); - - Ok(SerializedItem::Editor(id, path)) - })?; - - let mut read_stmt = tx.prepare_cached( - " - SELECT items.id, items.kind, item_path.path, item_query.query - FROM items - LEFT JOIN item_path - ON items.id = item_path.item_id - LEFT JOIN item_query - ON items.id = item_query.item_id - WHERE - ORDER BY items.id; - ", - )?; + ON items.id = item_path.item_id + WHERE items.kind = ?; + "#, + )?; let editors_iter = editors_stmt.query_map( [SerializedItemKind::Editor.to_string()], diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 1000543d8ddde8829320de1b0c3e4f630635d3e8..40e5d28b80db2c0a7a40d3811d6dd4e65411d301 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::kvp::KVP_M_1_UP; +use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -10,6 +10,6 @@ use crate::kvp::KVP_M_1_UP; lazy_static::lazy_static! 
{ pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ M::up(KVP_M_1_UP), - // M::up(ITEMS_M_1), + M::up(ITEMS_M_1), ]); } From b48e28b55512f57b0d045aa5de0292d13ad1f2b2 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 19 Oct 2022 17:10:49 -0700 Subject: [PATCH 07/86] Built first draft of workspace serialization schemas, started writing DB tests Co-Authored-By: kay@zed.dev --- crates/db/src/db.rs | 1 + crates/db/src/items.rs | 341 +++++------------------------------- crates/db/src/kvp.rs | 2 +- crates/db/src/migrations.rs | 5 +- crates/db/src/workspace.rs | 180 +++++++++++++++++++ 5 files changed, 231 insertions(+), 298 deletions(-) create mode 100644 crates/db/src/workspace.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6f1ac7f59f69fcc64c826d8eed6c19624f04fe27..2b4b7cf9c3664cb70bb940e0df772b71debc92c6 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,7 @@ mod items; mod kvp; mod migrations; +mod workspace; use std::fs; use std::path::{Path, PathBuf}; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index ed4a4f85e31ed869980496fd8aa20980faf1847d..7454f243317312d7995b27864627b665f84ce9d8 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -6,306 +6,59 @@ use rusqlite::{named_params, params}; use super::Db; -pub(crate) const ITEMS_M_1: &str = " -CREATE TABLE items( - id INTEGER PRIMARY KEY, - kind TEXT -) STRICT; -CREATE TABLE item_path( - item_id INTEGER PRIMARY KEY, - path BLOB -) STRICT; -CREATE TABLE item_query( - item_id INTEGER PRIMARY KEY, - query TEXT -) STRICT; -"; - -#[derive(PartialEq, Eq, Hash, Debug)] -pub enum SerializedItemKind { - Editor, - Terminal, - ProjectSearch, - Diagnostics, -} - -impl Display for SerializedItemKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str(&format!("{:?}", self)) - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum SerializedItem { - Editor(usize, PathBuf), - Terminal(usize), - 
ProjectSearch(usize, String), - Diagnostics(usize), -} - -impl SerializedItem { - fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor(_, _) => SerializedItemKind::Editor, - SerializedItem::Terminal(_) => SerializedItemKind::Terminal, - SerializedItem::ProjectSearch(_, _) => SerializedItemKind::ProjectSearch, - SerializedItem::Diagnostics(_) => SerializedItemKind::Diagnostics, - } - } - - fn id(&self) -> usize { - match self { - SerializedItem::Editor(id, _) - | SerializedItem::Terminal(id) - | SerializedItem::ProjectSearch(id, _) - | SerializedItem::Diagnostics(id) => *id, - } - } -} - -impl Db { - fn write_item(&self, serialized_item: SerializedItem) -> Result<()> { - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - let tx = lock.transaction()?; - - // Serialize the item - let id = serialized_item.id(); - { - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO items(id, kind) VALUES ((?), (?))", - )?; - - dbg!("inserting item"); - stmt.execute(params![id, serialized_item.kind().to_string()])?; - } - - // Serialize item data - match &serialized_item { - SerializedItem::Editor(_, path) => { - dbg!("inserting path"); - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO item_path(item_id, path) VALUES ((?), (?))", - )?; - - let path_bytes = path.as_os_str().as_bytes(); - stmt.execute(params![id, path_bytes])?; - } - SerializedItem::ProjectSearch(_, query) => { - dbg!("inserting query"); - let mut stmt = tx.prepare_cached( - "INSERT OR REPLACE INTO item_query(item_id, query) VALUES ((?), (?))", - )?; - - stmt.execute(params![id, query])?; - } - _ => {} - } - - tx.commit()?; - - let mut stmt = lock.prepare_cached("SELECT id, kind FROM items")?; - let _ = stmt - .query_map([], |row| { - let zero: usize = row.get(0)?; - let one: String = row.get(1)?; - - dbg!(zero, one); - Ok(()) - })? 
- .collect::>>(); - - Ok(()) - }) - .unwrap_or(Ok(())) - } - - fn delete_item(&self, item_id: usize) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached( - r#" - DELETE FROM items WHERE id = (:id); - DELETE FROM item_path WHERE id = (:id); - DELETE FROM item_query WHERE id = (:id); - "#, - )?; - - stmt.execute(named_params! {":id": item_id})?; - - Ok(()) - }) - .unwrap_or(Ok(())) - } - - fn take_items(&self) -> Result> { - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - let tx = lock.transaction()?; +/// Current design makes the cut at the item level, +/// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate +/// - items table, with a kind, and an integer that acts as a key to one of these other tables +/// This column is a foreign key to ONE OF: editors, terminals, searches +/// - - // When working with transactions in rusqlite, need to make this kind of scope - // To make the borrow stuff work correctly. Don't know why, rust is wild. 
- let result = { - let mut editors_stmt = tx.prepare_cached( - r#" - SELECT items.id, item_path.path - FROM items - LEFT JOIN item_path - ON items.id = item_path.item_id - WHERE items.kind = ?; - "#, - )?; +// (workspace_id, item_id) +// kind -> ::Editor:: - let editors_iter = editors_stmt.query_map( - [SerializedItemKind::Editor.to_string()], - |row| { - let id: usize = row.get(0)?; +// -> +// At the workspace level +// -> (Workspace_ID, item_id) +// -> One shot, big query, load everything up: - let buf: Vec = row.get(1)?; - let path: PathBuf = OsStr::from_bytes(&buf).into(); +// -> SerializedWorkspace::deserialize(tx, itemKey) +// -> SerializedEditor::deserialize(tx, itemKey) - Ok(SerializedItem::Editor(id, path)) - }, - )?; +// -> +// -> Workspace::new(SerializedWorkspace) +// -> Editor::new(serialized_workspace[???]serializedEditor) - let mut terminals_stmt = tx.prepare_cached( - r#" - SELECT items.id - FROM items - WHERE items.kind = ?; - "#, - )?; - let terminals_iter = terminals_stmt.query_map( - [SerializedItemKind::Terminal.to_string()], - |row| { - let id: usize = row.get(0)?; +// //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. 
for loading from a network or RocksDB) +// //Cons: DB has to know the internals of the entire rest of the app - Ok(SerializedItem::Terminal(id)) - }, - )?; +// Workspace +// Worktree roots +// Pane groups +// Dock +// Items +// Sidebars - let mut search_stmt = tx.prepare_cached( - r#" - SELECT items.id, item_query.query - FROM items - LEFT JOIN item_query - ON items.id = item_query.item_id - WHERE items.kind = ?; - "#, - )?; - let searches_iter = search_stmt.query_map( - [SerializedItemKind::ProjectSearch.to_string()], - |row| { - let id: usize = row.get(0)?; - let query = row.get(1)?; - - Ok(SerializedItem::ProjectSearch(id, query)) - }, - )?; - - #[cfg(debug_assertions)] - let tmp = - searches_iter.collect::>>(); - #[cfg(debug_assertions)] - debug_assert!(tmp.len() == 0 || tmp.len() == 1); - #[cfg(debug_assertions)] - let searches_iter = tmp.into_iter(); - - let mut diagnostic_stmt = tx.prepare_cached( - r#" - SELECT items.id - FROM items - WHERE items.kind = ?; - "#, - )?; - - let diagnostics_iter = diagnostic_stmt.query_map( - [SerializedItemKind::Diagnostics.to_string()], - |row| { - let id: usize = row.get(0)?; - - Ok(SerializedItem::Diagnostics(id)) - }, - )?; - - #[cfg(debug_assertions)] - let tmp = - diagnostics_iter.collect::>>(); - #[cfg(debug_assertions)] - debug_assert!(tmp.len() == 0 || tmp.len() == 1); - #[cfg(debug_assertions)] - let diagnostics_iter = tmp.into_iter(); - - let res = editors_iter - .chain(terminals_iter) - .chain(diagnostics_iter) - .chain(searches_iter) - .collect::, rusqlite::Error>>()?; - - let mut delete_stmt = tx.prepare_cached( - r#" - DELETE FROM items; - DELETE FROM item_path; - DELETE FROM item_query; - "#, - )?; - - delete_stmt.execute([])?; - - res - }; - - tx.commit()?; - - Ok(result) - }) - .unwrap_or(Ok(HashSet::default())) - } -} - -#[cfg(test)] -mod test { - use anyhow::Result; - - use super::*; - - #[test] - fn test_items_round_trip() -> Result<()> { - let db = Db::open_in_memory(); - - let mut items = vec![ - 
SerializedItem::Editor(0, PathBuf::from("/tmp/test.txt")), - SerializedItem::Terminal(1), - SerializedItem::ProjectSearch(2, "Test query!".to_string()), - SerializedItem::Diagnostics(3), - ] - .into_iter() - .collect::>(); - - for item in items.iter() { - dbg!("Inserting... "); - db.write_item(item.clone())?; - } - - assert_eq!(items, db.take_items()?); - - // Check that it's empty, as expected - assert_eq!(HashSet::default(), db.take_items()?); - - for item in items.iter() { - db.write_item(item.clone())?; - } - - items.remove(&SerializedItem::ProjectSearch(2, "Test query!".to_string())); - db.delete_item(2)?; +pub(crate) const ITEMS_M_1: &str = " +CREATE TABLE items( + workspace_id INTEGER, + item_id INTEGER, + kind TEXT NOT NULL, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +) STRICT; - assert_eq!(items, db.take_items()?); +CREATE TABLE project_searches( + workspace_id INTEGER, + item_id INTEGER, + query TEXT, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +) STRICT; - Ok(()) - } -} +CREATE TABLE editors( + workspace_id INTEGER, + item_id INTEGER, + path BLOB NOT NULL, + PRIMARY KEY (workspace_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +) STRICT; +"; diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 534577bc79e0187ec359dd7fe43874de9a0cdd6b..96f13d8040bf6e289711b46462ccf88d1eafc735 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -3,7 +3,7 @@ use rusqlite::OptionalExtension; use super::Db; -pub(crate) const KVP_M_1_UP: &str = " +pub(crate) const KVP_M_1: &str = " CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 40e5d28b80db2c0a7a40d3811d6dd4e65411d301..3a21c7fa6fa6a965b8ceb544d184c536c75f951d 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use 
rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; +use crate::kvp::KVP_M_1; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -9,7 +9,6 @@ use crate::{items::ITEMS_M_1, kvp::KVP_M_1_UP}; // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ - M::up(KVP_M_1_UP), - M::up(ITEMS_M_1), + M::up(KVP_M_1), ]); } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs new file mode 100644 index 0000000000000000000000000000000000000000..8ece0d5b7848cfceabfe3bc1fe59cfe450ef8ce4 --- /dev/null +++ b/crates/db/src/workspace.rs @@ -0,0 +1,180 @@ +use std::{path::Path, sync::Arc}; + +use super::Db; + +pub(crate) const WORKSPACE_M_1: &str = " +CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + center_group INTEGER NOT NULL, + dock_pane INTEGER NOT NULL, + timestamp INTEGER, + FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) + FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id) +) STRICT; + +CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +) STRICT; + +CREATE TABLE pane_groups( + workspace_id INTEGER, + group_id INTEGER, + split_direction STRING, -- 'Vertical' / 'Horizontal' / + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_group_children( + workspace_id INTEGER, + group_id INTEGER, + child_pane_id INTEGER, -- Nullable + child_group_id INTEGER, -- Nullable + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_items( + workspace_id INTEGER, + pane_id INTEGER, + item_id INTEGER, -- Array + PRIMARY KEY (workspace_id, pane_id) +) STRICT; +"; + +// Zed stores items with ids which are a combination of 
a view id during a given run and a workspace id. This + +// Case 1: Starting Zed Contextless +// > Zed -> Reopen the last +// Case 2: Starting Zed with a project folder +// > Zed ~/projects/Zed +// Case 3: Starting Zed with a file +// > Zed ~/projects/Zed/cargo.toml +// Case 4: Starting Zed with multiple project folders +// > Zed ~/projects/Zed ~/projects/Zed.dev + +#[derive(Debug, PartialEq, Eq)] +pub struct WorkspaceId(usize); + +impl Db { + /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the + /// the last workspace id + pub fn workspace_id(&self, worktree_roots: &[Arc]) -> WorkspaceId { + // Find the workspace id which is uniquely identified by this set of paths return it if found + // Otherwise: + // Find the max workspace_id and increment it as our new workspace id + // Store in the worktrees table the mapping from this new id to the set of worktree roots + unimplemented!(); + } + + /// Updates the open paths for the given workspace id. Will garbage collect items from + /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps + /// in the workspace id table + pub fn update_worktree_roots(&self, workspace_id: &WorkspaceId, worktree_roots: &[Arc]) { + // Lookup any WorkspaceIds which have the same set of roots, and delete them. 
(NOTE: this should garbage collect other tables) + // Remove the old rows which contain workspace_id + // Add rows for the new worktree_roots + + // zed /tree + // -> add tree2 + // -> udpate_worktree_roots() -> ADDs entries for /tree and /tree2, LEAVING BEHIND, the initial entry for /tree + unimplemented!(); + } + + /// Returns the previous workspace ids sorted by last modified + pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> { + // Return all the workspace ids and their associated paths ordered by the access timestamp + //ORDER BY timestamps + unimplemented!(); + } + + pub fn center_pane(&self, workspace: WorkspaceId) -> SerializedPaneGroup {} + + pub fn dock_pane(&self, workspace: WorkspaceId) -> SerializedPane {} +} + +#[cfg(test)] +mod tests { + + use std::{ + path::{Path, PathBuf}, + sync::Arc, + }; + + use crate::Db; + + use super::WorkspaceId; + + fn test_tricky_overlapping_updates() { + // DB state: + // (/tree) -> ID: 1 + // (/tree, /tree2) -> ID: 2 + // (/tree2, /tree3) -> ID: 3 + + // -> User updates 2 to: (/tree2, /tree3) + + // DB state: + // (/tree) -> ID: 1 + // (/tree2, /tree3) -> ID: 2 + // Get rid of 3 for garbage collection + + fn arc_path(path: &'static str) -> Arc { + PathBuf::from(path).into() + } + + let data = &[ + (WorkspaceId(1), vec![arc_path("/tmp")]), + (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), + (WorkspaceId(3), vec![arc_path("/tmp2"), arc_path("/tmp3")]), + ]; + + let db = Db::open_in_memory(); + + for (workspace_id, entries) in data { + db.update_worktree_roots(workspace_id, entries); //?? 
+ assert_eq!(&db.workspace_id(&[]), workspace_id) + } + + for (workspace_id, entries) in data { + assert_eq!(&db.workspace_id(entries.as_slice()), workspace_id); + } + + db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]); + // todo!(); // make sure that 3 got garbage collected + + assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), WorkspaceId(2)); + assert_eq!(db.workspace_id(&[arc_path("/tmp")]), WorkspaceId(1)); + + let recent_workspaces = db.recent_workspaces(); + assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); + assert_eq!(recent_workspaces.get(1).unwrap().0, WorkspaceId(3)); + assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1)); + } +} + +// [/tmp, /tmp2] -> ID1? +// [/tmp] -> ID2? + +/* +path | id +/tmp ID1 +/tmp ID2 +/tmp2 ID1 + + +SELECT id +FROM workspace_ids +WHERE path IN (path1, path2) +INTERSECT +SELECT id +FROM workspace_ids +WHERE path = path_2 +... and etc. for each element in path array + +If contains row, yay! If not, +SELECT max(id) FROm workspace_ids + +Select id WHERE path IN paths + +SELECT MAX(id) + +*/ From 0c466f806c50c1d0fd742fbf3bf8f1709bf15eb7 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Thu, 20 Oct 2022 15:07:58 -0700 Subject: [PATCH 08/86] WIP --- Cargo.lock | 1 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 1 + crates/db/src/items.rs | 62 +++++++++++++- crates/db/src/pane.rs | 134 +++++++++++++++++++++++++++++ crates/db/src/workspace.rs | 137 +++++++++++++++++++++--------- crates/gpui/src/presenter.rs | 3 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/dock.rs | 6 +- crates/workspace/src/workspace.rs | 1 + 10 files changed, 302 insertions(+), 45 deletions(-) create mode 100644 crates/db/src/pane.rs diff --git a/Cargo.lock b/Cargo.lock index e04624d686cf723619e3ec966d1f64b241c0ff2c..b381331ef19ffb50812df511cd85234061057436 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7617,6 +7617,7 @@ dependencies = [ "client", "collections", "context_menu", + "db", "drag_and_drop", "fs", 
"futures 0.3.24", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index e0b932003e9c842477269dcec7b889a5496cc39c..10f0858a522a37dfac88956eace1862b92a2d803 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -12,6 +12,7 @@ test-support = [] [dependencies] collections = { path = "../collections" } +gpui = { path = "../gpui" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 2b4b7cf9c3664cb70bb940e0df772b71debc92c6..bf3cd645089d561dc1ccf69c441b68f2448b2b97 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,7 @@ mod items; mod kvp; mod migrations; +mod pane; mod workspace; use std::fs; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 7454f243317312d7995b27864627b665f84ce9d8..1b633fdc474ce17450bb3f1cbd562daf7b68842d 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -1,8 +1,17 @@ -use std::{ffi::OsStr, fmt::Display, hash::Hash, os::unix::prelude::OsStrExt, path::PathBuf}; +use std::{ + ffi::OsStr, + fmt::Display, + hash::Hash, + os::unix::prelude::OsStrExt, + path::{Path, PathBuf}, + sync::Arc, +}; use anyhow::Result; use collections::HashSet; -use rusqlite::{named_params, params}; +use rusqlite::{named_params, params, types::FromSql}; + +use crate::workspace::WorkspaceId; use super::Db; @@ -62,3 +71,52 @@ CREATE TABLE editors( FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; "; + +#[derive(Debug, PartialEq, Eq)] +pub struct ItemId { + workspace_id: usize, + item_id: usize, +} + +enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +struct SerializedItemRow { + kind: SerializedItemKind, + item_id: usize, + path: Option>, + query: Option, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, +} + +impl 
SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } +} + +impl Db { + pub fn get_item(&self, item_id: ItemId) -> SerializedItem { + unimplemented!() + } + + pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} + + pub fn close_item(&self, item_id: ItemId) {} +} diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs new file mode 100644 index 0000000000000000000000000000000000000000..98feb36abf6d3455d9c4a6aea60cbebded701dd5 --- /dev/null +++ b/crates/db/src/pane.rs @@ -0,0 +1,134 @@ +use gpui::Axis; + +use crate::{items::ItemId, workspace::WorkspaceId}; + +use super::Db; + +pub(crate) const PANE_M_1: &str = " +CREATE TABLE pane_groups( + workspace_id INTEGER, + group_id INTEGER, + axis STRING NOT NULL, -- 'Vertical' / 'Horizontal' + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_group_children( + workspace_id INTEGER, + group_id INTEGER, + child_pane_id INTEGER, -- Nullable + child_group_id INTEGER, -- Nullable + index INTEGER, + PRIMARY KEY (workspace_id, group_id) +) STRICT; + +CREATE TABLE pane_items( + workspace_id INTEGER, + pane_id INTEGER, + item_id INTEGER, -- Array + index INTEGER, + KEY (workspace_id, pane_id) +) STRICT; +"; + +#[derive(Debug, PartialEq, Eq)] +pub struct PaneId { + workspace_id: WorkspaceId, + pane_id: usize, +} + +#[derive(Debug, PartialEq, Eq)] +pub struct PaneGroupId { + workspace_id: WorkspaceId, + group_id: usize, +} + +impl PaneGroupId { + pub(crate) fn root(workspace_id: WorkspaceId) -> Self { + Self { + workspace_id, + group_id: 0, + } + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedPaneGroup { + group_id: PaneGroupId, + axis: Axis, + children: Vec, +} + +struct PaneGroupChildRow { + child_pane_id: Option, + child_group_id: 
Option, + index: usize, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum PaneGroupChild { + Pane(SerializedPane), + Group(SerializedPaneGroup), +} + +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedPane { + pane_id: PaneId, + children: Vec, +} + +impl Db { + pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { + let axis = self.get_pane_group_axis(pane_group_id); + let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + for child_row in self.get_pane_group_children(pane_group_id) { + if let Some(child_pane_id) = child_row.child_pane_id { + children.push(( + child_row.index, + PaneGroupChild::Pane(self.get_pane(PaneId { + workspace_id: pane_group_id.workspace_id, + pane_id: child_pane_id, + })), + )); + } else if let Some(child_group_id) = child_row.child_group_id { + children.push(( + child_row.index, + PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + workspace_id: pane_group_id.workspace_id, + group_id: child_group_id, + })), + )); + } + } + children.sort_by_key(|(index, _)| index); + + SerializedPaneGroup { + group_id: pane_group_id, + axis, + children: children.into_iter().map(|(_, child)| child).collect(), + } + } + + pub fn get_pane_group_children( + &self, + pane_group_id: PaneGroupId, + ) -> impl Iterator { + unimplemented!() + } + + pub fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + unimplemented!(); + } + + pub fn save_center_pane_group(&self, center_pane_group: SerializedPaneGroup) { + // Delete the center pane group for this workspace and any of its children + // Generate new pane group IDs as we go through + // insert them + // Items garbage collect themselves when dropped + } + + pub(crate) fn get_pane(&self, pane_id: PaneId) -> SerializedPane { + unimplemented!(); + } + + pub fn save_pane(&self, pane: SerializedPane) {} +} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 
8ece0d5b7848cfceabfe3bc1fe59cfe450ef8ce4..e342391b715eb951784203e66a1dfe4dd9fbdc0f 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,5 +1,7 @@ use std::{path::Path, sync::Arc}; +use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; + use super::Db; pub(crate) const WORKSPACE_M_1: &str = " @@ -17,28 +19,6 @@ CREATE TABLE worktree_roots( workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) ) STRICT; - -CREATE TABLE pane_groups( - workspace_id INTEGER, - group_id INTEGER, - split_direction STRING, -- 'Vertical' / 'Horizontal' / - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_group_children( - workspace_id INTEGER, - group_id INTEGER, - child_pane_id INTEGER, -- Nullable - child_group_id INTEGER, -- Nullable - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_items( - workspace_id INTEGER, - pane_id INTEGER, - item_id INTEGER, -- Array - PRIMARY KEY (workspace_id, pane_id) -) STRICT; "; // Zed stores items with ids which are a combination of a view id during a given run and a workspace id. This @@ -52,18 +32,65 @@ CREATE TABLE pane_items( // Case 4: Starting Zed with multiple project folders // > Zed ~/projects/Zed ~/projects/Zed.dev -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct WorkspaceId(usize); +struct WorkspaceRow { + pub workspace_id: WorkspaceId, + pub center_group_id: PaneGroupId, + pub dock_pane_id: PaneId, +} + +pub struct SerializedWorkspace { + pub workspace_id: WorkspaceId, + pub center_group: SerializedPaneGroup, + pub dock_pane: Option, +} + impl Db { /// Finds or creates a workspace id for the given set of worktree roots. 
If the passed worktree roots is empty, return the /// the last workspace id - pub fn workspace_id(&self, worktree_roots: &[Arc]) -> WorkspaceId { + pub fn workspace_for_worktree_roots( + &self, + worktree_roots: &[Arc], + ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - // Otherwise: - // Find the max workspace_id and increment it as our new workspace id - // Store in the worktrees table the mapping from this new id to the set of worktree roots - unimplemented!(); + if let Some(workspace_id) = self.workspace_id(worktree_roots) { + let workspace_row = self.get_workspace_row(workspace_id); + let center_group = self.get_pane_group(workspace_row.center_group_id); + let dock_pane = self.get_pane(workspace_row.dock_pane_id); + + SerializedWorkspace { + workspace_id, + center_group, + dock_pane: Some(dock_pane), + } + } else { + let workspace_id = self.get_next_workspace_id(); + let center_group = SerializedPaneGroup { + group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + }; + + SerializedWorkspace { + workspace_id, + center_group, + dock_pane: None, + } + } + } + + fn get_next_workspace_id(&self) -> WorkspaceId { + unimplemented!() + } + + fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { + unimplemented!() + } + + fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { + unimplemented!() } /// Updates the open paths for the given workspace id. 
Will garbage collect items from @@ -80,16 +107,12 @@ impl Db { unimplemented!(); } - /// Returns the previous workspace ids sorted by last modified + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> { // Return all the workspace ids and their associated paths ordered by the access timestamp //ORDER BY timestamps unimplemented!(); } - - pub fn center_pane(&self, workspace: WorkspaceId) -> SerializedPaneGroup {} - - pub fn dock_pane(&self, workspace: WorkspaceId) -> SerializedPane {} } #[cfg(test)] @@ -104,6 +127,42 @@ mod tests { use super::WorkspaceId; + fn arc_path(path: &'static str) -> Arc { + PathBuf::from(path).into() + } + + fn test_detect_workspace_id() { + let data = &[ + (WorkspaceId(1), vec![arc_path("/tmp")]), + (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), + ( + WorkspaceId(3), + vec![arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")], + ), + ]; + + let db = Db::open_in_memory(); + + for (workspace_id, entries) in data { + db.update_worktree_roots(workspace_id, entries); //?? 
+ } + + assert_eq!(None, db.workspace_id(&[arc_path("/tmp2")])); + assert_eq!( + None, + db.workspace_id(&[arc_path("/tmp2"), arc_path("/tmp3")]) + ); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&[arc_path("/tmp")])); + assert_eq!( + Some(WorkspaceId(2)), + db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2")]) + ); + assert_eq!( + Some(WorkspaceId(3)), + db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")]) + ); + } + fn test_tricky_overlapping_updates() { // DB state: // (/tree) -> ID: 1 @@ -117,10 +176,6 @@ mod tests { // (/tree2, /tree3) -> ID: 2 // Get rid of 3 for garbage collection - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - let data = &[ (WorkspaceId(1), vec![arc_path("/tmp")]), (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), @@ -131,18 +186,18 @@ mod tests { for (workspace_id, entries) in data { db.update_worktree_roots(workspace_id, entries); //?? - assert_eq!(&db.workspace_id(&[]), workspace_id) + assert_eq!(&db.workspace_id(&[]), &Some(*workspace_id)) } for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()), workspace_id); + assert_eq!(&db.workspace_id(entries.as_slice()), &Some(*workspace_id)); } db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]); // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), WorkspaceId(2)); - assert_eq!(db.workspace_id(&[arc_path("/tmp")]), WorkspaceId(1)); + assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), Some(WorkspaceId(2))); + assert_eq!(db.workspace_id(&[arc_path("/tmp")]), Some(WorkspaceId(1))); let recent_workspaces = db.recent_workspaces(); assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); diff --git a/crates/gpui/src/presenter.rs b/crates/gpui/src/presenter.rs index d15051ef126677d516a9986de5e64ee5237b5dcc..27cd2a1347cffd2e192f34885cf17a112fcddd3f 100644 --- a/crates/gpui/src/presenter.rs +++ b/crates/gpui/src/presenter.rs @@ 
-863,8 +863,9 @@ pub struct DebugContext<'a> { pub app: &'a AppContext, } -#[derive(Clone, Copy, Debug, Eq, PartialEq)] +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] pub enum Axis { + #[default] Horizontal, Vertical, } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 2db4ef2d3ddbd7fc4a60429729944533bdc95ffe..c481792f7cc1924befd353d20e92fe9baac1600a 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -18,6 +18,7 @@ test-support = [ ] [dependencies] +db = { path = "../db" } call = { path = "../call" } client = { path = "../client" } collections = { path = "../collections" } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index b17a7ea22e0bb38293e05c553d0b5519bc698a91..fa8f182a316da29cd2a02350890fbc772f9a4a90 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,7 +137,11 @@ pub struct Dock { } impl Dock { - pub fn new(cx: &mut ViewContext, default_item_factory: DefaultItemFactory) -> Self { + pub fn new( + serialized_pane: SerializedPane, + default_item_factory: DefaultItemFactory, + cx: &mut ViewContext, + ) -> Self { let anchor = cx.global::().default_dock_anchor; let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); pane.update(cx, |pane, cx| { diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7082b61949fd468b9e84795aa39ac135c992ecb8..86eff8fb799e5a35ff2e6b63d592fa1423420043 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1110,6 +1110,7 @@ enum FollowerItem { impl Workspace { pub fn new( + serialized_workspace: SerializedWorkspace, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, From 73f0459a0fadfeebd82472729e6cf5f29b0c41d1 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Thu, 20 Oct 2022 16:24:33 -0700 Subject: [PATCH 09/86] wip --- crates/db/src/db.rs | 1 + crates/db/src/pane.rs | 24 +++++++++++++++++------- 
crates/db/src/workspace.rs | 8 +------- crates/workspace/src/dock.rs | 6 +----- crates/workspace/src/workspace.rs | 7 +++++-- 5 files changed, 25 insertions(+), 21 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index bf3cd645089d561dc1ccf69c441b68f2448b2b97..9a64986987899a971a80a8189ba51fbeb2ec6094 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -14,6 +14,7 @@ use parking_lot::Mutex; use rusqlite::Connection; use migrations::MIGRATIONS; +pub use workspace::*; #[derive(Clone)] pub enum Db { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 98feb36abf6d3455d9c4a6aea60cbebded701dd5..8ca1fd5de221d34901f68d39f8841595b7f4773a 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -30,13 +30,13 @@ CREATE TABLE pane_items( ) STRICT; "; -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneId { workspace_id: WorkspaceId, pane_id: usize, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneGroupId { workspace_id: WorkspaceId, group_id: usize, @@ -58,6 +58,16 @@ pub struct SerializedPaneGroup { children: Vec, } +impl SerializedPaneGroup { + pub(crate) fn empty_root(workspace_id: WorkspaceId) -> Self { + Self { + group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + } + } +} + struct PaneGroupChildRow { child_pane_id: Option, child_group_id: Option, @@ -99,7 +109,7 @@ impl Db { )); } } - children.sort_by_key(|(index, _)| index); + children.sort_by_key(|(index, _)| *index); SerializedPaneGroup { group_id: pane_group_id, @@ -108,18 +118,18 @@ impl Db { } } - pub fn get_pane_group_children( + fn get_pane_group_children( &self, pane_group_id: PaneGroupId, ) -> impl Iterator { - unimplemented!() + Vec::new().into_iter() } - pub fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + fn get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub 
fn save_center_pane_group(&self, center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits(&self, center_pane_group: SerializedPaneGroup) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e342391b715eb951784203e66a1dfe4dd9fbdc0f..e60cb19e3bcda4c95f6df8c82473c2e1a0f50b94 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -36,7 +36,6 @@ CREATE TABLE worktree_roots( pub struct WorkspaceId(usize); struct WorkspaceRow { - pub workspace_id: WorkspaceId, pub center_group_id: PaneGroupId, pub dock_pane_id: PaneId, } @@ -67,15 +66,10 @@ impl Db { } } else { let workspace_id = self.get_next_workspace_id(); - let center_group = SerializedPaneGroup { - group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - }; SerializedWorkspace { workspace_id, - center_group, + center_group: SerializedPaneGroup::empty_root(workspace_id), dock_pane: None, } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index fa8f182a316da29cd2a02350890fbc772f9a4a90..699b9b1d6011a4f074c5400633e7121b6b0727c6 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,11 +137,7 @@ pub struct Dock { } impl Dock { - pub fn new( - serialized_pane: SerializedPane, - default_item_factory: DefaultItemFactory, - cx: &mut ViewContext, - ) -> Self { + pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { let anchor = cx.global::().default_dock_anchor; let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); pane.update(cx, |pane, cx| { diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 86eff8fb799e5a35ff2e6b63d592fa1423420043..154cf109129b0df5b1a5074fe44e1ffd6b8ec714 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ 
-15,6 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; +use db::{SerializedWorkspace, WorkspaceId}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1064,6 +1065,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, + db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1110,8 +1112,8 @@ enum FollowerItem { impl Workspace { pub fn new( - serialized_workspace: SerializedWorkspace, project: ModelHandle, + serialized_workspace: SerializedWorkspace, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, ) -> Self { @@ -1175,7 +1177,7 @@ impl Workspace { cx.emit_global(WorkspaceCreated(weak_handle.clone())); - let dock = Dock::new(cx, dock_default_factory); + let dock = Dock::new(dock_default_factory, cx); let dock_pane = dock.pane().clone(); let left_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Left)); @@ -1207,6 +1209,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, + db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array From e5c6393f85b9853ca1512bf2818b155e8e866986 Mon Sep 17 00:00:00 2001 From: K Simmons Date: Fri, 21 Oct 2022 00:09:09 -0700 Subject: [PATCH 10/86] rebase fix - almost have serialize_workspace piped to the workspace constructor. 
Just a few compile errors left --- crates/db/src/workspace.rs | 5 +- crates/workspace/src/workspace.rs | 164 ++++++++++++++++++------------ crates/zed/src/zed.rs | 6 +- 3 files changed, 106 insertions(+), 69 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e60cb19e3bcda4c95f6df8c82473c2e1a0f50b94..aa1ca6efb5383e60575f8cbc86b6caf1bae7bbda 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,7 @@ -use std::{path::Path, sync::Arc}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 154cf109129b0df5b1a5074fe44e1ffd6b8ec714..7f82a46edf19b44f0e1e8d410caac879c4c1fde6 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{SerializedWorkspace, WorkspaceId}; +use db::{Db, SerializedWorkspace, WorkspaceId}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -180,7 +180,11 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewFile, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - open_new(&app_state, cx) + let task = open_new(&app_state, cx); + cx.spawn(|_| async { + task.await; + }) + .detach(); } } }); @@ -188,7 +192,11 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewWindow, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - open_new(&app_state, cx) + let task = open_new(&app_state, cx); + cx.spawn(|_| async { + task.await; + }) + .detach(); } } }); @@ -1112,8 
+1120,8 @@ enum FollowerItem { impl Workspace { pub fn new( - project: ModelHandle, serialized_workspace: SerializedWorkspace, + project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, ) -> Self { @@ -1242,6 +1250,74 @@ impl Workspace { this } + fn new_local( + abs_paths: &[PathBuf], + app_state: &Arc, + cx: &mut MutableAppContext, + callback: F, + ) -> Task + where + T: 'static, + F: 'static + FnOnce(&mut Workspace, &mut ViewContext) -> T, + { + let project_handle = Project::local( + app_state.client.clone(), + app_state.user_store.clone(), + app_state.project_store.clone(), + app_state.languages.clone(), + app_state.fs.clone(), + cx, + ); + + cx.spawn(|mut cx| async move { + // Get project paths for all of the abs_paths + let mut worktree_roots: HashSet> = Default::default(); + let mut project_paths = Vec::new(); + for path in abs_paths { + if let Some((worktree, project_entry)) = cx + .update(|cx| Workspace::project_path_for_path(project_handle, path, true, cx)) + .await + .log_err() + { + worktree_roots.insert(worktree.read_with(&mut cx, |tree, _| tree.abs_path())); + project_paths.push(project_entry); + } + } + + // Use the resolved worktree roots to get the serialized_db from the database + let serialized_workspace = cx.read(|cx| { + cx.global::() + .workspace_for_worktree_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + }); + + // Use the serialized workspace to construct the new window + let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { + let mut workspace = Workspace::new( + serialized_workspace, + project_handle, + app_state.default_item_factory, + cx, + ); + (app_state.initialize_workspace)(&mut workspace, &app_state, cx); + workspace + }); + + // Call open path for each of the project paths + // (this will bring them to the front if they were in kthe serialized workspace) + let tasks = workspace.update(&mut cx, |workspace, cx| { + let tasks = Vec::new(); + for path in project_paths { + 
tasks.push(workspace.open_path(path, true, cx)); + } + tasks + }); + futures::future::join_all(tasks.into_iter()).await; + + // Finally call callback on the workspace + workspace.update(&mut cx, |workspace, cx| callback(workspace, cx)) + }) + } + pub fn weak_handle(&self) -> WeakViewHandle { self.weak_self.clone() } @@ -1289,34 +1365,18 @@ impl Workspace { /// to the callback. Otherwise, a new empty window will be created. pub fn with_local_workspace( &mut self, + app_state: &Arc, cx: &mut ViewContext, - app_state: Arc, callback: F, - ) -> T + ) -> Task where T: 'static, F: FnOnce(&mut Workspace, &mut ViewContext) -> T, { if self.project.read(cx).is_local() { - callback(self, cx) + Task::Ready(Some(callback(self, cx))) } else { - let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new( - Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ), - app_state.default_item_factory, - cx, - ); - (app_state.initialize_workspace)(&mut workspace, &app_state, cx); - workspace - }); - workspace.update(cx, callback) + Self::new_local(&[], app_state, cx, callback) } } @@ -1479,7 +1539,7 @@ impl Workspace { for path in &abs_paths { project_paths.push( this.update(&mut cx, |this, cx| { - this.project_path_for_path(path, visible, cx) + Workspace::project_path_for_path(this.project, path, visible, cx) }) .await .log_err(), @@ -1544,15 +1604,15 @@ impl Workspace { } fn project_path_for_path( - &self, + project: ModelHandle, abs_path: &Path, visible: bool, - cx: &mut ViewContext, + cx: &mut MutableAppContext, ) -> Task, ProjectPath)>> { - let entry = self.project().update(cx, |project, cx| { + let entry = project.update(cx, |project, cx| { project.find_or_create_local_worktree(abs_path, visible, cx) }); - cx.spawn(|_, cx| async move { + cx.spawn(|cx| async move { let (worktree, path) = entry.await?; let 
worktree_id = worktree.read_with(&cx, |t, _| t.id()); Ok(( @@ -2957,7 +3017,6 @@ pub fn open_paths( let app_state = app_state.clone(); let abs_paths = abs_paths.to_vec(); cx.spawn(|mut cx| async move { - let mut new_project = None; let workspace = if let Some(existing) = existing { existing } else { @@ -2966,24 +3025,15 @@ pub fn open_paths( .await .contains(&false); - cx.add_window((app_state.build_window_options)(), |cx| { - let project = Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ); - new_project = Some(project.clone()); - let mut workspace = Workspace::new(project, app_state.default_item_factory, cx); - (app_state.initialize_workspace)(&mut workspace, &app_state, cx); - if contains_directory { - workspace.toggle_sidebar(SidebarSide::Left, cx); - } - workspace + cx.update(|cx| { + Workspace::new_local(&abs_paths[..], &app_state, cx, move |workspace, cx| { + if contains_directory { + workspace.toggle_sidebar(SidebarSide::Left, cx); + } + cx.handle() + }) }) - .1 + .await }; let items = workspace @@ -2996,24 +3046,8 @@ pub fn open_paths( }) } -fn open_new(app_state: &Arc, cx: &mut MutableAppContext) { - let (window_id, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new( - Project::local( - app_state.client.clone(), - app_state.user_store.clone(), - app_state.project_store.clone(), - app_state.languages.clone(), - app_state.fs.clone(), - cx, - ), - app_state.default_item_factory, - cx, - ); - (app_state.initialize_workspace)(&mut workspace, app_state, cx); - workspace - }); - cx.dispatch_action_at(window_id, workspace.id(), NewFile); +fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { + Workspace::new_local(&[], app_state, cx, |_, cx| cx.dispatch_action(NewFile)) } #[cfg(test)] diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 
bb33109d0d873f57c539cfa726012216894e779b..71a99cb3b21c4690524e35cf28e6faa0add81d9c 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -463,7 +463,7 @@ fn open_config_file( workspace .update(&mut cx, |workspace, cx| { - workspace.with_local_workspace(cx, app_state, |workspace, cx| { + workspace.with_local_workspace(app_state, cx, |workspace, cx| { workspace.open_paths(vec![path.to_path_buf()], false, cx) }) }) @@ -480,7 +480,7 @@ fn open_log_file( ) { const MAX_LINES: usize = 1000; - workspace.with_local_workspace(cx, app_state.clone(), |_, cx| { + workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let (old_log, new_log) = futures::join!( app_state.fs.load(&paths::OLD_LOG), @@ -532,7 +532,7 @@ fn open_telemetry_log_file( app_state: Arc, cx: &mut ViewContext, ) { - workspace.with_local_workspace(cx, app_state.clone(), |_, cx| { + workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let workspace = workspace.upgrade(&cx)?; let path = app_state.client.telemetry_log_file_path()?; From 500ecbf91504db010a03fa6dc921b2416d4f22f8 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 24 Oct 2022 16:55:32 -0700 Subject: [PATCH 11/86] Rebase fix + Started writing the real SQL we're going to need --- crates/db/Cargo.toml | 3 +- crates/db/README.md | 5 ++ crates/db/examples/serialize_workspace.rs | 22 ++++++ crates/db/src/db.rs | 56 +++++++++----- crates/db/src/migrations.rs | 3 +- crates/db/src/pane.rs | 7 ++ crates/db/src/workspace.rs | 90 +++++++++-------------- test.rs | 0 8 files changed, 107 insertions(+), 79 deletions(-) create mode 100644 crates/db/README.md create mode 100644 crates/db/examples/serialize_workspace.rs create mode 100644 test.rs diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 10f0858a522a37dfac88956eace1862b92a2d803..9c841519d260bec93c0fac42e23ad8512d3bd15b 100644 --- a/crates/db/Cargo.toml +++ 
b/crates/db/Cargo.toml @@ -18,8 +18,9 @@ async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" -rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] } +rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] } rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" } +>>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need) serde = { workspace = true } serde_rusqlite = "0.31.0" diff --git a/crates/db/README.md b/crates/db/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d4ea2fee399edd6842ffd8e48d8d93aa4d7d84d8 --- /dev/null +++ b/crates/db/README.md @@ -0,0 +1,5 @@ +# Building Queries + +First, craft your test data. The examples folder shows a template for building a test-db, and can be ran with `cargo run --example [your-example]`. + +To actually use and test your queries, import the generated DB file into https://sqliteonline.com/ \ No newline at end of file diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs new file mode 100644 index 0000000000000000000000000000000000000000..e2bf28871000da3ceb5664fbe94ffadccd698559 --- /dev/null +++ b/crates/db/examples/serialize_workspace.rs @@ -0,0 +1,22 @@ +use std::{fs::File, path::Path}; + +const TEST_FILE: &'static str = "test-db.db"; + +fn main() -> anyhow::Result<()> { + let db = db::Db::open_in_memory(); + if db.real().is_none() { + return Err(anyhow::anyhow!("Migrations failed")); + } + let file = Path::new(TEST_FILE); + + let f = File::create(file)?; + drop(f); + + db.write_kvp("test", "1")?; + db.write_kvp("test", "2")?; + db.write_to(file).ok(); + + println!("Wrote database!"); + + Ok(()) +} diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 9a64986987899a971a80a8189ba51fbeb2ec6094..320b131ea637309b6cd80fd0f2a752563d89629d 100644 --- 
a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -11,7 +11,7 @@ use std::sync::Arc; use anyhow::Result; use log::error; use parking_lot::Mutex; -use rusqlite::Connection; +use rusqlite::{backup, Connection}; use migrations::MIGRATIONS; pub use workspace::*; @@ -54,8 +54,29 @@ impl Db { }) } + fn initialize(mut conn: Connection) -> Result> { + MIGRATIONS.to_latest(&mut conn)?; + + conn.pragma_update(None, "journal_mode", "WAL")?; + conn.pragma_update(None, "synchronous", "NORMAL")?; + conn.pragma_update(None, "foreign_keys", true)?; + conn.pragma_update(None, "case_sensitive_like", true)?; + + Ok(Mutex::new(conn)) + } + + pub fn persisting(&self) -> bool { + self.real().and_then(|db| db.path.as_ref()).is_some() + } + + pub fn real(&self) -> Option<&RealDb> { + match self { + Db::Real(db) => Some(&db), + _ => None, + } + } + /// Open a in memory database for testing and as a fallback. - #[cfg(any(test, feature = "test-support"))] pub fn open_in_memory() -> Self { Connection::open_in_memory() .map_err(Into::into) @@ -75,26 +96,21 @@ impl Db { }) } - fn initialize(mut conn: Connection) -> Result> { - MIGRATIONS.to_latest(&mut conn)?; - - conn.pragma_update(None, "journal_mode", "WAL")?; - conn.pragma_update(None, "synchronous", "NORMAL")?; - conn.pragma_update(None, "foreign_keys", true)?; - conn.pragma_update(None, "case_sensitive_like", true)?; - - Ok(Mutex::new(conn)) - } + pub fn write_to>(&self, dest: P) -> Result<()> { + self.real() + .map(|db| { + if db.path.is_some() { + panic!("DB already exists"); + } - pub fn persisting(&self) -> bool { - self.real().and_then(|db| db.path.as_ref()).is_some() - } + let lock = db.connection.lock(); + let mut dst = Connection::open(dest)?; + let backup = backup::Backup::new(&lock, &mut dst)?; + backup.step(-1)?; - pub fn real(&self) -> Option<&RealDb> { - match self { - Db::Real(db) => Some(&db), - _ => None, - } + Ok(()) + }) + .unwrap_or(Ok(())) } } diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs 
index 3a21c7fa6fa6a965b8ceb544d184c536c75f951d..e10c388d5c760bdd8c699d8be5d9b1f9a046df1f 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::kvp::KVP_M_1; +use crate::{kvp::KVP_M_1, WORKSPACE_M_1}; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -10,5 +10,6 @@ use crate::kvp::KVP_M_1; lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ M::up(KVP_M_1), + M::up(WORKSPACE_M_1) ]); } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 8ca1fd5de221d34901f68d39f8841595b7f4773a..89721157c3e84785ffc338b4d855abc541198c65 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -28,6 +28,13 @@ CREATE TABLE pane_items( index INTEGER, KEY (workspace_id, pane_id) ) STRICT; + +ALTER TABLE WORKSPACE +ADD THESE COLS: +center_group INTEGER NOT NULL, +dock_pane INTEGER NOT NULL, +-- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) +-- FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id) "; #[derive(Debug, PartialEq, Eq, Copy, Clone)] diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index aa1ca6efb5383e60575f8cbc86b6caf1bae7bbda..6093b1035519c4dea7172923514e9b637ef1ef98 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,7 +1,6 @@ -use std::{ - path::{Path, PathBuf}, - sync::Arc, -}; +use anyhow::Result; + +use std::{path::Path, sync::Arc}; use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; @@ -9,18 +8,15 @@ use super::Db; pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, - center_group INTEGER NOT NULL, - dock_pane INTEGER NOT NULL, - timestamp INTEGER, - FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) - FOREIGN KEY(dock_pane) 
REFERENCES pane_items(pane_id) + workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP, + dummy_data INTEGER ) STRICT; CREATE TABLE worktree_roots( worktree_root BLOB NOT NULL, workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ) STRICT; "; @@ -35,18 +31,19 @@ CREATE TABLE worktree_roots( // Case 4: Starting Zed with multiple project folders // > Zed ~/projects/Zed ~/projects/Zed.dev -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct WorkspaceId(usize); +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +pub struct WorkspaceId(i64); struct WorkspaceRow { pub center_group_id: PaneGroupId, pub dock_pane_id: PaneId, } +#[derive(Default)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, - pub center_group: SerializedPaneGroup, - pub dock_pane: Option, + // pub center_group: SerializedPaneGroup, + // pub dock_pane: Option, } impl Db { @@ -58,28 +55,33 @@ impl Db { ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found if let Some(workspace_id) = self.workspace_id(worktree_roots) { - let workspace_row = self.get_workspace_row(workspace_id); - let center_group = self.get_pane_group(workspace_row.center_group_id); - let dock_pane = self.get_pane(workspace_row.dock_pane_id); + // TODO + // let workspace_row = self.get_workspace_row(workspace_id); + // let center_group = self.get_pane_group(workspace_row.center_group_id); + // let dock_pane = self.get_pane(workspace_row.dock_pane_id); SerializedWorkspace { workspace_id, - center_group, - dock_pane: Some(dock_pane), + // center_group, + // dock_pane: Some(dock_pane), } } else { - let workspace_id = self.get_next_workspace_id(); - - SerializedWorkspace { - workspace_id, - center_group: SerializedPaneGroup::empty_root(workspace_id), - dock_pane: None, - } + self.make_new_workspace() } } - fn 
get_next_workspace_id(&self) -> WorkspaceId { - unimplemented!() + fn make_new_workspace(&self) -> SerializedWorkspace { + self.real() + .map(|db| { + let lock = db.connection.lock(); + match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) { + Ok(_) => SerializedWorkspace { + workspace_id: WorkspaceId(lock.last_insert_rowid()), + }, + Err(_) => Default::default(), + } + }) + .unwrap_or_default() } fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { @@ -128,6 +130,7 @@ mod tests { PathBuf::from(path).into() } + #[test] fn test_detect_workspace_id() { let data = &[ (WorkspaceId(1), vec![arc_path("/tmp")]), @@ -160,6 +163,7 @@ mod tests { ); } + #[test] fn test_tricky_overlapping_updates() { // DB state: // (/tree) -> ID: 1 @@ -202,31 +206,3 @@ mod tests { assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1)); } } - -// [/tmp, /tmp2] -> ID1? -// [/tmp] -> ID2? - -/* -path | id -/tmp ID1 -/tmp ID2 -/tmp2 ID1 - - -SELECT id -FROM workspace_ids -WHERE path IN (path1, path2) -INTERSECT -SELECT id -FROM workspace_ids -WHERE path = path_2 -... and etc. for each element in path array - -If contains row, yay! 
If not, -SELECT max(id) FROm workspace_ids - -Select id WHERE path IN paths - -SELECT MAX(id) - -*/ diff --git a/test.rs b/test.rs new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 From d7bbfb82a3c38a3d979990dbacec5c8c65d08746 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 13:18:37 -0700 Subject: [PATCH 12/86] Rebase - Successfully detecting workplace IDs :D --- .gitignore | 4 + crates/db/Cargo.toml | 1 - crates/db/examples/serialize_workspace.rs | 26 ++- crates/db/src/workspace.rs | 242 ++++++++++++++++++---- 4 files changed, 228 insertions(+), 45 deletions(-) diff --git a/.gitignore b/.gitignore index b4eba05582fe7042cb211330b269dc0798acd6c4..da1950f2b386e5655a3d1c3884b42caecf7203e8 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ /assets/themes/Internal/*.json /assets/themes/Experiments/*.json **/venv +<<<<<<< HEAD .build Packages *.xcodeproj @@ -18,3 +19,6 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc +======= +crates/db/test-db.db +>>>>>>> 9d9ad38ce (Successfully detecting workplace IDs :D) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9c841519d260bec93c0fac42e23ad8512d3bd15b..9fad1aa39a6e85f9dee323fb927d1a65a9626c6a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -20,7 +20,6 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] } rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" } ->>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need) serde = { workspace = true } serde_rusqlite = "0.31.0" diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 
e2bf28871000da3ceb5664fbe94ffadccd698559..51082a811ff88252cd87e00ada9280c7456e9b7a 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -13,7 +13,31 @@ fn main() -> anyhow::Result<()> { drop(f); db.write_kvp("test", "1")?; - db.write_kvp("test", "2")?; + db.write_kvp("test-2", "2")?; + + let workspace_1 = db.workspace_for_worktree_roots(&[]); + let workspace_2 = db.workspace_for_worktree_roots(&[]); + let workspace_3 = db.workspace_for_worktree_roots(&[]); + let workspace_4 = db.workspace_for_worktree_roots(&[]); + let workspace_5 = db.workspace_for_worktree_roots(&[]); + let workspace_6 = db.workspace_for_worktree_roots(&[]); + let workspace_7 = db.workspace_for_worktree_roots(&[]); + + db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) + .unwrap(); + db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) + .unwrap(); + db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) + .unwrap(); + db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) + .unwrap(); + db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(); + db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) + .unwrap(); + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) + .unwrap(); + db.write_to(file).ok(); println!("Wrote database!"); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 6093b1035519c4dea7172923514e9b637ef1ef98..d60e32f09fc7b2c459612236864e5458a0191c41 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use rusqlite::params; use std::{path::Path, sync::Arc}; @@ -14,9 +15,10 @@ CREATE TABLE workspaces( ) STRICT; CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, + worktree_root TEXT NOT NULL, --TODO: Update this to use blobs workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES 
workspaces(workspace_id) + PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; @@ -39,7 +41,7 @@ struct WorkspaceRow { pub dock_pane_id: PaneId, } -#[derive(Default)] +#[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, // pub center_group: SerializedPaneGroup, @@ -54,7 +56,7 @@ impl Db { worktree_roots: &[Arc], ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - if let Some(workspace_id) = self.workspace_id(worktree_roots) { + if let Ok(Some(workspace_id)) = self.workspace_id(worktree_roots) { // TODO // let workspace_row = self.get_workspace_row(workspace_id); // let center_group = self.get_pane_group(workspace_row.center_group_id); @@ -84,8 +86,110 @@ impl Db { .unwrap_or_default() } - fn workspace_id(&self, worktree_roots: &[Arc]) -> Option { - unimplemented!() + fn workspace_id

(&self, worktree_roots: &[P]) -> Result> + where + P: AsRef, + { + self.real() + .map(|db| { + let lock = db.connection.lock(); + + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + let mut array_binding_stmt = "(".to_string(); + for i in 0..worktree_roots.len() { + array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based + if i < worktree_roots.len() - 1 { + array_binding_stmt.push(','); + array_binding_stmt.push(' '); + } + } + array_binding_stmt.push(')'); + + // Any workspace can have multiple independent paths, and these paths + // can overlap in the database. Take this test data for example: + // + // [/tmp, /tmp2] -> 1 + // [/tmp] -> 2 + // [/tmp2, /tmp3] -> 3 + // + // This would be stred in the database like so: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // 3 /tmp2 + // 3 /tmp3 + // + // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. + // So, given an array of worktree roots, how can we find the exactly matching ID? + // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: + // - We start with a join of this table on itself, generating every possible + // pair of ((path, ID), (path, ID)), and filtering the join down to just the + // *overlapping* workspace IDs. For this small data set, this would look like: + // + // wt1.ID wt1.PATH | wt2.ID wt2.PATH + // 3 /tmp3 3 /tmp2 + // + // - Moving one SELECT out, we use the first pair's ID column to invert the selection, + // meaning we now have a list of all the entries for our array and *subsets* + // of our array: + // + // ID PATH + // 1 /tmp + // 2 /tmp + // 2 /tmp2 + // + // - To trim out the subsets, we need to exploit the fact that there can be no duplicate + // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks + // for the length of our array: + // + // ID num_matching + // 1 2 + // + // And we're done! 
We've found the matching ID correctly :D + // However, due to limitations in sqlite's query binding, we still have to do some string + // substitution to generate the correct query + let query = format!( + r#" + SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in {array_bind} AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) + GROUP BY workspace_id) + WHERE num_matching = ? + "#, + array_bind = array_binding_stmt + ); + + let mut stmt = lock.prepare_cached(&query)?; + + // Make sure we bound the parameters correctly + debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + + for i in 0..worktree_roots.len() { + // TODO: Update this to use blobs + let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); + stmt.raw_bind_parameter(i + 1, path)? + } + // No -1, because SQLite is 1 based + stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; + + let mut rows = stmt.raw_query(); + if let Ok(Some(row)) = rows.next() { + return Ok(Some(WorkspaceId(row.get(0)?))) + } + + // Ensure that this query only returns one row + debug_assert!(matches!(rows.next(), Ok(None))); + + Ok(None) + }) + .unwrap_or(Ok(None)) } fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -95,15 +199,36 @@ impl Db { /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table - pub fn update_worktree_roots(&self, workspace_id: &WorkspaceId, worktree_roots: &[Arc]) { + pub fn update_worktree_roots

( + &self, + workspace_id: &WorkspaceId, + worktree_roots: &[P], + ) -> Result<()> + where + P: AsRef, + { // Lookup any WorkspaceIds which have the same set of roots, and delete them. (NOTE: this should garbage collect other tables) + // TODO // Remove the old rows which contain workspace_id + // TODO // Add rows for the new worktree_roots - // zed /tree - // -> add tree2 - // -> udpate_worktree_roots() -> ADDs entries for /tree and /tree2, LEAVING BEHIND, the initial entry for /tree - unimplemented!(); + self.real() + .map(|db| { + let lock = db.connection.lock(); + + for root in worktree_roots { + // TODO: Update this to use blobs + let path = root.as_ref().to_string_lossy().to_string(); + lock.execute( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], + )?; + } + + Ok(()) + }) + .unwrap_or(Ok(())) } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots @@ -117,49 +242,79 @@ impl Db { #[cfg(test)] mod tests { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - }; - use crate::Db; use super::WorkspaceId; - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - #[test] - fn test_detect_workspace_id() { + fn test_more_workspace_ids() { let data = &[ - (WorkspaceId(1), vec![arc_path("/tmp")]), - (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), - ( - WorkspaceId(3), - vec![arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")], - ), + (WorkspaceId(1), vec!["/tmp1"]), + (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), + (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), + (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), + (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), + (WorkspaceId(7), vec!["/tmp2"]), ]; let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries); //?? 
+ db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries).unwrap(); } - assert_eq!(None, db.workspace_id(&[arc_path("/tmp2")])); + assert_eq!(WorkspaceId(1), db.workspace_id(&["/tmp1"]).unwrap().unwrap()); + assert_eq!( + WorkspaceId(2), + db.workspace_id(&["/tmp1", "/tmp2"]).unwrap().unwrap() + ); + assert_eq!( + WorkspaceId(3), + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().unwrap() + ); assert_eq!( - None, - db.workspace_id(&[arc_path("/tmp2"), arc_path("/tmp3")]) + WorkspaceId(4), + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap().unwrap() ); - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&[arc_path("/tmp")])); assert_eq!( - Some(WorkspaceId(2)), - db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2")]) + WorkspaceId(5), + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().unwrap() ); + assert_eq!( + WorkspaceId(6), + db.workspace_id(&["/tmp2", "/tmp4"]).unwrap().unwrap() + ); + assert_eq!(WorkspaceId(7), db.workspace_id(&["/tmp2"]).unwrap().unwrap()); + + assert_eq!(None, db.workspace_id(&["/tmp1", "/tmp5"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp5"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]).unwrap()); + } + + #[test] + fn test_detect_workspace_id() { + let data = &[ + (WorkspaceId(1), vec!["/tmp"]), + (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), + ]; + + let db = Db::open_in_memory(); + + for (workspace_id, entries) in data { + db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries).unwrap(); + } + + assert_eq!(None, db.workspace_id(&["/tmp2"]).unwrap()); + assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3"]).unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp"]).unwrap()); + assert_eq!(Some(WorkspaceId(2)), db.workspace_id(&["/tmp", "/tmp2"]).unwrap()); assert_eq!( Some(WorkspaceId(3)), - db.workspace_id(&[arc_path("/tmp"), arc_path("/tmp2"), arc_path("/tmp3")]) + 
db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap() ); } @@ -178,27 +333,28 @@ mod tests { // Get rid of 3 for garbage collection let data = &[ - (WorkspaceId(1), vec![arc_path("/tmp")]), - (WorkspaceId(2), vec![arc_path("/tmp"), arc_path("/tmp2")]), - (WorkspaceId(3), vec![arc_path("/tmp2"), arc_path("/tmp3")]), + (WorkspaceId(1), vec!["/tmp"]), + (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), ]; let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries); //?? - assert_eq!(&db.workspace_id(&[]), &Some(*workspace_id)) + db.update_worktree_roots(workspace_id, entries).unwrap(); //?? + assert_eq!(&db.workspace_id::(&[]).unwrap(), &Some(*workspace_id)) } for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()), &Some(*workspace_id)); + assert_eq!(&db.workspace_id(entries.as_slice()).unwrap(), &Some(*workspace_id)); } - db.update_worktree_roots(&WorkspaceId(2), &[arc_path("/tmp2")]); + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2"]) + .unwrap(); // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&[arc_path("/tmp2")]), Some(WorkspaceId(2))); - assert_eq!(db.workspace_id(&[arc_path("/tmp")]), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(2))); + assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); let recent_workspaces = db.recent_workspaces(); assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); From e9ea751f3d37008c2398528e2ac9a36413373676 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 15:27:51 -0700 Subject: [PATCH 13/86] All workspace tests passing :D --- crates/db/examples/serialize_workspace.rs | 14 +- crates/db/src/workspace.rs | 403 ++++++++++++++-------- 2 files changed, 261 insertions(+), 156 deletions(-) diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs 
index 51082a811ff88252cd87e00ada9280c7456e9b7a..108980ee920aa9995d6ecd4dc2d36a3b5bcfdfb1 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -1,4 +1,4 @@ -use std::{fs::File, path::Path}; +use std::{fs::File, path::Path, thread::sleep, time::Duration}; const TEST_FILE: &'static str = "test-db.db"; @@ -23,20 +23,28 @@ fn main() -> anyhow::Result<()> { let workspace_6 = db.workspace_for_worktree_roots(&[]); let workspace_7 = db.workspace_for_worktree_roots(&[]); + // Order scrambled + sleeps added because sqlite only has 1 second resolution on + // their timestamps + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) + .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) .unwrap(); + sleep(Duration::from_secs(1)); db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) .unwrap(); - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) - .unwrap(); db.write_to(file).ok(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index d60e32f09fc7b2c459612236864e5458a0191c41..09aa9f53013b2154770f5560a87577dc3946370e 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,23 +1,31 @@ use anyhow::Result; -use rusqlite::params; +use rusqlite::{params, Connection}; -use std::{path::Path, sync::Arc}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use crate::pane::{PaneGroupId, PaneId, SerializedPane, 
SerializedPaneGroup}; use super::Db; +// TODO for workspace serialization: +// - Update return types to unwrap all of the results into dummy values +// - On database failure to initialize, delete the DB file +// - Update paths to be blobs ( :( https://users.rust-lang.org/t/how-to-safely-store-a-path-osstring-in-a-sqllite-database/79712/10 ) +// - Convert hot paths to prepare-cache-execute style + pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP, - dummy_data INTEGER + timestamp TEXT DEFAULT CURRENT_TIMESTAMP ) STRICT; CREATE TABLE worktree_roots( - worktree_root TEXT NOT NULL, --TODO: Update this to use blobs + worktree_root TEXT NOT NULL, workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; @@ -76,7 +84,7 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) { + match lock.execute("INSERT INTO workspaces DEFAULT VALUES;", []) { Ok(_) => SerializedWorkspace { workspace_id: WorkspaceId(lock.last_insert_rowid()), }, @@ -93,108 +101,15 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } - } - array_binding_stmt.push(')'); - - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. 
Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: - // - // ID PATH - // 1 /tmp - // 2 /tmp - // 2 /tmp2 - // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: - // - // ID num_matching - // 1 2 - // - // And we're done! We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - let query = format!( - r#" - SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? 
- "#, - array_bind = array_binding_stmt - ); - - let mut stmt = lock.prepare_cached(&query)?; - - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - - for i in 0..worktree_roots.len() { - // TODO: Update this to use blobs - let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); - stmt.raw_bind_parameter(i + 1, path)? - } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - let mut rows = stmt.raw_query(); - if let Ok(Some(row)) = rows.next() { - return Ok(Some(WorkspaceId(row.get(0)?))) - } - - // Ensure that this query only returns one row - debug_assert!(matches!(rows.next(), Ok(None))); - - Ok(None) + get_workspace_id(worktree_roots, &lock) }) .unwrap_or(Ok(None)) } - fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { - unimplemented!() - } + // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { + // unimplemented!() + // } /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps @@ -207,24 +122,42 @@ impl Db { where P: AsRef, { - // Lookup any WorkspaceIds which have the same set of roots, and delete them. (NOTE: this should garbage collect other tables) - // TODO - // Remove the old rows which contain workspace_id - // TODO - // Add rows for the new worktree_roots - self.real() .map(|db| { - let lock = db.connection.lock(); + let mut lock = db.connection.lock(); + + let tx = lock.transaction()?; + + { + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. 
+ let preexisting_id = get_workspace_id(worktree_roots, &tx)?; + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables + tx.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; + } + } - for root in worktree_roots { - // TODO: Update this to use blobs - let path = root.as_ref().to_string_lossy().to_string(); - lock.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], + tx.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], )?; + + for root in worktree_roots { + // TODO: Update this to use blobs + let path = root.as_ref().to_string_lossy().to_string(); + + let mut stmt = tx.prepare_cached("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?; + stmt.execute(params![workspace_id.0, path])?; + } + + let mut stmt = tx.prepare_cached("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?; + stmt.execute([workspace_id.0])?; } + tx.commit()?; Ok(()) }) @@ -232,16 +165,156 @@ impl Db { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self) -> Vec<(WorkspaceId, Vec>)> { + pub fn recent_workspaces(&self, limit: usize) -> Result>)>> { // Return all the workspace ids and their associated paths ordered by the access timestamp //ORDER BY timestamps - unimplemented!(); + self.real() + .map(|db| { + let mut lock = db.connection.lock(); + + let tx = lock.transaction()?; + let result = { + let mut stmt = tx.prepare_cached( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?; + let workspace_ids = stmt + .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? 
+ .collect::, rusqlite::Error>>()?; + + let mut result = Vec::new(); + let mut stmt = tx.prepare_cached( + "SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?", + )?; + for workspace_id in workspace_ids { + let roots = stmt + .query_map([workspace_id.0], |row| { + let row = row.get::<_, String>(0)?; + Ok(PathBuf::from(Path::new(&row)).into()) + })? + .collect::, rusqlite::Error>>()?; + result.push((workspace_id, roots)) + } + + result + }; + + tx.commit()?; + + return Ok(result); + }) + .unwrap_or_else(|| Ok(Vec::new())) } } +fn get_workspace_id

( + worktree_roots: &[P], + connection: &Connection, +) -> Result, anyhow::Error> +where + P: AsRef, +{ + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + let mut array_binding_stmt = "(".to_string(); + for i in 0..worktree_roots.len() { + array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based + if i < worktree_roots.len() - 1 { + array_binding_stmt.push(','); + array_binding_stmt.push(' '); + } + } + array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths + // can overlap in the database. Take this test data for example: + // + // [/tmp, /tmp2] -> 1 + // [/tmp] -> 2 + // [/tmp2, /tmp3] -> 3 + // + // This would be stred in the database like so: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // 3 /tmp2 + // 3 /tmp3 + // + // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. + // So, given an array of worktree roots, how can we find the exactly matching ID? + // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: + // - We start with a join of this table on itself, generating every possible + // pair of ((path, ID), (path, ID)), and filtering the join down to just the + // *overlapping* workspace IDs. For this small data set, this would look like: + // + // wt1.ID wt1.PATH | wt2.ID wt2.PATH + // 3 /tmp3 3 /tmp2 + // + // - Moving one SELECT out, we use the first pair's ID column to invert the selection, + // meaning we now have a list of all the entries for our array and *subsets* + // of our array: + // + // ID PATH + // 1 /tmp + // 2 /tmp + // 2 /tmp2 + // + // - To trim out the subsets, we need to exploit the fact that there can be no duplicate + // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks + // for the length of our array: + // + // ID num_matching + // 1 2 + // + // And we're done! 
We've found the matching ID correctly :D + // However, due to limitations in sqlite's query binding, we still have to do some string + // substitution to generate the correct query + let query = format!( + r#" + SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in {array_bind} AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) + GROUP BY workspace_id) + WHERE num_matching = ? + "#, + array_bind = array_binding_stmt + ); + let mut stmt = connection.prepare_cached(&query)?; + // Make sure we bound the parameters correctly + debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + + for i in 0..worktree_roots.len() { + // TODO: Update this to use blobs + let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); + stmt.raw_bind_parameter(i + 1, path)? + } + // No -1, because SQLite is 1 based + stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; + + let mut rows = stmt.raw_query(); + if let Ok(Some(row)) = rows.next() { + return Ok(Some(WorkspaceId(row.get(0)?))); + } + // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case + // but this is here to catch it if someone refactors that constraint out. 
+ debug_assert!(matches!(rows.next(), Ok(None))); + Ok(None) +} + #[cfg(test)] mod tests { + use std::{ + path::{Path, PathBuf}, + sync::Arc, + thread::sleep, + time::Duration, + }; + use crate::Db; use super::WorkspaceId; @@ -265,32 +338,36 @@ mod tests { db.update_worktree_roots(workspace_id, entries).unwrap(); } - assert_eq!(WorkspaceId(1), db.workspace_id(&["/tmp1"]).unwrap().unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]).unwrap()); assert_eq!( - WorkspaceId(2), - db.workspace_id(&["/tmp1", "/tmp2"]).unwrap().unwrap() + db.workspace_id(&["/tmp1", "/tmp2"]).unwrap(), + Some(WorkspaceId(2)) ); assert_eq!( - WorkspaceId(3), - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().unwrap() + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(3)) ); assert_eq!( - WorkspaceId(4), - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(4)) ); assert_eq!( - WorkspaceId(5), - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap(), + Some(WorkspaceId(5)) ); assert_eq!( - WorkspaceId(6), - db.workspace_id(&["/tmp2", "/tmp4"]).unwrap().unwrap() + db.workspace_id(&["/tmp2", "/tmp4"]).unwrap(), + Some(WorkspaceId(6)) ); - assert_eq!(WorkspaceId(7), db.workspace_id(&["/tmp2"]).unwrap().unwrap()); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(7))); - assert_eq!(None, db.workspace_id(&["/tmp1", "/tmp5"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp5"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]).unwrap()); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp5"]).unwrap(), None); + assert_eq!( + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]) + .unwrap(), + None + ); } #[test] @@ -308,16 +385,23 @@ mod tests { db.update_worktree_roots(workspace_id, entries).unwrap(); } 
- assert_eq!(None, db.workspace_id(&["/tmp2"]).unwrap()); - assert_eq!(None, db.workspace_id(&["/tmp2", "/tmp3"]).unwrap()); - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp"]).unwrap()); - assert_eq!(Some(WorkspaceId(2)), db.workspace_id(&["/tmp", "/tmp2"]).unwrap()); + assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), None); + assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); + assert_eq!( + db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), + Some(WorkspaceId(2)) + ); assert_eq!( - Some(WorkspaceId(3)), - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap() + db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(3)) ); } + fn arc_path(path: &'static str) -> Arc { + PathBuf::from(path).into() + } + #[test] fn test_tricky_overlapping_updates() { // DB state: @@ -340,25 +424,38 @@ mod tests { let db = Db::open_in_memory(); + // Load in the test data for (workspace_id, entries) in data { - db.update_worktree_roots(workspace_id, entries).unwrap(); //?? 
- assert_eq!(&db.workspace_id::(&[]).unwrap(), &Some(*workspace_id)) - } - - for (workspace_id, entries) in data { - assert_eq!(&db.workspace_id(entries.as_slice()).unwrap(), &Some(*workspace_id)); + db.workspace_for_worktree_roots(&[]); + db.update_worktree_roots(workspace_id, entries).unwrap(); } - db.update_worktree_roots(&WorkspaceId(2), &["/tmp2"]) + // Make sure the timestamp updates + sleep(Duration::from_secs(1)); + // Execute the update + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]) .unwrap(); - // todo!(); // make sure that 3 got garbage collected - assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(2))); + // Make sure that workspace 3 doesn't exist + assert_eq!( + db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), + Some(WorkspaceId(2)) + ); + + // And that workspace 1 was untouched assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); - let recent_workspaces = db.recent_workspaces(); - assert_eq!(recent_workspaces.get(0).unwrap().0, WorkspaceId(2)); - assert_eq!(recent_workspaces.get(1).unwrap().0, WorkspaceId(3)); - assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1)); + // And that workspace 2 is no longer registered under this + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), None); + + let recent_workspaces = db.recent_workspaces(10).unwrap(); + assert_eq!( + recent_workspaces.get(0).unwrap(), + &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) + ); + assert_eq!( + recent_workspaces.get(1).unwrap(), + &(WorkspaceId(1), vec![arc_path("/tmp")]) + ); } } From 7d33520b2c2954fbee631bd16c62e435fe81f85b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 16:55:20 -0700 Subject: [PATCH 14/86] Tidied up code, managed errors, etc. 
--- crates/db/examples/serialize_workspace.rs | 21 +- crates/db/src/workspace.rs | 510 ++++++++++++---------- 2 files changed, 288 insertions(+), 243 deletions(-) diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 108980ee920aa9995d6ecd4dc2d36a3b5bcfdfb1..6de6d9daf2f38bbac621af06755715cbeb4766b8 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -25,26 +25,19 @@ fn main() -> anyhow::Result<()> { // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]) - .unwrap(); + db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]) - .unwrap(); + db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]) - .unwrap(); + db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]) - .unwrap(); + db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]) - .unwrap(); + db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); db.write_to(file).ok(); diff --git 
a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 09aa9f53013b2154770f5560a87577dc3946370e..e2c4d6319c2b82380777a313c42df294bc9445bd 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,7 +1,10 @@ use anyhow::Result; -use rusqlite::{params, Connection}; +use rusqlite::{params, Connection, OptionalExtension}; use std::{ + ffi::OsStr, + fmt::Debug, + os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, }; @@ -10,12 +13,6 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; use super::Db; -// TODO for workspace serialization: -// - Update return types to unwrap all of the results into dummy values -// - On database failure to initialize, delete the DB file -// - Update paths to be blobs ( :( https://users.rust-lang.org/t/how-to-safely-store-a-path-osstring-in-a-sqllite-database/79712/10 ) -// - Convert hot paths to prepare-cache-execute style - pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -23,24 +20,13 @@ CREATE TABLE workspaces( ) STRICT; CREATE TABLE worktree_roots( - worktree_root TEXT NOT NULL, + worktree_root BLOB NOT NULL, workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; "; -// Zed stores items with ids which are a combination of a view id during a given run and a workspace id. 
This - -// Case 1: Starting Zed Contextless -// > Zed -> Reopen the last -// Case 2: Starting Zed with a project folder -// > Zed ~/projects/Zed -// Case 3: Starting Zed with a file -// > Zed ~/projects/Zed/cargo.toml -// Case 4: Starting Zed with multiple project folders -// > Zed ~/projects/Zed ~/projects/Zed.dev - #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); @@ -64,7 +50,12 @@ impl Db { worktree_roots: &[Arc], ) -> SerializedWorkspace { // Find the workspace id which is uniquely identified by this set of paths return it if found - if let Ok(Some(workspace_id)) = self.workspace_id(worktree_roots) { + let mut workspace_id = self.workspace_id(worktree_roots); + if workspace_id.is_none() && worktree_roots.len() == 0 { + workspace_id = self.last_workspace_id(); + } + + if let Some(workspace_id) = workspace_id { // TODO // let workspace_row = self.get_workspace_row(workspace_id); // let center_group = self.get_pane_group(workspace_row.center_group_id); @@ -84,7 +75,8 @@ impl Db { self.real() .map(|db| { let lock = db.connection.lock(); - match lock.execute("INSERT INTO workspaces DEFAULT VALUES;", []) { + // No need to waste the memory caching this, should happen rarely. + match lock.execute("INSERT INTO workspaces DEFAULT VALUES", []) { Ok(_) => SerializedWorkspace { workspace_id: WorkspaceId(lock.last_insert_rowid()), }, @@ -94,9 +86,9 @@ impl Db { .unwrap_or_default() } - fn workspace_id

(&self, worktree_roots: &[P]) -> Result> + fn workspace_id

(&self, worktree_roots: &[P]) -> Option where - P: AsRef, + P: AsRef + Debug, { self.real() .map(|db| { @@ -104,7 +96,7 @@ impl Db { get_workspace_id(worktree_roots, &lock) }) - .unwrap_or(Ok(None)) + .unwrap_or(None) } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -114,195 +106,272 @@ impl Db { /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table - pub fn update_worktree_roots

( - &self, - workspace_id: &WorkspaceId, - worktree_roots: &[P], - ) -> Result<()> + pub fn update_worktree_roots

(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) where - P: AsRef, + P: AsRef + Debug, { - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - let tx = lock.transaction()?; - - { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_id = get_workspace_id(worktree_roots, &tx)?; - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { - // Should also delete fields in other tables - tx.execute( - "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; - } + fn logic

( + connection: &mut Connection, + worktree_roots: &[P], + workspace_id: &WorkspaceId, + ) -> Result<()> + where + P: AsRef + Debug, + { + let tx = connection.transaction()?; + { + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_id = get_workspace_id(worktree_roots, &tx); + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + tx.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; } + } + + tx.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], + )?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); tx.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], )?; + } - for root in worktree_roots { - // TODO: Update this to use blobs - let path = root.as_ref().to_string_lossy().to_string(); - - let mut stmt = tx.prepare_cached("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?; - stmt.execute(params![workspace_id.0, path])?; - } + tx.execute( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + [workspace_id.0], + )?; + } + tx.commit()?; + Ok(()) + } - let mut stmt = tx.prepare_cached("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?; - stmt.execute([workspace_id.0])?; + self.real().map(|db| { + let mut lock = db.connection.lock(); + + match logic(&mut lock, worktree_roots, workspace_id) { + Ok(_) => {} + Err(err) => { + log::error!( + "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", + workspace_id, + worktree_roots, + err + ); } - tx.commit()?; - - Ok(()) - }) - .unwrap_or(Ok(())) + } + }); } - /// Returns the previous workspace ids sorted by last modified along with their 
opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Result>)>> { - // Return all the workspace ids and their associated paths ordered by the access timestamp - //ORDER BY timestamps + pub fn last_workspace_id(&self) -> Option { + fn logic(connection: &mut Connection) -> Result> { + let mut stmt = connection + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; + + Ok(stmt + .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) + .optional()?) + } + self.real() .map(|db| { let mut lock = db.connection.lock(); - let tx = lock.transaction()?; - let result = { - let mut stmt = tx.prepare_cached( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?; - let workspace_ids = stmt - .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? - .collect::, rusqlite::Error>>()?; - - let mut result = Vec::new(); - let mut stmt = tx.prepare_cached( - "SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?", - )?; - for workspace_id in workspace_ids { - let roots = stmt - .query_map([workspace_id.0], |row| { - let row = row.get::<_, String>(0)?; - Ok(PathBuf::from(Path::new(&row)).into()) - })? - .collect::, rusqlite::Error>>()?; - result.push((workspace_id, roots)) + match logic(&mut lock) { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get last workspace id, err: {}", err); + None } + } + }) + .unwrap_or(None) + } + + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots + pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { + fn logic( + connection: &mut Connection, + limit: usize, + ) -> Result>)>, anyhow::Error> { + let tx = connection.transaction()?; + let result = { + let mut stmt = tx.prepare( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?; + + let workspace_ids = stmt + .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? 
+ .collect::, rusqlite::Error>>()?; + + let mut result = Vec::new(); + let mut stmt = + tx.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + for workspace_id in workspace_ids { + let roots = stmt + .query_map([workspace_id.0], |row| { + let row = row.get::<_, Vec>(0)?; + Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) + })? + .collect::, rusqlite::Error>>()?; + result.push((workspace_id, roots)) + } - result - }; + result + }; + tx.commit()?; + return Ok(result); + } - tx.commit()?; + self.real() + .map(|db| { + let mut lock = db.connection.lock(); - return Ok(result); + match logic(&mut lock, limit) { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() + } + } }) - .unwrap_or_else(|| Ok(Vec::new())) + .unwrap_or_else(|| Vec::new()) } } -fn get_workspace_id

( - worktree_roots: &[P], - connection: &Connection, -) -> Result, anyhow::Error> +fn get_workspace_id

(worktree_roots: &[P], connection: &Connection) -> Option where - P: AsRef, + P: AsRef + Debug, { - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); + fn logic

( + worktree_roots: &[P], + connection: &Connection, + ) -> Result, anyhow::Error> + where + P: AsRef + Debug, + { + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + let mut array_binding_stmt = "(".to_string(); + for i in 0..worktree_roots.len() { + array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based + if i < worktree_roots.len() - 1 { + array_binding_stmt.push(','); + array_binding_stmt.push(' '); + } } + array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths + // can overlap in the database. Take this test data for example: + // + // [/tmp, /tmp2] -> 1 + // [/tmp] -> 2 + // [/tmp2, /tmp3] -> 3 + // + // This would be stred in the database like so: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // 3 /tmp2 + // 3 /tmp3 + // + // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. + // So, given an array of worktree roots, how can we find the exactly matching ID? + // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: + // - We start with a join of this table on itself, generating every possible + // pair of ((path, ID), (path, ID)), and filtering the join down to just the + // *overlapping* workspace IDs. For this small data set, this would look like: + // + // wt1.ID wt1.PATH | wt2.ID wt2.PATH + // 3 /tmp3 3 /tmp2 + // + // - Moving one SELECT out, we use the first pair's ID column to invert the selection, + // meaning we now have a list of all the entries for our array and *subsets* + // of our array: + // + // ID PATH + // 1 /tmp + // 2 /tmp + // 2 /tmp2 + // + // - To trim out the subsets, we need to exploit the fact that there can be no duplicate + // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks + // for the length of our array: + // + // ID num_matching + // 1 2 + // + // And we're done! 
We've found the matching ID correctly :D + // However, due to limitations in sqlite's query binding, we still have to do some string + // substitution to generate the correct query + // 47,116,109,112,50 + // 2F746D7032 + + let query = format!( + r#" + SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in {array_bind} AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) + GROUP BY workspace_id) + WHERE num_matching = ? + "#, + array_bind = array_binding_stmt + ); + + // This will only be called on start up and when root workspaces change, no need to waste memory + // caching it. + let mut stmt = connection.prepare(&query)?; + // Make sure we bound the parameters correctly + debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + + for i in 0..worktree_roots.len() { + let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); + stmt.raw_bind_parameter(i + 1, path)? + } + // No -1, because SQLite is 1 based + stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; + + let mut rows = stmt.raw_query(); + let row = rows.next(); + let result = if let Ok(Some(row)) = row { + Ok(Some(WorkspaceId(row.get(0)?))) + } else { + Ok(None) + }; + + // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case + // but this is here to catch if someone refactors that constraint out. + debug_assert!(matches!(rows.next(), Ok(None))); + + result } - array_binding_stmt.push(')'); - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. 
Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: - // - // ID PATH - // 1 /tmp - // 2 /tmp - // 2 /tmp2 - // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: - // - // ID num_matching - // 1 2 - // - // And we're done! We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - let query = format!( - r#" - SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? 
- "#, - array_bind = array_binding_stmt - ); - let mut stmt = connection.prepare_cached(&query)?; - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - - for i in 0..worktree_roots.len() { - // TODO: Update this to use blobs - let path = &worktree_roots[i].as_ref().to_string_lossy().to_string(); - stmt.raw_bind_parameter(i + 1, path)? - } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - let mut rows = stmt.raw_query(); - if let Ok(Some(row)) = rows.next() { - return Ok(Some(WorkspaceId(row.get(0)?))); + match logic(worktree_roots, connection) { + Ok(result) => result, + Err(err) => { + log::error!( + "Failed to get the workspace ID for paths {:?}, err: {}", + worktree_roots, + err + ); + None + } } - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch it if someone refactors that constraint out. 
- debug_assert!(matches!(rows.next(), Ok(None))); - Ok(None) } #[cfg(test)] @@ -335,39 +404,26 @@ mod tests { for (workspace_id, entries) in data { db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.update_worktree_roots(workspace_id, entries); } - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"]).unwrap()); + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2"]).unwrap(), - Some(WorkspaceId(2)) - ); - assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]).unwrap(), + db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]), Some(WorkspaceId(3)) ); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(4))); assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), - Some(WorkspaceId(4)) - ); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]).unwrap(), + db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]), Some(WorkspaceId(5)) ); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp4"]).unwrap(), - Some(WorkspaceId(6)) - ); - assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), Some(WorkspaceId(7))); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp4"]), Some(WorkspaceId(6))); + assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(7))); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp5"]).unwrap(), None); - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]) - .unwrap(), - None - ); + assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]), None); + assert_eq!(db.workspace_id(&["/tmp5"]), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); } #[test] @@ -382,18 +438,15 @@ mod tests { for (workspace_id, entries) in data { db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.update_worktree_roots(workspace_id, entries); } - 
assert_eq!(db.workspace_id(&["/tmp2"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), None); - assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp2"]), None); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), None); + assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), - Some(WorkspaceId(2)) - ); - assert_eq!( - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]).unwrap(), + db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]), Some(WorkspaceId(3)) ); } @@ -426,29 +479,28 @@ mod tests { // Load in the test data for (workspace_id, entries) in data { - db.workspace_for_worktree_roots(&[]); - db.update_worktree_roots(workspace_id, entries).unwrap(); + db.make_new_workspace(); + db.update_worktree_roots(workspace_id, entries); } // Make sure the timestamp updates sleep(Duration::from_secs(1)); + // Execute the update - db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]) - .unwrap(); + db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3"]).unwrap(), - Some(WorkspaceId(2)) - ); + assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); // And that workspace 1 was untouched - assert_eq!(db.workspace_id(&["/tmp"]).unwrap(), Some(WorkspaceId(1))); + assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); + + // And that workspace 2 is no longer registered under these roots + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), None); - // And that workspace 2 is no longer registered under this - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]).unwrap(), None); + assert_eq!(Some(WorkspaceId(2)), db.last_workspace_id()); - let recent_workspaces = db.recent_workspaces(10).unwrap(); + let recent_workspaces = db.recent_workspaces(10); 
assert_eq!( recent_workspaces.get(0).unwrap(), &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) From a9dc46c950693bf20edf213264fd2b324c3ee426 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 25 Oct 2022 17:09:26 -0700 Subject: [PATCH 15/86] added stubs for more tests --- crates/db/src/workspace.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e2c4d6319c2b82380777a313c42df294bc9445bd..68008a2795d6da48b67c590daea6986129be2e89 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -49,7 +49,8 @@ impl Db { &self, worktree_roots: &[Arc], ) -> SerializedWorkspace { - // Find the workspace id which is uniquely identified by this set of paths return it if found + // Find the workspace id which is uniquely identified by this set of paths + // return it if found let mut workspace_id = self.workspace_id(worktree_roots); if workspace_id.is_none() && worktree_roots.len() == 0 { workspace_id = self.last_workspace_id(); @@ -388,6 +389,12 @@ mod tests { use super::WorkspaceId; + #[test] + fn test_empty_worktrees() { + // TODO determine update_worktree_roots(), workspace_id(), recent_workspaces() + // semantics for this case + } + #[test] fn test_more_workspace_ids() { let data = &[ From 46ff0885f088da6e74c723f7f9968bc552a9e049 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 26 Oct 2022 11:08:44 -0700 Subject: [PATCH 16/86] WIP: Writing tests --- crates/db/src/workspace.rs | 56 ++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 15 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 68008a2795d6da48b67c590daea6986129be2e89..6bccf3387c79f80deea965c22deacda6105b5bf5 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -260,10 +260,15 @@ where where P: AsRef + Debug, { + if worktree_roots.len() == 0 { + return Ok(None); + } + // Prepare the array binding string. 
SQL doesn't have syntax for this, so // we have to do it ourselves. let mut array_binding_stmt = "(".to_string(); for i in 0..worktree_roots.len() { + // This uses ?NNN for numbered placeholder syntax array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based if i < worktree_roots.len() - 1 { array_binding_stmt.push(','); @@ -292,33 +297,35 @@ where // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: // - We start with a join of this table on itself, generating every possible // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping* workspace IDs. For this small data set, this would look like: + // *overlapping but incorrect* workspace IDs. For this small data set, + // this would look like: // // wt1.ID wt1.PATH | wt2.ID wt2.PATH // 3 /tmp3 3 /tmp2 // // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array and *subsets* - // of our array: + // meaning we now have a list of all the entries for our array, minus overlapping sets, + // but including *subsets* of our worktree roots: // // ID PATH // 1 /tmp + // 1 /tmp2 // 2 /tmp - // 2 /tmp2 // - // - To trim out the subsets, we need to exploit the fact that there can be no duplicate - // entries in this table. We can just use GROUP BY, COUNT, and a WHERE clause that checks - // for the length of our array: + // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no + // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of + // our keys: // // ID num_matching // 1 2 + // 2 1 // - // And we're done! 
We've found the matching ID correctly :D - // However, due to limitations in sqlite's query binding, we still have to do some string - // substitution to generate the correct query - // 47,116,109,112,50 - // 2F746D7032 - + // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the + // matching ID correctly :D + // + // Note: due to limitations in SQLite's query binding, we have to generate the prepared + // statement with string substitution (the {array_bind}) below, and then bind the + // parameters by number. let query = format!( r#" SELECT workspace_id @@ -391,8 +398,27 @@ mod tests { #[test] fn test_empty_worktrees() { - // TODO determine update_worktree_roots(), workspace_id(), recent_workspaces() - // semantics for this case + let db = Db::open_in_memory(); + + assert_eq!(None, db.workspace_id::(&[])); + + db.make_new_workspace(); + db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); + + // Sanity check + assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"])); + + db.update_worktree_roots::(&WorkspaceId(1), &[]); + + // Make sure DB doesn't consider 'no worktrees' to be a query it can answer + assert_eq!(None, db.workspace_id::(&[])); + + assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id()); + + assert_eq!( + &(WorkspaceId(1), vec![]), + db.recent_workspaces(1).get(0).unwrap() + ) } #[test] From 5505a776e67b41786f1725a94cb6b38af676c0cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 26 Oct 2022 16:31:44 -0700 Subject: [PATCH 17/86] Figured out a good schema for the pane serialization stuff --- Cargo.lock | 1 + crates/db/Cargo.toml | 1 + crates/db/examples/serialize-pane.rs | 27 ++++++ crates/db/examples/serialize_workspace.rs | 14 +-- crates/db/src/items.rs | 9 +- crates/db/src/kvp.rs | 4 + crates/db/src/migrations.rs | 5 +- crates/db/src/pane.rs | 113 ++++++++++++++++------ crates/db/src/workspace.rs | 47 ++++++--- 9 files changed, 156 insertions(+), 65 deletions(-) create mode 
100644 crates/db/examples/serialize-pane.rs diff --git a/Cargo.lock b/Cargo.lock index b381331ef19ffb50812df511cd85234061057436..0da4d177101cce83fd7dde9aed4439c278550b68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1560,6 +1560,7 @@ dependencies = [ "rusqlite_migration", "serde", "serde_rusqlite", + "settings", "tempdir", ] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9fad1aa39a6e85f9dee323fb927d1a65a9626c6a..64e86e0345dcbafcbc90d91ba828de7d84c953bf 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -13,6 +13,7 @@ test-support = [] [dependencies] collections = { path = "../collections" } gpui = { path = "../gpui" } +settings = { path = "../settings" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs new file mode 100644 index 0000000000000000000000000000000000000000..289f70967ccbe4b84d58840327565e84f92bdcd8 --- /dev/null +++ b/crates/db/examples/serialize-pane.rs @@ -0,0 +1,27 @@ +use std::{fs::File, path::Path, thread::sleep, time::Duration}; + +const TEST_FILE: &'static str = "test-db.db"; + +fn main() -> anyhow::Result<()> { + let db = db::Db::open_in_memory(); + if db.real().is_none() { + return Err(anyhow::anyhow!("Migrations failed")); + } + let file = Path::new(TEST_FILE); + + let f = File::create(file)?; + drop(f); + + let workspace = db.make_new_workspace(); + + db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]); + + db.save_pane_splits(center_pane_group); + db.save_dock_pane(); + + db.write_to(file).ok(); + + println!("Wrote database!"); + + Ok(()) +} diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 6de6d9daf2f38bbac621af06755715cbeb4766b8..9e1b61387e31041ab540347567b37de04b339c69 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,13 +15,13 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; 
db.write_kvp("test-2", "2")?; - let workspace_1 = db.workspace_for_worktree_roots(&[]); - let workspace_2 = db.workspace_for_worktree_roots(&[]); - let workspace_3 = db.workspace_for_worktree_roots(&[]); - let workspace_4 = db.workspace_for_worktree_roots(&[]); - let workspace_5 = db.workspace_for_worktree_roots(&[]); - let workspace_6 = db.workspace_for_worktree_roots(&[]); - let workspace_7 = db.workspace_for_worktree_roots(&[]); + let workspace_1 = db.make_new_workspace(); + let workspace_2 = db.make_new_workspace(); + let workspace_3 = db.make_new_workspace(); + let workspace_4 = db.make_new_workspace(); + let workspace_5 = db.make_new_workspace(); + let workspace_6 = db.make_new_workspace(); + let workspace_7 = db.make_new_workspace(); // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 1b633fdc474ce17450bb3f1cbd562daf7b68842d..7bd4c27f432e399d0ac2bd1cdc55ff122c07410c 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -46,15 +46,8 @@ use super::Db; // Items // Sidebars +// Things I'm doing: finding about nullability for foreign keys pub(crate) const ITEMS_M_1: &str = " -CREATE TABLE items( - workspace_id INTEGER, - item_id INTEGER, - kind TEXT NOT NULL, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; - CREATE TABLE project_searches( workspace_id INTEGER, item_id INTEGER, diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 96f13d8040bf6e289711b46462ccf88d1eafc735..eecd0238ca2b8975ed644411e45e5ad6bfe87ce8 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -4,10 +4,14 @@ use rusqlite::OptionalExtension; use super::Db; pub(crate) const KVP_M_1: &str = " +BEGIN TRANSACTION; + CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; + +COMMIT; "; impl Db { diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs 
index e10c388d5c760bdd8c699d8be5d9b1f9a046df1f..8caa528fc1ef607405994338265b1460dc34f5de 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,7 +1,7 @@ use rusqlite_migration::{Migrations, M}; // use crate::items::ITEMS_M_1; -use crate::{kvp::KVP_M_1, WORKSPACE_M_1}; +use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1}; // This must be ordered by development time! Only ever add new migrations to the end!! // Bad things will probably happen if you don't monotonically edit this vec!!!! @@ -10,6 +10,7 @@ use crate::{kvp::KVP_M_1, WORKSPACE_M_1}; lazy_static::lazy_static! { pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ M::up(KVP_M_1), - M::up(WORKSPACE_M_1) + M::up(WORKSPACE_M_1), + M::up(PANE_M_1) ]); } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 89721157c3e84785ffc338b4d855abc541198c65..e4d6694319baeea2813859fcb622f989698e6a38 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,41 +1,23 @@ use gpui::Axis; +use settings::DockAnchor; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_M_1: &str = " -CREATE TABLE pane_groups( - workspace_id INTEGER, - group_id INTEGER, - axis STRING NOT NULL, -- 'Vertical' / 'Horizontal' - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_group_children( - workspace_id INTEGER, - group_id INTEGER, - child_pane_id INTEGER, -- Nullable - child_group_id INTEGER, -- Nullable - index INTEGER, - PRIMARY KEY (workspace_id, group_id) -) STRICT; - -CREATE TABLE pane_items( - workspace_id INTEGER, - pane_id INTEGER, - item_id INTEGER, -- Array - index INTEGER, - KEY (workspace_id, pane_id) -) STRICT; +// We have an many-branched, unbalanced tree with three types: +// Pane Groups +// Panes +// Items -ALTER TABLE WORKSPACE -ADD THESE COLS: -center_group INTEGER NOT NULL, -dock_pane INTEGER NOT NULL, --- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id) --- FOREIGN KEY(dock_pane) REFERENCES 
pane_items(pane_id) -"; +// The root is always a Pane Group +// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children +// Panes can have 0 or more items as children +// Panes can be their own root +// Items cannot have children +// References pointing down is hard (SQL doesn't like arrays) +// References pointing up is easy (1-1 item / parent relationship) but is harder to query +// #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneId { @@ -93,6 +75,71 @@ pub struct SerializedPane { children: Vec, } +pub(crate) const PANE_M_1: &str = " +BEGIN TRANSACTION; + +CREATE TABLE dock_panes( + dock_pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + shown INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE grouped_panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE items( + item_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE group_items( + workspace_id INTEGER NOT NULL, + pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, pane_id, item_id) + FOREIGN 
KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE dock_items( + workspace_id INTEGER NOT NULL, + dock_pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, dock_pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE +) STRICT; + +COMMIT; +"; + +struct SerializedDockPane { + //Cols +} + impl Db { pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); @@ -147,5 +194,7 @@ impl Db { unimplemented!(); } - pub fn save_pane(&self, pane: SerializedPane) {} + fn save_dock_pane() {} } + +mod tests {} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 6bccf3387c79f80deea965c22deacda6105b5bf5..cd1d22f50ba2c1d9a410d068bbca674d3fc21dac 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -13,10 +13,15 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; use super::Db; +// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging +// you might want to update some of the parsing code as well, I've left the variations in but commented +// out pub(crate) const WORKSPACE_M_1: &str = " +BEGIN TRANSACTION; + CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY AUTOINCREMENT, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL ) STRICT; CREATE TABLE worktree_roots( @@ -25,16 +30,13 @@ CREATE TABLE worktree_roots( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON 
DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; + +COMMIT; "; #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); -struct WorkspaceRow { - pub center_group_id: PaneGroupId, - pub dock_pane_id: PaneId, -} - #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, @@ -72,7 +74,7 @@ impl Db { } } - fn make_new_workspace(&self) -> SerializedWorkspace { + pub fn make_new_workspace(&self) -> SerializedWorkspace { self.real() .map(|db| { let lock = db.connection.lock(); @@ -140,6 +142,8 @@ impl Db { for root in worktree_roots { let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); tx.execute( "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", @@ -162,6 +166,7 @@ impl Db { match logic(&mut lock, worktree_roots, workspace_id) { Ok(_) => {} Err(err) => { + dbg!(&err); log::error!( "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", workspace_id, @@ -222,6 +227,9 @@ impl Db { .query_map([workspace_id.0], |row| { let row = row.get::<_, Vec>(0)?; Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) + // If you need to debug this, here's the string parsing: + // let row = row.get::<_, String>(0)?; + // Ok(PathBuf::from(row).into()) })? .collect::, rusqlite::Error>>()?; result.push((workspace_id, roots)) @@ -260,6 +268,7 @@ where where P: AsRef + Debug, { + // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); } @@ -297,7 +306,7 @@ where // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: // - We start with a join of this table on itself, generating every possible // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but incorrect* workspace IDs. For this small data set, + // *overlapping but non-matching* workspace IDs. 
For this small data set, // this would look like: // // wt1.ID wt1.PATH | wt2.ID wt2.PATH @@ -349,6 +358,8 @@ where for i in 0..worktree_roots.len() { let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() stmt.raw_bind_parameter(i + 1, path)? } // No -1, because SQLite is 1 based @@ -402,22 +413,26 @@ mod tests { assert_eq!(None, db.workspace_id::(&[])); - db.make_new_workspace(); + db.make_new_workspace(); //ID 1 + db.make_new_workspace(); //ID 2 db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"])); + assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); db.update_worktree_roots::(&WorkspaceId(1), &[]); - // Make sure DB doesn't consider 'no worktrees' to be a query it can answer - assert_eq!(None, db.workspace_id::(&[])); + // Make sure 'no worktrees' fails correctly. 
returning [1, 2] from this + // call would be semantically correct (as those are the workspaces that + // don't have roots) but I'd prefer that this API to either return exactly one + // workspace, and None otherwise + assert_eq!(db.workspace_id::(&[]), None,); - assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id()); + assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1))); assert_eq!( - &(WorkspaceId(1), vec![]), - db.recent_workspaces(1).get(0).unwrap() + db.recent_workspaces(2), + vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),], ) } From b9cbd4084e15b7ab2234323a4ce6659359514bbd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 13:58:54 -0700 Subject: [PATCH 18/86] WIP: fixing up behavior of workspace initialization --- crates/db/examples/serialize-pane.rs | 16 +- crates/db/examples/serialize_workspace.rs | 28 +- crates/db/src/db.rs | 8 +- crates/db/src/pane.rs | 39 +- crates/db/src/workspace.rs | 450 ++++++++++++---------- 5 files changed, 318 insertions(+), 223 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 289f70967ccbe4b84d58840327565e84f92bdcd8..9cf32dfd5792004329a2a65405b3c336c55edccb 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,5 +1,8 @@ use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use db::pane::SerializedDockPane; +use settings::DockAnchor; + const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { @@ -12,12 +15,17 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.make_new_workspace(); + let workspace = db.make_new_workspace::(&[]); + + db.update_worktrees(&workspace.workspace_id, &["/tmp"]); - db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]); + db.save_dock_pane(SerializedDockPane { + workspace: workspace.workspace_id, + anchor_position: DockAnchor::Expanded, + shown: true, + }); - 
db.save_pane_splits(center_pane_group); - db.save_dock_pane(); + let new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 9e1b61387e31041ab540347567b37de04b339c69..97d50bbe5bc240e97b349b92e454d7a0f225f8a0 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,29 +15,29 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; db.write_kvp("test-2", "2")?; - let workspace_1 = db.make_new_workspace(); - let workspace_2 = db.make_new_workspace(); - let workspace_3 = db.make_new_workspace(); - let workspace_4 = db.make_new_workspace(); - let workspace_5 = db.make_new_workspace(); - let workspace_6 = db.make_new_workspace(); - let workspace_7 = db.make_new_workspace(); + let workspace_1 = db.make_new_workspace::(&[]); + let workspace_2 = db.make_new_workspace::(&[]); + let workspace_3 = db.make_new_workspace::(&[]); + let workspace_4 = db.make_new_workspace::(&[]); + let workspace_5 = db.make_new_workspace::(&[]); + let workspace_6 = db.make_new_workspace::(&[]); + let workspace_7 = db.make_new_workspace::(&[]); // Order scrambled + sleeps added because sqlite only has 1 second resolution on // their timestamps - db.update_worktree_roots(&workspace_7.workspace_id, &["/tmp2"]); + db.update_worktrees(&workspace_7.workspace_id, &["/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_1.workspace_id, &["/tmp1"]); + db.update_worktrees(&workspace_1.workspace_id, &["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); + db.update_worktrees(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); + db.update_worktrees(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); 
sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); + db.update_worktrees(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); + db.update_worktrees(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktree_roots(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); + db.update_worktrees(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); db.write_to(file).ok(); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 320b131ea637309b6cd80fd0f2a752563d89629d..107bbffdf439491ef4bb61f814be71f83311b6bd 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,8 +1,8 @@ -mod items; -mod kvp; +pub mod items; +pub mod kvp; mod migrations; -mod pane; -mod workspace; +pub mod pane; +pub mod workspace; use std::fs; use std::path::{Path, PathBuf}; diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index e4d6694319baeea2813859fcb622f989698e6a38..37f91c58a37b897a1d14c1bab44cb17322c1b1d0 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -136,8 +136,11 @@ CREATE TABLE dock_items( COMMIT; "; -struct SerializedDockPane { - //Cols +#[derive(Default, Debug)] +pub struct SerializedDockPane { + pub workspace: WorkspaceId, + pub anchor_position: DockAnchor, + pub shown: bool, } impl Db { @@ -194,7 +197,35 @@ impl Db { unimplemented!(); } - fn save_dock_pane() {} + pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + unimplemented!() + } + + pub fn save_dock_pane(&self, dock_pane: SerializedDockPane) {} } -mod tests {} +#[cfg(test)] +mod tests { + use settings::DockAnchor; + + use crate::Db; + + use super::SerializedDockPane; + + #[test] + fn test_basic_dock_pane() { + let db = Db::open_in_memory(); + + let workspace = db.make_new_workspace::(&[]); + + db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + + 
db.save_dock_pane(SerializedDockPane { + workspace: workspace.workspace_id, + anchor_position: DockAnchor::Expanded, + shown: true, + }); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); + } +} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cd1d22f50ba2c1d9a410d068bbca674d3fc21dac..0d8dae59ef9edbfc6b26c95eedb2bec596f02453 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -9,7 +9,7 @@ use std::{ sync::Arc, }; -use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup}; +use crate::pane::SerializedDockPane; use super::Db; @@ -41,16 +41,16 @@ pub struct WorkspaceId(i64); pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, // pub center_group: SerializedPaneGroup, - // pub dock_pane: Option, + pub dock_pane: Option, } impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the /// the last workspace id - pub fn workspace_for_worktree_roots( - &self, - worktree_roots: &[Arc], - ) -> SerializedWorkspace { + pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> SerializedWorkspace + where + P: AsRef + Debug, + { // Find the workspace id which is uniquely identified by this set of paths // return it if found let mut workspace_id = self.workspace_id(worktree_roots); @@ -59,31 +59,50 @@ impl Db { } if let Some(workspace_id) = workspace_id { - // TODO - // let workspace_row = self.get_workspace_row(workspace_id); - // let center_group = self.get_pane_group(workspace_row.center_group_id); - // let dock_pane = self.get_pane(workspace_row.dock_pane_id); - SerializedWorkspace { workspace_id, - // center_group, - // dock_pane: Some(dock_pane), + dock_pane: self.get_dock_pane(workspace_id), } } else { - self.make_new_workspace() + self.make_new_workspace(worktree_roots) } } - pub fn make_new_workspace(&self) -> SerializedWorkspace { + pub fn make_new_workspace

(&self, worktree_roots: &[P]) -> SerializedWorkspace + where + P: AsRef + Debug, + { + fn logic

( + connection: &mut Connection, + worktree_roots: &[P], + ) -> Result + where + P: AsRef + Debug, + { + let tx = connection.transaction()?; + tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?; + + let id = WorkspaceId(tx.last_insert_rowid()); + + update_worktree_roots(&tx, &id, worktree_roots)?; + + Ok(SerializedWorkspace { + workspace_id: id, + dock_pane: None, + }) + } + self.real() .map(|db| { - let lock = db.connection.lock(); + let mut lock = db.connection.lock(); + // No need to waste the memory caching this, should happen rarely. - match lock.execute("INSERT INTO workspaces DEFAULT VALUES", []) { - Ok(_) => SerializedWorkspace { - workspace_id: WorkspaceId(lock.last_insert_rowid()), - }, - Err(_) => Default::default(), + match logic(&mut lock, worktree_roots) { + Ok(serialized_workspace) => serialized_workspace, + Err(err) => { + log::error!("Failed to insert new workspace into DB: {}", err); + Default::default() + } } }) .unwrap_or_default() @@ -97,7 +116,13 @@ impl Db { .map(|db| { let lock = db.connection.lock(); - get_workspace_id(worktree_roots, &lock) + match get_workspace_id(worktree_roots, &lock) { + Ok(workspace_id) => workspace_id, + Err(err) => { + log::error!("Failed ot get workspace_id: {}", err); + None + } + } }) .unwrap_or(None) } @@ -109,61 +134,16 @@ impl Db { /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table - pub fn update_worktree_roots

(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) + pub fn update_worktrees

(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) where P: AsRef + Debug, { - fn logic

( - connection: &mut Connection, - worktree_roots: &[P], - workspace_id: &WorkspaceId, - ) -> Result<()> - where - P: AsRef + Debug, - { - let tx = connection.transaction()?; - { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_id = get_workspace_id(worktree_roots, &tx); - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - tx.execute( - "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; - } - } - - tx.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], - )?; - - for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - - tx.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], - )?; - } - - tx.execute( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - [workspace_id.0], - )?; - } - tx.commit()?; - Ok(()) - } - self.real().map(|db| { let mut lock = db.connection.lock(); - match logic(&mut lock, worktree_roots, workspace_id) { + let tx = lock.transaction(); + + match tx.map(|tx| update_worktree_roots(&tx, workspace_id, worktree_roots)) { Ok(_) => {} Err(err) => { dbg!(&err); @@ -257,86 +237,130 @@ impl Db { } } -fn get_workspace_id

(worktree_roots: &[P], connection: &Connection) -> Option +fn update_worktree_roots

( + connection: &Connection, + workspace_id: &WorkspaceId, + worktree_roots: &[P], +) -> Result<()> where P: AsRef + Debug, { - fn logic

( - worktree_roots: &[P], - connection: &Connection, - ) -> Result, anyhow::Error> - where - P: AsRef + Debug, - { - // Short circuit if we can - if worktree_roots.len() == 0 { - return Ok(None); + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_id = get_workspace_id(worktree_roots, &connection)?; + if let Some(preexisting_id) = preexisting_id { + if preexisting_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + connection.execute( + "DELETE FROM workspaces WHERE workspace_id = ?", + [preexisting_id.0], + )?; } + } - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - // This uses ?NNN for numbered placeholder syntax - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } + connection.execute( + "DELETE FROM worktree_roots WHERE workspace_id = ?", + [workspace_id.0], + )?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); + + connection.execute( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + params![workspace_id.0, path], + )?; + } + + connection.execute( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + [workspace_id.0], + )?; + + Ok(()) +} + +fn get_workspace_id

(worktree_roots: &[P], connection: &Connection) -> Result> +where + P: AsRef + Debug, +{ + // fn logic

( + // worktree_roots: &[P], + // connection: &Connection, + // ) -> Result, anyhow::Error> + // where + // P: AsRef + Debug, + // { + // Short circuit if we can + if worktree_roots.len() == 0 { + return Ok(None); + } + + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + let mut array_binding_stmt = "(".to_string(); + for i in 0..worktree_roots.len() { + // This uses ?NNN for numbered placeholder syntax + array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based + if i < worktree_roots.len() - 1 { + array_binding_stmt.push(','); + array_binding_stmt.push(' '); } - array_binding_stmt.push(')'); - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but non-matching* workspace IDs. 
For this small data set, - // this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array, minus overlapping sets, - // but including *subsets* of our worktree roots: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // - // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no - // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of - // our keys: - // - // ID num_matching - // 1 2 - // 2 1 - // - // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the - // matching ID correctly :D - // - // Note: due to limitations in SQLite's query binding, we have to generate the prepared - // statement with string substitution (the {array_bind}) below, and then bind the - // parameters by number. - let query = format!( - r#" + } + array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths + // can overlap in the database. Take this test data for example: + // + // [/tmp, /tmp2] -> 1 + // [/tmp] -> 2 + // [/tmp2, /tmp3] -> 3 + // + // This would be stred in the database like so: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // 3 /tmp2 + // 3 /tmp3 + // + // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. + // So, given an array of worktree roots, how can we find the exactly matching ID? + // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: + // - We start with a join of this table on itself, generating every possible + // pair of ((path, ID), (path, ID)), and filtering the join down to just the + // *overlapping but non-matching* workspace IDs. 
For this small data set, + // this would look like: + // + // wt1.ID wt1.PATH | wt2.ID wt2.PATH + // 3 /tmp3 3 /tmp2 + // + // - Moving one SELECT out, we use the first pair's ID column to invert the selection, + // meaning we now have a list of all the entries for our array, minus overlapping sets, + // but including *subsets* of our worktree roots: + // + // ID PATH + // 1 /tmp + // 1 /tmp2 + // 2 /tmp + // + // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no + // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of + // our keys: + // + // ID num_matching + // 1 2 + // 2 1 + // + // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the + // matching ID correctly :D + // + // Note: due to limitations in SQLite's query binding, we have to generate the prepared + // statement with string substitution (the {array_bind}) below, and then bind the + // parameters by number. + let query = format!( + r#" SELECT workspace_id FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots WHERE worktree_root in {array_bind} AND workspace_id NOT IN @@ -347,50 +371,50 @@ where GROUP BY workspace_id) WHERE num_matching = ? "#, - array_bind = array_binding_stmt - ); - - // This will only be called on start up and when root workspaces change, no need to waste memory - // caching it. - let mut stmt = connection.prepare(&query)?; - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); - - for i in 0..worktree_roots.len() { - let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.raw_bind_parameter(i + 1, path)? 
- } - // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - - let mut rows = stmt.raw_query(); - let row = rows.next(); - let result = if let Ok(Some(row)) = row { - Ok(Some(WorkspaceId(row.get(0)?))) - } else { - Ok(None) - }; + array_bind = array_binding_stmt + ); + + // This will only be called on start up and when root workspaces change, no need to waste memory + // caching it. + let mut stmt = connection.prepare(&query)?; + // Make sure we bound the parameters correctly + debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + + for i in 0..worktree_roots.len() { + let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() + stmt.raw_bind_parameter(i + 1, path)? + } + // No -1, because SQLite is 1 based + stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; + + let mut rows = stmt.raw_query(); + let row = rows.next(); + let result = if let Ok(Some(row)) = row { + Ok(Some(WorkspaceId(row.get(0)?))) + } else { + Ok(None) + }; - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch if someone refactors that constraint out. - debug_assert!(matches!(rows.next(), Ok(None))); + // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case + // but this is here to catch if someone refactors that constraint out. 
+ debug_assert!(matches!(rows.next(), Ok(None))); - result - } + result + // } - match logic(worktree_roots, connection) { - Ok(result) => result, - Err(err) => { - log::error!( - "Failed to get the workspace ID for paths {:?}, err: {}", - worktree_roots, - err - ); - None - } - } + // match logic(worktree_roots, connection) { + // Ok(result) => result, + // Err(err) => { + // log::error!( + // "Failed to get the workspace ID for paths {:?}, err: {}", + // worktree_roots, + // err + // ); + // None + // } + // } } #[cfg(test)] @@ -407,20 +431,52 @@ mod tests { use super::WorkspaceId; + #[test] + fn test_worktree_for_roots() { + let db = Db::open_in_memory(); + + // Test creation in 0 case + let workspace_1 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + + // Test pulling from recent workspaces + let workspace_1 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + + sleep(Duration::from_secs(1)); + db.make_new_workspace::(&[]); + + // Test pulling another value from recent workspaces + let workspace_2 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + + // Test creating a new workspace that doesn't exist already + let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // Make sure it's in the recent workspaces.... 
+ let workspace_3 = db.workspace_for_roots::(&[]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // And that it can be pulled out again + let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + } + #[test] fn test_empty_worktrees() { let db = Db::open_in_memory(); assert_eq!(None, db.workspace_id::(&[])); - db.make_new_workspace(); //ID 1 - db.make_new_workspace(); //ID 2 - db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]); + db.make_new_workspace::(&[]); //ID 1 + db.make_new_workspace::(&[]); //ID 2 + db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); - db.update_worktree_roots::(&WorkspaceId(1), &[]); + db.update_worktrees::(&WorkspaceId(1), &[]); // Make sure 'no worktrees' fails correctly. returning [1, 2] from this // call would be semantically correct (as those are the workspaces that @@ -451,8 +507,8 @@ mod tests { let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); @@ -485,8 +541,8 @@ mod tests { let db = Db::open_in_memory(); for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } assert_eq!(db.workspace_id(&["/tmp2"]), None); @@ -527,15 +583,15 @@ mod tests { // Load in the test data for (workspace_id, entries) in data { - db.make_new_workspace(); - db.update_worktree_roots(workspace_id, entries); + db.make_new_workspace::(&[]); + db.update_worktrees(workspace_id, entries); } // Make sure the timestamp updates sleep(Duration::from_secs(1)); // Execute the update - 
db.update_worktree_roots(&WorkspaceId(2), &["/tmp2", "/tmp3"]); + db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); From 3451a3c7fe40234fec3db826993961f19ab1f816 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 15:52:38 -0700 Subject: [PATCH 19/86] Rebase - Got Zed compiling and fixed a build error due to conflicting dependencies that cargo didn't catch :( Co-Authored-By: kay@zed.dev --- Cargo.lock | 2 +- Cargo.toml | 1 + crates/auto_update/Cargo.toml | 1 + crates/auto_update/src/auto_update.rs | 14 +- crates/collab/src/integration_tests.rs | 37 ++- crates/collab_ui/src/collab_ui.rs | 7 +- crates/command_palette/src/command_palette.rs | 5 +- crates/db/Cargo.toml | 1 - crates/db/examples/serialize-pane.rs | 7 +- crates/db/src/items.rs | 216 +++++++++--------- crates/db/src/pane.rs | 32 +-- crates/diagnostics/src/diagnostics.rs | 10 +- .../src/test/editor_lsp_test_context.rs | 10 +- crates/file_finder/src/file_finder.rs | 30 ++- crates/project_panel/src/project_panel.rs | 20 +- .../src/tests/terminal_test_context.rs | 11 +- crates/vim/src/test/vim_test_context.rs | 10 +- crates/workspace/src/dock.rs | 5 +- crates/workspace/src/pane.rs | 15 +- crates/workspace/src/workspace.rs | 172 +++++++++----- crates/zed/src/main.rs | 13 +- crates/zed/src/zed.rs | 178 +++++++++------ 22 files changed, 486 insertions(+), 311 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0da4d177101cce83fd7dde9aed4439c278550b68..2cc8063ca48188d52ffa352ec1b96a6060b882b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -428,6 +428,7 @@ version = "0.1.0" dependencies = [ "anyhow", "client", + "db", "gpui", "isahc", "lazy_static", @@ -1560,7 +1561,6 @@ dependencies = [ "rusqlite_migration", "serde", "serde_rusqlite", - "settings", "tempdir", ] diff --git a/Cargo.toml b/Cargo.toml index 
8e9814c4481c0472033d8818776c5edba946cf6b..a97f272e47ebd329b674544d0a27d639c94339de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,3 +81,4 @@ split-debuginfo = "unpacked" [profile.release] debug = true + diff --git a/crates/auto_update/Cargo.toml b/crates/auto_update/Cargo.toml index 944aa87ee55d3e25ced4246a92ee1c6548262672..b1ca061614b5ddb400a69b32a90e3faecec706c0 100644 --- a/crates/auto_update/Cargo.toml +++ b/crates/auto_update/Cargo.toml @@ -8,6 +8,7 @@ path = "src/auto_update.rs" doctest = false [dependencies] +db = { path = "../db" } client = { path = "../client" } gpui = { path = "../gpui" } menu = { path = "../menu" } diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index bda45053b1330377f4ac7bec66d80244a344774e..1baf609268abd5c41905d52f7e3ea22723d453e9 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -1,7 +1,8 @@ mod update_notification; use anyhow::{anyhow, Context, Result}; -use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN, ZED_SERVER_URL}; +use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN}; +use db::Db; use gpui::{ actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakViewHandle, @@ -55,11 +56,16 @@ impl Entity for AutoUpdater { type Event = (); } -pub fn init(db: project::Db, http_client: Arc, cx: &mut MutableAppContext) { +pub fn init( + db: Db, + http_client: Arc, + server_url: String, + cx: &mut MutableAppContext, +) { if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) { - let server_url = ZED_SERVER_URL.to_string(); + let server_url = server_url; let auto_updater = cx.add_model(|cx| { - let updater = AutoUpdater::new(version, db.clone(), http_client, server_url.clone()); + let updater = AutoUpdater::new(version, db, http_client, server_url.clone()); updater.start_polling(cx).detach(); updater }); diff --git 
a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 3e0b2171a87d19a9f55ffb8c9c4bfc00fb63af02..5de28f1c65480ab495647769408ffe0ff611ad32 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -905,8 +905,14 @@ async fn test_host_disconnect( let project_b = client_b.build_remote_project(project_id, cx_b).await; assert!(worktree_a.read_with(cx_a, |tree, _| tree.as_local().unwrap().is_shared())); - let (_, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, "b.txt"), None, true, cx) @@ -3701,8 +3707,14 @@ async fn test_collaborating_with_code_actions( // Join the project as client B. let project_b = client_b.build_remote_project(project_id, cx_b).await; - let (_window_b, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_window_b, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, "main.rs"), None, true, cx) @@ -3922,8 +3934,14 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T .unwrap(); let project_b = client_b.build_remote_project(project_id, cx_b).await; - let (_window_b, workspace_b) = - cx_b.add_window(|cx| Workspace::new(project_b.clone(), |_, _| unimplemented!(), cx)); + let (_window_b, workspace_b) = cx_b.add_window(|cx| { + Workspace::new( + Default::default(), + project_b.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let editor_b = workspace_b .update(cx_b, |workspace, cx| { workspace.open_path((worktree_id, 
"one.rs"), None, true, cx) @@ -6054,7 +6072,12 @@ impl TestClient { ) -> ViewHandle { let (_, root_view) = cx.add_window(|_| EmptyView); cx.add_view(&root_view, |cx| { - Workspace::new(project.clone(), |_, _| unimplemented!(), cx) + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) }) } diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index f5f508ce5b167059cf8c3fbaebcb0e1d5e80b996..3a20a2fc6930c9362fc1972889c32134240a3b55 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -51,7 +51,12 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { .await?; let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { - let mut workspace = Workspace::new(project, app_state.default_item_factory, cx); + let mut workspace = Workspace::new( + Default::default(), + project, + app_state.default_item_factory, + cx, + ); (app_state.initialize_workspace)(&mut workspace, &app_state, cx); workspace }); diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index b472da3bb530b8189e5d9b3c2b3852541273c4db..5af23b45d720ecfad4ed9faa3dd777d1238f2022 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -350,8 +350,9 @@ mod tests { }); let project = Project::test(app_state.fs.clone(), [], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let editor = cx.add_view(&workspace, |cx| { let mut editor = Editor::single_line(None, cx); editor.set_text("abc", cx); diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 64e86e0345dcbafcbc90d91ba828de7d84c953bf..9fad1aa39a6e85f9dee323fb927d1a65a9626c6a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ 
-13,7 +13,6 @@ test-support = [] [dependencies] collections = { path = "../collections" } gpui = { path = "../gpui" } -settings = { path = "../settings" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 9cf32dfd5792004329a2a65405b3c336c55edccb..fc420b866d6f1f7f008347369222ee0e8b09a182 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,7 +1,6 @@ -use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use std::{fs::File, path::Path}; -use db::pane::SerializedDockPane; -use settings::DockAnchor; +use db::pane::{DockAnchor, SerializedDockPane}; const TEST_FILE: &'static str = "test-db.db"; @@ -25,7 +24,7 @@ fn main() -> anyhow::Result<()> { shown: true, }); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + let _new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index 7bd4c27f432e399d0ac2bd1cdc55ff122c07410c..a6497903acc7db85922675262c0d8adee464180b 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -1,69 +1,69 @@ -use std::{ - ffi::OsStr, - fmt::Display, - hash::Hash, - os::unix::prelude::OsStrExt, - path::{Path, PathBuf}, - sync::Arc, -}; - -use anyhow::Result; -use collections::HashSet; -use rusqlite::{named_params, params, types::FromSql}; - -use crate::workspace::WorkspaceId; - -use super::Db; - -/// Current design makes the cut at the item level, -/// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate -/// - items table, with a kind, and an integer that acts as a key to one of these other tables -/// This column is a foreign key to ONE OF: editors, terminals, searches -/// - - -// (workspace_id, item_id) -// kind -> ::Editor:: - -// -> -// At the workspace level -// -> (Workspace_ID, item_id) -// -> One shot, big query, load everything up: - 
-// -> SerializedWorkspace::deserialize(tx, itemKey) -// -> SerializedEditor::deserialize(tx, itemKey) - -// -> -// -> Workspace::new(SerializedWorkspace) -// -> Editor::new(serialized_workspace[???]serializedEditor) - -// //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. for loading from a network or RocksDB) -// //Cons: DB has to know the internals of the entire rest of the app - -// Workspace -// Worktree roots -// Pane groups -// Dock -// Items -// Sidebars - -// Things I'm doing: finding about nullability for foreign keys -pub(crate) const ITEMS_M_1: &str = " -CREATE TABLE project_searches( - workspace_id INTEGER, - item_id INTEGER, - query TEXT, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; - -CREATE TABLE editors( - workspace_id INTEGER, - item_id INTEGER, - path BLOB NOT NULL, - PRIMARY KEY (workspace_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -) STRICT; -"; +// use std::{ +// ffi::OsStr, +// fmt::Display, +// hash::Hash, +// os::unix::prelude::OsStrExt, +// path::{Path, PathBuf}, +// sync::Arc, +// }; + +// use anyhow::Result; +// use collections::HashSet; +// use rusqlite::{named_params, params, types::FromSql}; + +// use crate::workspace::WorkspaceId; + +// use super::Db; + +// /// Current design makes the cut at the item level, +// /// - Maybe A little more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate +// /// - items table, with a kind, and an integer that acts as a key to one of these other tables +// /// This column is a foreign key to ONE OF: editors, terminals, searches +// /// - + +// // (workspace_id, item_id) +// // kind -> ::Editor:: + +// // -> +// // At the workspace level +// // -> (Workspace_ID, item_id) +// // -> One shot, big query, load everything up: + +// // -> SerializedWorkspace::deserialize(tx, itemKey) +// // -> SerializedEditor::deserialize(tx, itemKey) + +// // -> 
+// // -> Workspace::new(SerializedWorkspace) +// // -> Editor::new(serialized_workspace[???]serializedEditor) + +// // //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. for loading from a network or RocksDB) +// // //Cons: DB has to know the internals of the entire rest of the app + +// // Workspace +// // Worktree roots +// // Pane groups +// // Dock +// // Items +// // Sidebars + +// // Things I'm doing: finding about nullability for foreign keys +// pub(crate) const ITEMS_M_1: &str = " +// CREATE TABLE project_searches( +// workspace_id INTEGER, +// item_id INTEGER, +// query TEXT, +// PRIMARY KEY (workspace_id, item_id) +// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +// ) STRICT; + +// CREATE TABLE editors( +// workspace_id INTEGER, +// item_id INTEGER, +// path BLOB NOT NULL, +// PRIMARY KEY (workspace_id, item_id) +// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) +// ) STRICT; +// "; #[derive(Debug, PartialEq, Eq)] pub struct ItemId { @@ -71,45 +71,45 @@ pub struct ItemId { item_id: usize, } -enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, -} - -struct SerializedItemRow { - kind: SerializedItemKind, - item_id: usize, - path: Option>, - query: Option, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, -} - -impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. 
} => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } -} - -impl Db { - pub fn get_item(&self, item_id: ItemId) -> SerializedItem { - unimplemented!() - } - - pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} - - pub fn close_item(&self, item_id: ItemId) {} -} +// enum SerializedItemKind { +// Editor, +// Diagnostics, +// ProjectSearch, +// Terminal, +// } + +// struct SerializedItemRow { +// kind: SerializedItemKind, +// item_id: usize, +// path: Option>, +// query: Option, +// } + +// #[derive(Debug, PartialEq, Eq)] +// pub enum SerializedItem { +// Editor { item_id: usize, path: Arc }, +// Diagnostics { item_id: usize }, +// ProjectSearch { item_id: usize, query: String }, +// Terminal { item_id: usize }, +// } + +// impl SerializedItem { +// pub fn item_id(&self) -> usize { +// match self { +// SerializedItem::Editor { item_id, .. } => *item_id, +// SerializedItem::Diagnostics { item_id } => *item_id, +// SerializedItem::ProjectSearch { item_id, .. 
} => *item_id, +// SerializedItem::Terminal { item_id } => *item_id, +// } +// } +// } + +// impl Db { +// pub fn get_item(&self, item_id: ItemId) -> SerializedItem { +// unimplemented!() +// } + +// pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} + +// pub fn close_item(&self, item_id: ItemId) {} +// } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 37f91c58a37b897a1d14c1bab44cb17322c1b1d0..447b5eed879dc30a4510a2ef53855cac9db2c423 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,5 +1,4 @@ use gpui::Axis; -use settings::DockAnchor; use crate::{items::ItemId, workspace::WorkspaceId}; @@ -32,7 +31,7 @@ pub struct PaneGroupId { } impl PaneGroupId { - pub(crate) fn root(workspace_id: WorkspaceId) -> Self { + pub fn root(workspace_id: WorkspaceId) -> Self { Self { workspace_id, group_id: 0, @@ -48,7 +47,7 @@ pub struct SerializedPaneGroup { } impl SerializedPaneGroup { - pub(crate) fn empty_root(workspace_id: WorkspaceId) -> Self { + pub fn empty_root(workspace_id: WorkspaceId) -> Self { Self { group_id: PaneGroupId::root(workspace_id), axis: Default::default(), @@ -136,6 +135,14 @@ CREATE TABLE dock_items( COMMIT; "; +#[derive(Default, Debug)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + #[derive(Default, Debug)] pub struct SerializedDockPane { pub workspace: WorkspaceId, @@ -144,7 +151,7 @@ pub struct SerializedDockPane { } impl Db { - pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); for child_row in self.get_pane_group_children(pane_group_id) { @@ -177,40 +184,39 @@ impl Db { fn get_pane_group_children( &self, - pane_group_id: PaneGroupId, + _pane_group_id: PaneGroupId, ) -> impl Iterator { Vec::new().into_iter() } - fn 
get_pane_group_axis(&self, pane_group_id: PaneGroupId) -> Axis { + fn get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub fn save_pane_splits(&self, center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits(&self, _center_pane_group: SerializedPaneGroup) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them // Items garbage collect themselves when dropped } - pub(crate) fn get_pane(&self, pane_id: PaneId) -> SerializedPane { + pub(crate) fn get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } - pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + pub fn get_dock_pane(&self, _workspace: WorkspaceId) -> Option { unimplemented!() } - pub fn save_dock_pane(&self, dock_pane: SerializedDockPane) {} + pub fn save_dock_pane(&self, _dock_pane: SerializedDockPane) {} } #[cfg(test)] mod tests { - use settings::DockAnchor; use crate::Db; - use super::SerializedDockPane; + use super::{DockAnchor, SerializedDockPane}; #[test] fn test_basic_dock_pane() { @@ -226,6 +232,6 @@ mod tests { shown: true, }); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + let _new_workspace = db.workspace_for_roots(&["/tmp"]); } } diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 6ff74901811eba2f29a2430d63525006a7e6515c..078d83ac6112830db120fee5fd74e03bfb94eb8b 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -781,8 +781,14 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/test".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); // Create some diagnostics project.update(cx, |project, 
cx| { diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 69205e1991a358b51943150693457ad844cba8e0..9cf305ad37540230e5648f68e090be8a03941c14 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -63,8 +63,14 @@ impl<'a> EditorLspTestContext<'a> { .insert_tree("/root", json!({ "dir": { file_name: "" }})) .await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); project .update(cx, |project, cx| { project.find_or_create_local_worktree("/root", true, cx) diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index c6d4a8f121c6c5dc0916de660a4032efcfd0c175..b0016002fa9f57d6df60a36a95669ed77728c58b 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -316,8 +316,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); cx.dispatch_action(window_id, Toggle); let finder = cx.read(|cx| workspace.read(cx).modal::().unwrap()); @@ -371,8 +372,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/dir".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ 
-446,8 +448,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); finder @@ -471,8 +474,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -524,8 +528,9 @@ mod tests { cx, ) .await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -563,8 +568,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); finder diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index b6787c930c1cc3bab3cd201db0b1dd6649d5256e..dae1f70aae02344d4437e3c98ec72d3f61858758 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1393,8 +1393,14 @@ mod tests { .await; let project = Project::test(fs.clone(), ["/root1".as_ref(), "/root2".as_ref()], cx).await; - let (_, workspace) 
= - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let panel = workspace.update(cx, |_, cx| ProjectPanel::new(project, cx)); assert_eq!( visible_entries_as_strings(&panel, 0..50, cx), @@ -1486,8 +1492,14 @@ mod tests { .await; let project = Project::test(fs.clone(), ["/root1".as_ref(), "/root2".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let panel = workspace.update(cx, |_, cx| ProjectPanel::new(project, cx)); select_path(&panel, "root1", cx); diff --git a/crates/terminal/src/tests/terminal_test_context.rs b/crates/terminal/src/tests/terminal_test_context.rs index 3e3d1243d5fbed3c3bb407a32684b6d85857deeb..352ce4a0d2707e2e9a89f04339e2fa8fcdd55690 100644 --- a/crates/terminal/src/tests/terminal_test_context.rs +++ b/crates/terminal/src/tests/terminal_test_context.rs @@ -28,9 +28,14 @@ impl<'a> TerminalTestContext<'a> { let params = self.cx.update(AppState::test); let project = Project::test(params.fs.clone(), [], self.cx).await; - let (_, workspace) = self - .cx - .add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = self.cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); (project, workspace) } diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 1aeba9fd0809a455a18ae669937da92a9ffe9caa..68c08f2f7afa55654aa161176b7e8fba7a4f217f 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -41,8 +41,14 @@ impl<'a> VimTestContext<'a> { .insert_tree("/root", 
json!({ "dir": { "test.txt": "" } })) .await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); // Setup search toolbars workspace.update(cx, |workspace, cx| { diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 699b9b1d6011a4f074c5400633e7121b6b0727c6..5f471ff018361648ff01819f14f86ee8fa5b85de 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -568,8 +568,9 @@ mod tests { cx.update(|cx| init(cx)); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, default_item_factory, cx) + }); workspace.update(cx, |workspace, cx| { let left_panel = cx.add_view(|_| TestItem::new()); diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 8dd97e230fc85f86d1c5572cdd86eeace2ea3f95..01313f2046d5c707259369e732079662b057afdd 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1645,8 +1645,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // 1. 
Add with a destination index @@ -1734,8 +1735,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // 1. Add with a destination index @@ -1811,8 +1813,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); // singleton view diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 7f82a46edf19b44f0e1e8d410caac879c4c1fde6..a6ef7c6c01fdb56178ca692f07c1bf85c637bfa2 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1073,7 +1073,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - db_id: WorkspaceId, + _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1217,7 +1217,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - db_id: serialized_workspace.workspace_id, + _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array @@ -1250,16 +1250,14 @@ impl Workspace { this } - fn new_local( - abs_paths: &[PathBuf], - app_state: &Arc, + fn new_local( + abs_paths: Vec, + app_state: Arc, cx: &mut MutableAppContext, - callback: F, - ) -> Task - where - T: 'static, - F: 'static + FnOnce(&mut Workspace, &mut 
ViewContext) -> T, - { + ) -> Task<( + ViewHandle, + Vec, Arc>>>, + )> { let project_handle = Project::local( app_state.client.clone(), app_state.user_store.clone(), @@ -1273,21 +1271,25 @@ impl Workspace { // Get project paths for all of the abs_paths let mut worktree_roots: HashSet> = Default::default(); let mut project_paths = Vec::new(); - for path in abs_paths { + for path in abs_paths.iter() { if let Some((worktree, project_entry)) = cx - .update(|cx| Workspace::project_path_for_path(project_handle, path, true, cx)) + .update(|cx| { + Workspace::project_path_for_path(project_handle.clone(), &path, true, cx) + }) .await .log_err() { worktree_roots.insert(worktree.read_with(&mut cx, |tree, _| tree.abs_path())); - project_paths.push(project_entry); + project_paths.push(Some(project_entry)); + } else { + project_paths.push(None); } } // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { cx.global::() - .workspace_for_worktree_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); // Use the serialized workspace to construct the new window @@ -1303,18 +1305,36 @@ impl Workspace { }); // Call open path for each of the project paths - // (this will bring them to the front if they were in kthe serialized workspace) - let tasks = workspace.update(&mut cx, |workspace, cx| { - let tasks = Vec::new(); - for path in project_paths { - tasks.push(workspace.open_path(path, true, cx)); - } - tasks - }); - futures::future::join_all(tasks.into_iter()).await; + // (this will bring them to the front if they were in the serialized workspace) + debug_assert!(abs_paths.len() == project_paths.len()); + let tasks = abs_paths + .iter() + .cloned() + .zip(project_paths.into_iter()) + .map(|(abs_path, project_path)| { + let workspace = workspace.clone(); + cx.spawn(|mut cx| { + let fs = app_state.fs.clone(); + async move { + let project_path = 
project_path?; + if fs.is_file(&abs_path).await { + Some( + workspace + .update(&mut cx, |workspace, cx| { + workspace.open_path(project_path, true, cx) + }) + .await, + ) + } else { + None + } + } + }) + }); + + let opened_items = futures::future::join_all(tasks.into_iter()).await; - // Finally call callback on the workspace - workspace.update(&mut cx, |workspace, cx| callback(workspace, cx)) + (workspace, opened_items) }) } @@ -1371,12 +1391,16 @@ impl Workspace { ) -> Task where T: 'static, - F: FnOnce(&mut Workspace, &mut ViewContext) -> T, + F: 'static + FnOnce(&mut Workspace, &mut ViewContext) -> T, { if self.project.read(cx).is_local() { Task::Ready(Some(callback(self, cx))) } else { - Self::new_local(&[], app_state, cx, callback) + let task = Self::new_local(Vec::new(), app_state.clone(), cx); + cx.spawn(|_vh, mut cx| async move { + let (workspace, _) = task.await; + workspace.update(&mut cx, callback) + }) } } @@ -1539,7 +1563,7 @@ impl Workspace { for path in &abs_paths { project_paths.push( this.update(&mut cx, |this, cx| { - Workspace::project_path_for_path(this.project, path, visible, cx) + Workspace::project_path_for_path(this.project.clone(), path, visible, cx) }) .await .log_err(), @@ -3017,8 +3041,15 @@ pub fn open_paths( let app_state = app_state.clone(); let abs_paths = abs_paths.to_vec(); cx.spawn(|mut cx| async move { - let workspace = if let Some(existing) = existing { - existing + if let Some(existing) = existing { + ( + existing.clone(), + existing + .update(&mut cx, |workspace, cx| { + workspace.open_paths(abs_paths, true, cx) + }) + .await, + ) } else { let contains_directory = futures::future::join_all(abs_paths.iter().map(|path| app_state.fs.is_file(path))) @@ -3026,28 +3057,32 @@ pub fn open_paths( .contains(&false); cx.update(|cx| { - Workspace::new_local(&abs_paths[..], &app_state, cx, move |workspace, cx| { - if contains_directory { - workspace.toggle_sidebar(SidebarSide::Left, cx); - } - cx.handle() + let task = 
Workspace::new_local(abs_paths, app_state.clone(), cx); + + cx.spawn(|mut cx| async move { + let (workspace, items) = task.await; + + workspace.update(&mut cx, |workspace, cx| { + if contains_directory { + workspace.toggle_sidebar(SidebarSide::Left, cx); + } + }); + + (workspace, items) }) }) .await - }; - - let items = workspace - .update(&mut cx, |workspace, cx| { - workspace.open_paths(abs_paths, true, cx) - }) - .await; - - (workspace, items) + } }) } fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { - Workspace::new_local(&[], app_state, cx, |_, cx| cx.dispatch_action(NewFile)) + let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); + cx.spawn(|mut cx| async move { + let (workspace, _) = task.await; + + workspace.update(&mut cx, |_, cx| cx.dispatch_action(NewFile)) + }) } #[cfg(test)] @@ -3076,8 +3111,14 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); // Adding an item with no ambiguity renders the tab without detail. 
let item1 = cx.add_view(&workspace, |_| { @@ -3141,8 +3182,14 @@ mod tests { .await; let project = Project::test(fs, ["root1".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); let worktree_id = project.read_with(cx, |project, cx| { project.worktrees(cx).next().unwrap().read(cx).id() }); @@ -3238,8 +3285,14 @@ mod tests { fs.insert_tree("/root", json!({ "one": "" })).await; let project = Project::test(fs, ["root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + default_item_factory, + cx, + ) + }); // When there are no dirty items, there's nothing to do. let item1 = cx.add_view(&workspace, |_| TestItem::new()); @@ -3279,8 +3332,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item1 = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3375,8 +3428,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); // Create several workspace items with single project entries, and two // workspace items with multiple project entries. 
@@ -3477,8 +3530,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (window_id, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3595,7 +3648,8 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = cx.add_window(|cx| Workspace::new(project, default_item_factory, cx)); + let (_, workspace) = cx + .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index c6862e66e41e01d3a51ffa068ed7a00e5e09d6fa..84d18ba22f7a8b7d11322d50e2eb706802139e86 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -23,7 +23,7 @@ use isahc::{config::Configurable, Request}; use language::LanguageRegistry; use log::LevelFilter; use parking_lot::Mutex; -use project::{Fs, HomeDir, ProjectStore}; +use project::{Db, Fs, HomeDir, ProjectStore}; use serde_json::json; use settings::{ self, settings_file::SettingsFile, KeymapFileContent, Settings, SettingsFileContent, @@ -148,7 +148,9 @@ fn main() { let project_store = cx.add_model(|_| ProjectStore::new()); let db = cx.background().block(db); - client.start_telemetry(db.clone()); + cx.set_global(db); + + client.start_telemetry(cx.global::().clone()); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -162,7 +164,12 @@ fn main() { initialize_workspace, default_item_factory, }); - auto_update::init(db, http, cx); + auto_update::init( + cx.global::().clone(), + http, + client::ZED_SERVER_URL.clone(), + cx, + ); workspace::init(app_state.clone(), cx); 
journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 71a99cb3b21c4690524e35cf28e6faa0add81d9c..de785ca9783f3a6f637a434c744b42d4ccb6bf41 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -463,10 +463,11 @@ fn open_config_file( workspace .update(&mut cx, |workspace, cx| { - workspace.with_local_workspace(app_state, cx, |workspace, cx| { + workspace.with_local_workspace(&app_state, cx, |workspace, cx| { workspace.open_paths(vec![path.to_path_buf()], false, cx) }) }) + .await .await; Ok::<_, anyhow::Error>(()) }) @@ -480,51 +481,55 @@ fn open_log_file( ) { const MAX_LINES: usize = 1000; - workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { - cx.spawn_weak(|workspace, mut cx| async move { - let (old_log, new_log) = futures::join!( - app_state.fs.load(&paths::OLD_LOG), - app_state.fs.load(&paths::LOG) - ); + workspace + .with_local_workspace(&app_state.clone(), cx, move |_, cx| { + cx.spawn_weak(|workspace, mut cx| async move { + let (old_log, new_log) = futures::join!( + app_state.fs.load(&paths::OLD_LOG), + app_state.fs.load(&paths::LOG) + ); - if let Some(workspace) = workspace.upgrade(&cx) { - let mut lines = VecDeque::with_capacity(MAX_LINES); - for line in old_log - .iter() - .flat_map(|log| log.lines()) - .chain(new_log.iter().flat_map(|log| log.lines())) - { - if lines.len() == MAX_LINES { - lines.pop_front(); + if let Some(workspace) = workspace.upgrade(&cx) { + let mut lines = VecDeque::with_capacity(MAX_LINES); + for line in old_log + .iter() + .flat_map(|log| log.lines()) + .chain(new_log.iter().flat_map(|log| log.lines())) + { + if lines.len() == MAX_LINES { + lines.pop_front(); + } + lines.push_back(line); } - lines.push_back(line); - } - let log = lines - .into_iter() - .flat_map(|line| [line, "\n"]) - .collect::(); - - workspace.update(&mut cx, |workspace, cx| { - let project = workspace.project().clone(); - let buffer = project - 
.update(cx, |project, cx| project.create_buffer("", None, cx)) - .expect("creating buffers on a local workspace always succeeds"); - buffer.update(cx, |buffer, cx| buffer.edit([(0..0, log)], None, cx)); - - let buffer = cx.add_model(|cx| { - MultiBuffer::singleton(buffer, cx).with_title("Log".into()) + let log = lines + .into_iter() + .flat_map(|line| [line, "\n"]) + .collect::(); + + workspace.update(&mut cx, |workspace, cx| { + let project = workspace.project().clone(); + let buffer = project + .update(cx, |project, cx| project.create_buffer("", None, cx)) + .expect("creating buffers on a local workspace always succeeds"); + buffer.update(cx, |buffer, cx| buffer.edit([(0..0, log)], None, cx)); + + let buffer = cx.add_model(|cx| { + MultiBuffer::singleton(buffer, cx).with_title("Log".into()) + }); + workspace.add_item( + Box::new( + cx.add_view(|cx| { + Editor::for_multibuffer(buffer, Some(project), cx) + }), + ), + cx, + ); }); - workspace.add_item( - Box::new( - cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project), cx)), - ), - cx, - ); - }); - } + } + }) + .detach(); }) .detach(); - }); } fn open_telemetry_log_file( @@ -532,7 +537,7 @@ fn open_telemetry_log_file( app_state: Arc, cx: &mut ViewContext, ) { - workspace.with_local_workspace(app_state.clone(), cx, |_, cx| { + workspace.with_local_workspace(&app_state.clone(), cx, move |_, cx| { cx.spawn_weak(|workspace, mut cx| async move { let workspace = workspace.upgrade(&cx)?; let path = app_state.client.telemetry_log_file_path()?; @@ -580,31 +585,36 @@ fn open_telemetry_log_file( Some(()) }) .detach(); - }); + }).detach(); } fn open_bundled_config_file( workspace: &mut Workspace, app_state: Arc, asset_path: &'static str, - title: &str, + title: &'static str, cx: &mut ViewContext, ) { - workspace.with_local_workspace(cx, app_state, |workspace, cx| { - let project = workspace.project().clone(); - let buffer = project.update(cx, |project, cx| { - let text = Assets::get(asset_path).unwrap().data; - let 
text = str::from_utf8(text.as_ref()).unwrap(); - project - .create_buffer(text, project.languages().get_language("JSON"), cx) - .expect("creating buffers on a local workspace always succeeds") - }); - let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx).with_title(title.into())); - workspace.add_item( - Box::new(cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project.clone()), cx))), - cx, - ); - }); + workspace + .with_local_workspace(&app_state.clone(), cx, |workspace, cx| { + let project = workspace.project().clone(); + let buffer = project.update(cx, |project, cx| { + let text = Assets::get(asset_path).unwrap().data; + let text = str::from_utf8(text.as_ref()).unwrap(); + project + .create_buffer(text, project.languages().get_language("JSON"), cx) + .expect("creating buffers on a local workspace always succeeds") + }); + let buffer = + cx.add_model(|cx| MultiBuffer::singleton(buffer, cx).with_title(title.into())); + workspace.add_item( + Box::new( + cx.add_view(|cx| Editor::for_multibuffer(buffer, Some(project.clone()), cx)), + ), + cx, + ); + }) + .detach(); } fn schema_file_match(path: &Path) -> &Path { @@ -808,8 +818,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -928,8 +939,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/dir1".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Open a file within an existing worktree. 
cx.update(|cx| { @@ -1088,8 +1100,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Open a file within an existing worktree. cx.update(|cx| { @@ -1131,8 +1144,9 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let worktree = cx.read(|cx| workspace.read(cx).worktrees(cx).next().unwrap()); // Create a new untitled buffer @@ -1221,8 +1235,9 @@ mod tests { let project = Project::test(app_state.fs.clone(), [], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); // Create a new untitled buffer cx.dispatch_action(window_id, NewFile); @@ -1275,8 +1290,9 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (window_id, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -1350,8 +1366,14 @@ mod tests { .await; let project = 
Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); let file1 = entries[0].clone(); @@ -1615,8 +1637,14 @@ mod tests { .await; let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; - let (_, workspace) = - cx.add_window(|cx| Workspace::new(project.clone(), |_, _| unimplemented!(), cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new( + Default::default(), + project.clone(), + |_, _| unimplemented!(), + cx, + ) + }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); let entries = cx.read(|cx| workspace.file_project_paths(cx)); From ddecba143f6cc99c7dd14f7ea1d71e70ccce64da Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 16:02:14 -0700 Subject: [PATCH 20/86] Refactored workspaces API and corrected method headers + fixed bug caused by migration failures co-authored-by: kay@zed.dev --- crates/db/examples/serialize-pane.rs | 4 +--- crates/db/examples/serialize_workspace.rs | 24 +++++++---------------- crates/db/src/kvp.rs | 4 ---- crates/db/src/pane.rs | 8 +------- crates/db/src/workspace.rs | 10 +++------- 5 files changed, 12 insertions(+), 38 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index fc420b866d6f1f7f008347369222ee0e8b09a182..b0744aa60408c60ccf3b1ba67e333d6d739fcd5d 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -14,9 +14,7 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.make_new_workspace::(&[]); - - db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + let workspace = 
db.workspace_for_roots(&["/tmp"]); db.save_dock_pane(SerializedDockPane { workspace: workspace.workspace_id, diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 97d50bbe5bc240e97b349b92e454d7a0f225f8a0..5a3f2a216049de5e6009cd0403cf230e26c6e0e8 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -15,29 +15,19 @@ fn main() -> anyhow::Result<()> { db.write_kvp("test", "1")?; db.write_kvp("test-2", "2")?; - let workspace_1 = db.make_new_workspace::(&[]); - let workspace_2 = db.make_new_workspace::(&[]); - let workspace_3 = db.make_new_workspace::(&[]); - let workspace_4 = db.make_new_workspace::(&[]); - let workspace_5 = db.make_new_workspace::(&[]); - let workspace_6 = db.make_new_workspace::(&[]); - let workspace_7 = db.make_new_workspace::(&[]); - - // Order scrambled + sleeps added because sqlite only has 1 second resolution on - // their timestamps - db.update_worktrees(&workspace_7.workspace_id, &["/tmp2"]); + db.workspace_for_roots(&["/tmp1"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_1.workspace_id, &["/tmp1"]); + db.workspace_for_roots(&["/tmp1", "/tmp2"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_2.workspace_id, &["/tmp1", "/tmp2"]); + db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_3.workspace_id, &["/tmp1", "/tmp2", "/tmp3"]); + db.workspace_for_roots(&["/tmp2", "/tmp3"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_4.workspace_id, &["/tmp2", "/tmp3"]); + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_5.workspace_id, &["/tmp2", "/tmp3", "/tmp4"]); + db.workspace_for_roots(&["/tmp2", "/tmp4"]); sleep(Duration::from_secs(1)); - db.update_worktrees(&workspace_6.workspace_id, &["/tmp2", "/tmp4"]); + db.workspace_for_roots(&["/tmp2"]); 
db.write_to(file).ok(); diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index eecd0238ca2b8975ed644411e45e5ad6bfe87ce8..96f13d8040bf6e289711b46462ccf88d1eafc735 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -4,14 +4,10 @@ use rusqlite::OptionalExtension; use super::Db; pub(crate) const KVP_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; - -COMMIT; "; impl Db { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 447b5eed879dc30a4510a2ef53855cac9db2c423..23423ed6f6f129396620ad88b790ad65773c8a6e 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -75,8 +75,6 @@ pub struct SerializedPane { } pub(crate) const PANE_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -131,8 +129,6 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; - -COMMIT; "; #[derive(Default, Debug)] @@ -222,9 +218,7 @@ mod tests { fn test_basic_dock_pane() { let db = Db::open_in_memory(); - let workspace = db.make_new_workspace::(&[]); - - db.update_worktrees(&workspace.workspace_id, &["/tmp"]); + let workspace = db.workspace_for_roots(&["/tmp"]); db.save_dock_pane(SerializedDockPane { workspace: workspace.workspace_id, diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 0d8dae59ef9edbfc6b26c95eedb2bec596f02453..cb2d4296c16d886cc9f0ca06e47fa688312c9a57 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -17,8 +17,6 @@ use super::Db; // you might want to update some of the parsing code as well, I've left the variations in but commented // out pub(crate) const WORKSPACE_M_1: &str = " -BEGIN TRANSACTION; - CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL @@ -30,8 +28,6 @@ CREATE 
TABLE worktree_roots( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) ) STRICT; - -COMMIT; "; #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] @@ -68,7 +64,7 @@ impl Db { } } - pub fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace + fn make_new_workspace
<P>
(&self, worktree_roots: &[P]) -> SerializedWorkspace where P: AsRef + Debug, { @@ -158,7 +154,7 @@ impl Db { }); } - pub fn last_workspace_id(&self) -> Option { + fn last_workspace_id(&self) -> Option { fn logic(connection: &mut Connection) -> Result> { let mut stmt = connection .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; @@ -432,7 +428,7 @@ mod tests { use super::WorkspaceId; #[test] - fn test_worktree_for_roots() { + fn test_new_worktrees_for_roots() { let db = Db::open_in_memory(); // Test creation in 0 case From c105f414876d116db6a9ec311dd0071568b4241e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 27 Oct 2022 16:37:54 -0700 Subject: [PATCH 21/86] Started working on dock panes co-authored-by: kay@zed.dev --- crates/db/examples/serialize-pane.rs | 22 +++- crates/db/src/pane.rs | 158 +++++++++++++++------------ crates/db/src/workspace.rs | 3 +- 3 files changed, 109 insertions(+), 74 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index b0744aa60408c60ccf3b1ba67e333d6d739fcd5d..9448336be9b43cd65e1ebb83a772b0d6cb740fc6 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -14,15 +14,25 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace = db.workspace_for_roots(&["/tmp"]); + let workspace_1 = db.workspace_for_roots(&["/tmp"]); + let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - db.save_dock_pane(SerializedDockPane { - workspace: workspace.workspace_id, + db.save_dock_pane(&SerializedDockPane { + workspace_id: workspace_1.workspace_id, anchor_position: DockAnchor::Expanded, - shown: true, + visible: true, + }); + db.save_dock_pane(&SerializedDockPane { + workspace_id: workspace_2.workspace_id, + anchor_position: DockAnchor::Bottom, + visible: true, + }); + db.save_dock_pane(&SerializedDockPane { + workspace_id: 
workspace_3.workspace_id, + anchor_position: DockAnchor::Right, + visible: false, }); - - let _new_workspace = db.workspace_for_roots(&["/tmp"]); db.write_to(file).ok(); diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 23423ed6f6f129396620ad88b790ad65773c8a6e..9a7dfd3b6ef0f2fddf1aa5c382942d171c451649 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,9 +1,69 @@ use gpui::Axis; +use serde::{Deserialize, Serialize}; +use serde_rusqlite::to_params_named; + use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; +pub(crate) const PANE_M_1: &str = " +CREATE TABLE dock_panes( + dock_pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + visible INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE grouped_panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE items( + item_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE group_items( + workspace_id INTEGER NOT NULL, + pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, pane_id, item_id) + 
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE +) STRICT; + +CREATE TABLE dock_items( + workspace_id INTEGER NOT NULL, + dock_pane_id INTEGER NOT NULL, + item_id INTEGER NOT NULL, + idx INTEGER NOT NULL, + PRIMARY KEY (workspace_id, dock_pane_id, item_id) + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, + FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE +) STRICT; +"; + // We have an many-branched, unbalanced tree with three types: // Pane Groups // Panes @@ -74,64 +134,7 @@ pub struct SerializedPane { children: Vec, } -pub(crate) const PANE_M_1: &str = " -CREATE TABLE dock_panes( - dock_pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - shown INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE grouped_panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - group_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE items( - item_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - kind TEXT NOT NULL, - FOREIGN 
KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE group_items( - workspace_id INTEGER NOT NULL, - pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE dock_items( - workspace_id INTEGER NOT NULL, - dock_pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, dock_pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE -) STRICT; -"; - -#[derive(Default, Debug)] +#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum DockAnchor { #[default] Bottom, @@ -139,11 +142,11 @@ pub enum DockAnchor { Expanded, } -#[derive(Default, Debug)] +#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct SerializedDockPane { - pub workspace: WorkspaceId, + pub workspace_id: WorkspaceId, pub anchor_position: DockAnchor, - pub shown: bool, + pub visible: bool, } impl Db { @@ -204,7 +207,24 @@ impl Db { unimplemented!() } - pub fn save_dock_pane(&self, _dock_pane: SerializedDockPane) {} + pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { + to_params_named(dock_pane) + .map_err(|err| dbg!(err)) + .ok() + .zip(self.real()) + .map(|(params, db)| { + // TODO: overwrite old dock panes if need be + let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; + db.connection + .lock() + .execute(query, params.to_slice().as_slice()) + .map(|_| ()) // Eat the return 
value + .unwrap_or_else(|err| { + dbg!(&err); + log::error!("Failed to insert new workspace into DB: {}", err); + }) + }); + } } #[cfg(test)] @@ -220,12 +240,16 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); - db.save_dock_pane(SerializedDockPane { - workspace: workspace.workspace_id, + let dock_pane = SerializedDockPane { + workspace_id: workspace.workspace_id, anchor_position: DockAnchor::Expanded, - shown: true, - }); + visible: true, + }; + + db.save_dock_pane(&dock_pane); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); - let _new_workspace = db.workspace_for_roots(&["/tmp"]); + assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cb2d4296c16d886cc9f0ca06e47fa688312c9a57..d7532b684eb7f1d2a8be3b95777db80f5bed628a 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,5 +1,6 @@ use anyhow::Result; use rusqlite::{params, Connection, OptionalExtension}; +use serde::{Deserialize, Serialize}; use std::{ ffi::OsStr, @@ -30,7 +31,7 @@ CREATE TABLE worktree_roots( ) STRICT; "; -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] pub struct WorkspaceId(i64); #[derive(Default, Debug)] From e6ca0adbcba8e1724703d690622405b389b30b35 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 31 Oct 2022 12:47:13 -0700 Subject: [PATCH 22/86] Fixed failing serialization issues --- crates/db/Cargo.toml | 1 + crates/db/examples/serialize-pane.rs | 6 ++- crates/db/examples/serialize_workspace.rs | 9 +--- crates/db/src/pane.rs | 6 +-- crates/db/src/workspace.rs | 60 ++++++++++++++++------ crates/db/test.db | Bin 0 -> 57344 bytes 6 files changed, 54 insertions(+), 28 deletions(-) create mode 100644 crates/db/test.db diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 9fad1aa39a6e85f9dee323fb927d1a65a9626c6a..a2ac2a9fc528a355baebf8a255bd0973e8a74b98 100644 --- 
a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -26,3 +26,4 @@ serde_rusqlite = "0.31.0" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } tempdir = { version = "0.3.7" } +env_logger = "0.9.1" \ No newline at end of file diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 9448336be9b43cd65e1ebb83a772b0d6cb740fc6..59ad60a6f4423c5da23e5525d7f73860668cdc81 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -5,6 +5,8 @@ use db::pane::{DockAnchor, SerializedDockPane}; const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { + env_logger::init(); + let db = db::Db::open_in_memory(); if db.real().is_none() { return Err(anyhow::anyhow!("Migrations failed")); @@ -17,6 +19,8 @@ fn main() -> anyhow::Result<()> { let workspace_1 = db.workspace_for_roots(&["/tmp"]); let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); + dbg!(&workspace_1, &workspace_2, &workspace_3); + db.write_to(file).ok(); db.save_dock_pane(&SerializedDockPane { workspace_id: workspace_1.workspace_id, @@ -34,7 +38,7 @@ fn main() -> anyhow::Result<()> { visible: false, }); - db.write_to(file).ok(); + // db.write_to(file).ok(); println!("Wrote database!"); diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 5a3f2a216049de5e6009cd0403cf230e26c6e0e8..4010c7797663b856b829a8b1b5d0310e277ecbec 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -1,8 +1,9 @@ -use std::{fs::File, path::Path, thread::sleep, time::Duration}; +use std::{fs::File, path::Path}; const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { + env_logger::init(); let db = db::Db::open_in_memory(); if db.real().is_none() { return Err(anyhow::anyhow!("Migrations failed")); @@ -16,17 +17,11 @@ fn main() -> 
anyhow::Result<()> { db.write_kvp("test-2", "2")?; db.workspace_for_roots(&["/tmp1"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp1", "/tmp2"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp3"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2", "/tmp4"]); - sleep(Duration::from_secs(1)); db.workspace_for_roots(&["/tmp2"]); db.write_to(file).ok(); diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 9a7dfd3b6ef0f2fddf1aa5c382942d171c451649..51d8e5ad5b2faf91af3150c4ab35d07d4e2b8f8a 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -204,12 +204,11 @@ impl Db { } pub fn get_dock_pane(&self, _workspace: WorkspaceId) -> Option { - unimplemented!() + None } pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { to_params_named(dock_pane) - .map_err(|err| dbg!(err)) .ok() .zip(self.real()) .map(|(params, db)| { @@ -220,8 +219,7 @@ impl Db { .execute(query, params.to_slice().as_slice()) .map(|_| ()) // Eat the return value .unwrap_or_else(|err| { - dbg!(&err); - log::error!("Failed to insert new workspace into DB: {}", err); + log::error!("Failed to insert new dock pane into DB: {}", err); }) }); } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index d7532b684eb7f1d2a8be3b95777db80f5bed628a..5d84ecfccbc57914f8f4bbd2c607a1b46ab402ab 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,5 @@ use anyhow::Result; + use rusqlite::{params, Connection, OptionalExtension}; use serde::{Deserialize, Serialize}; @@ -8,6 +9,7 @@ use std::{ os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, + time::{SystemTime, UNIX_EPOCH}, }; use crate::pane::SerializedDockPane; @@ -20,7 +22,7 @@ use super::Db; pub(crate) const WORKSPACE_M_1: &str = " CREATE TABLE 
workspaces( workspace_id INTEGER PRIMARY KEY, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + last_opened_timestamp INTEGER NOT NULL ) STRICT; CREATE TABLE worktree_roots( @@ -77,12 +79,18 @@ impl Db { P: AsRef + Debug, { let tx = connection.transaction()?; - tx.execute("INSERT INTO workspaces DEFAULT VALUES", [])?; + + tx.execute( + "INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", + [current_millis()?], + )?; let id = WorkspaceId(tx.last_insert_rowid()); update_worktree_roots(&tx, &id, worktree_roots)?; + tx.commit()?; + Ok(SerializedWorkspace { workspace_id: id, dock_pane: None, @@ -116,7 +124,7 @@ impl Db { match get_workspace_id(worktree_roots, &lock) { Ok(workspace_id) => workspace_id, Err(err) => { - log::error!("Failed ot get workspace_id: {}", err); + log::error!("Failed to get workspace_id: {}", err); None } } @@ -135,15 +143,26 @@ impl Db { where P: AsRef + Debug, { + fn logic

( + connection: &mut Connection, + workspace_id: &WorkspaceId, + worktree_roots: &[P], + ) -> Result<()> + where + P: AsRef + Debug, + { + let tx = connection.transaction()?; + update_worktree_roots(&tx, workspace_id, worktree_roots)?; + tx.commit()?; + Ok(()) + } + self.real().map(|db| { let mut lock = db.connection.lock(); - let tx = lock.transaction(); - - match tx.map(|tx| update_worktree_roots(&tx, workspace_id, worktree_roots)) { + match logic(&mut lock, workspace_id, worktree_roots) { Ok(_) => {} Err(err) => { - dbg!(&err); log::error!( "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", workspace_id, @@ -157,8 +176,9 @@ impl Db { fn last_workspace_id(&self) -> Option { fn logic(connection: &mut Connection) -> Result> { - let mut stmt = connection - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")?; + let mut stmt = connection.prepare( + "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", + )?; Ok(stmt .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) @@ -189,7 +209,7 @@ impl Db { let tx = connection.transaction()?; let result = { let mut stmt = tx.prepare( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?", )?; let workspace_ids = stmt @@ -234,6 +254,12 @@ impl Db { } } +fn current_millis() -> Result { + // SQLite only supports u64 integers, which means this code will trigger + // undefined behavior in 584 million years. It's probably fine. + Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64) +} + fn update_worktree_roots

( connection: &Connection, workspace_id: &WorkspaceId, @@ -271,8 +297,8 @@ where } connection.execute( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - [workspace_id.0], + "UPDATE workspaces SET last_opened_timestamp = ? WHERE workspace_id = ?", + params![current_millis()?, workspace_id.0], )?; Ok(()) @@ -440,13 +466,17 @@ mod tests { let workspace_1 = db.workspace_for_roots::(&[]); assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - sleep(Duration::from_secs(1)); + // Ensure the timestamps are different + sleep(Duration::from_millis(20)); db.make_new_workspace::(&[]); // Test pulling another value from recent workspaces let workspace_2 = db.workspace_for_roots::(&[]); assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + // Ensure the timestamps are different + sleep(Duration::from_millis(20)); + // Test creating a new workspace that doesn't exist already let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); @@ -470,6 +500,7 @@ mod tests { db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); + db.write_to("test.db").unwrap(); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); @@ -584,9 +615,6 @@ mod tests { db.update_worktrees(workspace_id, entries); } - // Make sure the timestamp updates - sleep(Duration::from_secs(1)); - // Execute the update db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); diff --git a/crates/db/test.db b/crates/db/test.db new file mode 100644 index 0000000000000000000000000000000000000000..b3a78a995a034d7f6dde834e12a47313f8bde62c GIT binary patch literal 57344 zcmeI)U2oe|7{GBmY4f(WsnltfkSd+3LT#jus!ZI#4Q=O@ijqPa7`w7eV^-6|&SJL| z?)n0J0lpAdNPGjXcZar17c68BSzg!W)>ua}n=ylC~%dlHwyWZ5d^oFSKHbwo@ot+iAiQJ`Y zI{&gsc*x-m*sYTxO@G|j{%F1NKzyh_5X*B7yqX#0J>wBB)cl9XBbff8rGmRz{-f~h5;;xA+G#ZDH z?xY;5<+x^`kxEVMH5=QT%{$WtZEHbQY^Po(U6D;0YyI-f`Ta^! 
zTU=DXKa#oeu;(6iqvS{xZ)CC~RdMC?c;%V#KPAQGVKez4y{wiI)_Kdx76Su0m}2pm z56_0doJ!K+of`#hdvRDgqz2JgOD#;D?w5<2rm5#|%7l;^BR?vcB#^73;lJ!$` zM4hRMqQ4x^B(4{U{?dQg+&X+}yk?%k42*l0@s=UW8gIG3!9YGI*yn4-XpX7LluGTM zDwZKF&)ycg>#V_=apNZfHf^zAdj~B1l1d-}Eg{_y@tQuzz5CBKR>y&vkv_ zxGhUwSj>}PWj0mACUobh^R5s}pIJRWXr{d+R>jf>ZqNSKb^KuQ)m)U)5x&%zoiz*c zp5eml-!p`R65TV%QZX}P$)(PV1}s@lU-Dn%BvF}ed;RtTHZrd`Q|6-t$Q{ilGqNvqs>bWUd!TzbVz Date: Mon, 31 Oct 2022 14:11:55 -0700 Subject: [PATCH 23/86] Abandoning rusqlite, the API is miserable --- crates/db/examples/serialize-pane.rs | 42 +++++++++------- crates/db/src/pane.rs | 71 ++++++++++++++++++++++++--- crates/db/src/workspace.rs | 6 +++ crates/db/test.db | Bin 57344 -> 0 bytes 4 files changed, 94 insertions(+), 25 deletions(-) delete mode 100644 crates/db/test.db diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 59ad60a6f4423c5da23e5525d7f73860668cdc81..e828f007d120c81a590227227ffa1ef808b30a88 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -19,26 +19,30 @@ fn main() -> anyhow::Result<()> { let workspace_1 = db.workspace_for_roots(&["/tmp"]); let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - dbg!(&workspace_1, &workspace_2, &workspace_3); - db.write_to(file).ok(); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_1.workspace_id, - anchor_position: DockAnchor::Expanded, - visible: true, - }); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_2.workspace_id, - anchor_position: DockAnchor::Bottom, - visible: true, - }); - db.save_dock_pane(&SerializedDockPane { - workspace_id: workspace_3.workspace_id, - anchor_position: DockAnchor::Right, - visible: false, - }); - - // db.write_to(file).ok(); + db.save_dock_pane( + workspace_1.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Expanded, + visible: true, + }, + ); + db.save_dock_pane( 
+ workspace_2.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Bottom, + visible: true, + }, + ); + db.save_dock_pane( + workspace_3.workspace_id, + &SerializedDockPane { + anchor_position: DockAnchor::Right, + visible: false, + }, + ); + + db.write_to(file).ok(); println!("Wrote database!"); diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 51d8e5ad5b2faf91af3150c4ab35d07d4e2b8f8a..0a1812c60cc68a38c2e4238cadb620a923b7f28a 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,7 +1,9 @@ + use gpui::Axis; +use rusqlite::{OptionalExtension, Connection}; use serde::{Deserialize, Serialize}; -use serde_rusqlite::to_params_named; +use serde_rusqlite::{from_row, to_params_named}; use crate::{items::ItemId, workspace::WorkspaceId}; @@ -134,6 +136,10 @@ pub struct SerializedPane { children: Vec, } + +//********* CURRENTLY IN USE TYPES: ********* + + #[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub enum DockAnchor { #[default] @@ -144,11 +150,29 @@ pub enum DockAnchor { #[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] pub struct SerializedDockPane { - pub workspace_id: WorkspaceId, pub anchor_position: DockAnchor, pub visible: bool, } +impl SerializedDockPane { + pub fn to_row(&self, workspace: WorkspaceId) -> DockRow { + DockRow { workspace_id: workspace, anchor_position: self.anchor_position, visible: self.visible } + } +} + +#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub(crate) struct DockRow { + workspace_id: WorkspaceId, + anchor_position: DockAnchor, + visible: bool, +} + +impl DockRow { + pub fn to_pane(&self) -> SerializedDockPane { + SerializedDockPane { anchor_position: self.anchor_position, visible: self.visible } + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); @@ -203,17 +227,52 @@ impl Db { unimplemented!(); } - pub fn get_dock_pane(&self, _workspace: 
WorkspaceId) -> Option { - None + pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { + + let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?; + + let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::).optional(); + + let mut dock_panes_iter = stmt.query_and_then([workspace.raw_id()], from_row::)?; + let dock_pane = dock_panes_iter + .next() + .and_then(|dock_row| + dock_row + .ok() + .map(|dock_row| dock_row.to_pane())); + + Ok(dock_pane) + } + + self.real() + .map(|db| { + let lock = db.connection.lock(); + + match logic(&lock, workspace) { + Ok(dock_pane) => dock_pane, + Err(err) => { + log::error!("Failed to get the dock pane: {}", err); + None + }, + } + }) + .unwrap_or(None) + } - pub fn save_dock_pane(&self, dock_pane: &SerializedDockPane) { - to_params_named(dock_pane) + pub fn save_dock_pane(&self, workspace: WorkspaceId, dock_pane: SerializedDockPane) { + to_params_named(dock_pane.to_row(workspace)) + .map_err(|err| { + log::error!("Failed to parse params for the dock row: {}", err); + err + }) .ok() .zip(self.real()) .map(|(params, db)| { // TODO: overwrite old dock panes if need be let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; + db.connection .lock() .execute(query, params.to_slice().as_slice()) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5d84ecfccbc57914f8f4bbd2c607a1b46ab402ab..2dc988a7e367499ca872ae71b41216dfdba35489 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -36,6 +36,12 @@ CREATE TABLE worktree_roots( #[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] pub struct WorkspaceId(i64); +impl WorkspaceId { + pub fn raw_id(&self) -> i64 { + self.0 + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { 
pub workspace_id: WorkspaceId, diff --git a/crates/db/test.db b/crates/db/test.db deleted file mode 100644 index b3a78a995a034d7f6dde834e12a47313f8bde62c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 57344 zcmeI)U2oe|7{GBmY4f(WsnltfkSd+3LT#jus!ZI#4Q=O@ijqPa7`w7eV^-6|&SJL| z?)n0J0lpAdNPGjXcZar17c68BSzg!W)>ua}n=ylC~%dlHwyWZ5d^oFSKHbwo@ot+iAiQJ`Y zI{&gsc*x-m*sYTxO@G|j{%F1NKzyh_5X*B7yqX#0J>wBB)cl9XBbff8rGmRz{-f~h5;;xA+G#ZDH z?xY;5<+x^`kxEVMH5=QT%{$WtZEHbQY^Po(U6D;0YyI-f`Ta^! zTU=DXKa#oeu;(6iqvS{xZ)CC~RdMC?c;%V#KPAQGVKez4y{wiI)_Kdx76Su0m}2pm z56_0doJ!K+of`#hdvRDgqz2JgOD#;D?w5<2rm5#|%7l;^BR?vcB#^73;lJ!$` zM4hRMqQ4x^B(4{U{?dQg+&X+}yk?%k42*l0@s=UW8gIG3!9YGI*yn4-XpX7LluGTM zDwZKF&)ycg>#V_=apNZfHf^zAdj~B1l1d-}Eg{_y@tQuzz5CBKR>y&vkv_ zxGhUwSj>}PWj0mACUobh^R5s}pIJRWXr{d+R>jf>ZqNSKb^KuQ)m)U)5x&%zoiz*c zp5eml-!p`R65TV%QZX}P$)(PV1}s@lU-Dn%BvF}ed;RtTHZrd`Q|6-t$Q{ilGqNvqs>bWUd!TzbVz Date: Tue, 1 Nov 2022 13:15:58 -0700 Subject: [PATCH 24/86] WIP switching to sqlez --- Cargo.lock | 710 +++++++++++++++++++------------------ crates/db/Cargo.toml | 6 +- crates/db/src/db.rs | 133 ++----- crates/db/src/workspace.rs | 80 ++--- 4 files changed, 428 insertions(+), 501 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2cc8063ca48188d52ffa352ec1b96a6060b882b4..e2165c09419189d3c78816e176c618c300ae112f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.1.0" dependencies = [ "auto_update", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "language", "project", @@ -45,16 +45,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -133,9 +133,12 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98161a4e3e2184da77bb14f02184cdd111e83bbbcc9979dfee3c44b9a85f5602" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +dependencies = [ + "backtrace", +] [[package]] name = "arrayref" @@ -183,9 +186,9 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" +checksum = "cf46fee83e5ccffc220104713af3292ff9bc7c64c7de289f66dae8e38d826833" dependencies = [ "concurrent-queue", "event-listener", @@ -220,15 +223,15 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", "concurrent-queue", "fastrand", "futures-lite", - "once_cell", "slab", ] @@ -246,31 +249,32 @@ dependencies = [ [[package]] name = "async-io" -version = "1.9.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83e21f3a490c72b3b0cf44962180e60045de2925d8dff97918f7ee43c8f637c7" +checksum = "8c374dda1ed3e7d8f0d9ba58715f924862c63eae6849c92d3a18e7fbde9e2794" dependencies = [ + "async-lock", "autocfg 1.1.0", "concurrent-queue", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", "socket2", "waker-fn", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] name = "async-lock" -version 
= "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e97a171d191782fba31bb902b14ad94e24a68145032b7eedf871ab0bc0d077b6" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] @@ -290,26 +294,26 @@ name = "async-pipe" version = "0.1.3" source = "git+https://github.com/zed-industries/async-pipe-rs?rev=82d00a04211cf4e1236029aa03e6b6ce2a74c553#82d00a04211cf4e1236029aa03e6b6ce2a74c553" dependencies = [ - "futures 0.3.24", + "futures 0.3.25", "log", ] [[package]] name = "async-process" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02111fd8655a613c25069ea89fc8d9bb89331fa77486eb3bc059ee757cfa481c" +checksum = "6381ead98388605d0d9ff86371043b5aa922a3905824244de40dc263a14fcba4" dependencies = [ "async-io", + "async-lock", "autocfg 1.1.0", "blocking", "cfg-if 1.0.0", "event-listener", "futures-lite", "libc", - "once_cell", "signal-hook", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] @@ -364,9 +368,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.57" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "31e6e93155431f3931513b243d371981bb2770112b370c82745a1d19d2f99364" dependencies = [ "proc-macro2", "quote", @@ -462,15 +466,15 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.16" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +checksum = "acee9fd5073ab6b045a275b3e709c163dd36c90685219cb21804a147b58dba43" dependencies = [ "async-trait", "axum-core", "base64", "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "headers", 
"http", @@ -485,7 +489,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sha-1 0.10.0", + "sha-1 0.10.1", "sync_wrapper", "tokio", "tokio-tungstenite", @@ -497,12 +501,12 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +checksum = "37e5939e02c56fecd5c017c37df4238c0a839fa76b7f97acdd7efb804fd181cc" dependencies = [ "async-trait", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "http", "http-body", @@ -518,7 +522,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69034b3b0fd97923eee2ce8a47540edb21e07f48f87f67d44bb4271cec622bdb" dependencies = [ "axum", - "bytes 1.2.1", + "bytes 1.3.0", "futures-util", "http", "mime", @@ -549,15 +553,15 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "bincode" @@ -623,16 +627,16 @@ dependencies = [ [[package]] name = "blocking" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" +checksum = "3c67b173a56acffd6d2326fb7ab938ba0b00a71480e14902b2591c87bc5741e8" dependencies = [ "async-channel", + "async-lock", "async-task", "atomic-waker", "fastrand", "futures-lite", - "once_cell", ] [[package]] @@ -674,15 +678,15 @@ 
dependencies = [ [[package]] name = "bumpalo" -version = "3.11.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "bytemuck" -version = "1.12.1" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" +checksum = "aaa3a8d9a1ca92e282c96a32d6511b695d7d994d1d102ba85d279f9b2756947f" [[package]] name = "byteorder" @@ -702,15 +706,9 @@ dependencies = [ [[package]] name = "bytes" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" - -[[package]] -name = "cache-padded" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "call" @@ -720,7 +718,7 @@ dependencies = [ "async-broadcast", "client", "collections", - "futures 0.3.24", + "futures 0.3.25", "gpui", "live_kit_client", "media", @@ -803,9 +801,9 @@ checksum = "a2698f953def977c68f935bb0dfa959375ad4638570e969e2f1e9f433cbf1af6" [[package]] name = "cc" -version = "1.0.73" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" dependencies = [ "jobserver", ] @@ -833,15 +831,15 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "chrono" -version = "0.4.22" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "bfd4d1b31faaa3a89d7934dbded3111da0d2ef28e3ebccdb4f0179f5929d1ef1" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ "iana-time-zone", "js-sys", "num-integer", "num-traits", - "time 0.1.44", + "time 0.1.45", "wasm-bindgen", "winapi 0.3.9", ] @@ -889,9 +887,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.22" +version = "3.2.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86447ad904c7fb335a790c9d7fe3d0d971dc523b8ccd1561a520de9a85302750" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" dependencies = [ "atty", "bitflags", @@ -901,7 +899,7 @@ dependencies = [ "once_cell", "strsim 0.10.0", "termcolor", - "textwrap 0.15.1", + "textwrap 0.16.0", ] [[package]] @@ -931,7 +929,7 @@ name = "cli" version = "0.1.0" dependencies = [ "anyhow", - "clap 3.2.22", + "clap 3.2.23", "core-foundation", "core-services", "dirs 3.0.2", @@ -949,7 +947,7 @@ dependencies = [ "async-tungstenite", "collections", "db", - "futures 0.3.24", + "futures 0.3.25", "gpui", "image", "isahc", @@ -965,11 +963,11 @@ dependencies = [ "sum_tree", "tempfile", "thiserror", - "time 0.3.15", + "time 0.3.17", "tiny_http", "url", "util", - "uuid 1.2.1", + "uuid 1.2.2", ] [[package]] @@ -981,9 +979,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.48" +version = "0.1.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ad8cef104ac57b68b89df3208164d228503abbdce70f6880ffa3d970e7443a" +checksum = "db34956e100b30725f2eb215f90d4871051239535632f84fea3bc92722c66b7c" dependencies = [ "cc", ] @@ -1038,7 +1036,7 @@ dependencies = [ "axum-extra", "base64", "call", - "clap 3.2.22", + "clap 3.2.23", "client", "collections", "ctor", @@ -1046,7 +1044,7 @@ dependencies = [ "env_logger", "envy", "fs", - "futures 0.3.24", + "futures 0.3.25", "git", "gpui", "hyper", @@ -1071,7 +1069,7 @@ dependencies = [ "sha-1 0.9.8", "sqlx", "theme", - "time 0.3.15", 
+ "time 0.3.17", "tokio", "tokio-tungstenite", "toml", @@ -1095,7 +1093,7 @@ dependencies = [ "clock", "collections", "editor", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "gpui", "log", @@ -1144,11 +1142,11 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" dependencies = [ - "cache-padded", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1375,7 +1373,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1386,30 +1384,30 @@ checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", - "memoffset", + "crossbeam-utils 0.8.14", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.6" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" +checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", ] [[package]] @@ -1425,9 +1423,9 @@ dependencies = [ [[package]] 
name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ "cfg-if 1.0.0", ] @@ -1454,9 +1452,9 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.23" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdffe87e1d521a10f9696f833fe502293ea446d7f256c06128293a4119bdf4cb" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn", @@ -1479,9 +1477,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.56+curl-7.83.1" +version = "0.4.59+curl-7.86.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6093e169dd4de29e468fa649fbae11cdcd5551c81fe5bf1b0677adad7ef3d26f" +checksum = "6cfce34829f448b08f55b7db6d0009e23e2e86a34e8c2b366269bf5799b4a407" dependencies = [ "cc", "libc", @@ -1495,9 +1493,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f83d0ebf42c6eafb8d7c52f7e5f2d3003b89c7aa4fd2b79229209459a849af8" +checksum = "bdf07d07d6531bfcdbe9b8b739b104610c6508dcc4d63b410585faf338241daf" dependencies = [ "cc", "cxxbridge-flags", @@ -1507,9 +1505,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07d050484b55975889284352b0ffc2ecbda25c0c55978017c132b29ba0818a86" +checksum = "d2eb5b96ecdc99f72657332953d4d9c50135af1bac34277801cc3937906ebd39" dependencies = [ "cc", "codespan-reporting", @@ -1522,15 +1520,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.79" +version = "1.0.83" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d2199b00553eda8012dfec8d3b1c75fce747cf27c169a270b3b99e3448ab78" +checksum = "ac040a39517fd1674e0f32177648334b0f4074625b5588a64519804ba0553b12" [[package]] name = "cxxbridge-macro" -version = "1.0.79" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb67a6de1f602736dd7eaead0080cf3435df806c61b24b13328db128c58868f" +checksum = "1362b0ddcfc4eb0a1f57b68bd77dd99f0e826958a96abd0ae9bd092e114ffed6" dependencies = [ "proc-macro2", "quote", @@ -1553,14 +1551,13 @@ dependencies = [ "anyhow", "async-trait", "collections", + "env_logger", "gpui", + "indoc", "lazy_static", "log", "parking_lot 0.11.2", - "rusqlite", - "rusqlite_migration", - "serde", - "serde_rusqlite", + "sqlez", "tempdir", ] @@ -1576,12 +1573,13 @@ dependencies = [ [[package]] name = "dhat" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0684eaa19a59be283a6f99369917b679bd4d1d06604b2eb2e2f87b4bbd67668d" +checksum = "4f2aaf837aaf456f6706cb46386ba8dffd4013a757e36f4ea05c20dd46b209a3" dependencies = [ "backtrace", "lazy_static", + "mintex", "parking_lot 0.12.1", "rustc-hash", "serde", @@ -1621,9 +1619,9 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" +checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.3", "crypto-common", @@ -1740,7 +1738,7 @@ dependencies = [ "ctor", "drag_and_drop", "env_logger", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -1790,9 +1788,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c90bf5f19754d10198ccb95b70664fc925bd1fc090a0fd9a6ebc54acc8cd6272" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", "humantime", @@ -1881,12 +1879,6 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" -[[package]] -name = "fallible-streaming-iterator" -version = "0.1.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" - [[package]] name = "fastrand" version = "1.8.0" @@ -1934,12 +1926,12 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", - "miniz_oxide 0.5.4", + "miniz_oxide 0.6.2", ] [[package]] @@ -2060,7 +2052,7 @@ dependencies = [ "async-trait", "collections", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "git2", "gpui", "lazy_static", @@ -2137,9 +2129,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2152,9 +2144,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" +checksum = 
"52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2162,15 +2154,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2179,9 +2171,9 @@ dependencies = [ [[package]] name = "futures-intrusive" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62007592ac46aa7c2b6416f7deb9a8a8f63a01e0f1d6e1787d5630170db2b63e" +checksum = "a604f7a68fbf8103337523b1fadc8ade7361ee3f112f7c680ad179651616aed5" dependencies = [ "futures-core", "lock_api", @@ -2190,9 +2182,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-lite" @@ -2211,9 +2203,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2222,21 +2214,21 @@ dependencies = [ [[package]] name = 
"futures-sink" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-util" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2292,9 +2284,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if 1.0.0", "libc", @@ -2330,7 +2322,7 @@ dependencies = [ "async-trait", "clock", "collections", - "futures 0.3.24", + "futures 0.3.25", "git2", "lazy_static", "log", @@ -2408,7 +2400,7 @@ dependencies = [ "etagere", "font-kit", "foreign-types", - "futures 0.3.24", + "futures 0.3.25", "gpui_macros", "image", "itertools", @@ -2434,7 +2426,7 @@ dependencies = [ "smallvec", "smol", "sum_tree", - "time 0.3.15", + "time 0.3.17", "tiny-skia", "tree-sitter", "usvg", @@ -2453,11 +2445,11 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" 
+checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "fnv", "futures-core", "futures-sink", @@ -2505,7 +2497,7 @@ checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ "base64", "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "headers-core", "http", "httpdate", @@ -2589,7 +2581,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -2598,7 +2590,7 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "fnv", "itoa", ] @@ -2609,7 +2601,7 @@ version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "http", "pin-project-lite 0.2.9", ] @@ -2640,11 +2632,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-channel", "futures-core", "futures-util", @@ -2680,7 +2672,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "hyper", "native-tls", "tokio", @@ -2689,9 +2681,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = 
"0.1.51" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a6ef98976b22b3b7f2f3a806f858cb862044cfa66805aa3ad84cb3d3b785ed" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2727,7 +2719,7 @@ version = "0.4.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "713f1b139373f96a2e0ce3ac931cd01ee973c3c5dd7c40c0c2efe96ad2b6751d" dependencies = [ - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "globset", "lazy_static", "log", @@ -2760,9 +2752,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.1" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg 1.1.0", "hashbrown 0.12.3", @@ -2834,9 +2826,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.5.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" [[package]] name = "is-terminal" @@ -2858,7 +2850,7 @@ checksum = "334e04b4d781f436dc315cb1e7515bd96826426345d498149e4bde36b67f8ee9" dependencies = [ "async-channel", "castaway", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "curl", "curl-sys", "encoding_rs", @@ -2957,7 +2949,7 @@ checksum = "6204285f77fe7d9784db3fdc449ecce1a0114927a51d5a41c4c7a292011c015f" dependencies = [ "base64", "crypto-common", - "digest 0.10.5", + "digest 0.10.6", "hmac 0.12.1", "serde", "serde_json", @@ -2996,7 +2988,7 @@ dependencies = [ "ctor", "env_logger", "fs", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -3053,9 +3045,9 @@ checksum = 
"884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.135" +version = "0.2.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68783febc7782c6c5cb401fbda4de5a9898be1762314da0bb2c10ced61f18b0c" +checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8" [[package]] name = "libgit2-sys" @@ -3071,9 +3063,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ "cfg-if 1.0.0", "winapi 0.3.9", @@ -3081,9 +3073,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "292a948cd991e376cf75541fe5b97a1081d713c618b4f1b9500f8844e49eb565" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "libnghttp2-sys" @@ -3097,9 +3089,9 @@ dependencies = [ [[package]] name = "libsqlite3-sys" -version = "0.25.1" +version = "0.25.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" dependencies = [ "cc", "pkg-config", @@ -3167,13 +3159,13 @@ dependencies = [ "async-trait", "block", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "cocoa", "collections", "core-foundation", "core-graphics", "foreign-types", - "futures 0.3.24", + "futures 0.3.25", "gpui", "hmac 0.12.1", "jwt", @@ -3197,7 +3189,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "futures 0.3.24", + "futures 0.3.25", "hmac 0.12.1", "jwt", "log", @@ -3239,7 +3231,7 @@ dependencies = [ "collections", "ctor", "env_logger", - 
"futures 0.3.24", + "futures 0.3.25", "gpui", "log", "lsp-types", @@ -3322,7 +3314,7 @@ version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6365506850d44bff6e2fbcb5176cf63650e48bd45ef2fe2665ae1570e0f4b9ca" dependencies = [ - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -3332,7 +3324,7 @@ dependencies = [ "anyhow", "bindgen", "block", - "bytes 1.2.1", + "bytes 1.3.0", "core-foundation", "foreign-types", "metal", @@ -3372,6 +3364,15 @@ dependencies = [ "autocfg 1.1.0", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg 1.1.0", +] + [[package]] name = "menu" version = "0.1.0" @@ -3433,6 +3434,25 @@ dependencies = [ "adler", ] +[[package]] +name = "miniz_oxide" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b275950c28b37e794e8c55d88aeb5e139d0ce23fdbbeda68f8d7174abdf9e8fa" +dependencies = [ + "adler", +] + +[[package]] +name = "mintex" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd7c5ba1c3b5a23418d7bbf98c71c3d4946a0125002129231da8d6b723d559cb" +dependencies = [ + "once_cell", + "sys-info", +] + [[package]] name = "mio" version = "0.6.23" @@ -3454,14 +3474,14 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -3544,9 +3564,9 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.10" +version = "0.2.11" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd7e2f3618557f980e0b17e8856252eee3c97fa12c54dff0ca290fb6266ca4a9" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -3562,9 +3582,9 @@ dependencies = [ [[package]] name = "net2" -version = "0.2.37" +version = "0.2.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" +checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" dependencies = [ "cfg-if 0.1.10", "libc", @@ -3573,14 +3593,14 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", "cfg-if 1.0.0", "libc", - "memoffset", + "memoffset 0.6.5", ] [[package]] @@ -3685,30 +3705,21 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.1" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi 0.1.19", "libc", ] -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ - "libc", -] - [[package]] name = "nvim-rs" version = "0.5.0" source = "git+https://github.com/KillTheMule/nvim-rs?branch=master#d701c2790dcb2579f8f4d7003ba30e2100a7d25b" dependencies = [ "async-trait", - "futures 0.3.24", + "futures 0.3.25", "log", "parity-tokio-ipc", "rmp", @@ -3759,9 +3770,9 @@ dependencies = [ [[package]] name = 
"once_cell" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "opaque-debug" @@ -3771,9 +3782,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.42" +version = "0.10.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" +checksum = "020433887e44c27ff16365eaa2d380547a94544ad509aff6eb5b6e3e0b27b376" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -3803,9 +3814,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.76" +version = "0.9.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5230151e44c0f05157effb743e8d517472843121cf9243e8b81393edb5acd9ce" +checksum = "07d5c8cb6e57b3a3612064d7b18b117912b4ce70955c2504d4b741c9e244b132" dependencies = [ "autocfg 1.1.0", "cc", @@ -3825,9 +3836,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.3.0" +version = "6.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" [[package]] name = "outline" @@ -3858,7 +3869,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9981e32fb75e004cc148f5fb70342f393830e0a4aa62e3cc93b50976218d42b6" dependencies = [ - "futures 0.3.24", + "futures 0.3.25", "libc", "log", "rand 0.7.3", @@ -3890,7 +3901,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ 
"lock_api", - "parking_lot_core 0.9.4", + "parking_lot_core 0.9.5", ] [[package]] @@ -3909,9 +3920,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = "7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "cfg-if 1.0.0", "libc", @@ -3999,9 +4010,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.4.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc7bc69c062e492337d74d59b120c274fd3d261b6bf6d3207d499b4b379c41a" +checksum = "cc8bed3549e0f9b0a2a78bf7c0018237a2cdf085eecbbc048e52612438e4e9d0" dependencies = [ "thiserror", "ucd-trie", @@ -4080,9 +4091,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "plist" @@ -4094,7 +4105,7 @@ dependencies = [ "indexmap", "line-wrap", "serde", - "time 0.3.15", + "time 0.3.17", "xml-rs", ] @@ -4147,16 +4158,16 @@ dependencies = [ [[package]] name = "polling" -version = "2.3.0" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "899b00b9c8ab553c743b3e11e87c5c7d423b2a2de229ba95b24a756344748011" +checksum = "166ca89eb77fd403230b9c156612965a81e094ec6ec3aa13663d4c8b113fa748" dependencies = [ "autocfg 1.1.0", "cfg-if 1.0.0", "libc", "log", "wepoll-ffi", - "winapi 0.3.9", + "windows-sys 0.42.0", ] [[package]] @@ -4173,7 +4184,7 @@ checksum = "a63d25391d04a097954b76aba742b6b5b74f213dfe3dbaeeb36e8ddc1c657f0b" 
dependencies = [ "atomic", "crossbeam-queue", - "futures 0.3.24", + "futures 0.3.25", "log", "pin-project", "pollster", @@ -4183,9 +4194,9 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro-error" @@ -4244,7 +4255,7 @@ dependencies = [ "db", "fs", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "git", "gpui", @@ -4281,7 +4292,7 @@ dependencies = [ "context_menu", "drag_and_drop", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "menu", "postage", @@ -4300,7 +4311,7 @@ version = "0.1.0" dependencies = [ "anyhow", "editor", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "gpui", "language", @@ -4318,9 +4329,9 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c8babc29389186697fe5a2a4859d697825496b83db5d0b65271cdc0488e88c" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if 1.0.0", "fnv", @@ -4337,7 +4348,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de5e2533f59d08fcf364fd374ebda0692a70bd6d7e66ef97f306f45c6c5d8020" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost-derive 0.8.0", ] @@ -4347,7 +4358,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444879275cb4fd84958b1a1d5420d15e6fcf7c235fe47f053c9c2a80aceb6001" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost-derive 0.9.0", ] @@ -4357,7 +4368,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62941722fb675d463659e49c4f3fe1fe792ff24fe5bbaa9c08cd3b98a1c354f5" dependencies 
= [ - "bytes 1.2.1", + "bytes 1.3.0", "heck 0.3.3", "itertools", "lazy_static", @@ -4403,7 +4414,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "603bbd6394701d13f3f25aada59c7de9d35a6a5887cfc156181234a44002771b" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost 0.8.0", ] @@ -4413,7 +4424,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "534b7a0e836e3c482d2693070f982e39e7611da9695d4d1f5a4b186b51faef0a" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "prost 0.9.0", ] @@ -4539,7 +4550,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -4553,11 +4564,10 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b" dependencies = [ - "autocfg 1.1.0", "crossbeam-deque", "either", "rayon-core", @@ -4565,13 +4575,13 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel 0.5.6", "crossbeam-deque", - "crossbeam-utils 0.8.12", + "crossbeam-utils 0.8.14", "num_cpus", ] @@ -4605,7 +4615,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", "redox_syscall", "thiserror", ] @@ -4624,9 
+4634,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -4644,9 +4654,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "region" @@ -4671,12 +4681,12 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.12" +version = "0.11.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" +checksum = "68cc60575865c7831548863cc02356512e3f1dc2f3f82cb837d7fc4cc8f3c97c" dependencies = [ "base64", - "bytes 1.2.1", + "bytes 1.3.0", "encoding_rs", "futures-core", "futures-util", @@ -4802,7 +4812,7 @@ dependencies = [ "collections", "ctor", "env_logger", - "futures 0.3.24", + "futures 0.3.25", "gpui", "parking_lot 0.11.2", "prost 0.8.0", @@ -4838,35 +4848,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rusqlite" -version = "0.28.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" -dependencies = [ - "bitflags", - "fallible-iterator", - "fallible-streaming-iterator", - "hashlink", - "libsqlite3-sys", - "serde_json", - "smallvec", -] - -[[package]] -name = "rusqlite_migration" -version = "1.0.0" -source = "git+https://github.com/cljoly/rusqlite_migration?rev=c433555d7c1b41b103426e35756eb3144d0ebbc6#c433555d7c1b41b103426e35756eb3144d0ebbc6" -dependencies = [ - "log", - "rusqlite", -] - 
[[package]] name = "rust-embed" -version = "6.4.1" +version = "6.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e26934cd67a1da1165efe61cba4047cc1b4a526019da609fcce13a1000afb5fa" +checksum = "283ffe2f866869428c92e0d61c2f35dfb4355293cdfdc48f49e895c15f1333d1" dependencies = [ "rust-embed-impl", "rust-embed-utils", @@ -4875,9 +4861,9 @@ dependencies = [ [[package]] name = "rust-embed-impl" -version = "6.3.0" +version = "6.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e35d7b402e273544cc08e0824aa3404333fab8a90ac43589d3d5b72f4b346e12" +checksum = "31ab23d42d71fb9be1b643fe6765d292c5e14d46912d13f3ae2815ca048ea04d" dependencies = [ "proc-macro2", "quote", @@ -5187,18 +5173,18 @@ checksum = "5a9f47faea3cad316faa914d013d24f471cd90bfca1a0c70f05a3f42c6441e99" [[package]] name = "serde" -version = "1.0.145" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" +checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.145" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" +checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" dependencies = [ "proc-macro2", "quote", @@ -5227,9 +5213,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.86" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41feea4228a6f1cd09ec7a3593a682276702cd67b5273544757dae23c096f074" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "indexmap", "itoa", @@ -5257,16 +5243,6 @@ dependencies = [ "syn", ] -[[package]] -name = "serde_rusqlite" -version = "0.31.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "538b51f10ee271375cbd9caa04fa6e3e50af431a21db97caae48da92a074244a" -dependencies = [ - "rusqlite", - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5320,7 +5296,7 @@ dependencies = [ "assets", "collections", "fs", - "futures 0.3.24", + "futures 0.3.25", "gpui", "json_comments", "postage", @@ -5351,13 +5327,13 @@ dependencies = [ [[package]] name = "sha-1" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5368,7 +5344,7 @@ checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5392,7 +5368,7 @@ checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if 1.0.0", "cpufeatures", - "digest 0.10.5", + "digest 0.10.6", ] [[package]] @@ -5528,9 +5504,9 @@ checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "smol" -version = "1.2.5" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85cf3b5351f3e783c1d79ab5fc604eeed8b8ae9abd36b166e8b87a089efd85e4" +checksum = "13f2b548cd8447f8de0fdf1c592929f70f4fc7039a05e47404b0d096ec6987a1" dependencies = [ "async-channel", "async-executor", @@ -5541,7 +5517,6 @@ dependencies = [ "async-process", "blocking", "futures-lite", - "once_cell", ] [[package]] @@ -5593,6 +5568,17 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be6c3f39c37a4283ee4b43d1311c828f2e1fb0541e76ea0cb1a2abd9ef2f5b3b" +[[package]] +name = "sqlez" +version = "0.1.0" 
+source = "git+https://github.com/Kethku/sqlez#10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f" +dependencies = [ + "anyhow", + "indoc", + "libsqlite3-sys", + "thread_local", +] + [[package]] name = "sqlformat" version = "0.2.0" @@ -5623,7 +5609,7 @@ dependencies = [ "base64", "bitflags", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "crc", "crossbeam-queue", "dirs 4.0.0", @@ -5662,10 +5648,10 @@ dependencies = [ "sqlx-rt", "stringprep", "thiserror", - "time 0.3.15", + "time 0.3.17", "tokio-stream", "url", - "uuid 1.2.1", + "uuid 1.2.2", "webpki-roots 0.22.5", "whoami", ] @@ -5787,9 +5773,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.102" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" +checksum = "60b9b43d45702de4c839cb9b51d9f529c5dd26a4aff255b42b1ebc03e88ee908" dependencies = [ "proc-macro2", "quote", @@ -5814,6 +5800,16 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sys-info" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b3a0d0aba8bf96a0e1ddfdc352fc53b3df7f39318c71854910c3c4b024ae52c" +dependencies = [ + "cc", + "libc", +] + [[package]] name = "system-interface" version = "0.20.0" @@ -5832,9 +5828,9 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.4" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02424087780c9b71cc96799eaeddff35af2bc513278cda5c99fc1f5d026d3c1" +checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" [[package]] name = "tempdir" @@ -5879,7 +5875,7 @@ dependencies = [ "context_menu", "dirs 4.0.0", "editor", - "futures 0.3.24", + "futures 0.3.25", "gpui", "itertools", "language", @@ -5936,9 +5932,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "theme" @@ -6031,9 +6027,9 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "1b797afad3f312d1c66a56d11d0316f916356d11bd158fbc6ca6389ff6bf805a" dependencies = [ "libc", "wasi 0.10.0+wasi-snapshot-preview1", @@ -6042,22 +6038,30 @@ dependencies = [ [[package]] name = "time" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" +checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", - "libc", - "num_threads", "serde", + "time-core", "time-macros", ] +[[package]] +name = "time-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd" + [[package]] name = "time-macros" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +checksum = "d967f99f534ca7e495c575c62638eebc2898a8c84c119b89e250477bc4ba16b2" +dependencies = [ + "time-core", +] [[package]] name = "tiny-skia" @@ -6103,15 +6107,15 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg 1.1.0", - "bytes 1.2.1", + "bytes 
1.3.0", "libc", "memchr", - "mio 0.8.4", + "mio 0.8.5", "num_cpus", "parking_lot 0.12.1", "pin-project-lite 0.2.9", @@ -6144,9 +6148,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -6203,7 +6207,7 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-sink", "log", @@ -6217,7 +6221,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-io", "futures-sink", @@ -6244,7 +6248,7 @@ dependencies = [ "async-stream", "async-trait", "base64", - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-util", "h2", @@ -6288,12 +6292,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "bitflags", - "bytes 1.2.1", + "bytes 1.3.0", "futures-core", "futures-util", "http", @@ -6592,7 +6596,7 @@ checksum = "6ad3713a14ae247f22a728a0456a545df14acf3867f905adff84be99e23b3ad1" dependencies = [ "base64", "byteorder", - "bytes 1.2.1", + "bytes 1.3.0", "http", "httparse", "log", @@ -6611,12 +6615,12 @@ checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", - "bytes 1.2.1", + 
"bytes 1.3.0", "http", "httparse", "log", "rand 0.8.5", - "sha-1 0.10.0", + "sha-1 0.10.1", "thiserror", "url", "utf-8", @@ -6787,7 +6791,7 @@ version = "0.1.0" dependencies = [ "anyhow", "backtrace", - "futures 0.3.24", + "futures 0.3.25", "git2", "lazy_static", "log", @@ -6802,16 +6806,16 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] name = "uuid" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ - "getrandom 0.2.7", + "getrandom 0.2.8", ] [[package]] @@ -7055,9 +7059,9 @@ checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-encoder" -version = "0.18.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64ac98d5d61192cc45c701b7e4bd0b9aff91e2edfc7a088406cfe2288581e2c" +checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" dependencies = [ "leb128", ] @@ -7231,7 +7235,7 @@ dependencies = [ "log", "mach", "memfd", - "memoffset", + "memoffset 0.6.5", "more-asserts", "rand 0.8.5", "region", @@ -7279,9 +7283,9 @@ dependencies = [ [[package]] name = "wast" -version = "47.0.1" +version = "50.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b98502f3978adea49551e801a6687678e6015317d7d9470a67fe813393f2a8" +checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" dependencies = [ "leb128", "memchr", @@ -7291,11 +7295,11 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.49" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7aab4e20c60429fbba9670a6cae0fff9520046ba0aa3e6d0b1cd2653bea14898" +checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" dependencies = [ - "wast 47.0.1", + "wast 50.0.0", ] [[package]] @@ -7621,7 +7625,7 @@ dependencies = [ "db", "drag_and_drop", "fs", - "futures 0.3.24", + "futures 0.3.25", "gpui", "language", "log", @@ -7705,7 +7709,7 @@ dependencies = [ "file_finder", "fs", "fsevent", - "futures 0.3.24", + "futures 0.3.25", "fuzzy", "go_to_line", "gpui", @@ -7783,9 +7787,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -7814,9 +7818,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.4+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" dependencies = [ "cc", "libc", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index a2ac2a9fc528a355baebf8a255bd0973e8a74b98..5530caaa81f6b628245c1507c562f1285f614095 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -11,6 +11,7 @@ doctest = false test-support = [] [dependencies] +indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } anyhow = "1.0.57" @@ -18,10 +19,7 @@ async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" -rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] } -rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" } -serde = { 
workspace = true } -serde_rusqlite = "0.31.0" +sqlez = { git = "https://github.com/Kethku/sqlez", ref = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 107bbffdf439491ef4bb61f814be71f83311b6bd..e5740c5edb99b694ccc8f4f82be8d10711e1e2ed 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,26 +5,25 @@ pub mod pane; pub mod workspace; use std::fs; -use std::path::{Path, PathBuf}; -use std::sync::Arc; +use std::ops::Deref; +use std::path::Path; use anyhow::Result; -use log::error; -use parking_lot::Mutex; -use rusqlite::{backup, Connection}; +use indoc::indoc; +use sqlez::connection::Connection; +use sqlez::thread_safe_connection::ThreadSafeConnection; -use migrations::MIGRATIONS; pub use workspace::*; #[derive(Clone)] -pub enum Db { - Real(Arc), - Null, -} +struct Db(ThreadSafeConnection); + +impl Deref for Db { + type Target = sqlez::connection::Connection; -pub struct RealDb { - connection: Mutex, - path: Option, + fn deref(&self) -> &Self::Target { + &self.0.deref() + } } impl Db { @@ -36,104 +35,44 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Connection::open(db_path) - .map_err(Into::into) - .and_then(|connection| Self::initialize(connection)) - .map(|connection| { - Db::Real(Arc::new(RealDb { - connection, - path: Some(db_dir.to_path_buf()), - })) - }) - .unwrap_or_else(|e| { - error!( - "Connecting to file backed db failed. Reverting to null db. 
{}", - e - ); - Self::Null - }) - } - - fn initialize(mut conn: Connection) -> Result> { - MIGRATIONS.to_latest(&mut conn)?; - - conn.pragma_update(None, "journal_mode", "WAL")?; - conn.pragma_update(None, "synchronous", "NORMAL")?; - conn.pragma_update(None, "foreign_keys", true)?; - conn.pragma_update(None, "case_sensitive_like", true)?; - - Ok(Mutex::new(conn)) + Db( + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(indoc! {" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}), + ) } pub fn persisting(&self) -> bool { - self.real().and_then(|db| db.path.as_ref()).is_some() - } - - pub fn real(&self) -> Option<&RealDb> { - match self { - Db::Real(db) => Some(&db), - _ => None, - } + self.persistent() } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory() -> Self { - Connection::open_in_memory() - .map_err(Into::into) - .and_then(|connection| Self::initialize(connection)) - .map(|connection| { - Db::Real(Arc::new(RealDb { - connection, - path: None, - })) - }) - .unwrap_or_else(|e| { - error!( - "Connecting to in memory db failed. Reverting to null db. {}", - e - ); - Self::Null - }) + Db( + ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! 
{" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}), + ) } pub fn write_to>(&self, dest: P) -> Result<()> { - self.real() - .map(|db| { - if db.path.is_some() { - panic!("DB already exists"); - } - - let lock = db.connection.lock(); - let mut dst = Connection::open(dest)?; - let backup = backup::Backup::new(&lock, &mut dst)?; - backup.step(-1)?; - - Ok(()) - }) - .unwrap_or(Ok(())) + let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); + self.backup(&destination) } } impl Drop for Db { fn drop(&mut self) { - match self { - Db::Real(real_db) => { - let lock = real_db.connection.lock(); - - let _ = lock.pragma_update(None, "analysis_limit", "500"); - let _ = lock.pragma_update(None, "optimize", ""); - } - Db::Null => {} - } - } -} - -#[cfg(test)] -mod tests { - use crate::migrations::MIGRATIONS; - - #[test] - fn test_migrations() { - assert!(MIGRATIONS.validate().is_ok()); + self.exec(indoc! 
{" + PRAGMA analysis_limit=500; + PRAGMA optimize"}) + .ok(); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 2dc988a7e367499ca872ae71b41216dfdba35489..5237caa23c2b0bf243bafc0ef67a890f47ab0598 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,8 +1,5 @@ use anyhow::Result; -use rusqlite::{params, Connection, OptionalExtension}; -use serde::{Deserialize, Serialize}; - use std::{ ffi::OsStr, fmt::Debug, @@ -12,28 +9,34 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; +use anyhow::Result; +use indoc::indoc; +use sqlez::{connection::Connection, migrations::Migration}; + use crate::pane::SerializedDockPane; use super::Db; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented -// out -pub(crate) const WORKSPACE_M_1: &str = " -CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, - last_opened_timestamp INTEGER NOT NULL -) STRICT; - -CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) -) STRICT; -"; - -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)] +// out. This will panic if run on an existing db that has already been migrated +const WORKSPACES_MIGRATION: Migration = Migration::new( + "migrations", + &[indoc! 
{" + CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + PRIMARY KEY(worktree_root, workspace_id) + ) STRICT;"}], +); + +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); impl WorkspaceId { @@ -77,19 +80,9 @@ impl Db { where P: AsRef + Debug, { - fn logic

( - connection: &mut Connection, - worktree_roots: &[P], - ) -> Result - where - P: AsRef + Debug, - { - let tx = connection.transaction()?; - - tx.execute( - "INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", - [current_millis()?], - )?; + let result = (|| { + let tx = self.transaction()?; + tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", [current_millis()?])?; let id = WorkspaceId(tx.last_insert_rowid()); @@ -101,22 +94,15 @@ impl Db { workspace_id: id, dock_pane: None, }) - } - - self.real() - .map(|db| { - let mut lock = db.connection.lock(); + })(); - // No need to waste the memory caching this, should happen rarely. - match logic(&mut lock, worktree_roots) { - Ok(serialized_workspace) => serialized_workspace, - Err(err) => { - log::error!("Failed to insert new workspace into DB: {}", err); - Default::default() - } - } - }) - .unwrap_or_default() + match result { + Ok(serialized_workspace) => serialized_workspace, + Err(err) => { + log::error!("Failed to insert new workspace into DB: {}", err); + Default::default() + } + } } fn workspace_id

(&self, worktree_roots: &[P]) -> Option From a4a1859dfca1eadbfe1f95e44917f9958fbd6f3e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 13:31:03 -0700 Subject: [PATCH 25/86] Added sqlez api --- Cargo.lock | 1 - crates/db/Cargo.toml | 3 ++- crates/sqlez | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) create mode 160000 crates/sqlez diff --git a/Cargo.lock b/Cargo.lock index e2165c09419189d3c78816e176c618c300ae112f..2fb859dca5177d6c5d6381397c6b02aad614fa39 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5571,7 +5571,6 @@ checksum = "be6c3f39c37a4283ee4b43d1311c828f2e1fb0541e76ea0cb1a2abd9ef2f5b3b" [[package]] name = "sqlez" version = "0.1.0" -source = "git+https://github.com/Kethku/sqlez#10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f" dependencies = [ "anyhow", "indoc", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 5530caaa81f6b628245c1507c562f1285f614095..fe0b21eaf4e819f14ba89c955f3f0cdef2cd3fb7 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -14,12 +14,13 @@ test-support = [] indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } +sqlez = { path = "../sqlez" } anyhow = "1.0.57" async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" -sqlez = { git = "https://github.com/Kethku/sqlez", ref = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" } + [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/sqlez b/crates/sqlez new file mode 160000 index 0000000000000000000000000000000000000000..10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f --- /dev/null +++ b/crates/sqlez @@ -0,0 +1 @@ +Subproject commit 10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f From 395070cb921eef4b813d775f6af49de65db6f544 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 13:32:46 -0700 Subject: [PATCH 26/86] remove submodule --- crates/sqlez | 1 - crates/sqlez/.gitignore | 2 + crates/sqlez/Cargo.lock | 150 
+++++++++ crates/sqlez/Cargo.toml | 12 + crates/sqlez/src/bindable.rs | 209 +++++++++++++ crates/sqlez/src/connection.rs | 220 +++++++++++++ crates/sqlez/src/lib.rs | 6 + crates/sqlez/src/migrations.rs | 261 ++++++++++++++++ crates/sqlez/src/savepoint.rs | 110 +++++++ crates/sqlez/src/statement.rs | 342 +++++++++++++++++++++ crates/sqlez/src/thread_safe_connection.rs | 78 +++++ 11 files changed, 1390 insertions(+), 1 deletion(-) delete mode 160000 crates/sqlez create mode 100644 crates/sqlez/.gitignore create mode 100644 crates/sqlez/Cargo.lock create mode 100644 crates/sqlez/Cargo.toml create mode 100644 crates/sqlez/src/bindable.rs create mode 100644 crates/sqlez/src/connection.rs create mode 100644 crates/sqlez/src/lib.rs create mode 100644 crates/sqlez/src/migrations.rs create mode 100644 crates/sqlez/src/savepoint.rs create mode 100644 crates/sqlez/src/statement.rs create mode 100644 crates/sqlez/src/thread_safe_connection.rs diff --git a/crates/sqlez b/crates/sqlez deleted file mode 160000 index 10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f..0000000000000000000000000000000000000000 --- a/crates/sqlez +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 10a78dbe535a0c270b6b4bc469fbbffe9fc8c36f diff --git a/crates/sqlez/.gitignore b/crates/sqlez/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..8130c3ab478700662bf4aee0bf9f0cd1dce1f283 --- /dev/null +++ b/crates/sqlez/.gitignore @@ -0,0 +1,2 @@ +debug/ +target/ diff --git a/crates/sqlez/Cargo.lock b/crates/sqlez/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..33348baed99d1fcc0b5910513f35a98ef9791914 --- /dev/null +++ b/crates/sqlez/Cargo.lock @@ -0,0 +1,150 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anyhow" +version = "1.0.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +dependencies = [ + "backtrace", +] + +[[package]] +name = "backtrace" +version = "0.3.66" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "cc" +version = "1.0.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "gimli" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" + +[[package]] +name = "indoc" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" + +[[package]] +name = "libc" +version = "0.2.137" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" + +[[package]] +name = 
"libsqlite3-sys" +version = "0.25.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29f835d03d717946d28b1d1ed632eb6f0e24a299388ee623d0c23118d3e8a7fa" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "memchr" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" + +[[package]] +name = "miniz_oxide" +version = "0.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" +dependencies = [ + "adler", +] + +[[package]] +name = "object" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" + +[[package]] +name = "pkg-config" +version = "0.3.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" + +[[package]] +name = "rustc-demangle" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" + +[[package]] +name = "sqlez" +version = "0.1.0" +dependencies = [ + "anyhow", + "indoc", + "libsqlite3-sys", + "thread_local", +] + +[[package]] +name = "thread_local" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +dependencies = [ + "once_cell", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cbb4504a04ef9871106333e9b365c7f236495445 --- /dev/null +++ b/crates/sqlez/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "sqlez" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = { version = "1.0.38", features = ["backtrace"] } +indoc = "1.0.7" +libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } +thread_local = "1.1.4" \ No newline at end of file diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs new file mode 100644 index 0000000000000000000000000000000000000000..ca3ba401cfa34a707ea126b6d9750924ce86596c --- /dev/null +++ b/crates/sqlez/src/bindable.rs @@ -0,0 +1,209 @@ +use anyhow::Result; + +use crate::statement::{SqlType, Statement}; + +pub trait Bind { + fn bind(&self, statement: &Statement, start_index: i32) -> Result; +} + +pub trait Column: Sized { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)>; +} + +impl Bind for &[u8] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Bind for Vec { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Column for Vec { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_blob(start_index)?; + Ok((Vec::from(result), start_index + 1)) + } +} + +impl Bind for f64 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_double(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for f64 { + fn column(statement: &mut Statement, start_index: i32) -> 
Result<(Self, i32)> { + let result = statement.column_double(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for i32 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_int(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for i32 { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for i64 { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_int64(start_index, *self)?; + Ok(start_index + 1) + } +} + +impl Column for i64 { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int64(start_index)?; + Ok((result, start_index + 1)) + } +} + +impl Bind for usize { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + (*self as i64).bind(statement, start_index) + } +} + +impl Column for usize { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_int64(start_index)?; + Ok((result as usize, start_index + 1)) + } +} + +impl Bind for () { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_null(start_index)?; + Ok(start_index + 1) + } +} + +impl Bind for &str { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Bind for String { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self)?; + Ok(start_index + 1) + } +} + +impl Column for String { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_text(start_index)?; + Ok((result.to_owned(), start_index + 1)) + } +} + +impl Bind for (T1, T2) { + fn bind(&self, statement: &Statement, start_index: i32) 
-> Result { + let next_index = self.0.bind(statement, start_index)?; + self.1.bind(statement, next_index) + } +} + +impl Column for (T1, T2) { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + Ok(((first, second), next_index)) + } +} + +impl Bind for (T1, T2, T3) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + self.2.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + Ok(((first, second, third), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + self.3.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (forth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, forth), next_index)) + } +} + +impl Bind for Option { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + if let Some(this) = self { + this.bind(statement, start_index) + } else { + statement.bind_null(start_index)?; + 
Ok(start_index + 1) + } + } +} + +impl Column for Option { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + if let SqlType::Null = statement.column_type(start_index)? { + Ok((None, start_index + 1)) + } else { + T::column(statement, start_index).map(|(result, next_index)| (Some(result), next_index)) + } + } +} + +impl Bind for [T; COUNT] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in self { + current_index = binding.bind(statement, current_index)? + } + + Ok(current_index) + } +} + +impl Column for [T; COUNT] { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let mut array = [Default::default(); COUNT]; + let mut current_index = start_index; + for i in 0..COUNT { + (array[i], current_index) = T::column(statement, current_index)?; + } + Ok((array, current_index)) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs new file mode 100644 index 0000000000000000000000000000000000000000..81bb9dfe78b27f5745b4e5b528c9910b1a027c22 --- /dev/null +++ b/crates/sqlez/src/connection.rs @@ -0,0 +1,220 @@ +use std::{ + ffi::{CStr, CString}, + marker::PhantomData, +}; + +use anyhow::{anyhow, Result}; +use libsqlite3_sys::*; + +use crate::statement::Statement; + +pub struct Connection { + pub(crate) sqlite3: *mut sqlite3, + persistent: bool, + phantom: PhantomData, +} +unsafe impl Send for Connection {} + +impl Connection { + fn open(uri: &str, persistent: bool) -> Result { + let mut connection = Self { + sqlite3: 0 as *mut _, + persistent, + phantom: PhantomData, + }; + + let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; + unsafe { + sqlite3_open_v2( + CString::new(uri)?.as_ptr(), + &mut connection.sqlite3, + flags, + 0 as *const _, + ); + + connection.last_error()?; + } + + Ok(connection) + } + + /// Attempts to open the database at uri. 
If it fails, a shared memory db will be opened + /// instead. + pub fn open_file(uri: &str) -> Self { + Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(uri)) + } + + pub fn open_memory(uri: &str) -> Self { + let in_memory_path = format!("file:{}?mode=memory&cache=shared", uri); + Self::open(&in_memory_path, false).expect("Could not create fallback in memory db") + } + + pub fn persistent(&self) -> bool { + self.persistent + } + + pub fn exec(&self, query: impl AsRef) -> Result<()> { + unsafe { + sqlite3_exec( + self.sqlite3, + CString::new(query.as_ref())?.as_ptr(), + None, + 0 as *mut _, + 0 as *mut _, + ); + self.last_error()?; + } + Ok(()) + } + + pub fn prepare>(&self, query: T) -> Result { + Statement::prepare(&self, query) + } + + pub fn backup_main(&self, destination: &Connection) -> Result<()> { + unsafe { + let backup = sqlite3_backup_init( + destination.sqlite3, + CString::new("main")?.as_ptr(), + self.sqlite3, + CString::new("main")?.as_ptr(), + ); + sqlite3_backup_step(backup, -1); + sqlite3_backup_finish(backup); + destination.last_error() + } + } + + pub(crate) fn last_error(&self) -> Result<()> { + const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + unsafe { + let code = sqlite3_errcode(self.sqlite3); + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errmsg(self.sqlite3); + let message = if message.is_null() { + None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } + } +} + +impl Drop for Connection { + fn drop(&mut self) { + unsafe { sqlite3_close(self.sqlite3) }; + } +} + +#[cfg(test)] +mod test { + use anyhow::Result; + use indoc::indoc; + + use crate::connection::Connection; + + #[test] + fn string_round_trips() -> Result<()> { + let connection = Connection::open_memory("string_round_trips"); + connection + 
.exec(indoc! {" + CREATE TABLE text ( + text TEXT + );"}) + .unwrap(); + + let text = "Some test text"; + + connection + .prepare("INSERT INTO text (text) VALUES (?);") + .unwrap() + .bound(text) + .unwrap() + .run() + .unwrap(); + + assert_eq!( + &connection + .prepare("SELECT text FROM text;") + .unwrap() + .row::() + .unwrap(), + text + ); + + Ok(()) + } + + #[test] + fn tuple_round_trips() { + let connection = Connection::open_memory("tuple_round_trips"); + connection + .exec(indoc! {" + CREATE TABLE test ( + text TEXT, + integer INTEGER, + blob BLOB + );"}) + .unwrap(); + + let tuple1 = ("test".to_string(), 64, vec![0, 1, 2, 4, 8, 16, 32, 64]); + let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); + + let mut insert = connection + .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") + .unwrap(); + + insert.bound(tuple1.clone()).unwrap().run().unwrap(); + insert.bound(tuple2.clone()).unwrap().run().unwrap(); + + assert_eq!( + connection + .prepare("SELECT * FROM test") + .unwrap() + .rows::<(String, usize, Vec)>() + .unwrap(), + vec![tuple1, tuple2] + ); + } + + #[test] + fn backup_works() { + let connection1 = Connection::open_memory("backup_works"); + connection1 + .exec(indoc! 
{" + CREATE TABLE blobs ( + data BLOB + );"}) + .unwrap(); + let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; + let mut write = connection1 + .prepare("INSERT INTO blobs (data) VALUES (?);") + .unwrap(); + write.bind_blob(1, blob).unwrap(); + write.run().unwrap(); + + // Backup connection1 to connection2 + let connection2 = Connection::open_memory("backup_works_other"); + connection1.backup_main(&connection2).unwrap(); + + // Delete the added blob and verify its deleted on the other side + let read_blobs = connection1 + .prepare("SELECT * FROM blobs;") + .unwrap() + .rows::>() + .unwrap(); + assert_eq!(read_blobs, vec![blob]); + } +} diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3bed7a06cbd60507c955d1becfdd291bc4f91f02 --- /dev/null +++ b/crates/sqlez/src/lib.rs @@ -0,0 +1,6 @@ +pub mod bindable; +pub mod connection; +pub mod migrations; +pub mod savepoint; +pub mod statement; +pub mod thread_safe_connection; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs new file mode 100644 index 0000000000000000000000000000000000000000..4721b353c68e715a77f75676681566d43e2b8c8a --- /dev/null +++ b/crates/sqlez/src/migrations.rs @@ -0,0 +1,261 @@ +// Migrations are constructed by domain, and stored in a table in the connection db with domain name, +// effected tables, actual query text, and order. +// If a migration is run and any of the query texts don't match, the app panics on startup (maybe fallback +// to creating a new db?) +// Otherwise any missing migrations are run on the connection + +use anyhow::{anyhow, Result}; +use indoc::{formatdoc, indoc}; + +use crate::connection::Connection; + +const MIGRATIONS_MIGRATION: Migration = Migration::new( + "migrations", + // The migrations migration must be infallable because it runs to completion + // with every call to migration run and is run unchecked. + &[indoc! 
{" + CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + ); + "}], +); + +pub struct Migration { + domain: &'static str, + migrations: &'static [&'static str], +} + +impl Migration { + pub const fn new(domain: &'static str, migrations: &'static [&'static str]) -> Self { + Self { domain, migrations } + } + + fn run_unchecked(&self, connection: &Connection) -> Result<()> { + connection.exec(self.migrations.join(";\n")) + } + + pub fn run(&self, connection: &Connection) -> Result<()> { + // Setup the migrations table unconditionally + MIGRATIONS_MIGRATION.run_unchecked(connection)?; + + let completed_migrations = connection + .prepare(indoc! {" + SELECT domain, step, migration FROM migrations + WHERE domain = ? + ORDER BY step + "})? + .bound(self.domain)? + .rows::<(String, usize, String)>()?; + + let mut store_completed_migration = connection + .prepare("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + + for (index, migration) in self.migrations.iter().enumerate() { + if let Some((_, _, completed_migration)) = completed_migrations.get(index) { + if completed_migration != migration { + return Err(anyhow!(formatdoc! {" + Migration changed for {} at step {} + + Stored migration: + {} + + Proposed migration: + {}", self.domain, index, completed_migration, migration})); + } else { + // Migration already run. Continue + continue; + } + } + + connection.exec(migration)?; + store_completed_migration + .bound((self.domain, index, *migration))? + .run()?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use indoc::indoc; + + use crate::{connection::Connection, migrations::Migration}; + + #[test] + fn test_migrations_are_added_to_table() { + let connection = Connection::open_memory("migrations_are_added_to_table"); + + // Create first migration with a single step and run it + let mut migration = Migration::new( + "test", + &[indoc! 
{" + CREATE TABLE test1 ( + a TEXT, + b TEXT + );"}], + ); + migration.run(&connection).unwrap(); + + // Verify it got added to the migrations table + assert_eq!( + &connection + .prepare("SELECT (migration) FROM migrations") + .unwrap() + .rows::() + .unwrap()[..], + migration.migrations + ); + + // Add another step to the migration and run it again + migration.migrations = &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + );"}, + indoc! {" + CREATE TABLE test2 ( + c TEXT, + d TEXT + );"}, + ]; + migration.run(&connection).unwrap(); + + // Verify it is also added to the migrations table + assert_eq!( + &connection + .prepare("SELECT (migration) FROM migrations") + .unwrap() + .rows::() + .unwrap()[..], + migration.migrations + ); + } + + #[test] + fn test_migration_setup_works() { + let connection = Connection::open_memory("migration_setup_works"); + + connection + .exec(indoc! {"CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + );"}) + .unwrap(); + + let mut store_completed_migration = connection + .prepare(indoc! {" + INSERT INTO migrations (domain, step, migration) + VALUES (?, ?, ?)"}) + .unwrap(); + + let domain = "test_domain"; + for i in 0..5 { + // Create a table forcing a schema change + connection + .exec(format!("CREATE TABLE table{} ( test TEXT );", i)) + .unwrap(); + + store_completed_migration + .bound((domain, i, i.to_string())) + .unwrap() + .run() + .unwrap(); + } + } + + #[test] + fn migrations_dont_rerun() { + let connection = Connection::open_memory("migrations_dont_rerun"); + + // Create migration which clears a table + let migration = Migration::new("test", &["DELETE FROM test_table"]); + + // Manually create the table for that migration with a row + connection + .exec(indoc! 
{" + CREATE TABLE test_table ( + test_column INTEGER + ); + INSERT INTO test_table (test_column) VALUES (1)"}) + .unwrap(); + + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .row::() + .unwrap(), + 1 + ); + + // Run the migration verifying that the row got dropped + migration.run(&connection).unwrap(); + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .rows::() + .unwrap(), + Vec::new() + ); + + // Recreate the dropped row + connection + .exec("INSERT INTO test_table (test_column) VALUES (2)") + .unwrap(); + + // Run the same migration again and verify that the table was left unchanged + migration.run(&connection).unwrap(); + assert_eq!( + connection + .prepare("SELECT * FROM test_table") + .unwrap() + .row::() + .unwrap(), + 2 + ); + } + + #[test] + fn changed_migration_fails() { + let connection = Connection::open_memory("changed_migration_fails"); + + // Create a migration with two steps and run it + Migration::new( + "test migration", + &[ + indoc! {" + CREATE TABLE test ( + col INTEGER + )"}, + indoc! {" + INSERT INTO test (col) VALUES (1)"}, + ], + ) + .run(&connection) + .unwrap(); + + // Create another migration with the same domain but different steps + let second_migration_result = Migration::new( + "test migration", + &[ + indoc! {" + CREATE TABLE test ( + color INTEGER + )"}, + indoc! {" + INSERT INTO test (color) VALUES (1)"}, + ], + ) + .run(&connection); + + // Verify new migration returns error when run + assert!(second_migration_result.is_err()) + } +} diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs new file mode 100644 index 0000000000000000000000000000000000000000..749c0dc9487641c125d880d32817f0a0612636b9 --- /dev/null +++ b/crates/sqlez/src/savepoint.rs @@ -0,0 +1,110 @@ +use anyhow::Result; + +use crate::connection::Connection; + +impl Connection { + // Run a set of commands within the context of a `SAVEPOINT name`. 
If the callback + // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save + // point is released. + pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result> + where + F: FnOnce(&mut Connection) -> Result>, + { + let name = name.as_ref().to_owned(); + self.exec(format!("SAVEPOINT {}", &name))?; + let result = f(self); + match result { + Ok(Some(_)) => { + self.exec(format!("RELEASE {}", name))?; + } + Ok(None) | Err(_) => { + self.exec(format!("ROLLBACK TO {}", name))?; + self.exec(format!("RELEASE {}", name))?; + } + } + result + } +} + +#[cfg(test)] +mod tests { + use crate::connection::Connection; + use anyhow::Result; + use indoc::indoc; + + #[test] + fn test_nested_savepoints() -> Result<()> { + let mut connection = Connection::open_memory("nested_savepoints"); + + connection + .exec(indoc! {" + CREATE TABLE text ( + text TEXT, + idx INTEGER + );"}) + .unwrap(); + + let save1_text = "test save1"; + let save2_text = "test save2"; + + connection.with_savepoint("first", |save1| { + save1 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save1_text, 1))? + .run()?; + + assert!(save1 + .with_savepoint("second", |save2| -> Result, anyhow::Error> { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save2_text, 2))? + .run()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + anyhow::bail!("Failed second save point :(") + }) + .err() + .is_some()); + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text], + ); + + save1.with_savepoint("second", |save2| { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bound((save2_text, 2))? + .run()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
+ .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(Some(())) + })?; + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(Some(())) + })?; + + Ok(()) + } +} diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs new file mode 100644 index 0000000000000000000000000000000000000000..774cda0e344c4b85bf2f258937067361a6ff3aa2 --- /dev/null +++ b/crates/sqlez/src/statement.rs @@ -0,0 +1,342 @@ +use std::ffi::{c_int, CString}; +use std::marker::PhantomData; +use std::{slice, str}; + +use anyhow::{anyhow, Context, Result}; +use libsqlite3_sys::*; + +use crate::bindable::{Bind, Column}; +use crate::connection::Connection; + +pub struct Statement<'a> { + raw_statement: *mut sqlite3_stmt, + connection: &'a Connection, + phantom: PhantomData, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum StepResult { + Row, + Done, + Misuse, + Other(i32), +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum SqlType { + Text, + Integer, + Blob, + Float, + Null, +} + +impl<'a> Statement<'a> { + pub fn prepare>(connection: &'a Connection, query: T) -> Result { + let mut statement = Self { + raw_statement: 0 as *mut _, + connection, + phantom: PhantomData, + }; + + unsafe { + sqlite3_prepare_v2( + connection.sqlite3, + CString::new(query.as_ref())?.as_ptr(), + -1, + &mut statement.raw_statement, + 0 as *mut _, + ); + + connection.last_error().context("Prepare call failed.")?; + } + + Ok(statement) + } + + pub fn reset(&mut self) { + unsafe { + sqlite3_reset(self.raw_statement); + } + } + + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { + let index = index as c_int; + let blob_pointer = blob.as_ptr() as *const _; + let len = blob.len() as c_int; + unsafe { + sqlite3_bind_blob( + self.raw_statement, + index, + blob_pointer, + len, + SQLITE_TRANSIENT(), + ); + } + self.connection.last_error() + } + + pub fn column_blob<'b>(&'b mut self, index: i32) 
-> Result<&'b [u8]> { + let index = index as c_int; + let pointer = unsafe { sqlite3_column_blob(self.raw_statement, index) }; + + self.connection.last_error()?; + if pointer.is_null() { + return Ok(&[]); + } + let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + self.connection.last_error()?; + unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } + } + + pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { + let index = index as c_int; + + unsafe { + sqlite3_bind_double(self.raw_statement, index, double); + } + self.connection.last_error() + } + + pub fn column_double(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_double(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { + let index = index as c_int; + + unsafe { + sqlite3_bind_int(self.raw_statement, index, int); + } + self.connection.last_error() + } + + pub fn column_int(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_int(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { + let index = index as c_int; + unsafe { + sqlite3_bind_int64(self.raw_statement, index, int); + } + self.connection.last_error() + } + + pub fn column_int64(&self, index: i32) -> Result { + let index = index as c_int; + let result = unsafe { sqlite3_column_int64(self.raw_statement, index) }; + self.connection.last_error()?; + Ok(result) + } + + pub fn bind_null(&self, index: i32) -> Result<()> { + let index = index as c_int; + unsafe { + sqlite3_bind_null(self.raw_statement, index); + } + self.connection.last_error() + } + + pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { + let index = index as c_int; + let text_pointer = text.as_ptr() as *const _; + let len = text.len() as c_int; + 
unsafe { + sqlite3_bind_blob( + self.raw_statement, + index, + text_pointer, + len, + SQLITE_TRANSIENT(), + ); + } + self.connection.last_error() + } + + pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { + let index = index as c_int; + let pointer = unsafe { sqlite3_column_text(self.raw_statement, index) }; + + self.connection.last_error()?; + if pointer.is_null() { + return Ok(""); + } + let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + self.connection.last_error()?; + + let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; + Ok(str::from_utf8(slice)?) + } + + pub fn bind(&self, value: T) -> Result<()> { + value.bind(self, 1)?; + Ok(()) + } + + pub fn column(&mut self) -> Result { + let (result, _) = T::column(self, 0)?; + Ok(result) + } + + pub fn column_type(&mut self, index: i32) -> Result { + let result = unsafe { sqlite3_column_type(self.raw_statement, index) }; // SELECT FROM TABLE + self.connection.last_error()?; + match result { + SQLITE_INTEGER => Ok(SqlType::Integer), + SQLITE_FLOAT => Ok(SqlType::Float), + SQLITE_TEXT => Ok(SqlType::Text), + SQLITE_BLOB => Ok(SqlType::Blob), + SQLITE_NULL => Ok(SqlType::Null), + _ => Err(anyhow!("Column type returned was incorrect ")), + } + } + + pub fn bound(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind(bindings)?; + Ok(self) + } + + fn step(&mut self) -> Result { + unsafe { + match sqlite3_step(self.raw_statement) { + SQLITE_ROW => Ok(StepResult::Row), + SQLITE_DONE => Ok(StepResult::Done), + SQLITE_MISUSE => Ok(StepResult::Misuse), + other => self + .connection + .last_error() + .map(|_| StepResult::Other(other)), + } + } + } + + pub fn run(&mut self) -> Result<()> { + fn logic(this: &mut Statement) -> Result<()> { + while this.step()? 
== StepResult::Row {} + Ok(()) + } + let result = logic(self); + self.reset(); + result + } + + pub fn map(&mut self, callback: impl FnMut(&mut Statement) -> Result) -> Result> { + fn logic( + this: &mut Statement, + mut callback: impl FnMut(&mut Statement) -> Result, + ) -> Result> { + let mut mapped_rows = Vec::new(); + while this.step()? == StepResult::Row { + mapped_rows.push(callback(this)?); + } + Ok(mapped_rows) + } + + let result = logic(self, callback); + self.reset(); + result + } + + pub fn rows(&mut self) -> Result> { + self.map(|s| s.column::()) + } + + pub fn single(&mut self, callback: impl FnOnce(&mut Statement) -> Result) -> Result { + fn logic( + this: &mut Statement, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result { + if this.step()? != StepResult::Row { + return Err(anyhow!( + "Single(Map) called with query that returns no rows." + )); + } + callback(this) + } + let result = logic(self, callback); + self.reset(); + result + } + + pub fn row(&mut self) -> Result { + self.single(|this| this.column::()) + } + + pub fn maybe( + &mut self, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result> { + fn logic( + this: &mut Statement, + callback: impl FnOnce(&mut Statement) -> Result, + ) -> Result> { + if this.step()? != StepResult::Row { + return Ok(None); + } + callback(this).map(|r| Some(r)) + } + let result = logic(self, callback); + self.reset(); + result + } + + pub fn maybe_row(&mut self) -> Result> { + self.maybe(|this| this.column::()) + } +} + +impl<'a> Drop for Statement<'a> { + fn drop(&mut self) { + unsafe { + sqlite3_finalize(self.raw_statement); + self.connection + .last_error() + .expect("sqlite3 finalize failed for statement :("); + }; + } +} + +#[cfg(test)] +mod test { + use indoc::indoc; + + use crate::{connection::Connection, statement::StepResult}; + + #[test] + fn blob_round_trips() { + let connection1 = Connection::open_memory("blob_round_trips"); + connection1 + .exec(indoc! 
{" + CREATE TABLE blobs ( + data BLOB + );"}) + .unwrap(); + + let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; + + let mut write = connection1 + .prepare("INSERT INTO blobs (data) VALUES (?);") + .unwrap(); + write.bind_blob(1, blob).unwrap(); + assert_eq!(write.step().unwrap(), StepResult::Done); + + // Read the blob from the + let connection2 = Connection::open_memory("blob_round_trips"); + let mut read = connection2.prepare("SELECT * FROM blobs;").unwrap(); + assert_eq!(read.step().unwrap(), StepResult::Row); + assert_eq!(read.column_blob(0).unwrap(), blob); + assert_eq!(read.step().unwrap(), StepResult::Done); + + // Delete the added blob and verify its deleted on the other side + connection2.exec("DELETE FROM blobs;").unwrap(); + let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); + assert_eq!(read.step().unwrap(), StepResult::Done); + } +} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs new file mode 100644 index 0000000000000000000000000000000000000000..8885edc2c0a52f1d6514be0d1c9fc8483966c410 --- /dev/null +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -0,0 +1,78 @@ +use std::{ops::Deref, sync::Arc}; + +use connection::Connection; +use thread_local::ThreadLocal; + +use crate::connection; + +pub struct ThreadSafeConnection { + uri: Arc, + persistent: bool, + initialize_query: Option<&'static str>, + connection: Arc>, +} + +impl ThreadSafeConnection { + pub fn new(uri: &str, persistent: bool) -> Self { + Self { + uri: Arc::from(uri), + persistent, + initialize_query: None, + connection: Default::default(), + } + } + + /// Sets the query to run every time a connection is opened. This must + /// be infallible (EG only use pragma statements) + pub fn with_initialize_query(mut self, initialize_query: &'static str) -> Self { + self.initialize_query = Some(initialize_query); + self + } + + /// Opens a new db connection with the initialized file path. 
This is internal and only + /// called from the deref function. + /// If opening fails, the connection falls back to a shared memory connection + fn open_file(&self) -> Connection { + Connection::open_file(self.uri.as_ref()) + } + + /// Opens a shared memory connection using the file path as the identifier. This unwraps + /// as we expect it always to succeed + fn open_shared_memory(&self) -> Connection { + Connection::open_memory(self.uri.as_ref()) + } +} + +impl Clone for ThreadSafeConnection { + fn clone(&self) -> Self { + Self { + uri: self.uri.clone(), + persistent: self.persistent, + initialize_query: self.initialize_query.clone(), + connection: self.connection.clone(), + } + } +} + +impl Deref for ThreadSafeConnection { + type Target = Connection; + + fn deref(&self) -> &Self::Target { + self.connection.get_or(|| { + let connection = if self.persistent { + self.open_file() + } else { + self.open_shared_memory() + }; + + if let Some(initialize_query) = self.initialize_query { + connection.exec(initialize_query).expect(&format!( + "Initialize query failed to execute: {}", + initialize_query + )); + } + + connection + }) + } +} From 777f05eb76557f83d8f03ef6abf2a6dcafa6f6d3 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 15:58:23 -0700 Subject: [PATCH 27/86] Finished implementing the workspace stuff --- crates/db/src/db.rs | 7 +- crates/db/src/kvp.rs | 67 ++--- crates/db/src/migrations.rs | 28 +- crates/db/src/pane.rs | 18 +- crates/db/src/workspace.rs | 281 +++++++-------------- crates/sqlez/src/connection.rs | 19 +- crates/sqlez/src/migrations.rs | 11 +- crates/sqlez/src/savepoint.rs | 74 +++++- crates/sqlez/src/statement.rs | 20 +- crates/sqlez/src/thread_safe_connection.rs | 18 +- 10 files changed, 264 insertions(+), 279 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index e5740c5edb99b694ccc8f4f82be8d10711e1e2ed..857b5f273eb2d506f1e245e49798a1b05bf73ef9 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ 
-10,6 +10,8 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; +use kvp::KVP_MIGRATION; +use pane::PANE_MIGRATIONS; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -42,7 +44,8 @@ impl Db { PRAGMA synchronous=NORMAL; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; - "}), + "}) + .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]), ) } @@ -64,7 +67,7 @@ impl Db { pub fn write_to>(&self, dest: P) -> Result<()> { let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - self.backup(&destination) + self.backup_main(&destination) } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 96f13d8040bf6e289711b46462ccf88d1eafc735..6db99831f765d03a0faa9cc43ec951cf0450c7bb 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,55 +1,38 @@ -use anyhow::Result; -use rusqlite::OptionalExtension; - use super::Db; - -pub(crate) const KVP_M_1: &str = " -CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL -) STRICT; -"; +use anyhow::Result; +use indoc::indoc; +use sqlez::migrations::Migration; + +pub(crate) const KVP_MIGRATION: Migration = Migration::new( + "kvp", + &[indoc! {" + CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + "}], +); impl Db { pub fn read_kvp(&self, key: &str) -> Result> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?; - - Ok(stmt.query_row([key], |row| row.get(0)).optional()?) - }) - .unwrap_or(Ok(None)) + self.0 + .prepare("SELECT value FROM kv_store WHERE key = (?)")? + .bind(key)? 
+ .maybe_row() } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached( - "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))", - )?; - - stmt.execute([key, value])?; - - Ok(()) - }) - .unwrap_or(Ok(())) + self.0 + .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? + .bind((key, value))? + .exec() } pub fn delete_kvp(&self, key: &str) -> Result<()> { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?; - - stmt.execute([key])?; - - Ok(()) - }) - .unwrap_or(Ok(())) + self.0 + .prepare("DELETE FROM kv_store WHERE key = (?)")? + .bind(key)? + .exec() } } diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs index 8caa528fc1ef607405994338265b1460dc34f5de..a95654f420fa418d4c82e3703cf1328e000f5e20 100644 --- a/crates/db/src/migrations.rs +++ b/crates/db/src/migrations.rs @@ -1,16 +1,14 @@ -use rusqlite_migration::{Migrations, M}; +// // use crate::items::ITEMS_M_1; +// use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACES_MIGRATION}; -// use crate::items::ITEMS_M_1; -use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1}; - -// This must be ordered by development time! Only ever add new migrations to the end!! -// Bad things will probably happen if you don't monotonically edit this vec!!!! -// And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's -// file system and so everything we do here is locked in _f_o_r_e_v_e_r_. -lazy_static::lazy_static! { - pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ - M::up(KVP_M_1), - M::up(WORKSPACE_M_1), - M::up(PANE_M_1) - ]); -} +// // This must be ordered by development time! Only ever add new migrations to the end!! +// // Bad things will probably happen if you don't monotonically edit this vec!!!! 
+// // And no re-ordering ever!!!!!!!!!! The results of these migrations are on the user's +// // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. +// lazy_static::lazy_static! { +// pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ +// M::up(KVP_M_1), +// M::up(WORKSPACE_M_1), +// M::up(PANE_M_1) +// ]); +// } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 0a1812c60cc68a38c2e4238cadb620a923b7f28a..0716d19b1d209a52754159bf5bcc461a9936ed75 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,15 +1,14 @@ use gpui::Axis; +use indoc::indoc; +use sqlez::migrations::Migration; -use rusqlite::{OptionalExtension, Connection}; -use serde::{Deserialize, Serialize}; -use serde_rusqlite::{from_row, to_params_named}; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_M_1: &str = " +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new("pane", &[indoc! {" CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -64,7 +63,7 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; -"; +"}]); // We have an many-branched, unbalanced tree with three types: // Pane Groups @@ -140,7 +139,7 @@ pub struct SerializedPane { //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, Eq)] pub enum DockAnchor { #[default] Bottom, @@ -148,7 +147,7 @@ pub enum DockAnchor { Expanded, } -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, pub visible: bool, @@ -160,7 +159,7 @@ impl SerializedDockPane { } } -#[derive(Default, Debug, PartialEq, Eq, Deserialize, Serialize)] +#[derive(Default, Debug, PartialEq, 
Eq)] pub(crate) struct DockRow { workspace_id: WorkspaceId, anchor_position: DockAnchor, @@ -298,12 +297,11 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); let dock_pane = SerializedDockPane { - workspace_id: workspace.workspace_id, anchor_position: DockAnchor::Expanded, visible: true, }; - db.save_dock_pane(&dock_pane); + db.save_dock_pane(workspace.workspace_id, dock_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5237caa23c2b0bf243bafc0ef67a890f47ab0598..16ff0e78c050b453ccfe69ab426d5df7931ff754 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -6,12 +6,12 @@ use std::{ os::unix::prelude::OsStrExt, path::{Path, PathBuf}, sync::Arc, - time::{SystemTime, UNIX_EPOCH}, }; -use anyhow::Result; use indoc::indoc; -use sqlez::{connection::Connection, migrations::Migration}; +use sqlez::{ + connection::Connection, migrations::Migration, +}; use crate::pane::SerializedDockPane; @@ -20,8 +20,8 @@ use super::Db; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented // out. This will panic if run on an existing db that has already been migrated -const WORKSPACES_MIGRATION: Migration = Migration::new( - "migrations", +pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( + "workspace", &[indoc! {" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, @@ -53,8 +53,8 @@ pub struct SerializedWorkspace { } impl Db { - /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, return the - /// the last workspace id + /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, + /// returns the last workspace which was updated pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> SerializedWorkspace where P: AsRef + Debug, @@ -80,23 +80,21 @@ impl Db { where P: AsRef + Debug, { - let result = (|| { - let tx = self.transaction()?; - tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES" (?), [current_millis()?])?; - - let id = WorkspaceId(tx.last_insert_rowid()); + let res = self.with_savepoint("make_new_workspace", |conn| { + let workspace_id = WorkspaceId( + conn.prepare("INSERT INTO workspaces DEFAULT VALUES")? + .insert()?, + ); - update_worktree_roots(&tx, &id, worktree_roots)?; - - tx.commit()?; + update_worktree_roots(conn, &workspace_id, worktree_roots)?; Ok(SerializedWorkspace { - workspace_id: id, + workspace_id, dock_pane: None, }) - })(); + }); - match result { + match res { Ok(serialized_workspace) => serialized_workspace, Err(err) => { log::error!("Failed to insert new workspace into DB: {}", err); @@ -109,19 +107,13 @@ impl Db { where P: AsRef + Debug, { - self.real() - .map(|db| { - let lock = db.connection.lock(); - - match get_workspace_id(worktree_roots, &lock) { - Ok(workspace_id) => workspace_id, - Err(err) => { - log::error!("Failed to get workspace_id: {}", err); - None - } - } - }) - .unwrap_or(None) + match get_workspace_id(worktree_roots, &self) { + Ok(workspace_id) => workspace_id, + Err(err) => { + log::error!("Failed to get workspace_id: {}", err); + None + } + } } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -135,123 +127,73 @@ impl Db { where P: AsRef + Debug, { - fn logic

( - connection: &mut Connection, - workspace_id: &WorkspaceId, - worktree_roots: &[P], - ) -> Result<()> - where - P: AsRef + Debug, - { - let tx = connection.transaction()?; - update_worktree_roots(&tx, workspace_id, worktree_roots)?; - tx.commit()?; - Ok(()) + match self.with_savepoint("update_worktrees", |conn| { + update_worktree_roots(conn, workspace_id, worktree_roots) + }) { + Ok(_) => {} + Err(err) => log::error!( + "Failed to update workspace {:?} with roots {:?}, error: {}", + workspace_id, + worktree_roots, + err + ), } - - self.real().map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock, workspace_id, worktree_roots) { - Ok(_) => {} - Err(err) => { - log::error!( - "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}", - workspace_id, - worktree_roots, - err - ); - } - } - }); } fn last_workspace_id(&self) -> Option { - fn logic(connection: &mut Connection) -> Result> { - let mut stmt = connection.prepare( + let res = self + .prepare( "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", - )?; + ) + .and_then(|stmt| stmt.maybe_row()) + .map(|row| row.map(|id| WorkspaceId(id))); - Ok(stmt - .query_row([], |row| Ok(WorkspaceId(row.get(0)?))) - .optional()?) 
+ match res { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get last workspace id, err: {}", err); + return None; + } } - - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock) { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get last workspace id, err: {}", err); - None - } - } - }) - .unwrap_or(None) } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { - fn logic( - connection: &mut Connection, - limit: usize, - ) -> Result>)>, anyhow::Error> { - let tx = connection.transaction()?; - let result = { - let mut stmt = tx.prepare( - "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?", - )?; - - let workspace_ids = stmt - .query_map([limit], |row| Ok(WorkspaceId(row.get(0)?)))? - .collect::, rusqlite::Error>>()?; - - let mut result = Vec::new(); - let mut stmt = - tx.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - for workspace_id in workspace_ids { - let roots = stmt - .query_map([workspace_id.0], |row| { - let row = row.get::<_, Vec>(0)?; - Ok(PathBuf::from(OsStr::from_bytes(&row)).into()) - // If you need to debug this, here's the string parsing: - // let row = row.get::<_, String>(0)?; - // Ok(PathBuf::from(row).into()) - })? - .collect::, rusqlite::Error>>()?; - result.push((workspace_id, roots)) - } - - result - }; - tx.commit()?; - return Ok(result); - } - - self.real() - .map(|db| { - let mut lock = db.connection.lock(); - - match logic(&mut lock, limit) { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() - } - } - }) - .unwrap_or_else(|| Vec::new()) + let res = self.with_savepoint("recent_workspaces", |conn| { + let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? + .bind(limit)? 
+ .rows::()? + .iter() + .map(|row| WorkspaceId(*row)); + + let result = Vec::new(); + + let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + for workspace_id in ids { + let roots = stmt.bind(workspace_id.0)? + .rows::>()? + .iter() + .map(|row| { + PathBuf::from(OsStr::from_bytes(&row)).into() + }) + .collect(); + result.push((workspace_id, roots)) + } + + + Ok(result) + }); + + match res { + Ok(result) => result, + Err(err) => { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() + } + } } } -fn current_millis() -> Result { - // SQLite only supports u64 integers, which means this code will trigger - // undefined behavior in 584 million years. It's probably fine. - Ok(SystemTime::now().duration_since(UNIX_EPOCH)?.as_millis() as u64) -} - fn update_worktree_roots

( connection: &Connection, workspace_id: &WorkspaceId, @@ -265,33 +207,32 @@ where if let Some(preexisting_id) = preexisting_id { if preexisting_id != *workspace_id { // Should also delete fields in other tables with cascading updates - connection.execute( + connection.prepare( "DELETE FROM workspaces WHERE workspace_id = ?", - [preexisting_id.0], - )?; + )? + .bind(preexisting_id.0)? + .exec()?; } } - connection.execute( - "DELETE FROM worktree_roots WHERE workspace_id = ?", - [workspace_id.0], - )?; + connection + .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? + .bind(workspace_id.0)? + .exec()?; for root in worktree_roots { let path = root.as_ref().as_os_str().as_bytes(); // If you need to debug this, here's the string parsing: // let path = root.as_ref().to_string_lossy().to_string(); - connection.execute( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - params![workspace_id.0, path], - )?; + connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? + .bind((workspace_id.0, path))? + .exec()?; } - connection.execute( - "UPDATE workspaces SET last_opened_timestamp = ? WHERE workspace_id = ?", - params![current_millis()?, workspace_id.0], - )?; + connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? + .bind(workspace_id.0)? + .exec()?; Ok(()) } @@ -300,13 +241,6 @@ fn get_workspace_id

(worktree_roots: &[P], connection: &Connection) -> Result< where P: AsRef + Debug, { - // fn logic

( - // worktree_roots: &[P], - // connection: &Connection, - // ) -> Result, anyhow::Error> - // where - // P: AsRef + Debug, - // { // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); @@ -324,6 +258,7 @@ where } } array_binding_stmt.push(')'); + // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -393,43 +328,19 @@ where // caching it. let mut stmt = connection.prepare(&query)?; // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() + 1 == stmt.parameter_count()); + debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); for i in 0..worktree_roots.len() { let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); // If you need to debug this, here's the string parsing: // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.raw_bind_parameter(i + 1, path)? + stmt.bind_value(*path, i as i32 + 1); } // No -1, because SQLite is 1 based - stmt.raw_bind_parameter(worktree_roots.len() + 1, worktree_roots.len())?; - - let mut rows = stmt.raw_query(); - let row = rows.next(); - let result = if let Ok(Some(row)) = row { - Ok(Some(WorkspaceId(row.get(0)?))) - } else { - Ok(None) - }; + stmt.bind_value(worktree_roots.len(), worktree_roots.len() as i32 + 1)?; - // Ensure that this query only returns one row. The PRIMARY KEY constraint should catch this case - // but this is here to catch if someone refactors that constraint out. 
- debug_assert!(matches!(rows.next(), Ok(None))); - - result - // } - - // match logic(worktree_roots, connection) { - // Ok(result) => result, - // Err(err) => { - // log::error!( - // "Failed to get the workspace ID for paths {:?}, err: {}", - // worktree_roots, - // err - // ); - // None - // } - // } + stmt.maybe_row() + .map(|row| row.map(|id| WorkspaceId(id))) } #[cfg(test)] diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 81bb9dfe78b27f5745b4e5b528c9910b1a027c22..be529784951a10ddd5a19d1b19b04774a9c3bfb2 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -53,6 +53,15 @@ impl Connection { self.persistent } + pub(crate) fn last_insert_id(&self) -> i64 { + unsafe { sqlite3_last_insert_rowid(self.sqlite3) } + } + + pub fn insert(&self, query: impl AsRef) -> Result { + self.exec(query)?; + Ok(self.last_insert_id()) + } + pub fn exec(&self, query: impl AsRef) -> Result<()> { unsafe { sqlite3_exec( @@ -140,9 +149,9 @@ mod test { connection .prepare("INSERT INTO text (text) VALUES (?);") .unwrap() - .bound(text) + .bind(text) .unwrap() - .run() + .exec() .unwrap(); assert_eq!( @@ -176,8 +185,8 @@ mod test { .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") .unwrap(); - insert.bound(tuple1.clone()).unwrap().run().unwrap(); - insert.bound(tuple2.clone()).unwrap().run().unwrap(); + insert.bind(tuple1.clone()).unwrap().exec().unwrap(); + insert.bind(tuple2.clone()).unwrap().exec().unwrap(); assert_eq!( connection @@ -203,7 +212,7 @@ mod test { .prepare("INSERT INTO blobs (data) VALUES (?);") .unwrap(); write.bind_blob(1, blob).unwrap(); - write.run().unwrap(); + write.exec().unwrap(); // Backup connection1 to connection2 let connection2 = Connection::open_memory("backup_works_other"); diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 4721b353c68e715a77f75676681566d43e2b8c8a..3c0771c0feb7a1f6df931f41f93618656c19b181 100644 --- 
a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -22,6 +22,7 @@ const MIGRATIONS_MIGRATION: Migration = Migration::new( "}], ); +#[derive(Debug)] pub struct Migration { domain: &'static str, migrations: &'static [&'static str], @@ -46,7 +47,7 @@ impl Migration { WHERE domain = ? ORDER BY step "})? - .bound(self.domain)? + .bind(self.domain)? .rows::<(String, usize, String)>()?; let mut store_completed_migration = connection @@ -71,8 +72,8 @@ impl Migration { connection.exec(migration)?; store_completed_migration - .bound((self.domain, index, *migration))? - .run()?; + .bind((self.domain, index, *migration))? + .exec()?; } Ok(()) @@ -162,9 +163,9 @@ mod test { .unwrap(); store_completed_migration - .bound((domain, i, i.to_string())) + .bind((domain, i, i.to_string())) .unwrap() - .run() + .exec() .unwrap(); } } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 749c0dc9487641c125d880d32817f0a0612636b9..50f28c73901d2382f1ef677425af1e835ea9678b 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -3,10 +3,36 @@ use anyhow::Result; use crate::connection::Connection; impl Connection { + // Run a set of commands within the context of a `SAVEPOINT name`. If the callback + // returns Err(_), the savepoint will be rolled back. Otherwise, the save + // point is released. + pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result + where + F: FnOnce(&mut Connection) -> Result, + { + let name = name.as_ref().to_owned(); + self.exec(format!("SAVEPOINT {}", &name))?; + let result = f(self); + match result { + Ok(_) => { + self.exec(format!("RELEASE {}", name))?; + } + Err(_) => { + self.exec(format!("ROLLBACK TO {}", name))?; + self.exec(format!("RELEASE {}", name))?; + } + } + result + } + // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. 
- pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result> + pub fn with_savepoint_rollback( + &mut self, + name: impl AsRef, + f: F, + ) -> Result> where F: FnOnce(&mut Connection) -> Result>, { @@ -50,15 +76,15 @@ mod tests { connection.with_savepoint("first", |save1| { save1 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save1_text, 1))? - .run()?; + .bind((save1_text, 1))? + .exec()?; assert!(save1 .with_savepoint("second", |save2| -> Result, anyhow::Error> { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save2_text, 2))? - .run()?; + .bind((save2_text, 2))? + .exec()?; assert_eq!( save2 @@ -79,11 +105,34 @@ mod tests { vec![save1_text], ); - save1.with_savepoint("second", |save2| { + save1.with_savepoint_rollback::<(), _>("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bound((save2_text, 2))? - .run()?; + .bind((save2_text, 2))? + .exec()?; + + assert_eq!( + save2 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text, save2_text], + ); + + Ok(None) + })?; + + assert_eq!( + save1 + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? + .rows::()?, + vec![save1_text], + ); + + save1.with_savepoint_rollback("second", |save2| { + save2 + .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? + .bind((save2_text, 2))? + .exec()?; assert_eq!( save2 @@ -102,9 +151,16 @@ mod tests { vec![save1_text, save2_text], ); - Ok(Some(())) + Ok(()) })?; + assert_eq!( + connection + .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
+ .rows::()?, + vec![save1_text, save2_text], + ); + Ok(()) } } diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 774cda0e344c4b85bf2f258937067361a6ff3aa2..ac57847774b1ad37e1f2c5a7d47f653e5d9a363e 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -60,6 +60,10 @@ impl<'a> Statement<'a> { } } + pub fn parameter_count(&self) -> i32 { + unsafe { sqlite3_bind_parameter_count(self.raw_statement) } + } + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; @@ -175,8 +179,9 @@ impl<'a> Statement<'a> { Ok(str::from_utf8(slice)?) } - pub fn bind(&self, value: T) -> Result<()> { - value.bind(self, 1)?; + pub fn bind_value(&self, value: T, idx: i32) -> Result<()> { + debug_assert!(idx > 0); + value.bind(self, idx)?; Ok(()) } @@ -198,8 +203,8 @@ impl<'a> Statement<'a> { } } - pub fn bound(&mut self, bindings: impl Bind) -> Result<&mut Self> { - self.bind(bindings)?; + pub fn bind(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind_value(bindings, 1)?; Ok(self) } @@ -217,7 +222,12 @@ impl<'a> Statement<'a> { } } - pub fn run(&mut self) -> Result<()> { + pub fn insert(&mut self) -> Result { + self.exec()?; + Ok(self.connection.last_insert_id()) + } + + pub fn exec(&mut self) -> Result<()> { fn logic(this: &mut Statement) -> Result<()> { while this.step()? 
== StepResult::Row {} Ok(()) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 8885edc2c0a52f1d6514be0d1c9fc8483966c410..53d49464bed97fff60d9f9aed17882161f5f5465 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -3,12 +3,13 @@ use std::{ops::Deref, sync::Arc}; use connection::Connection; use thread_local::ThreadLocal; -use crate::connection; +use crate::{connection, migrations::Migration}; pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, + migrations: Option<&'static [Migration]>, connection: Arc>, } @@ -18,6 +19,7 @@ impl ThreadSafeConnection { uri: Arc::from(uri), persistent, initialize_query: None, + migrations: None, connection: Default::default(), } } @@ -29,6 +31,11 @@ impl ThreadSafeConnection { self } + pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self { + self.migrations = Some(migrations); + self + } + /// Opens a new db connection with the initialized file path. This is internal and only /// called from the deref function. 
/// If opening fails, the connection falls back to a shared memory connection @@ -49,6 +56,7 @@ impl Clone for ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), + migrations: self.migrations.clone(), connection: self.connection.clone(), } } @@ -72,6 +80,14 @@ impl Deref for ThreadSafeConnection { )); } + if let Some(migrations) = self.migrations { + for migration in migrations { + migration + .run(&connection) + .expect(&format!("Migrations failed to execute: {:?}", migration)); + } + } + connection }) } From 3c1b747f641c29ec4de6111b911b608f80862dbb Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 17:26:03 -0700 Subject: [PATCH 28/86] WIP almost compiling with sqlez --- crates/db/src/kvp.rs | 6 +- crates/db/src/workspace.rs | 65 +++++++++------------- crates/sqlez/src/bindable.rs | 22 ++++++++ crates/sqlez/src/connection.rs | 14 ++++- crates/sqlez/src/migrations.rs | 6 +- crates/sqlez/src/savepoint.rs | 8 +-- crates/sqlez/src/statement.rs | 11 ++-- crates/sqlez/src/thread_safe_connection.rs | 2 + 8 files changed, 77 insertions(+), 57 deletions(-) diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 6db99831f765d03a0faa9cc43ec951cf0450c7bb..a692d73d886c0db71b39a1b0eeb7fc784da4a998 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -17,21 +17,21 @@ impl Db { pub fn read_kvp(&self, key: &str) -> Result> { self.0 .prepare("SELECT value FROM kv_store WHERE key = (?)")? - .bind(key)? + .with_bindings(key)? .maybe_row() } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { self.0 .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? - .bind((key, value))? + .with_bindings((key, value))? .exec() } pub fn delete_kvp(&self, key: &str) -> Result<()> { self.0 .prepare("DELETE FROM kv_store WHERE key = (?)")? - .bind(key)? + .with_bindings(key)? 
.exec() } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 16ff0e78c050b453ccfe69ab426d5df7931ff754..126a34676ee4ac10817d87d55b3a5a9ecba5bc2b 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -23,17 +23,17 @@ use super::Db; pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" - CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - - CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) - ) STRICT;"}], + CREATE TABLE workspaces( + workspace_id INTEGER PRIMARY KEY, + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE worktree_roots( + worktree_root BLOB NOT NULL, + workspace_id INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + PRIMARY KEY(worktree_root, workspace_id) + ) STRICT;"}], ); #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] @@ -159,9 +159,9 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { - let res = self.with_savepoint("recent_workspaces", |conn| { + self.with_savepoint("recent_workspaces", |conn| { let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? - .bind(limit)? + .with_bindings(limit)? .rows::()? .iter() .map(|row| WorkspaceId(*row)); @@ -170,7 +170,7 @@ impl Db { let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; for workspace_id in ids { - let roots = stmt.bind(workspace_id.0)? + let roots = stmt.with_bindings(workspace_id.0)? .rows::>()? 
.iter() .map(|row| { @@ -180,17 +180,11 @@ impl Db { result.push((workspace_id, roots)) } - Ok(result) - }); - - match res { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() - } - } + }).unwrap_or_else(|err| { + log::error!("Failed to get recent workspaces, err: {}", err); + Vec::new() + }) } } @@ -210,14 +204,14 @@ where connection.prepare( "DELETE FROM workspaces WHERE workspace_id = ?", )? - .bind(preexisting_id.0)? + .with_bindings(preexisting_id.0)? .exec()?; } } connection .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .bind(workspace_id.0)? + .with_bindings(workspace_id.0)? .exec()?; for root in worktree_roots { @@ -226,12 +220,12 @@ where // let path = root.as_ref().to_string_lossy().to_string(); connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? - .bind((workspace_id.0, path))? + .with_bindings((workspace_id.0, path))? .exec()?; } connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? - .bind(workspace_id.0)? + .with_bindings(workspace_id.0)? .exec()?; Ok(()) @@ -330,16 +324,11 @@ where // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - for i in 0..worktree_roots.len() { - let path = &worktree_roots[i].as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string() - stmt.bind_value(*path, i as i32 + 1); - } - // No -1, because SQLite is 1 based - stmt.bind_value(worktree_roots.len(), worktree_roots.len() as i32 + 1)?; - - stmt.maybe_row() + let root_bytes: Vec<&[u8]> = worktree_roots.iter() + .map(|root| root.as_ref().as_os_str().as_bytes()).collect(); + + stmt.with_bindings((root_bytes, root_bytes.len()))? 
+ .maybe_row() .map(|row| row.map(|id| WorkspaceId(id))) } diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index ca3ba401cfa34a707ea126b6d9750924ce86596c..9b8308f70c85b8d889661ef09834d7d8e14bf97f 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -207,3 +207,25 @@ impl Column for [T; COUNT] { Ok((array, current_index)) } } + +impl Bind for Vec { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in self.iter() { + current_index = binding.bind(statement, current_index)? + } + + Ok(current_index) + } +} + +impl Bind for &[T] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let mut current_index = start_index; + for binding in *self { + current_index = binding.bind(statement, current_index)? + } + + Ok(current_index) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index be529784951a10ddd5a19d1b19b04774a9c3bfb2..1fd814c5803989486d440a8efdd4688844333db0 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -149,7 +149,7 @@ mod test { connection .prepare("INSERT INTO text (text) VALUES (?);") .unwrap() - .bind(text) + .with_bindings(text) .unwrap() .exec() .unwrap(); @@ -185,8 +185,16 @@ mod test { .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") .unwrap(); - insert.bind(tuple1.clone()).unwrap().exec().unwrap(); - insert.bind(tuple2.clone()).unwrap().exec().unwrap(); + insert + .with_bindings(tuple1.clone()) + .unwrap() + .exec() + .unwrap(); + insert + .with_bindings(tuple2.clone()) + .unwrap() + .exec() + .unwrap(); assert_eq!( connection diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 3c0771c0feb7a1f6df931f41f93618656c19b181..9f3bd333cae325dcd3a29a0778425d02a131c697 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -47,7 +47,7 @@ impl Migration { WHERE domain = ? 
ORDER BY step "})? - .bind(self.domain)? + .with_bindings(self.domain)? .rows::<(String, usize, String)>()?; let mut store_completed_migration = connection @@ -72,7 +72,7 @@ impl Migration { connection.exec(migration)?; store_completed_migration - .bind((self.domain, index, *migration))? + .with_bindings((self.domain, index, *migration))? .exec()?; } @@ -163,7 +163,7 @@ mod test { .unwrap(); store_completed_migration - .bind((domain, i, i.to_string())) + .with_bindings((domain, i, i.to_string())) .unwrap() .exec() .unwrap(); diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 50f28c73901d2382f1ef677425af1e835ea9678b..9589037e77c730f8b586a2e45e312d99bb4b5576 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -76,14 +76,14 @@ mod tests { connection.with_savepoint("first", |save1| { save1 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save1_text, 1))? + .with_bindings((save1_text, 1))? .exec()?; assert!(save1 .with_savepoint("second", |save2| -> Result, anyhow::Error> { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( @@ -108,7 +108,7 @@ mod tests { save1.with_savepoint_rollback::<(), _>("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( @@ -131,7 +131,7 @@ mod tests { save1.with_savepoint_rollback("second", |save2| { save2 .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .bind((save2_text, 2))? + .with_bindings((save2_text, 2))? .exec()?; assert_eq!( diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index ac57847774b1ad37e1f2c5a7d47f653e5d9a363e..06a090c417d9a9f42b0dd2408e6d02c2d0bc567d 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -179,10 +179,9 @@ impl<'a> Statement<'a> { Ok(str::from_utf8(slice)?) 
} - pub fn bind_value(&self, value: T, idx: i32) -> Result<()> { - debug_assert!(idx > 0); - value.bind(self, idx)?; - Ok(()) + pub fn bind(&self, value: T, index: i32) -> Result { + debug_assert!(index > 0); + value.bind(self, index) } pub fn column(&mut self) -> Result { @@ -203,8 +202,8 @@ impl<'a> Statement<'a> { } } - pub fn bind(&mut self, bindings: impl Bind) -> Result<&mut Self> { - self.bind_value(bindings, 1)?; + pub fn with_bindings(&mut self, bindings: impl Bind) -> Result<&mut Self> { + self.bind(bindings, 1)?; Ok(self) } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 53d49464bed97fff60d9f9aed17882161f5f5465..f4f759cd6c42e93b6f3b99744e2419f0e53acf6b 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -31,6 +31,8 @@ impl ThreadSafeConnection { self } + /// Migrations have to be run per connection because we fallback to memory + /// so this needs pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self { self.migrations = Some(migrations); self From c8face33fa9feb9d929757de7fd3317c0456500d Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 1 Nov 2022 17:46:39 -0700 Subject: [PATCH 29/86] WIP, incorporating type parsing using new sqlez patterns --- crates/db/src/pane.rs | 49 ++++++++++++++++++++++++++++++++--- crates/db/src/workspace.rs | 16 ++++++++++-- crates/sqlez/src/bindable.rs | 12 +++++++++ crates/sqlez/src/statement.rs | 2 +- 4 files changed, 73 insertions(+), 6 deletions(-) diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 0716d19b1d209a52754159bf5bcc461a9936ed75..3292cc031d1d47740f615a3a35d0904aebbfc316 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,7 +1,9 @@ +use std::str::FromStr; + use gpui::Axis; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{migrations::Migration, bindable::{Bind, Column}, connection::Connection, statement::Statement}; use 
crate::{items::ItemId, workspace::WorkspaceId}; @@ -138,7 +140,6 @@ pub struct SerializedPane { //********* CURRENTLY IN USE TYPES: ********* - #[derive(Default, Debug, PartialEq, Eq)] pub enum DockAnchor { #[default] @@ -147,6 +148,29 @@ pub enum DockAnchor { Expanded, } +impl ToString for DockAnchor { + fn to_string(&self) -> String { + match self { + DockAnchor::Bottom => "Bottom".to_string(), + DockAnchor::Right => "Right".to_string(), + DockAnchor::Expanded => "Expanded".to_string(), + } + } +} + +impl FromStr for DockAnchor { + type Err = anyhow::Error; + + fn from_str(s: &str) -> anyhow::Result { + match s { + "Bottom" => Ok(DockAnchor::Bottom), + "Right" => Ok(DockAnchor::Right), + "Expanded" => Ok(DockAnchor::Expanded), + _ => anyhow::bail!("Not a valid dock anchor") + } + } +} + #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -159,6 +183,7 @@ impl SerializedDockPane { } } + #[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct DockRow { workspace_id: WorkspaceId, @@ -172,6 +197,21 @@ impl DockRow { } } +impl Bind for DockRow { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind((self.workspace_id, self.anchor_position.to_string(), self.visible), start_index) + } +} + +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, &str, bool) as Column>::column(statement, start_index) + .map(|((workspace_id, anchor_position, visible), next_index)| { + + }) + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); @@ -229,7 +269,10 @@ impl Db { pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { - let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id 
= ?")?; + let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .maybe_row() + .map(|row| DockRow::col); + let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::).optional(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 126a34676ee4ac10817d87d55b3a5a9ecba5bc2b..f454151cbb715d7776a795443f79e791499be612 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{Result, anyhow}; use std::{ ffi::OsStr, @@ -10,7 +10,7 @@ use std::{ use indoc::indoc; use sqlez::{ - connection::Connection, migrations::Migration, + connection::Connection, migrations::Migration, bindable::{Column, Bind}, }; use crate::pane::SerializedDockPane; @@ -45,6 +45,18 @@ impl WorkspaceId { } } +impl Bind for WorkspaceId { + fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { + todo!(); + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut sqlez::statement::Statement, start_index: i32) -> Result<(Self, i32)> { + todo!(); + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 9b8308f70c85b8d889661ef09834d7d8e14bf97f..e2cdde039ebb34cc51c40e4d8494413fbeffd3a8 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -10,6 +10,18 @@ pub trait Column: Sized { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)>; } +impl Bind for bool { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind(self.then_some(1).unwrap_or(0), start_index) + } +} + +impl Column for bool { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i32::column(statement, start_index).map(|(i, next_index)| (i != 0, next_index)) + } +} + impl Bind for &[u8] { fn bind(&self, statement: 
&Statement, start_index: i32) -> Result { statement.bind_blob(start_index, self)?; diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 06a090c417d9a9f42b0dd2408e6d02c2d0bc567d..14683171a775b71e97235d019014a0335ed1230c 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -114,7 +114,7 @@ impl<'a> Statement<'a> { unsafe { sqlite3_bind_int(self.raw_statement, index, int); - } + }; self.connection.last_error() } From 406663c75ef202bddd4ed2b03260a16ba21918db Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 13:26:23 -0700 Subject: [PATCH 30/86] Converted to sqlez, so much nicer --- Cargo.lock | 1 + crates/db/Cargo.toml | 3 +- crates/db/examples/serialize-pane.rs | 12 +- crates/db/examples/serialize_workspace.rs | 6 +- crates/db/src/db.rs | 12 +- crates/db/src/kvp.rs | 22 +-- crates/db/src/pane.rs | 185 ++++++++++++---------- crates/db/src/workspace.rs | 98 +++++++----- crates/sqlez/src/connection.rs | 85 +++++++--- crates/sqlez/src/savepoint.rs | 14 +- crates/sqlez/src/statement.rs | 16 +- crates/util/src/lib.rs | 21 +++ 12 files changed, 278 insertions(+), 197 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2fb859dca5177d6c5d6381397c6b02aad614fa39..3e8526fbed6e87f7ec82ce65135fead97cb24191 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1559,6 +1559,7 @@ dependencies = [ "parking_lot 0.11.2", "sqlez", "tempdir", + "util", ] [[package]] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index fe0b21eaf4e819f14ba89c955f3f0cdef2cd3fb7..1ee9de6186711940ee3ec04c6fe2abb4aa2f6510 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -11,11 +11,12 @@ doctest = false test-support = [] [dependencies] -indoc = "1.0.4" collections = { path = "../collections" } gpui = { path = "../gpui" } sqlez = { path = "../sqlez" } +util = { path = "../util" } anyhow = "1.0.57" +indoc = "1.0.4" async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } diff 
--git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index e828f007d120c81a590227227ffa1ef808b30a88..607347670948d8acdcbff029f17c4af15e0f18ab 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -7,10 +7,8 @@ const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { env_logger::init(); - let db = db::Db::open_in_memory(); - if db.real().is_none() { - return Err(anyhow::anyhow!("Migrations failed")); - } + let db = db::Db::open_in_memory("db"); + let file = Path::new(TEST_FILE); let f = File::create(file)?; @@ -21,21 +19,21 @@ fn main() -> anyhow::Result<()> { let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); db.save_dock_pane( - workspace_1.workspace_id, + &workspace_1.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Expanded, visible: true, }, ); db.save_dock_pane( - workspace_2.workspace_id, + &workspace_2.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Bottom, visible: true, }, ); db.save_dock_pane( - workspace_3.workspace_id, + &workspace_3.workspace_id, &SerializedDockPane { anchor_position: DockAnchor::Right, visible: false, diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs index 4010c7797663b856b829a8b1b5d0310e277ecbec..9b6082ce534c6038e4c1a7bd8e23a4469049b3fa 100644 --- a/crates/db/examples/serialize_workspace.rs +++ b/crates/db/examples/serialize_workspace.rs @@ -4,10 +4,8 @@ const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { env_logger::init(); - let db = db::Db::open_in_memory(); - if db.real().is_none() { - return Err(anyhow::anyhow!("Migrations failed")); - } + let db = db::Db::open_in_memory("db"); + let file = Path::new(TEST_FILE); let f = File::create(file)?; diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 857b5f273eb2d506f1e245e49798a1b05bf73ef9..48a025112abc87f08d53af1ec39f48610c72a2ad 100644 --- a/crates/db/src/db.rs 
+++ b/crates/db/src/db.rs @@ -18,7 +18,7 @@ use sqlez::thread_safe_connection::ThreadSafeConnection; pub use workspace::*; #[derive(Clone)] -struct Db(ThreadSafeConnection); +pub struct Db(ThreadSafeConnection); impl Deref for Db { type Target = sqlez::connection::Connection; @@ -54,15 +54,15 @@ impl Db { } /// Open a in memory database for testing and as a fallback. - pub fn open_in_memory() -> Self { - Db( - ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! {" + pub fn open_in_memory(db_name: &str) -> Self { + Db(ThreadSafeConnection::new(db_name, false) + .with_initialize_query(indoc! {" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; - "}), - ) + "}) + .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS])) } pub fn write_to>(&self, dest: P) -> Result<()> { diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index a692d73d886c0db71b39a1b0eeb7fc784da4a998..93be5e10c0ea21861d300aebe8e11a48af462458 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -23,7 +23,7 @@ impl Db { pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { self.0 - .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")? + .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")? .with_bindings((key, value))? 
.exec() } @@ -44,21 +44,21 @@ mod tests { #[test] fn test_kvp() -> Result<()> { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_kvp"); - assert_eq!(db.read_kvp("key-1")?, None); + assert_eq!(db.read_kvp("key-1").unwrap(), None); - db.write_kvp("key-1", "one")?; - assert_eq!(db.read_kvp("key-1")?, Some("one".to_string())); + db.write_kvp("key-1", "one").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string())); - db.write_kvp("key-1", "one-2")?; - assert_eq!(db.read_kvp("key-1")?, Some("one-2".to_string())); + db.write_kvp("key-1", "one-2").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string())); - db.write_kvp("key-2", "two")?; - assert_eq!(db.read_kvp("key-2")?, Some("two".to_string())); + db.write_kvp("key-2", "two").unwrap(); + assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string())); - db.delete_kvp("key-1")?; - assert_eq!(db.read_kvp("key-1")?, None); + db.delete_kvp("key-1").unwrap(); + assert_eq!(db.read_kvp("key-1").unwrap(), None); Ok(()) } diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 3292cc031d1d47740f615a3a35d0904aebbfc316..5db805012d25b2fb8dde6216318c73ba2b7b2526 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,16 +1,21 @@ - use std::str::FromStr; use gpui::Axis; use indoc::indoc; -use sqlez::{migrations::Migration, bindable::{Bind, Column}, connection::Connection, statement::Statement}; - +use sqlez::{ + bindable::{Bind, Column}, + migrations::Migration, + statement::Statement, +}; +use util::{iife, ResultExt}; use crate::{items::ItemId, workspace::WorkspaceId}; use super::Db; -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new("pane", &[indoc! {" +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! 
{" CREATE TABLE dock_panes( dock_pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -19,7 +24,7 @@ CREATE TABLE dock_panes( FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE pane_groups( +CREATE TABLE pane_groups( -- Inner nodes group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node @@ -28,7 +33,8 @@ CREATE TABLE pane_groups( FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE grouped_panes( + +CREATE TABLE grouped_panes( -- Leaf nodes pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, group_id INTEGER NOT NULL, @@ -65,7 +71,8 @@ CREATE TABLE dock_items( FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE ) STRICT; -"}]); +"}], +); // We have an many-branched, unbalanced tree with three types: // Pane Groups @@ -137,10 +144,9 @@ pub struct SerializedPane { children: Vec, } - //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq)] +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] pub enum DockAnchor { #[default] Bottom, @@ -162,15 +168,28 @@ impl FromStr for DockAnchor { type Err = anyhow::Error; fn from_str(s: &str) -> anyhow::Result { - match s { + match s { "Bottom" => Ok(DockAnchor::Bottom), "Right" => Ok(DockAnchor::Right), "Expanded" => Ok(DockAnchor::Expanded), - _ => anyhow::bail!("Not a valid dock anchor") + _ => anyhow::bail!("Not a valid dock anchor"), } } } +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind(self.to_string(), start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + ::column(statement, start_index) + .and_then(|(str, next_index)| Ok((DockAnchor::from_str(&str)?, 
next_index))) + } +} + #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -178,11 +197,30 @@ pub struct SerializedDockPane { } impl SerializedDockPane { - pub fn to_row(&self, workspace: WorkspaceId) -> DockRow { - DockRow { workspace_id: workspace, anchor_position: self.anchor_position, visible: self.visible } + fn to_row(&self, workspace: &WorkspaceId) -> DockRow { + DockRow { + workspace_id: *workspace, + anchor_position: self.anchor_position, + visible: self.visible, + } } } +impl Column for SerializedDockPane { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(DockAnchor, bool) as Column>::column(statement, start_index).map( + |((anchor_position, visible), next_index)| { + ( + SerializedDockPane { + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} #[derive(Default, Debug, PartialEq, Eq)] pub(crate) struct DockRow { @@ -191,24 +229,16 @@ pub(crate) struct DockRow { visible: bool, } -impl DockRow { - pub fn to_pane(&self) -> SerializedDockPane { - SerializedDockPane { anchor_position: self.anchor_position, visible: self.visible } - } -} - impl Bind for DockRow { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind((self.workspace_id, self.anchor_position.to_string(), self.visible), start_index) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, &str, bool) as Column>::column(statement, start_index) - .map(|((workspace_id, anchor_position, visible), next_index)| { - - }) + statement.bind( + ( + self.workspace_id, + self.anchor_position.to_string(), + self.visible, + ), + start_index, + ) } } @@ -267,75 +297,37 @@ impl Db { } pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { - fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result> { - - let mut stmt = conn.prepare("SELECT workspace_id, 
anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .maybe_row() - .map(|row| DockRow::col); - - - let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::).optional(); - - let mut dock_panes_iter = stmt.query_and_then([workspace.raw_id()], from_row::)?; - let dock_pane = dock_panes_iter - .next() - .and_then(|dock_row| - dock_row - .ok() - .map(|dock_row| dock_row.to_pane())); - - Ok(dock_pane) - } - - self.real() - .map(|db| { - let lock = db.connection.lock(); - - match logic(&lock, workspace) { - Ok(dock_pane) => dock_pane, - Err(err) => { - log::error!("Failed to get the dock pane: {}", err); - None - }, - } - }) - .unwrap_or(None) - + iife!({ + self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .with_bindings(workspace)? + .maybe_row::() + }) + .log_err() + .flatten() } - pub fn save_dock_pane(&self, workspace: WorkspaceId, dock_pane: SerializedDockPane) { - to_params_named(dock_pane.to_row(workspace)) - .map_err(|err| { - log::error!("Failed to parse params for the dock row: {}", err); - err - }) - .ok() - .zip(self.real()) - .map(|(params, db)| { - // TODO: overwrite old dock panes if need be - let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);"; - - db.connection - .lock() - .execute(query, params.to_slice().as_slice()) - .map(|_| ()) // Eat the return value - .unwrap_or_else(|err| { - log::error!("Failed to insert new dock pane into DB: {}", err); - }) - }); + pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + iife!({ + self.prepare( + "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", + )? + .with_bindings(dock_pane.to_row(workspace))? 
+ .insert() + }) + .log_err(); } } #[cfg(test)] mod tests { - use crate::Db; + use crate::{pane::SerializedPane, Db}; use super::{DockAnchor, SerializedDockPane}; #[test] fn test_basic_dock_pane() { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("basic_dock_pane"); let workspace = db.workspace_for_roots(&["/tmp"]); @@ -344,7 +336,28 @@ mod tests { visible: true, }; - db.save_dock_pane(workspace.workspace_id, dock_pane); + db.save_dock_pane(&workspace.workspace_id, &dock_pane); + + let new_workspace = db.workspace_for_roots(&["/tmp"]); + + assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + } + + #[test] + fn test_dock_simple_split() { + let db = Db::open_in_memory("simple_split"); + + let workspace = db.workspace_for_roots(&["/tmp"]); + + let center_pane = SerializedPane { + pane_id: crate::pane::PaneId { + workspace_id: workspace.workspace_id, + pane_id: 1, + }, + children: vec![], + }; + + db.save_dock_pane(&workspace.workspace_id, &dock_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index f454151cbb715d7776a795443f79e791499be612..bf2f765e19a8014ea9b854582d8fc31e9e6903e4 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::{Result, anyhow}; +use anyhow::Result; use std::{ ffi::OsStr, @@ -10,7 +10,9 @@ use std::{ use indoc::indoc; use sqlez::{ - connection::Connection, migrations::Migration, bindable::{Column, Bind}, + bindable::{Bind, Column}, + connection::Connection, + migrations::Migration, }; use crate::pane::SerializedDockPane; @@ -47,13 +49,17 @@ impl WorkspaceId { impl Bind for WorkspaceId { fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { - todo!(); + statement.bind(self.raw_id(), start_index) } } impl Column for WorkspaceId { - fn column(statement: &mut sqlez::statement::Statement, start_index: i32) -> Result<(Self, i32)> { - todo!(); + fn column( + statement: &mut 
sqlez::statement::Statement, + start_index: i32, + ) -> Result<(Self, i32)> { + ::column(statement, start_index) + .map(|(id, next_index)| (WorkspaceId(id), next_index)) } } @@ -154,10 +160,8 @@ impl Db { fn last_workspace_id(&self) -> Option { let res = self - .prepare( - "SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1", - ) - .and_then(|stmt| stmt.maybe_row()) + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1") + .and_then(|mut stmt| stmt.maybe_row()) .map(|row| row.map(|id| WorkspaceId(id))); match res { @@ -172,28 +176,30 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { self.with_savepoint("recent_workspaces", |conn| { - let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")? + let rows = conn + .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? - .rows::()? - .iter() - .map(|row| WorkspaceId(*row)); - - let result = Vec::new(); - - let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; + .rows::()?; + + let ids = rows.iter().map(|row| WorkspaceId(*row)); + + let mut result = Vec::new(); + + let mut stmt = + conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; for workspace_id in ids { - let roots = stmt.with_bindings(workspace_id.0)? + let roots = stmt + .with_bindings(workspace_id.0)? .rows::>()? 
.iter() - .map(|row| { - PathBuf::from(OsStr::from_bytes(&row)).into() - }) + .map(|row| PathBuf::from(OsStr::from_bytes(&row)).into()) .collect(); result.push((workspace_id, roots)) } - + Ok(result) - }).unwrap_or_else(|err| { + }) + .unwrap_or_else(|err| { log::error!("Failed to get recent workspaces, err: {}", err); Vec::new() }) @@ -213,11 +219,10 @@ where if let Some(preexisting_id) = preexisting_id { if preexisting_id != *workspace_id { // Should also delete fields in other tables with cascading updates - connection.prepare( - "DELETE FROM workspaces WHERE workspace_id = ?", - )? - .with_bindings(preexisting_id.0)? - .exec()?; + connection + .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(preexisting_id.0)? + .exec()?; } } @@ -231,12 +236,14 @@ where // If you need to debug this, here's the string parsing: // let path = root.as_ref().to_string_lossy().to_string(); - connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? + connection + .prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? .with_bindings((workspace_id.0, path))? .exec()?; } - connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? + connection + .prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? .with_bindings(workspace_id.0)? .exec()?; @@ -264,7 +271,7 @@ where } } array_binding_stmt.push(')'); - + // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -336,10 +343,14 @@ where // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - let root_bytes: Vec<&[u8]> = worktree_roots.iter() - .map(|root| root.as_ref().as_os_str().as_bytes()).collect(); - - stmt.with_bindings((root_bytes, root_bytes.len()))? 
+ let root_bytes: Vec<&[u8]> = worktree_roots + .iter() + .map(|root| root.as_ref().as_os_str().as_bytes()) + .collect(); + + let len = root_bytes.len(); + + stmt.with_bindings((root_bytes, len))? .maybe_row() .map(|row| row.map(|id| WorkspaceId(id))) } @@ -360,7 +371,8 @@ mod tests { #[test] fn test_new_worktrees_for_roots() { - let db = Db::open_in_memory(); + env_logger::init(); + let db = Db::open_in_memory("test_new_worktrees_for_roots"); // Test creation in 0 case let workspace_1 = db.workspace_for_roots::(&[]); @@ -371,7 +383,7 @@ mod tests { assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); // Ensure the timestamps are different - sleep(Duration::from_millis(20)); + sleep(Duration::from_secs(1)); db.make_new_workspace::(&[]); // Test pulling another value from recent workspaces @@ -379,7 +391,7 @@ mod tests { assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); // Ensure the timestamps are different - sleep(Duration::from_millis(20)); + sleep(Duration::from_secs(1)); // Test creating a new workspace that doesn't exist already let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); @@ -396,7 +408,7 @@ mod tests { #[test] fn test_empty_worktrees() { - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_empty_worktrees"); assert_eq!(None, db.workspace_id::(&[])); @@ -404,7 +416,6 @@ mod tests { db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - db.write_to("test.db").unwrap(); // Sanity check assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); @@ -436,7 +447,7 @@ mod tests { (WorkspaceId(7), vec!["/tmp2"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_more_workspace_ids"); for (workspace_id, entries) in data { db.make_new_workspace::(&[]); @@ -470,7 +481,7 @@ mod tests { (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_detect_workspace_id"); for (workspace_id, entries) in 
data { db.make_new_workspace::(&[]); @@ -511,7 +522,7 @@ mod tests { (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), ]; - let db = Db::open_in_memory(); + let db = Db::open_in_memory("test_tricky_overlapping_update"); // Load in the test data for (workspace_id, entries) in data { @@ -519,6 +530,7 @@ mod tests { db.update_worktrees(workspace_id, entries); } + sleep(Duration::from_secs(1)); // Execute the update db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 1fd814c5803989486d440a8efdd4688844333db0..fcc180a48dc0c0052ed1db91b285d308a43d52aa 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -32,6 +32,9 @@ impl Connection { 0 as *const _, ); + // Turn on extended error codes + sqlite3_extended_result_codes(connection.sqlite3, 1); + connection.last_error()?; } @@ -71,6 +74,7 @@ impl Connection { 0 as *mut _, 0 as *mut _, ); + sqlite3_errcode(self.sqlite3); self.last_error()?; } Ok(()) @@ -95,29 +99,7 @@ impl Connection { } pub(crate) fn last_error(&self) -> Result<()> { - const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; - unsafe { - let code = sqlite3_errcode(self.sqlite3); - if NON_ERROR_CODES.contains(&code) { - return Ok(()); - } - - let message = sqlite3_errmsg(self.sqlite3); - let message = if message.is_null() { - None - } else { - Some( - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(), - ) - }; - - Err(anyhow!( - "Sqlite call failed with code {} and message: {:?}", - code as isize, - message - )) - } + unsafe { error_to_result(sqlite3_errcode(self.sqlite3)) } } } @@ -127,12 +109,37 @@ impl Drop for Connection { } } +pub(crate) fn error_to_result(code: std::os::raw::c_int) -> Result<()> { + const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + unsafe { + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errstr(code); + let message = if message.is_null() { 
+ None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } +} + #[cfg(test)] mod test { use anyhow::Result; use indoc::indoc; - use crate::connection::Connection; + use crate::{connection::Connection, migrations::Migration}; #[test] fn string_round_trips() -> Result<()> { @@ -234,4 +241,34 @@ mod test { .unwrap(); assert_eq!(read_blobs, vec![blob]); } + + #[test] + fn test_kv_store() -> anyhow::Result<()> { + let connection = Connection::open_memory("kv_store"); + + Migration::new( + "kv", + &["CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT;"], + ) + .run(&connection) + .unwrap(); + + let mut stmt = connection.prepare("INSERT INTO kv_store(key, value) VALUES(?, ?)")?; + stmt.bind_text(1, "a").unwrap(); + stmt.bind_text(2, "b").unwrap(); + stmt.exec().unwrap(); + let id = connection.last_insert_id(); + + let res = connection + .prepare("SELECT key, value FROM kv_store WHERE rowid = ?")? + .with_bindings(id)? + .row::<(String, String)>()?; + + assert_eq!(res, ("a".to_string(), "b".to_string())); + + Ok(()) + } } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 9589037e77c730f8b586a2e45e312d99bb4b5576..3d7830dd91f623894b1062cafbc99043d63325eb 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -6,9 +6,9 @@ impl Connection { // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. 
- pub fn with_savepoint(&mut self, name: impl AsRef, f: F) -> Result + pub fn with_savepoint(&self, name: impl AsRef, f: F) -> Result where - F: FnOnce(&mut Connection) -> Result, + F: FnOnce(&Connection) -> Result, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; @@ -28,13 +28,9 @@ impl Connection { // Run a set of commands within the context of a `SAVEPOINT name`. If the callback // returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save // point is released. - pub fn with_savepoint_rollback( - &mut self, - name: impl AsRef, - f: F, - ) -> Result> + pub fn with_savepoint_rollback(&self, name: impl AsRef, f: F) -> Result> where - F: FnOnce(&mut Connection) -> Result>, + F: FnOnce(&Connection) -> Result>, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; @@ -60,7 +56,7 @@ mod tests { #[test] fn test_nested_savepoints() -> Result<()> { - let mut connection = Connection::open_memory("nested_savepoints"); + let connection = Connection::open_memory("nested_savepoints"); connection .exec(indoc! 
{" diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 14683171a775b71e97235d019014a0335ed1230c..e2b59d86f1c234787fa244023f6a53509c2f8180 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; -use crate::connection::Connection; +use crate::connection::{error_to_result, Connection}; pub struct Statement<'a> { raw_statement: *mut sqlite3_stmt, @@ -65,6 +65,7 @@ impl<'a> Statement<'a> { } pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { + // dbg!("bind blob", index); let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; let len = blob.len() as c_int; @@ -94,6 +95,7 @@ impl<'a> Statement<'a> { } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { + // dbg!("bind double", index); let index = index as c_int; unsafe { @@ -110,6 +112,7 @@ impl<'a> Statement<'a> { } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { + // dbg!("bind int", index); let index = index as c_int; unsafe { @@ -126,6 +129,7 @@ impl<'a> Statement<'a> { } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { + // dbg!("bind int64", index); let index = index as c_int; unsafe { sqlite3_bind_int64(self.raw_statement, index, int); @@ -141,6 +145,7 @@ impl<'a> Statement<'a> { } pub fn bind_null(&self, index: i32) -> Result<()> { + // dbg!("bind null", index); let index = index as c_int; unsafe { sqlite3_bind_null(self.raw_statement, index); @@ -149,11 +154,12 @@ impl<'a> Statement<'a> { } pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { + // dbg!("bind text", index, text); let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; unsafe { - sqlite3_bind_blob( + sqlite3_bind_text( self.raw_statement, index, text_pointer, @@ -304,10 +310,8 @@ impl<'a> Statement<'a> { impl<'a> Drop for Statement<'a> { fn drop(&mut 
self) { unsafe { - sqlite3_finalize(self.raw_statement); - self.connection - .last_error() - .expect("sqlite3 finalize failed for statement :("); + let error = sqlite3_finalize(self.raw_statement); + error_to_result(error).expect("failed error"); }; } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 22d63a0996f10d72294974036b85498632d0d680..3757da5854f0c9c0e7f4d4d6203ba4a5f989f64e 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -204,6 +204,13 @@ impl Iterator for RandomCharIter { } } +#[macro_export] +macro_rules! iife { + ($block:block) => { + (|| $block)() + }; +} + #[cfg(test)] mod tests { use super::*; @@ -221,4 +228,18 @@ mod tests { extend_sorted(&mut vec, vec![1000, 19, 17, 9, 5], 8, |a, b| b.cmp(a)); assert_eq!(vec, &[1000, 101, 21, 19, 17, 13, 9, 8]); } + + #[test] + fn test_iife() { + fn option_returning_function() -> Option<()> { + None + } + + let foo = iife!({ + option_returning_function()?; + Some(()) + }); + + assert_eq!(foo, None); + } } From 685bc9fed30046638c0c9fcb84d6d86a26c28def Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 2 Nov 2022 14:37:51 -0700 Subject: [PATCH 31/86] impl bind and column and adjust pane tables --- crates/db/src/pane.rs | 116 ++++++++++++++++--------------------- crates/db/src/workspace.rs | 19 ++---- 2 files changed, 55 insertions(+), 80 deletions(-) diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 5db805012d25b2fb8dde6216318c73ba2b7b2526..816290d8706c896994bbbb89ab44c20389ee6ce5 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,5 +1,4 @@ -use std::str::FromStr; - +use anyhow::bail; use gpui::Axis; use indoc::indoc; use sqlez::{ @@ -16,15 +15,7 @@ use super::Db; pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( "pane", &[indoc! 
{" -CREATE TABLE dock_panes( - dock_pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - visible INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE pane_groups( -- Inner nodes +CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node @@ -33,43 +24,32 @@ CREATE TABLE pane_groups( -- Inner nodes FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - -CREATE TABLE grouped_panes( -- Leaf nodes +CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - group_id INTEGER NOT NULL, + group_id INTEGER, -- If null, this is a dock pane idx INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE items( - item_id INTEGER PRIMARY KEY, +CREATE TABLE dock_panes( + pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - kind TEXT NOT NULL, + anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + visible INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE group_items( - workspace_id INTEGER NOT NULL, +CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE -) STRICT; - 
-CREATE TABLE dock_items( workspace_id INTEGER NOT NULL, - dock_pane_id INTEGER NOT NULL, - item_id INTEGER NOT NULL, - idx INTEGER NOT NULL, - PRIMARY KEY (workspace_id, dock_pane_id, item_id) - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE, - FOREIGN KEY(item_id) REFERENCES items(item_id)ON DELETE CASCADE + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], ); @@ -154,39 +134,30 @@ pub enum DockAnchor { Expanded, } -impl ToString for DockAnchor { - fn to_string(&self) -> String { - match self { - DockAnchor::Bottom => "Bottom".to_string(), - DockAnchor::Right => "Right".to_string(), - DockAnchor::Expanded => "Expanded".to_string(), - } - } -} - -impl FromStr for DockAnchor { - type Err = anyhow::Error; - - fn from_str(s: &str) -> anyhow::Result { - match s { - "Bottom" => Ok(DockAnchor::Bottom), - "Right" => Ok(DockAnchor::Right), - "Expanded" => Ok(DockAnchor::Expanded), - _ => anyhow::bail!("Not a valid dock anchor"), - } - } -} - impl Bind for DockAnchor { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind(self.to_string(), start_index) + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) } } impl Column for DockAnchor { fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - ::column(statement, start_index) - .and_then(|(str, next_index)| Ok((DockAnchor::from_str(&str)?, next_index))) + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, 
+ _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) } } @@ -232,16 +203,29 @@ pub(crate) struct DockRow { impl Bind for DockRow { fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { statement.bind( - ( - self.workspace_id, - self.anchor_position.to_string(), - self.visible, - ), + (self.workspace_id, self.anchor_position, self.visible), start_index, ) } } +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((workspace_id, anchor_position, visible), next_index)| { + ( + DockRow { + workspace_id, + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} + impl Db { pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { let axis = self.get_pane_group_axis(pane_group_id); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index bf2f765e19a8014ea9b854582d8fc31e9e6903e4..e5fe6d5aee4b2e33806a6926443a6e6040c0569b 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -13,6 +13,7 @@ use sqlez::{ bindable::{Bind, Column}, connection::Connection, migrations::Migration, + statement::Statement, }; use crate::pane::SerializedDockPane; @@ -41,25 +42,15 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] pub struct WorkspaceId(i64); -impl WorkspaceId { - pub fn raw_id(&self) -> i64 { - self.0 - } -} - impl Bind for WorkspaceId { - fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result { - statement.bind(self.raw_id(), start_index) + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.0.bind(statement, start_index) } } impl Column for WorkspaceId { - fn column( - statement: &mut sqlez::statement::Statement, - start_index: i32, - ) -> Result<(Self, i32)> { - ::column(statement, 
start_index) - .map(|(id, next_index)| (WorkspaceId(id), next_index)) + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) } } From 19aac6a57f1a006ddc66f502d29854ff091a6377 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 15:20:47 -0700 Subject: [PATCH 32/86] Moved docks to a better position --- crates/db/examples/serialize-pane.rs | 2 +- crates/db/src/items.rs | 3 +- crates/db/src/pane.rs | 166 +++++++++------------ crates/db/src/workspace.rs | 207 ++++++++++++++++++++------- 4 files changed, 225 insertions(+), 153 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index 607347670948d8acdcbff029f17c4af15e0f18ab..ebe88037cd259cd9cfab4aa7c91778a1cf3eaeb9 100644 --- a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,6 +1,6 @@ use std::{fs::File, path::Path}; -use db::pane::{DockAnchor, SerializedDockPane}; +use db::{pane::SerializedDockPane, DockAnchor}; const TEST_FILE: &'static str = "test-db.db"; diff --git a/crates/db/src/items.rs b/crates/db/src/items.rs index a6497903acc7db85922675262c0d8adee464180b..93251e5eedba4de01795e99c5504d2f55a515ff4 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/items.rs @@ -67,8 +67,7 @@ #[derive(Debug, PartialEq, Eq)] pub struct ItemId { - workspace_id: usize, - item_id: usize, + pub item_id: usize, } // enum SerializedItemKind { diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index 816290d8706c896994bbbb89ab44c20389ee6ce5..ffb81c4012220027e8d894ef7eaa22decb229c75 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -1,4 +1,3 @@ -use anyhow::bail; use gpui::Axis; use indoc::indoc; use sqlez::{ @@ -8,7 +7,7 @@ use sqlez::{ }; use util::{iife, ResultExt}; -use crate::{items::ItemId, workspace::WorkspaceId}; +use crate::{items::ItemId, workspace::WorkspaceId, DockAnchor}; use super::Db; @@ 
-33,14 +32,15 @@ CREATE TABLE panes( FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; -CREATE TABLE dock_panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - visible INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE -) STRICT; +-- MOVE TO WORKSPACE TABLE +// CREATE TABLE dock_panes( +// pane_id INTEGER PRIMARY KEY, +// workspace_id INTEGER NOT NULL, +// anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' +// visible INTEGER NOT NULL, -- Boolean +// FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE +// FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE +// ) STRICT; CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique @@ -77,36 +77,34 @@ pub struct PaneId { #[derive(Debug, PartialEq, Eq, Copy, Clone)] pub struct PaneGroupId { workspace_id: WorkspaceId, - group_id: usize, } impl PaneGroupId { pub fn root(workspace_id: WorkspaceId) -> Self { Self { workspace_id, - group_id: 0, + // group_id: 0, } } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Default)] pub struct SerializedPaneGroup { - group_id: PaneGroupId, axis: Axis, children: Vec, } impl SerializedPaneGroup { - pub fn empty_root(workspace_id: WorkspaceId) -> Self { + pub fn empty_root(_workspace_id: WorkspaceId) -> Self { Self { - group_id: PaneGroupId::root(workspace_id), + // group_id: PaneGroupId::root(workspace_id), axis: Default::default(), children: Default::default(), } } } -struct PaneGroupChildRow { +struct _PaneGroupChildRow { child_pane_id: Option, child_group_id: Option, index: usize, @@ -120,47 +118,11 @@ pub enum PaneGroupChild { #[derive(Debug, PartialEq, Eq)] pub struct SerializedPane { - pane_id: PaneId, - children: 
Vec, + items: Vec, } //********* CURRENTLY IN USE TYPES: ********* -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - #[derive(Default, Debug, PartialEq, Eq)] pub struct SerializedDockPane { pub anchor_position: DockAnchor, @@ -227,56 +189,64 @@ impl Column for DockRow { } impl Db { - pub fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup { - let axis = self.get_pane_group_axis(pane_group_id); - let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - for child_row in self.get_pane_group_children(pane_group_id) { - if let Some(child_pane_id) = child_row.child_pane_id { - children.push(( - child_row.index, - PaneGroupChild::Pane(self.get_pane(PaneId { - workspace_id: pane_group_id.workspace_id, - pane_id: child_pane_id, - })), - )); - } else if let Some(child_group_id) = child_row.child_group_id { - children.push(( - child_row.index, - PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - workspace_id: pane_group_id.workspace_id, - group_id: child_group_id, - })), - )); - } - } - children.sort_by_key(|(index, _)| *index); + pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + unimplemented!() + } - SerializedPaneGroup { - group_id: 
pane_group_id, - axis, - children: children.into_iter().map(|(_, child)| child).collect(), - } + pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + unimplemented!() + // let axis = self.get_pane_group_axis(pane_group_id); + // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + // for child_row in self.get_pane_group_children(pane_group_id) { + // if let Some(child_pane_id) = child_row.child_pane_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Pane(self.get_pane(PaneId { + // workspace_id: pane_group_id.workspace_id, + // pane_id: child_pane_id, + // })), + // )); + // } else if let Some(child_group_id) = child_row.child_group_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + // workspace_id: pane_group_id.workspace_id, + // group_id: child_group_id, + // })), + // )); + // } + // } + // children.sort_by_key(|(index, _)| *index); + + // SerializedPaneGroup { + // group_id: pane_group_id, + // axis, + // children: children.into_iter().map(|(_, child)| child).collect(), + // } } - fn get_pane_group_children( + fn _get_pane_group_children( &self, _pane_group_id: PaneGroupId, - ) -> impl Iterator { + ) -> impl Iterator { Vec::new().into_iter() } - fn get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { + fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { unimplemented!(); } - pub fn save_pane_splits(&self, _center_pane_group: SerializedPaneGroup) { + pub fn save_pane_splits( + &self, + _workspace: &WorkspaceId, + _center_pane_group: &SerializedPaneGroup, + ) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them - // Items garbage collect themselves when dropped } - pub(crate) fn get_pane(&self, _pane_id: PaneId) -> SerializedPane { + pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } @@ 
-305,9 +275,9 @@ impl Db { #[cfg(test)] mod tests { - use crate::{pane::SerializedPane, Db}; + use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; - use super::{DockAnchor, SerializedDockPane}; + use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; #[test] fn test_basic_dock_pane() { @@ -333,18 +303,18 @@ mod tests { let workspace = db.workspace_for_roots(&["/tmp"]); - let center_pane = SerializedPane { - pane_id: crate::pane::PaneId { - workspace_id: workspace.workspace_id, - pane_id: 1, - }, - children: vec![], + // Pane group -> Pane -> 10 , 20 + let center_pane = SerializedPaneGroup { + axis: gpui::Axis::Horizontal, + children: vec![PaneGroupChild::Pane(SerializedPane { + items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], + })], }; - db.save_dock_pane(&workspace.workspace_id, &dock_pane); + db.save_pane_splits(&workspace.workspace_id, ¢er_pane); let new_workspace = db.workspace_for_roots(&["/tmp"]); - assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + assert_eq!(new_workspace.center_group, center_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index e5fe6d5aee4b2e33806a6926443a6e6040c0569b..3f8dc6e498166e1e2547968693ecdee2452d3de7 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{bail, Result}; use std::{ ffi::OsStr, @@ -16,7 +16,7 @@ use sqlez::{ statement::Statement, }; -use crate::pane::SerializedDockPane; +use crate::pane::{SerializedDockPane, SerializedPaneGroup}; use super::Db; @@ -28,7 +28,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( &[indoc! 
{" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, + center_pane_group INTEGER NOT NULL, + dock_anchor TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER NOT NULL, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + FOREIGN KEY(center_pane_group) REFERENCES pane_groups(group_id) ) STRICT; CREATE TABLE worktree_roots( @@ -54,10 +58,71 @@ impl Column for WorkspaceId { } } +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq)] +struct WorkspaceRow { + pub workspace_id: WorkspaceId, + pub dock_anchor: DockAnchor, + pub dock_visible: bool, +} + +impl Column for WorkspaceRow { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((id, anchor, visible), next_index)| { + ( + WorkspaceRow { + workspace_id: id, + dock_anchor: anchor, + dock_visible: visible, + }, + next_index, + ) + }, + ) + } +} + #[derive(Default, Debug)] pub struct SerializedWorkspace { pub workspace_id: WorkspaceId, - // pub center_group: SerializedPaneGroup, + pub center_group: SerializedPaneGroup, + pub dock_anchor: DockAnchor, + pub dock_visible: 
bool, pub dock_pane: Option, } @@ -70,15 +135,18 @@ impl Db { { // Find the workspace id which is uniquely identified by this set of paths // return it if found - let mut workspace_id = self.workspace_id(worktree_roots); - if workspace_id.is_none() && worktree_roots.len() == 0 { - workspace_id = self.last_workspace_id(); + let mut workspace_row = self.workspace(worktree_roots); + if workspace_row.is_none() && worktree_roots.len() == 0 { + workspace_row = self.last_workspace_id(); } - if let Some(workspace_id) = workspace_id { + if let Some(workspace_row) = workspace_row { SerializedWorkspace { - workspace_id, - dock_pane: self.get_dock_pane(workspace_id), + dock_pane: self.get_dock_pane(workspace_row.workspace_id), + center_group: self.get_center_group(workspace_row.workspace_id), + workspace_id: workspace_row.workspace_id, + dock_anchor: workspace_row.dock_anchor, + dock_visible: workspace_row.dock_visible, } } else { self.make_new_workspace(worktree_roots) @@ -99,7 +167,7 @@ impl Db { Ok(SerializedWorkspace { workspace_id, - dock_pane: None, + ..Default::default() }) }); @@ -112,11 +180,11 @@ impl Db { } } - fn workspace_id

(&self, worktree_roots: &[P]) -> Option + fn workspace

(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { - match get_workspace_id(worktree_roots, &self) { + match get_workspace(worktree_roots, &self) { Ok(workspace_id) => workspace_id, Err(err) => { log::error!("Failed to get workspace_id: {}", err); @@ -149,11 +217,10 @@ impl Db { } } - fn last_workspace_id(&self) -> Option { + fn last_workspace_id(&self) -> Option { let res = self - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1") - .and_then(|mut stmt| stmt.maybe_row()) - .map(|row| row.map(|id| WorkspaceId(id))); + .prepare("SELECT workspace_id, dock FROM workspaces ORDER BY timestamp DESC LIMIT 1") + .and_then(|mut stmt| stmt.maybe_row::()); match res { Ok(result) => result, @@ -206,13 +273,13 @@ where P: AsRef + Debug, { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_id = get_workspace_id(worktree_roots, &connection)?; - if let Some(preexisting_id) = preexisting_id { - if preexisting_id != *workspace_id { + let preexisting_workspace = get_workspace(worktree_roots, &connection)?; + if let Some(preexisting_workspace) = preexisting_workspace { + if preexisting_workspace.workspace_id != *workspace_id { // Should also delete fields in other tables with cascading updates connection .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_id.0)? + .with_bindings(preexisting_workspace.workspace_id.0)? .exec()?; } } @@ -241,7 +308,7 @@ where Ok(()) } -fn get_workspace_id

(worktree_roots: &[P], connection: &Connection) -> Result> +fn get_workspace

(worktree_roots: &[P], connection: &Connection) -> Result> where P: AsRef + Debug, { @@ -315,7 +382,7 @@ where // parameters by number. let query = format!( r#" - SELECT workspace_id + SELECT workspace_id, dock_anchor, dock_visible FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots WHERE worktree_root in {array_bind} AND workspace_id NOT IN (SELECT wt1.workspace_id FROM worktree_roots as wt1 @@ -331,6 +398,7 @@ where // This will only be called on start up and when root workspaces change, no need to waste memory // caching it. let mut stmt = connection.prepare(&query)?; + // Make sure we bound the parameters correctly debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); @@ -339,11 +407,10 @@ where .map(|root| root.as_ref().as_os_str().as_bytes()) .collect(); - let len = root_bytes.len(); + let num_of_roots = root_bytes.len(); - stmt.with_bindings((root_bytes, len))? - .maybe_row() - .map(|row| row.map(|id| WorkspaceId(id))) + stmt.with_bindings((root_bytes, num_of_roots))? 
+ .maybe_row::() } #[cfg(test)] @@ -401,14 +468,17 @@ mod tests { fn test_empty_worktrees() { let db = Db::open_in_memory("test_empty_worktrees"); - assert_eq!(None, db.workspace_id::(&[])); + assert_eq!(None, db.workspace::(&[])); db.make_new_workspace::(&[]); //ID 1 db.make_new_workspace::(&[]); //ID 2 db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1))); + assert_eq!( + db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(1) + ); db.update_worktrees::(&WorkspaceId(1), &[]); @@ -416,9 +486,9 @@ mod tests { // call would be semantically correct (as those are the workspaces that // don't have roots) but I'd prefer that this API to either return exactly one // workspace, and None otherwise - assert_eq!(db.workspace_id::(&[]), None,); + assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1))); + assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), @@ -445,23 +515,42 @@ mod tests { db.update_worktrees(workspace_id, entries); } - assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp1"])); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp2"]), Some(WorkspaceId(2))); assert_eq!( - db.workspace_id(&["/tmp1", "/tmp2", "/tmp3"]), - Some(WorkspaceId(3)) + WorkspaceId(1), + db.workspace(&["/tmp1"]).unwrap().workspace_id + ); + assert_eq!( + db.workspace(&["/tmp1", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(2) + ); + assert_eq!( + db.workspace(&["/tmp1", "/tmp2", "/tmp3"]) + .unwrap() + .workspace_id, + WorkspaceId(3) + ); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, + WorkspaceId(4) + ); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap() + .workspace_id, + WorkspaceId(5) ); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(4))); assert_eq!( - db.workspace_id(&["/tmp2", "/tmp3", "/tmp4"]), - 
Some(WorkspaceId(5)) + db.workspace(&["/tmp2", "/tmp4"]).unwrap().workspace_id, + WorkspaceId(6) + ); + assert_eq!( + db.workspace(&["/tmp2"]).unwrap().workspace_id, + WorkspaceId(7) ); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp4"]), Some(WorkspaceId(6))); - assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(7))); - assert_eq!(db.workspace_id(&["/tmp1", "/tmp5"]), None); - assert_eq!(db.workspace_id(&["/tmp5"]), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); + assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); + assert_eq!(db.workspace(&["/tmp5"]), None); + assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); } #[test] @@ -479,13 +568,21 @@ mod tests { db.update_worktrees(workspace_id, entries); } - assert_eq!(db.workspace_id(&["/tmp2"]), None); - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), None); - assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(2))); + assert_eq!(db.workspace(&["/tmp2"]), None); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); + assert_eq!( + db.workspace(&["/tmp"]).unwrap().workspace_id, + WorkspaceId(1) + ); + assert_eq!( + db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, + WorkspaceId(2) + ); assert_eq!( - db.workspace_id(&["/tmp", "/tmp2", "/tmp3"]), - Some(WorkspaceId(3)) + db.workspace(&["/tmp", "/tmp2", "/tmp3"]) + .unwrap() + .workspace_id, + WorkspaceId(3) ); } @@ -526,15 +623,21 @@ mod tests { db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!(db.workspace_id(&["/tmp2", "/tmp3"]), Some(WorkspaceId(2))); + assert_eq!( + db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, + WorkspaceId(2) + ); // And that workspace 1 was untouched - assert_eq!(db.workspace_id(&["/tmp"]), Some(WorkspaceId(1))); + assert_eq!( + db.workspace(&["/tmp"]).unwrap().workspace_id, + WorkspaceId(1) + ); // And that workspace 2 is no 
longer registered under these roots - assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), None); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(Some(WorkspaceId(2)), db.last_workspace_id()); + assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( From d492cbced9e25518440d8eaba6a638f6bdf92cee Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:26:43 -0700 Subject: [PATCH 33/86] WIP --- crates/db/src/db.rs | 9 -- crates/db/src/pane.rs | 14 +- crates/db/src/workspace.rs | 286 ++++++++++--------------------------- 3 files changed, 79 insertions(+), 230 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 48a025112abc87f08d53af1ec39f48610c72a2ad..6077bdeec10dd8e26223f0e0f3695b0cde2b85c1 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -70,12 +70,3 @@ impl Db { self.backup_main(&destination) } } - -impl Drop for Db { - fn drop(&mut self) { - self.exec(indoc! 
{" - PRAGMA analysis_limit=500; - PRAGMA optimize"}) - .ok(); - } -} diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs index ffb81c4012220027e8d894ef7eaa22decb229c75..4904f515b91d885bab7946dfdc30f823717aa850 100644 --- a/crates/db/src/pane.rs +++ b/crates/db/src/pane.rs @@ -32,16 +32,6 @@ CREATE TABLE panes( FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; --- MOVE TO WORKSPACE TABLE -// CREATE TABLE dock_panes( -// pane_id INTEGER PRIMARY KEY, -// workspace_id INTEGER NOT NULL, -// anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' -// visible INTEGER NOT NULL, -- Boolean -// FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE -// FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE -// ) STRICT; - CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique pane_id INTEGER NOT NULL, @@ -313,8 +303,8 @@ mod tests { db.save_pane_splits(&workspace.workspace_id, ¢er_pane); - let new_workspace = db.workspace_for_roots(&["/tmp"]); + // let new_workspace = db.workspace_for_roots(&["/tmp"]); - assert_eq!(new_workspace.center_group, center_pane); + // assert_eq!(new_workspace.center_group, center_pane); } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 3f8dc6e498166e1e2547968693ecdee2452d3de7..03ca321b5d47624d5efaa0a919e46569f3287d9a 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,7 +1,7 @@ -use anyhow::{bail, Result}; +use anyhow::{bail, Context, Result}; +use util::{iife, ResultExt}; use std::{ - ffi::OsStr, fmt::Debug, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, @@ -28,11 +28,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( &[indoc! 
{" CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, - center_pane_group INTEGER NOT NULL, - dock_anchor TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER NOT NULL, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - FOREIGN KEY(center_pane_group) REFERENCES pane_groups(group_id) ) STRICT; CREATE TABLE worktree_roots( @@ -93,43 +91,21 @@ impl Column for DockAnchor { } } -#[derive(Debug, PartialEq, Eq)] -struct WorkspaceRow { - pub workspace_id: WorkspaceId, - pub dock_anchor: DockAnchor, - pub dock_visible: bool, -} - -impl Column for WorkspaceRow { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( - |((id, anchor, visible), next_index)| { - ( - WorkspaceRow { - workspace_id: id, - dock_anchor: anchor, - dock_visible: visible, - }, - next_index, - ) - }, - ) - } -} +type WorkspaceRow = (WorkspaceId, DockAnchor, bool); #[derive(Default, Debug)] pub struct SerializedWorkspace { - pub workspace_id: WorkspaceId, + pub worktree_roots: Vec>, pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, - pub dock_pane: Option, + pub dock_pane: SerializedDockPane, } impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> SerializedWorkspace + pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { @@ -140,57 +116,23 @@ impl Db { workspace_row = self.last_workspace_id(); } - if let Some(workspace_row) = workspace_row { - SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_row.workspace_id), - center_group: self.get_center_group(workspace_row.workspace_id), - workspace_id: workspace_row.workspace_id, - dock_anchor: workspace_row.dock_anchor, - dock_visible: workspace_row.dock_visible, - } - } else { - self.make_new_workspace(worktree_roots) - } - } - - fn make_new_workspace

(&self, worktree_roots: &[P]) -> SerializedWorkspace - where - P: AsRef + Debug, - { - let res = self.with_savepoint("make_new_workspace", |conn| { - let workspace_id = WorkspaceId( - conn.prepare("INSERT INTO workspaces DEFAULT VALUES")? - .insert()?, - ); - - update_worktree_roots(conn, &workspace_id, worktree_roots)?; - - Ok(SerializedWorkspace { - workspace_id, - ..Default::default() - }) - }); - - match res { - Ok(serialized_workspace) => serialized_workspace, - Err(err) => { - log::error!("Failed to insert new workspace into DB: {}", err); - Default::default() - } - } + workspace_row.and_then( + |(workspace_id, dock_anchor, dock_visible)| SerializedWorkspace { + dock_pane: self.get_dock_pane(workspace_id)?, + center_group: self.get_center_group(workspace_id), + dock_anchor, + dock_visible, + }, + ) } fn workspace

(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { - match get_workspace(worktree_roots, &self) { - Ok(workspace_id) => workspace_id, - Err(err) => { - log::error!("Failed to get workspace_id: {}", err); - None - } - } + get_workspace(worktree_roots, &self) + .log_err() + .unwrap_or_default() } // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { @@ -204,63 +146,35 @@ impl Db { where P: AsRef + Debug, { - match self.with_savepoint("update_worktrees", |conn| { + self.with_savepoint("update_worktrees", |conn| { update_worktree_roots(conn, workspace_id, worktree_roots) - }) { - Ok(_) => {} - Err(err) => log::error!( - "Failed to update workspace {:?} with roots {:?}, error: {}", - workspace_id, - worktree_roots, - err - ), - } + }) + .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") + .log_err(); } fn last_workspace_id(&self) -> Option { - let res = self - .prepare("SELECT workspace_id, dock FROM workspaces ORDER BY timestamp DESC LIMIT 1") - .and_then(|mut stmt| stmt.maybe_row::()); - - match res { - Ok(result) => result, - Err(err) => { - log::error!("Failed to get last workspace id, err: {}", err); - return None; - } - } + iife! ({ + self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? + .maybe_row::() + }).log_err()? } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec>)> { + pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { - let rows = conn - .prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? - .with_bindings(limit)? 
- .rows::()?; - - let ids = rows.iter().map(|row| WorkspaceId(*row)); - - let mut result = Vec::new(); - let mut stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - for workspace_id in ids { - let roots = stmt - .with_bindings(workspace_id.0)? - .rows::>()? - .iter() - .map(|row| PathBuf::from(OsStr::from_bytes(&row)).into()) - .collect(); - result.push((workspace_id, roots)) - } - - Ok(result) - }) - .unwrap_or_else(|err| { - log::error!("Failed to get recent workspaces, err: {}", err); - Vec::new() + + conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + .with_bindings(limit)? + .rows::()? + .iter() + .map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::()) + .collect::>() }) + .log_err() + .unwrap_or_default() } } @@ -274,12 +188,12 @@ where { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. let preexisting_workspace = get_workspace(worktree_roots, &connection)?; - if let Some(preexisting_workspace) = preexisting_workspace { - if preexisting_workspace.workspace_id != *workspace_id { + if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { + if preexisting_workspace_id != *workspace_id { // Should also delete fields in other tables with cascading updates connection .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace.workspace_id.0)? + .with_bindings(preexisting_workspace_id)? .exec()?; } } @@ -319,16 +233,13 @@ where // Prepare the array binding string. SQL doesn't have syntax for this, so // we have to do it ourselves. 
- let mut array_binding_stmt = "(".to_string(); - for i in 0..worktree_roots.len() { - // This uses ?NNN for numbered placeholder syntax - array_binding_stmt.push_str(&format!("?{}", (i + 1))); //sqlite is 1-based - if i < worktree_roots.len() - 1 { - array_binding_stmt.push(','); - array_binding_stmt.push(' '); - } - } - array_binding_stmt.push(')'); + let array_binding_stmt = format!( + "({})", + (0..worktree_roots.len()) + .map(|index| format!("?{}", index + 1)) + .collect::>() + .join(", ") + ); // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: @@ -382,15 +293,17 @@ where // parameters by number. let query = format!( r#" - SELECT workspace_id, dock_anchor, dock_visible - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ? + SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible + FROM (SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in {array_bind} AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) + GROUP BY workspace_id) + WHERE num_matching = ?) 
as matching_workspace + JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id "#, array_bind = array_binding_stmt ); @@ -416,12 +329,7 @@ where #[cfg(test)] mod tests { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - thread::sleep, - time::Duration, - }; + use std::{path::PathBuf, thread::sleep, time::Duration}; use crate::Db; @@ -475,10 +383,7 @@ mod tests { db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); // Sanity check - assert_eq!( - db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(1) - ); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); db.update_worktrees::(&WorkspaceId(1), &[]); @@ -488,11 +393,11 @@ mod tests { // workspace, and None otherwise assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(1)); + assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), - vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),], + vec![Vec::::new(), Vec::::new()], ) } @@ -515,38 +420,19 @@ mod tests { db.update_worktrees(workspace_id, entries); } + assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); + assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); assert_eq!( - WorkspaceId(1), - db.workspace(&["/tmp1"]).unwrap().workspace_id - ); - assert_eq!( - db.workspace(&["/tmp1", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(2) - ); - assert_eq!( - db.workspace(&["/tmp1", "/tmp2", "/tmp3"]) - .unwrap() - .workspace_id, + db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(3) ); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); assert_eq!( - db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, - WorkspaceId(4) - ); - assert_eq!( - db.workspace(&["/tmp2", "/tmp3", "/tmp4"]) - .unwrap() - .workspace_id, + db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, WorkspaceId(5) ); - assert_eq!( - 
db.workspace(&["/tmp2", "/tmp4"]).unwrap().workspace_id, - WorkspaceId(6) - ); - assert_eq!( - db.workspace(&["/tmp2"]).unwrap().workspace_id, - WorkspaceId(7) - ); + assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); + assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); assert_eq!(db.workspace(&["/tmp5"]), None); @@ -570,26 +456,14 @@ mod tests { assert_eq!(db.workspace(&["/tmp2"]), None); assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); + assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); assert_eq!( - db.workspace(&["/tmp"]).unwrap().workspace_id, - WorkspaceId(1) - ); - assert_eq!( - db.workspace(&["/tmp", "/tmp2"]).unwrap().workspace_id, - WorkspaceId(2) - ); - assert_eq!( - db.workspace(&["/tmp", "/tmp2", "/tmp3"]) - .unwrap() - .workspace_id, + db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(3) ); } - fn arc_path(path: &'static str) -> Arc { - PathBuf::from(path).into() - } - #[test] fn test_tricky_overlapping_updates() { // DB state: @@ -623,30 +497,24 @@ mod tests { db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); // Make sure that workspace 3 doesn't exist - assert_eq!( - db.workspace(&["/tmp2", "/tmp3"]).unwrap().workspace_id, - WorkspaceId(2) - ); + assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); // And that workspace 1 was untouched - assert_eq!( - db.workspace(&["/tmp"]).unwrap().workspace_id, - WorkspaceId(1) - ); + assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); // And that workspace 2 is no longer registered under these roots assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(db.last_workspace_id().unwrap().workspace_id, WorkspaceId(2)); + assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( 
recent_workspaces.get(0).unwrap(), - &(WorkspaceId(2), vec![arc_path("/tmp2"), arc_path("/tmp3")]) + &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] ); assert_eq!( recent_workspaces.get(1).unwrap(), - &(WorkspaceId(1), vec![arc_path("/tmp")]) + &vec![PathBuf::from("/tmp")] ); } } From b552f1788c7282f6c75d7476817770ed775b36a4 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:31:36 -0700 Subject: [PATCH 34/86] WIP2 --- crates/db/src/workspace.rs | 115 +++++++++++++++---------------------- 1 file changed, 46 insertions(+), 69 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 03ca321b5d47624d5efaa0a919e46569f3287d9a..bec9f988234bf7be6e22cb4f780763c0baaf997b 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -5,7 +5,6 @@ use std::{ fmt::Debug, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, - sync::Arc, }; use indoc::indoc; @@ -95,7 +94,6 @@ type WorkspaceRow = (WorkspaceId, DockAnchor, bool); #[derive(Default, Debug)] pub struct SerializedWorkspace { - pub worktree_roots: Vec>, pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, @@ -105,40 +103,30 @@ pub struct SerializedWorkspace { impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated + pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, { // Find the workspace id which is uniquely identified by this set of paths // return it if found - let mut workspace_row = self.workspace(worktree_roots); + let mut workspace_row = get_workspace(worktree_roots, &self) + .log_err() + .unwrap_or_default(); if workspace_row.is_none() && worktree_roots.len() == 0 { - workspace_row = self.last_workspace_id(); + workspace_row = self.last_workspace(); } - workspace_row.and_then( - |(workspace_id, dock_anchor, dock_visible)| SerializedWorkspace { + workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { + Some(SerializedWorkspace { dock_pane: self.get_dock_pane(workspace_id)?, center_group: self.get_center_group(workspace_id), dock_anchor, dock_visible, - }, - ) - } - - fn workspace

(&self, worktree_roots: &[P]) -> Option - where - P: AsRef + Debug, - { - get_workspace(worktree_roots, &self) - .log_err() - .unwrap_or_default() + }) + }) } - // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow { - // unimplemented!() - // } - /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table @@ -147,13 +135,46 @@ impl Db { P: AsRef + Debug, { self.with_savepoint("update_worktrees", |conn| { - update_worktree_roots(conn, workspace_id, worktree_roots) + // Lookup any old WorkspaceIds which have the same set of roots, and delete them. + let preexisting_workspace = get_workspace(worktree_roots, &conn)?; + if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { + if preexisting_workspace_id != *workspace_id { + // Should also delete fields in other tables with cascading updates + conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(preexisting_workspace_id)? + .exec()?; + } + } + + conn.prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? + .with_bindings(workspace_id.0)? + .exec()?; + + for root in worktree_roots { + let path = root.as_ref().as_os_str().as_bytes(); + // If you need to debug this, here's the string parsing: + // let path = root.as_ref().to_string_lossy().to_string(); + + conn.prepare( + "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", + )? + .with_bindings((workspace_id.0, path))? + .exec()?; + } + + conn.prepare( + "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", + )? + .with_bindings(workspace_id.0)? + .exec()?; + + Ok(()) }) .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") .log_err(); } - fn last_workspace_id(&self) -> Option { + fn last_workspace(&self) -> Option { iife! 
({ self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? .maybe_row::() @@ -178,50 +199,6 @@ impl Db { } } -fn update_worktree_roots

( - connection: &Connection, - workspace_id: &WorkspaceId, - worktree_roots: &[P], -) -> Result<()> -where - P: AsRef + Debug, -{ - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_workspace = get_workspace(worktree_roots, &connection)?; - if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { - if preexisting_workspace_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - connection - .prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace_id)? - .exec()?; - } - } - - connection - .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; - - for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - - connection - .prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")? - .with_bindings((workspace_id.0, path))? - .exec()?; - } - - connection - .prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; - - Ok(()) -} - fn get_workspace

(worktree_roots: &[P], connection: &Connection) -> Result> where P: AsRef + Debug, @@ -393,7 +370,7 @@ mod tests { // workspace, and None otherwise assert_eq!(db.workspace::(&[]), None,); - assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(1)); + assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); assert_eq!( db.recent_workspaces(2), @@ -505,7 +482,7 @@ mod tests { // And that workspace 2 is no longer registered under these roots assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(2)); + assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); let recent_workspaces = db.recent_workspaces(10); assert_eq!( From aa7b909b7b63dded4702badaa4d0f92a7d3364cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Wed, 2 Nov 2022 16:36:40 -0700 Subject: [PATCH 35/86] WIP3 --- crates/db/src/workspace.rs | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index bec9f988234bf7be6e22cb4f780763c0baaf997b..10f99df2af3db97639b1a192721cf5e09a919e79 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -41,7 +41,7 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( ); #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub struct WorkspaceId(i64); +pub(crate) struct WorkspaceId(i64); impl Bind for WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { @@ -103,7 +103,6 @@ pub struct SerializedWorkspace { impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> Option where P: AsRef + Debug, @@ -114,7 +113,11 @@ impl Db { .log_err() .unwrap_or_default(); if workspace_row.is_none() && worktree_roots.len() == 0 { - workspace_row = self.last_workspace(); + workspace_row = self.prepare( + "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" + ).and_then(|mut stmt| stmt.maybe_row::()) + .log_err() + .flatten() } workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { @@ -127,6 +130,8 @@ impl Db { }) } + /// TODO: Change to be 'update workspace' and to serialize the whole workspace in one go. + /// /// Updates the open paths for the given workspace id. Will garbage collect items from /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps /// in the workspace id table @@ -174,13 +179,6 @@ impl Db { .log_err(); } - fn last_workspace(&self) -> Option { - iife! ({ - self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")? - .maybe_row::() - }).log_err()? 
- } - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { From eb0598dac2dfce10100b8b9893c61e70d3c35574 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 2 Nov 2022 18:09:35 -0700 Subject: [PATCH 36/86] more refactoring and slightly better api --- crates/db/src/db.rs | 4 +- crates/db/src/pane.rs | 310 ------------- crates/db/src/workspace.rs | 611 +++++++++++-------------- crates/db/src/{ => workspace}/items.rs | 5 - crates/db/src/workspace/model.rs | 173 +++++++ crates/db/src/workspace/pane.rs | 169 +++++++ crates/sqlez/src/bindable.rs | 23 + 7 files changed, 628 insertions(+), 667 deletions(-) delete mode 100644 crates/db/src/pane.rs rename crates/db/src/{ => workspace}/items.rs (97%) create mode 100644 crates/db/src/workspace/model.rs create mode 100644 crates/db/src/workspace/pane.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6077bdeec10dd8e26223f0e0f3695b0cde2b85c1..07670e309ae15ed45d58998e225409769f1adbd2 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,7 +1,5 @@ -pub mod items; pub mod kvp; mod migrations; -pub mod pane; pub mod workspace; use std::fs; @@ -11,10 +9,10 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; use kvp::KVP_MIGRATION; -use pane::PANE_MIGRATIONS; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; +use workspace::pane::PANE_MIGRATIONS; pub use workspace::*; #[derive(Clone)] diff --git a/crates/db/src/pane.rs b/crates/db/src/pane.rs deleted file mode 100644 index 4904f515b91d885bab7946dfdc30f823717aa850..0000000000000000000000000000000000000000 --- a/crates/db/src/pane.rs +++ /dev/null @@ -1,310 +0,0 @@ -use gpui::Axis; -use indoc::indoc; -use sqlez::{ - bindable::{Bind, Column}, - migrations::Migration, - statement::Statement, -}; -use util::{iife, ResultExt}; - -use crate::{items::ItemId, 
workspace::WorkspaceId, DockAnchor}; - -use super::Db; - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! {" -CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, - group_id INTEGER, -- If null, this is a dock pane - idx INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE -) STRICT; - -CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - pane_id INTEGER NOT NULL, - workspace_id INTEGER NOT NULL, - kind TEXT NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) -) STRICT; -"}], -); - -// We have an many-branched, unbalanced tree with three types: -// Pane Groups -// Panes -// Items - -// The root is always a Pane Group -// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children -// Panes can have 0 or more items as children -// Panes can be their own root -// Items cannot have children -// References pointing down is hard (SQL doesn't like arrays) -// References pointing up is easy (1-1 item / parent relationship) but is harder to query -// - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneId { - workspace_id: WorkspaceId, - pane_id: usize, -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneGroupId { - workspace_id: WorkspaceId, -} - -impl PaneGroupId { - pub fn 
root(workspace_id: WorkspaceId) -> Self { - Self { - workspace_id, - // group_id: 0, - } - } -} - -#[derive(Debug, PartialEq, Eq, Default)] -pub struct SerializedPaneGroup { - axis: Axis, - children: Vec, -} - -impl SerializedPaneGroup { - pub fn empty_root(_workspace_id: WorkspaceId) -> Self { - Self { - // group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - } - } -} - -struct _PaneGroupChildRow { - child_pane_id: Option, - child_group_id: Option, - index: usize, -} - -#[derive(Debug, PartialEq, Eq)] -pub enum PaneGroupChild { - Pane(SerializedPane), - Group(SerializedPaneGroup), -} - -#[derive(Debug, PartialEq, Eq)] -pub struct SerializedPane { - items: Vec, -} - -//********* CURRENTLY IN USE TYPES: ********* - -#[derive(Default, Debug, PartialEq, Eq)] -pub struct SerializedDockPane { - pub anchor_position: DockAnchor, - pub visible: bool, -} - -impl SerializedDockPane { - fn to_row(&self, workspace: &WorkspaceId) -> DockRow { - DockRow { - workspace_id: *workspace, - anchor_position: self.anchor_position, - visible: self.visible, - } - } -} - -impl Column for SerializedDockPane { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(DockAnchor, bool) as Column>::column(statement, start_index).map( - |((anchor_position, visible), next_index)| { - ( - SerializedDockPane { - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -#[derive(Default, Debug, PartialEq, Eq)] -pub(crate) struct DockRow { - workspace_id: WorkspaceId, - anchor_position: DockAnchor, - visible: bool, -} - -impl Bind for DockRow { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind( - (self.workspace_id, self.anchor_position, self.visible), - start_index, - ) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as 
Column>::column(statement, start_index).map( - |((workspace_id, anchor_position, visible), next_index)| { - ( - DockRow { - workspace_id, - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -impl Db { - pub fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { - unimplemented!() - } - - pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { - unimplemented!() - // let axis = self.get_pane_group_axis(pane_group_id); - // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - // for child_row in self.get_pane_group_children(pane_group_id) { - // if let Some(child_pane_id) = child_row.child_pane_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Pane(self.get_pane(PaneId { - // workspace_id: pane_group_id.workspace_id, - // pane_id: child_pane_id, - // })), - // )); - // } else if let Some(child_group_id) = child_row.child_group_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - // workspace_id: pane_group_id.workspace_id, - // group_id: child_group_id, - // })), - // )); - // } - // } - // children.sort_by_key(|(index, _)| *index); - - // SerializedPaneGroup { - // group_id: pane_group_id, - // axis, - // children: children.into_iter().map(|(_, child)| child).collect(), - // } - } - - fn _get_pane_group_children( - &self, - _pane_group_id: PaneGroupId, - ) -> impl Iterator { - Vec::new().into_iter() - } - - fn _get_pane_group_axis(&self, _pane_group_id: PaneGroupId) -> Axis { - unimplemented!(); - } - - pub fn save_pane_splits( - &self, - _workspace: &WorkspaceId, - _center_pane_group: &SerializedPaneGroup, - ) { - // Delete the center pane group for this workspace and any of its children - // Generate new pane group IDs as we go through - // insert them - } - - pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { - unimplemented!(); - } - - pub fn get_dock_pane(&self, workspace: WorkspaceId) -> 
Option { - iife!({ - self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .with_bindings(workspace)? - .maybe_row::() - }) - .log_err() - .flatten() - } - - pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { - iife!({ - self.prepare( - "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", - )? - .with_bindings(dock_pane.to_row(workspace))? - .insert() - }) - .log_err(); - } -} - -#[cfg(test)] -mod tests { - - use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; - - use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; - - #[test] - fn test_basic_dock_pane() { - let db = Db::open_in_memory("basic_dock_pane"); - - let workspace = db.workspace_for_roots(&["/tmp"]); - - let dock_pane = SerializedDockPane { - anchor_position: DockAnchor::Expanded, - visible: true, - }; - - db.save_dock_pane(&workspace.workspace_id, &dock_pane); - - let new_workspace = db.workspace_for_roots(&["/tmp"]); - - assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); - } - - #[test] - fn test_dock_simple_split() { - let db = Db::open_in_memory("simple_split"); - - let workspace = db.workspace_for_roots(&["/tmp"]); - - // Pane group -> Pane -> 10 , 20 - let center_pane = SerializedPaneGroup { - axis: gpui::Axis::Horizontal, - children: vec![PaneGroupChild::Pane(SerializedPane { - items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], - })], - }; - - db.save_pane_splits(&workspace.workspace_id, ¢er_pane); - - // let new_workspace = db.workspace_for_roots(&["/tmp"]); - - // assert_eq!(new_workspace.center_group, center_pane); - } -} diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 10f99df2af3db97639b1a192721cf5e09a919e79..4e65c9788c2563f66a69c112ba6eebf8b12b7e0a 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,23 +1,14 @@ -use anyhow::{bail, Context, Result}; -use util::{iife, ResultExt}; +mod items; 
+pub mod model; +pub(crate) mod pane; -use std::{ - fmt::Debug, - os::unix::prelude::OsStrExt, - path::{Path, PathBuf}, -}; +use anyhow::{Context, Result}; +use util::ResultExt; -use indoc::indoc; -use sqlez::{ - bindable::{Bind, Column}, - connection::Connection, - migrations::Migration, - statement::Statement, -}; +use std::path::{Path, PathBuf}; -use crate::pane::{SerializedDockPane, SerializedPaneGroup}; - -use super::Db; +use indoc::{formatdoc, indoc}; +use sqlez::{connection::Connection, migrations::Migration}; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented @@ -37,87 +28,34 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( workspace_id INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE PRIMARY KEY(worktree_root, workspace_id) - ) STRICT;"}], + ) STRICT; + "}], ); -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub(crate) struct WorkspaceId(i64); - -impl Bind for WorkspaceId { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - self.0.bind(statement, start_index) - } -} - -impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) - } -} - -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, 
start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - -type WorkspaceRow = (WorkspaceId, DockAnchor, bool); +use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; -#[derive(Default, Debug)] -pub struct SerializedWorkspace { - pub center_group: SerializedPaneGroup, - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub dock_pane: SerializedDockPane, -} +use super::Db; impl Db { /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, /// returns the last workspace which was updated - pub fn workspace_for_roots

(&self, worktree_roots: &[P]) -> Option - where - P: AsRef + Debug, - { + pub fn workspace_for_roots>( + &self, + worktree_roots: &[P], + ) -> Option { // Find the workspace id which is uniquely identified by this set of paths // return it if found let mut workspace_row = get_workspace(worktree_roots, &self) .log_err() .unwrap_or_default(); + if workspace_row.is_none() && worktree_roots.len() == 0 { + // Return last workspace if no roots passed workspace_row = self.prepare( "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" ).and_then(|mut stmt| stmt.maybe_row::()) .log_err() - .flatten() + .flatten(); } workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { @@ -130,66 +68,56 @@ impl Db { }) } - /// TODO: Change to be 'update workspace' and to serialize the whole workspace in one go. - /// - /// Updates the open paths for the given workspace id. Will garbage collect items from - /// any workspace ids which are no replaced by the new workspace id. Updates the timestamps - /// in the workspace id table - pub fn update_worktrees

(&self, workspace_id: &WorkspaceId, worktree_roots: &[P]) - where - P: AsRef + Debug, - { + /// Saves a workspace using the worktree roots. Will garbage collect any workspaces + /// that used this workspace previously + pub fn save_workspace>( + &self, + worktree_roots: &[P], + workspace: SerializedWorkspace, + ) { self.with_savepoint("update_worktrees", |conn| { // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - let preexisting_workspace = get_workspace(worktree_roots, &conn)?; - if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace { - if preexisting_workspace_id != *workspace_id { - // Should also delete fields in other tables with cascading updates - conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(preexisting_workspace_id)? - .exec()?; - } + if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? { + // Should also delete fields in other tables with cascading updates and insert + // new entry + conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? + .with_bindings(id_to_delete)? + .exec()?; } - conn.prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")? - .with_bindings(workspace_id.0)? - .exec()?; + // Insert new workspace into workspaces table if none were found + let workspace_id = WorkspaceId( + conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")? + .with_bindings((workspace.dock_anchor, workspace.dock_visible))? + .insert()?, + ); + // Write worktree_roots with new workspace_id for root in worktree_roots { - let path = root.as_ref().as_os_str().as_bytes(); - // If you need to debug this, here's the string parsing: - // let path = root.as_ref().to_string_lossy().to_string(); - conn.prepare( "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", )? - .with_bindings((workspace_id.0, path))? + .with_bindings((workspace_id, root.as_ref()))? 
.exec()?; } - conn.prepare( - "UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?", - )? - .with_bindings(workspace_id.0)? - .exec()?; - Ok(()) }) - .context("Update workspace {workspace_id:?} with roots {worktree_roots:?}") + .context("Update workspace with roots {worktree_roots:?}") .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { self.with_savepoint("recent_workspaces", |conn| { - let mut stmt = + let mut roots_by_id = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? .iter() - .map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::()) + .map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::()) .collect::>() }) .log_err() @@ -197,25 +125,15 @@ impl Db { } } -fn get_workspace

(worktree_roots: &[P], connection: &Connection) -> Result> -where - P: AsRef + Debug, -{ +fn get_workspace>( + worktree_roots: &[P], + connection: &Connection, +) -> Result> { // Short circuit if we can if worktree_roots.len() == 0 { return Ok(None); } - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - let array_binding_stmt = format!( - "({})", - (0..worktree_roots.len()) - .map(|index| format!("?{}", index + 1)) - .collect::>() - .join(", ") - ); - // Any workspace can have multiple independent paths, and these paths // can overlap in the database. Take this test data for example: // @@ -266,230 +184,225 @@ where // Note: due to limitations in SQLite's query binding, we have to generate the prepared // statement with string substitution (the {array_bind}) below, and then bind the // parameters by number. - let query = format!( - r#" - SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible - FROM (SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in {array_bind} AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind}) - GROUP BY workspace_id) - WHERE num_matching = ?) as matching_workspace - JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id - "#, - array_bind = array_binding_stmt - ); - - // This will only be called on start up and when root workspaces change, no need to waste memory - // caching it. 
- let mut stmt = connection.prepare(&query)?; - - // Make sure we bound the parameters correctly - debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count()); - - let root_bytes: Vec<&[u8]> = worktree_roots - .iter() - .map(|root| root.as_ref().as_os_str().as_bytes()) - .collect(); - - let num_of_roots = root_bytes.len(); - - stmt.with_bindings((root_bytes, num_of_roots))? + connection + .prepare(formatdoc! {" + SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible + FROM (SELECT workspace_id + FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots + WHERE worktree_root in ({roots}) AND workspace_id NOT IN + (SELECT wt1.workspace_id FROM worktree_roots as wt1 + JOIN worktree_roots as wt2 + ON wt1.workspace_id = wt2.workspace_id + WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots})) + GROUP BY workspace_id) + WHERE num_matching = ?) as matching_workspace + JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id", + roots = + // Prepare the array binding string. SQL doesn't have syntax for this, so + // we have to do it ourselves. + (0..worktree_roots.len()) + .map(|index| format!("?{}", index + 1)) + .collect::>() + .join(", ") + })? + .with_bindings(( + worktree_roots + .into_iter() + .map(|p| p.as_ref()) + .collect::>(), + worktree_roots.len(), + ))? 
.maybe_row::() } #[cfg(test)] mod tests { - use std::{path::PathBuf, thread::sleep, time::Duration}; - - use crate::Db; - - use super::WorkspaceId; - - #[test] - fn test_new_worktrees_for_roots() { - env_logger::init(); - let db = Db::open_in_memory("test_new_worktrees_for_roots"); - - // Test creation in 0 case - let workspace_1 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - - // Test pulling from recent workspaces - let workspace_1 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - - // Ensure the timestamps are different - sleep(Duration::from_secs(1)); - db.make_new_workspace::(&[]); - - // Test pulling another value from recent workspaces - let workspace_2 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); - - // Ensure the timestamps are different - sleep(Duration::from_secs(1)); - - // Test creating a new workspace that doesn't exist already - let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - - // Make sure it's in the recent workspaces.... - let workspace_3 = db.workspace_for_roots::(&[]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - - // And that it can be pulled out again - let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - } - - #[test] - fn test_empty_worktrees() { - let db = Db::open_in_memory("test_empty_worktrees"); - - assert_eq!(None, db.workspace::(&[])); - - db.make_new_workspace::(&[]); //ID 1 - db.make_new_workspace::(&[]); //ID 2 - db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - - // Sanity check - assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); - - db.update_worktrees::(&WorkspaceId(1), &[]); - - // Make sure 'no worktrees' fails correctly. 
returning [1, 2] from this - // call would be semantically correct (as those are the workspaces that - // don't have roots) but I'd prefer that this API to either return exactly one - // workspace, and None otherwise - assert_eq!(db.workspace::(&[]), None,); - - assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); - - assert_eq!( - db.recent_workspaces(2), - vec![Vec::::new(), Vec::::new()], - ) - } - - #[test] - fn test_more_workspace_ids() { - let data = &[ - (WorkspaceId(1), vec!["/tmp1"]), - (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), - (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), - (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), - (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), - (WorkspaceId(7), vec!["/tmp2"]), - ]; - - let db = Db::open_in_memory("test_more_workspace_ids"); - - for (workspace_id, entries) in data { - db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } - - assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); - assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); - assert_eq!( - db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, - WorkspaceId(3) - ); - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); - assert_eq!( - db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, - WorkspaceId(5) - ); - assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); - assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); - - assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); - assert_eq!(db.workspace(&["/tmp5"]), None); - assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); - } - - #[test] - fn test_detect_workspace_id() { - let data = &[ - (WorkspaceId(1), vec!["/tmp"]), - (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), - ]; - - let db = Db::open_in_memory("test_detect_workspace_id"); - - for (workspace_id, entries) in data { - 
db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } - - assert_eq!(db.workspace(&["/tmp2"]), None); - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); - assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); - assert_eq!( - db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, - WorkspaceId(3) - ); - } - - #[test] - fn test_tricky_overlapping_updates() { - // DB state: - // (/tree) -> ID: 1 - // (/tree, /tree2) -> ID: 2 - // (/tree2, /tree3) -> ID: 3 - - // -> User updates 2 to: (/tree2, /tree3) - - // DB state: - // (/tree) -> ID: 1 - // (/tree2, /tree3) -> ID: 2 - // Get rid of 3 for garbage collection - - let data = &[ - (WorkspaceId(1), vec!["/tmp"]), - (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), - ]; - - let db = Db::open_in_memory("test_tricky_overlapping_update"); - - // Load in the test data - for (workspace_id, entries) in data { - db.make_new_workspace::(&[]); - db.update_worktrees(workspace_id, entries); - } - - sleep(Duration::from_secs(1)); - // Execute the update - db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); - - // Make sure that workspace 3 doesn't exist - assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); - - // And that workspace 1 was untouched - assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - - // And that workspace 2 is no longer registered under these roots - assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - - assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); - - let recent_workspaces = db.recent_workspaces(10); - assert_eq!( - recent_workspaces.get(0).unwrap(), - &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] - ); - assert_eq!( - recent_workspaces.get(1).unwrap(), - &vec![PathBuf::from("/tmp")] - ); - } + // use std::{path::PathBuf, thread::sleep, time::Duration}; + + // use crate::Db; + + // use 
super::WorkspaceId; + + // #[test] + // fn test_workspace_saving() { + // env_logger::init(); + // let db = Db::open_in_memory("test_new_worktrees_for_roots"); + + // // Test nothing returned with no roots at first + // assert_eq!(db.workspace_for_roots::(&[]), None); + + // // Test creation + // let workspace_1 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); + + // // Ensure the timestamps are different + // sleep(Duration::from_secs(1)); + // db.make_new_workspace::(&[]); + + // // Test pulling another value from recent workspaces + // let workspace_2 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); + + // // Ensure the timestamps are different + // sleep(Duration::from_secs(1)); + + // // Test creating a new workspace that doesn't exist already + // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // // Make sure it's in the recent workspaces.... + // let workspace_3 = db.workspace_for_roots::(&[]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + + // // And that it can be pulled out again + // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); + // } + + // #[test] + // fn test_empty_worktrees() { + // let db = Db::open_in_memory("test_empty_worktrees"); + + // assert_eq!(None, db.workspace::(&[])); + + // db.make_new_workspace::(&[]); //ID 1 + // db.make_new_workspace::(&[]); //ID 2 + // db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); + + // // Sanity check + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); + + // db.update_worktrees::(&WorkspaceId(1), &[]); + + // // Make sure 'no worktrees' fails correctly. 
returning [1, 2] from this + // // call would be semantically correct (as those are the workspaces that + // // don't have roots) but I'd prefer that this API to either return exactly one + // // workspace, and None otherwise + // assert_eq!(db.workspace::(&[]), None,); + + // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); + + // assert_eq!( + // db.recent_workspaces(2), + // vec![Vec::::new(), Vec::::new()], + // ) + // } + + // #[test] + // fn test_more_workspace_ids() { + // let data = &[ + // (WorkspaceId(1), vec!["/tmp1"]), + // (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), + // (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), + // (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), + // (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), + // (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), + // (WorkspaceId(7), vec!["/tmp2"]), + // ]; + + // let db = Db::open_in_memory("test_more_workspace_ids"); + + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } + + // assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); + // assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); + // assert_eq!( + // db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, + // WorkspaceId(3) + // ); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); + // assert_eq!( + // db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, + // WorkspaceId(5) + // ); + // assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); + // assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); + + // assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); + // assert_eq!(db.workspace(&["/tmp5"]), None); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); + // } + + // #[test] + // fn test_detect_workspace_id() { + // let data = &[ + // (WorkspaceId(1), vec!["/tmp"]), + // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + // 
(WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), + // ]; + + // let db = Db::open_in_memory("test_detect_workspace_id"); + + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } + + // assert_eq!(db.workspace(&["/tmp2"]), None); + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); + // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); + // assert_eq!( + // db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, + // WorkspaceId(3) + // ); + // } + + // #[test] + // fn test_tricky_overlapping_updates() { + // // DB state: + // // (/tree) -> ID: 1 + // // (/tree, /tree2) -> ID: 2 + // // (/tree2, /tree3) -> ID: 3 + + // // -> User updates 2 to: (/tree2, /tree3) + + // // DB state: + // // (/tree) -> ID: 1 + // // (/tree2, /tree3) -> ID: 2 + // // Get rid of 3 for garbage collection + + // let data = &[ + // (WorkspaceId(1), vec!["/tmp"]), + // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), + // (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), + // ]; + + // let db = Db::open_in_memory("test_tricky_overlapping_update"); + + // // Load in the test data + // for (workspace_id, entries) in data { + // db.make_new_workspace::(&[]); + // db.update_worktrees(workspace_id, entries); + // } + + // sleep(Duration::from_secs(1)); + // // Execute the update + // db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); + + // // Make sure that workspace 3 doesn't exist + // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); + + // // And that workspace 1 was untouched + // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); + + // // And that workspace 2 is no longer registered under these roots + // assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); + + // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); + + // let recent_workspaces = db.recent_workspaces(10); + // 
assert_eq!( + // recent_workspaces.get(0).unwrap(), + // &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] + // ); + // assert_eq!( + // recent_workspaces.get(1).unwrap(), + // &vec![PathBuf::from("/tmp")] + // ); + // } } diff --git a/crates/db/src/items.rs b/crates/db/src/workspace/items.rs similarity index 97% rename from crates/db/src/items.rs rename to crates/db/src/workspace/items.rs index 93251e5eedba4de01795e99c5504d2f55a515ff4..c3405974d5dbb25e5dfe19cb1d47b1d72471025f 100644 --- a/crates/db/src/items.rs +++ b/crates/db/src/workspace/items.rs @@ -65,11 +65,6 @@ // ) STRICT; // "; -#[derive(Debug, PartialEq, Eq)] -pub struct ItemId { - pub item_id: usize, -} - // enum SerializedItemKind { // Editor, // Diagnostics, diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs new file mode 100644 index 0000000000000000000000000000000000000000..148b6b76cabba0c7bc09bc8c0b90fba6a2727c7c --- /dev/null +++ b/crates/db/src/workspace/model.rs @@ -0,0 +1,173 @@ +use anyhow::{bail, Result}; + +use gpui::Axis; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; + +#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] +pub(crate) struct WorkspaceId(pub(crate) i64); + +impl Bind for WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.0.bind(statement, start_index) + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) + } +} + +#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] +pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, +} + +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for 
DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + +pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); + +#[derive(Default, Debug)] +pub struct SerializedWorkspace { + pub center_group: SerializedPaneGroup, + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub dock_pane: SerializedDockPane, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct PaneId { + workspace_id: WorkspaceId, + pane_id: usize, +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone)] +pub struct PaneGroupId { + workspace_id: WorkspaceId, +} + +impl PaneGroupId { + pub fn root(workspace_id: WorkspaceId) -> Self { + Self { + workspace_id, + // group_id: 0, + } + } +} + +#[derive(Debug, PartialEq, Eq, Default)] +pub struct SerializedPaneGroup { + axis: Axis, + children: Vec, +} + +impl SerializedPaneGroup { + pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self { + Self { + // group_id: PaneGroupId::root(workspace_id), + axis: Default::default(), + children: Default::default(), + } + } +} + +#[derive(Default, Debug, PartialEq, Eq)] +pub struct SerializedDockPane { + pub anchor_position: DockAnchor, + pub visible: bool, +} + +impl SerializedDockPane { + fn to_row(&self, workspace: &WorkspaceId) -> DockRow { + DockRow { + workspace_id: *workspace, + anchor_position: self.anchor_position, + visible: self.visible, + } + } +} + +impl Column for SerializedDockPane { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(DockAnchor, bool) as Column>::column(statement, start_index).map( + |((anchor_position, visible), next_index)| { + ( + SerializedDockPane { + anchor_position, + 
visible, + }, + next_index, + ) + }, + ) + } +} + +#[derive(Default, Debug, PartialEq, Eq)] +pub(crate) struct DockRow { + workspace_id: WorkspaceId, + anchor_position: DockAnchor, + visible: bool, +} + +impl Bind for DockRow { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + statement.bind( + (self.workspace_id, self.anchor_position, self.visible), + start_index, + ) + } +} + +impl Column for DockRow { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( + |((workspace_id, anchor_position, visible), next_index)| { + ( + DockRow { + workspace_id, + anchor_position, + visible, + }, + next_index, + ) + }, + ) + } +} + +#[derive(Debug, PartialEq, Eq)] +pub struct ItemId { + pub item_id: usize, +} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs new file mode 100644 index 0000000000000000000000000000000000000000..3c007fd402e91dcc85d98b42433ec68254bb9b0a --- /dev/null +++ b/crates/db/src/workspace/pane.rs @@ -0,0 +1,169 @@ +use gpui::Axis; +use indoc::indoc; +use sqlez::migrations::Migration; +use util::{iife, ResultExt}; + +use super::{ + model::{PaneGroupId, PaneId, SerializedDockPane, SerializedPaneGroup, WorkspaceId}, + Db, +}; + +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! 
{" + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + parent_group INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id INTEGER NOT NULL, + group_id INTEGER, -- If null, this is a dock pane + idx INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + pane_id INTEGER NOT NULL, + workspace_id INTEGER NOT NULL, + kind TEXT NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); + +impl Db { + pub(crate) fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + unimplemented!() + } + + pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + unimplemented!() + // let axis = self.get_pane_group_axis(pane_group_id); + // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); + // for child_row in self.get_pane_group_children(pane_group_id) { + // if let Some(child_pane_id) = child_row.child_pane_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Pane(self.get_pane(PaneId { + // workspace_id: pane_group_id.workspace_id, + // pane_id: child_pane_id, + // })), + // )); + // } else if let Some(child_group_id) = child_row.child_group_id { + // children.push(( + // child_row.index, + // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { + // workspace_id: 
pane_group_id.workspace_id, + // group_id: child_group_id, + // })), + // )); + // } + // } + // children.sort_by_key(|(index, _)| *index); + + // SerializedPaneGroup { + // group_id: pane_group_id, + // axis, + // children: children.into_iter().map(|(_, child)| child).collect(), + // } + } + + // fn _get_pane_group_children( + // &self, + // _pane_group_id: PaneGroupId, + // ) -> impl Iterator { + // Vec::new().into_iter() + // } + + pub(crate) fn save_pane_splits( + &self, + _workspace: &WorkspaceId, + _center_pane_group: &SerializedPaneGroup, + ) { + // Delete the center pane group for this workspace and any of its children + // Generate new pane group IDs as we go through + // insert them + } + + pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { + unimplemented!(); + } + + pub(crate) fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { + iife!({ + self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? + .with_bindings(workspace)? + .maybe_row::() + }) + .log_err() + .flatten() + } + + pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + // iife!({ + // self.prepare( + // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", + // )? + // .with_bindings(dock_pane.to_row(workspace))? 
+ // .insert() + // }) + // .log_err(); + } +} + +#[cfg(test)] +mod tests { + + // use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; + + // use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; + + // #[test] + // fn test_basic_dock_pane() { + // let db = Db::open_in_memory("basic_dock_pane"); + + // let workspace = db.workspace_for_roots(&["/tmp"]); + + // let dock_pane = SerializedDockPane { + // anchor_position: DockAnchor::Expanded, + // visible: true, + // }; + + // db.save_dock_pane(&workspace.workspace_id, &dock_pane); + + // let new_workspace = db.workspace_for_roots(&["/tmp"]); + + // assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); + // } + + // #[test] + // fn test_dock_simple_split() { + // let db = Db::open_in_memory("simple_split"); + + // let workspace = db.workspace_for_roots(&["/tmp"]); + + // // Pane group -> Pane -> 10 , 20 + // let center_pane = SerializedPaneGroup { + // axis: gpui::Axis::Horizontal, + // children: vec![PaneGroupChild::Pane(SerializedPane { + // items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], + // })], + // }; + + // db.save_pane_splits(&workspace.workspace_id, ¢er_pane); + + // // let new_workspace = db.workspace_for_roots(&["/tmp"]); + + // // assert_eq!(new_workspace.center_group, center_pane); + // } +} diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index e2cdde039ebb34cc51c40e4d8494413fbeffd3a8..a22363ae63dd5b9c281e8a80418f159d93a6fd77 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -1,3 +1,9 @@ +use std::{ + ffi::{CString, OsStr}, + os::unix::prelude::OsStrExt, + path::{Path, PathBuf}, +}; + use anyhow::Result; use crate::statement::{SqlType, Statement}; @@ -241,3 +247,20 @@ impl Bind for &[T] { Ok(current_index) } } + +impl Bind for &Path { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.as_os_str().as_bytes().bind(statement, start_index) + } +} + +impl Column for PathBuf { + fn 
column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + + Ok(( + PathBuf::from(OsStr::from_bytes(blob).to_owned()), + start_index + 1, + )) + } +} From d419f27d75addb64fe06deecb9be96c488afc2cc Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 3 Nov 2022 22:37:40 -0700 Subject: [PATCH 37/86] replace worktree roots table with serialized worktree roots list --- Cargo.lock | 2 + crates/db/Cargo.toml | 2 + crates/db/src/workspace.rs | 212 ++++++++----------------------- crates/db/src/workspace/model.rs | 132 ++++--------------- crates/db/src/workspace/pane.rs | 24 ++-- 5 files changed, 99 insertions(+), 273 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3e8526fbed6e87f7ec82ce65135fead97cb24191..8fa755b1614a93faaecdba1ee005c66ab350dfa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1550,6 +1550,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "bincode", "collections", "env_logger", "gpui", @@ -1557,6 +1558,7 @@ dependencies = [ "lazy_static", "log", "parking_lot 0.11.2", + "serde", "sqlez", "tempdir", "util", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 1ee9de6186711940ee3ec04c6fe2abb4aa2f6510..b69779c4089ed9691d4dd4aa04e41176477c42a1 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -21,6 +21,8 @@ async-trait = "0.1" lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" +serde = { version = "1.0", features = ["derive"] } +bincode = "1.2.1" [dev-dependencies] diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 4e65c9788c2563f66a69c112ba6eebf8b12b7e0a..5fc9e075e979cd071992f183855b19393a704178 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -3,12 +3,12 @@ pub mod model; pub(crate) mod pane; use anyhow::{Context, Result}; -use util::ResultExt; +use util::{iife, ResultExt}; use std::path::{Path, PathBuf}; -use indoc::{formatdoc, indoc}; 
-use sqlez::{connection::Connection, migrations::Migration}; +use indoc::indoc; +use sqlez::migrations::Migration; // If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging // you might want to update some of the parsing code as well, I've left the variations in but commented @@ -17,18 +17,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" CREATE TABLE workspaces( - workspace_id INTEGER PRIMARY KEY, + workspace_id BLOB PRIMARY KEY, dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_visible INTEGER, -- Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL ) STRICT; - - CREATE TABLE worktree_roots( - worktree_root BLOB NOT NULL, - workspace_id INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - PRIMARY KEY(worktree_root, workspace_id) - ) STRICT; "}], ); @@ -37,34 +30,39 @@ use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; use super::Db; impl Db { - /// Finds or creates a workspace id for the given set of worktree roots. If the passed worktree roots is empty, - /// returns the last workspace which was updated + /// Returns a serialized workspace for the given worktree_roots. If the passed array + /// is empty, the most recent workspace is returned instead. If no workspace for the + /// passed roots is stored, returns none. 
pub fn workspace_for_roots>( &self, worktree_roots: &[P], ) -> Option { - // Find the workspace id which is uniquely identified by this set of paths - // return it if found - let mut workspace_row = get_workspace(worktree_roots, &self) - .log_err() - .unwrap_or_default(); - - if workspace_row.is_none() && worktree_roots.len() == 0 { - // Return last workspace if no roots passed - workspace_row = self.prepare( - "SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1" - ).and_then(|mut stmt| stmt.maybe_row::()) - .log_err() - .flatten(); - } - - workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| { - Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_id)?, - center_group: self.get_center_group(workspace_id), - dock_anchor, - dock_visible, - }) + let workspace_id: WorkspaceId = worktree_roots.into(); + + let (_, dock_anchor, dock_visible) = iife!({ + if worktree_roots.len() == 0 { + self.prepare(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + ORDER BY timestamp DESC LIMIT 1"})? + .maybe_row::() + } else { + self.prepare(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + WHERE workspace_id = ?"})? + .with_bindings(workspace_id)? + .maybe_row::() + } + }) + .log_err() + .flatten()?; + + Some(SerializedWorkspace { + dock_pane: self.get_dock_pane(workspace_id)?, + center_group: self.get_center_group(workspace_id), + dock_anchor, + dock_visible, }) } @@ -75,146 +73,40 @@ impl Db { worktree_roots: &[P], workspace: SerializedWorkspace, ) { - self.with_savepoint("update_worktrees", |conn| { - // Lookup any old WorkspaceIds which have the same set of roots, and delete them. - if let Some((id_to_delete, _, _)) = get_workspace(worktree_roots, &conn)? { - // Should also delete fields in other tables with cascading updates and insert - // new entry - conn.prepare("DELETE FROM workspaces WHERE workspace_id = ?")? - .with_bindings(id_to_delete)? 
- .exec()?; - } + let workspace_id: WorkspaceId = worktree_roots.into(); + self.with_savepoint("update_worktrees", |conn| { + // Delete any previous workspaces with the same roots. This cascades to all + // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - let workspace_id = WorkspaceId( - conn.prepare("INSERT INTO workspaces(dock_anchor, dock_visible) VALUES (?, ?)")? - .with_bindings((workspace.dock_anchor, workspace.dock_visible))? - .insert()?, - ); - - // Write worktree_roots with new workspace_id - for root in worktree_roots { - conn.prepare( - "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)", - )? - .with_bindings((workspace_id, root.as_ref()))? - .exec()?; - } + self.prepare(indoc!{" + DELETE FROM workspaces WHERE workspace_id = ?1; + INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? + .with_bindings((workspace_id, workspace.dock_anchor, workspace.dock_visible))? + .exec()?; + + // Save center pane group and dock pane + Self::save_center_group(workspace_id, &workspace.center_group, conn)?; + Self::save_dock_pane(workspace_id, &workspace.dock_pane, conn)?; Ok(()) }) - .context("Update workspace with roots {worktree_roots:?}") + .with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::>())) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { - self.with_savepoint("recent_workspaces", |conn| { - let mut roots_by_id = - conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?; - - conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + iife!({ + self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? 
- .iter() - .map(|workspace_id| roots_by_id.with_bindings(workspace_id.0)?.rows::()) - .collect::>() - }) - .log_err() - .unwrap_or_default() + .into_iter().map(|id| id.0) + .collect() + }).log_err().unwrap_or_default() } } -fn get_workspace>( - worktree_roots: &[P], - connection: &Connection, -) -> Result> { - // Short circuit if we can - if worktree_roots.len() == 0 { - return Ok(None); - } - - // Any workspace can have multiple independent paths, and these paths - // can overlap in the database. Take this test data for example: - // - // [/tmp, /tmp2] -> 1 - // [/tmp] -> 2 - // [/tmp2, /tmp3] -> 3 - // - // This would be stred in the database like so: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // 3 /tmp2 - // 3 /tmp3 - // - // Note how both /tmp and /tmp2 are associated with multiple workspace IDs. - // So, given an array of worktree roots, how can we find the exactly matching ID? - // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out: - // - We start with a join of this table on itself, generating every possible - // pair of ((path, ID), (path, ID)), and filtering the join down to just the - // *overlapping but non-matching* workspace IDs. For this small data set, - // this would look like: - // - // wt1.ID wt1.PATH | wt2.ID wt2.PATH - // 3 /tmp3 3 /tmp2 - // - // - Moving one SELECT out, we use the first pair's ID column to invert the selection, - // meaning we now have a list of all the entries for our array, minus overlapping sets, - // but including *subsets* of our worktree roots: - // - // ID PATH - // 1 /tmp - // 1 /tmp2 - // 2 /tmp - // - // - To trim out the subsets, we can to exploit the PRIMARY KEY constraint that there are no - // duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of - // our keys: - // - // ID num_matching - // 1 2 - // 2 1 - // - // - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! 
We've found the - // matching ID correctly :D - // - // Note: due to limitations in SQLite's query binding, we have to generate the prepared - // statement with string substitution (the {array_bind}) below, and then bind the - // parameters by number. - connection - .prepare(formatdoc! {" - SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible - FROM (SELECT workspace_id - FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots - WHERE worktree_root in ({roots}) AND workspace_id NOT IN - (SELECT wt1.workspace_id FROM worktree_roots as wt1 - JOIN worktree_roots as wt2 - ON wt1.workspace_id = wt2.workspace_id - WHERE wt1.worktree_root NOT in ({roots}) AND wt2.worktree_root in ({roots})) - GROUP BY workspace_id) - WHERE num_matching = ?) as matching_workspace - JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id", - roots = - // Prepare the array binding string. SQL doesn't have syntax for this, so - // we have to do it ourselves. - (0..worktree_roots.len()) - .map(|index| format!("?{}", index + 1)) - .collect::>() - .join(", ") - })? - .with_bindings(( - worktree_roots - .into_iter() - .map(|p| p.as_ref()) - .collect::>(), - worktree_roots.len(), - ))? 
- .maybe_row::() -} - #[cfg(test)] mod tests { diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 148b6b76cabba0c7bc09bc8c0b90fba6a2727c7c..37c353a47b65252069e1b0c2368002992576585f 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -1,3 +1,5 @@ +use std::path::{Path, PathBuf}; + use anyhow::{bail, Result}; use gpui::Axis; @@ -6,18 +8,32 @@ use sqlez::{ statement::Statement, }; -#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)] -pub(crate) struct WorkspaceId(pub(crate) i64); +#[derive(Debug, PartialEq, Eq, Clone)] +pub(crate) struct WorkspaceId(Vec); + +impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } +} impl Bind for WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - self.0.bind(statement, start_index) + bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) } } impl Column for WorkspaceId { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index)) + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) } } @@ -58,116 +74,24 @@ impl Column for DockAnchor { pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); -#[derive(Default, Debug)] +#[derive(Debug)] pub struct SerializedWorkspace { - pub center_group: SerializedPaneGroup, pub dock_anchor: DockAnchor, pub dock_visible: bool, - pub dock_pane: SerializedDockPane, -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneId { - workspace_id: WorkspaceId, - pane_id: usize, -} - -#[derive(Debug, PartialEq, Eq, Copy, Clone)] -pub struct PaneGroupId { - workspace_id: WorkspaceId, -} - -impl PaneGroupId { - 
pub fn root(workspace_id: WorkspaceId) -> Self { - Self { - workspace_id, - // group_id: 0, - } - } + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq, Default)] +#[derive(Debug, PartialEq, Eq)] pub struct SerializedPaneGroup { axis: Axis, children: Vec, } -impl SerializedPaneGroup { - pub(crate) fn empty_root(_workspace_id: WorkspaceId) -> Self { - Self { - // group_id: PaneGroupId::root(workspace_id), - axis: Default::default(), - children: Default::default(), - } - } +pub struct SerializedPane { + children: Vec, } -#[derive(Default, Debug, PartialEq, Eq)] -pub struct SerializedDockPane { - pub anchor_position: DockAnchor, - pub visible: bool, -} +pub enum SerializedItemKind {} -impl SerializedDockPane { - fn to_row(&self, workspace: &WorkspaceId) -> DockRow { - DockRow { - workspace_id: *workspace, - anchor_position: self.anchor_position, - visible: self.visible, - } - } -} - -impl Column for SerializedDockPane { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(DockAnchor, bool) as Column>::column(statement, start_index).map( - |((anchor_position, visible), next_index)| { - ( - SerializedDockPane { - anchor_position, - visible, - }, - next_index, - ) - }, - ) - } -} - -#[derive(Default, Debug, PartialEq, Eq)] -pub(crate) struct DockRow { - workspace_id: WorkspaceId, - anchor_position: DockAnchor, - visible: bool, -} - -impl Bind for DockRow { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - statement.bind( - (self.workspace_id, self.anchor_position, self.visible), - start_index, - ) - } -} - -impl Column for DockRow { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - <(WorkspaceId, DockAnchor, bool) as Column>::column(statement, start_index).map( - |((workspace_id, anchor_position, visible), next_index)| { - ( - DockRow { - workspace_id, - anchor_position, - visible, - }, - next_index, - ) - 
}, - ) - } -} - -#[derive(Debug, PartialEq, Eq)] -pub struct ItemId { - pub item_id: usize, -} +pub enum SerializedItem {} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 3c007fd402e91dcc85d98b42433ec68254bb9b0a..4f263e496c7ff1abbf72103f12d92a2613ef2989 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use gpui::Axis; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{connection::Connection, migrations::Migration}; use util::{iife, ResultExt}; use super::{ @@ -13,26 +13,28 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( &[indoc! {" CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, parent_group INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + PRIMARY KEY(group_id, workspace_id) ) STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, group_id INTEGER, -- If null, this is a dock pane idx INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + PRIMARY KEY(pane_id, workspace_id) ) STRICT; CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique pane_id INTEGER NOT NULL, - workspace_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, kind TEXT NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE @@ -46,7 +48,7 @@ impl Db { unimplemented!() } - pub(crate) fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub fn 
get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { unimplemented!() // let axis = self.get_pane_group_axis(pane_group_id); // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); @@ -85,17 +87,17 @@ impl Db { // Vec::new().into_iter() // } - pub(crate) fn save_pane_splits( - &self, + pub(crate) fn save_center_group( _workspace: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, + _connection: &Connection, ) { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them } - pub(crate) fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { + pub fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { unimplemented!(); } @@ -109,7 +111,11 @@ impl Db { .flatten() } - pub(crate) fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) { + pub(crate) fn save_dock_pane( + workspace: &WorkspaceId, + dock_pane: &SerializedDockPane, + connection: &Connection, + ) { // iife!({ // self.prepare( // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", From 6b214acbc4f3c02df6e02b2d009a0ce7e7edb9ad Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 2022 10:34:28 -0700 Subject: [PATCH 38/86] =?UTF-8?q?Got=20Zed=20compiling=20again=20?= =?UTF-8?q?=F0=9F=A5=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- crates/db/examples/serialize-pane.rs | 52 +++++++++++++--------------- crates/db/src/workspace.rs | 26 +++++++------- crates/db/src/workspace/model.rs | 11 +++--- crates/db/src/workspace/pane.rs | 37 ++++++++------------ crates/sqlez/src/bindable.rs | 2 +- crates/workspace/src/workspace.rs | 8 ++--- 6 files changed, 65 insertions(+), 71 deletions(-) diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs index ebe88037cd259cd9cfab4aa7c91778a1cf3eaeb9..2f362fb99724887b21a0361f19641421d37956a0 100644 --- 
a/crates/db/examples/serialize-pane.rs +++ b/crates/db/examples/serialize-pane.rs @@ -1,7 +1,5 @@ use std::{fs::File, path::Path}; -use db::{pane::SerializedDockPane, DockAnchor}; - const TEST_FILE: &'static str = "test-db.db"; fn main() -> anyhow::Result<()> { @@ -14,31 +12,31 @@ fn main() -> anyhow::Result<()> { let f = File::create(file)?; drop(f); - let workspace_1 = db.workspace_for_roots(&["/tmp"]); - let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - - db.save_dock_pane( - &workspace_1.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Expanded, - visible: true, - }, - ); - db.save_dock_pane( - &workspace_2.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Bottom, - visible: true, - }, - ); - db.save_dock_pane( - &workspace_3.workspace_id, - &SerializedDockPane { - anchor_position: DockAnchor::Right, - visible: false, - }, - ); + // let workspace_1 = db.workspace_for_roots(&["/tmp"]); + // let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); + // let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); + + // db.save_dock_pane( + // &workspace_1.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Expanded, + // visible: true, + // }, + // ); + // db.save_dock_pane( + // &workspace_2.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Bottom, + // visible: true, + // }, + // ); + // db.save_dock_pane( + // &workspace_3.workspace_id, + // &SerializedDockPane { + // anchor_position: DockAnchor::Right, + // visible: false, + // }, + // ); db.write_to(file).ok(); diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 5fc9e075e979cd071992f183855b19393a704178..cf09bdd06eb137e01f428452ae59f1522a73be3c 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -2,7 +2,7 @@ mod items; pub mod model; pub(crate) mod pane; -use anyhow::{Context, Result}; +use 
anyhow::Context; use util::{iife, ResultExt}; use std::path::{Path, PathBuf}; @@ -10,9 +10,6 @@ use std::path::{Path, PathBuf}; use indoc::indoc; use sqlez::migrations::Migration; -// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging -// you might want to update some of the parsing code as well, I've left the variations in but commented -// out. This will panic if run on an existing db that has already been migrated pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", &[indoc! {" @@ -39,7 +36,9 @@ impl Db { ) -> Option { let workspace_id: WorkspaceId = worktree_roots.into(); - let (_, dock_anchor, dock_visible) = iife!({ + // Note that we re-assign the workspace_id here in case it's empty + // and we've grabbed the most recent workspace + let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { self.prepare(indoc! {" SELECT workspace_id, dock_anchor, dock_visible @@ -51,7 +50,7 @@ impl Db { SELECT workspace_id, dock_anchor, dock_visible FROM workspaces WHERE workspace_id = ?"})? - .with_bindings(workspace_id)? + .with_bindings(&workspace_id)? .maybe_row::() } }) @@ -59,8 +58,8 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(workspace_id)?, - center_group: self.get_center_group(workspace_id), + dock_pane: self.get_dock_pane(&workspace_id)?, + center_group: self.get_center_group(&workspace_id), dock_anchor, dock_visible, }) @@ -82,12 +81,12 @@ impl Db { self.prepare(indoc!{" DELETE FROM workspaces WHERE workspace_id = ?1; INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? - .with_bindings((workspace_id, workspace.dock_anchor, workspace.dock_visible))? + .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? 
.exec()?; // Save center pane group and dock pane - Self::save_center_group(workspace_id, &workspace.center_group, conn)?; - Self::save_dock_pane(workspace_id, &workspace.dock_pane, conn)?; + Self::save_center_group(&workspace_id, &workspace.center_group, conn)?; + Self::save_dock_pane(&workspace_id, &workspace.dock_pane, conn)?; Ok(()) }) @@ -98,11 +97,12 @@ impl Db { /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { iife!({ - self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + Ok::<_, anyhow::Error>(self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? .with_bindings(limit)? .rows::()? .into_iter().map(|id| id.0) - .collect() + .collect::>>()) + }).log_err().unwrap_or_default() } } diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 37c353a47b65252069e1b0c2368002992576585f..1a6b4ee41f2868eee22da3874f53bc76f24e4b6c 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -9,7 +9,7 @@ use sqlez::{ }; #[derive(Debug, PartialEq, Eq, Clone)] -pub(crate) struct WorkspaceId(Vec); +pub(crate) struct WorkspaceId(pub(crate) Vec); impl, T: IntoIterator> From for WorkspaceId { fn from(iterator: T) -> Self { @@ -22,7 +22,7 @@ impl, T: IntoIterator> From for WorkspaceId { } } -impl Bind for WorkspaceId { +impl Bind for &WorkspaceId { fn bind(&self, statement: &Statement, start_index: i32) -> Result { bincode::serialize(&self.0) .expect("Bincode serialization of paths should not fail") @@ -85,13 +85,16 @@ pub struct SerializedWorkspace { #[derive(Debug, PartialEq, Eq)] pub struct SerializedPaneGroup { axis: Axis, - children: Vec, + children: Vec, } +#[derive(Debug)] pub struct SerializedPane { - children: Vec, + _children: Vec, } +#[derive(Debug)] pub enum SerializedItemKind {} +#[derive(Debug)] pub enum SerializedItem {} diff --git 
a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 4f263e496c7ff1abbf72103f12d92a2613ef2989..73306707cfce0dc460a2326a89dd546bc8b48595 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,10 +1,11 @@ -use gpui::Axis; +use anyhow::Result; use indoc::indoc; use sqlez::{connection::Connection, migrations::Migration}; -use util::{iife, ResultExt}; + +use crate::model::SerializedPane; use super::{ - model::{PaneGroupId, PaneId, SerializedDockPane, SerializedPaneGroup, WorkspaceId}, + model::{SerializedPaneGroup, WorkspaceId}, Db, }; @@ -44,11 +45,11 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( ); impl Db { - pub(crate) fn get_center_group(&self, _workspace: WorkspaceId) -> SerializedPaneGroup { + pub(crate) fn get_center_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { unimplemented!() } - pub fn get_pane_group(&self, _pane_group_id: PaneGroupId) -> SerializedPaneGroup { + pub(crate) fn _get_pane_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { unimplemented!() // let axis = self.get_pane_group_axis(pane_group_id); // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); @@ -91,31 +92,22 @@ impl Db { _workspace: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, _connection: &Connection, - ) { + ) -> Result<()> { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through // insert them + Ok(()) } - pub fn _get_pane(&self, _pane_id: PaneId) -> SerializedPane { - unimplemented!(); - } - - pub(crate) fn get_dock_pane(&self, workspace: WorkspaceId) -> Option { - iife!({ - self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")? - .with_bindings(workspace)? 
- .maybe_row::() - }) - .log_err() - .flatten() + pub(crate) fn get_dock_pane(&self, _workspace: &WorkspaceId) -> Option { + unimplemented!() } pub(crate) fn save_dock_pane( - workspace: &WorkspaceId, - dock_pane: &SerializedDockPane, - connection: &Connection, - ) { + _workspace: &WorkspaceId, + _dock_pane: &SerializedPane, + _connection: &Connection, + ) -> Result<()> { // iife!({ // self.prepare( // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", @@ -124,6 +116,7 @@ impl Db { // .insert() // }) // .log_err(); + Ok(()) } } diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index a22363ae63dd5b9c281e8a80418f159d93a6fd77..1ce350a550b05f6d38bb20578e46d5aabe02bdf5 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -1,5 +1,5 @@ use std::{ - ffi::{CString, OsStr}, + ffi::OsStr, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, }; diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index a6ef7c6c01fdb56178ca692f07c1bf85c637bfa2..d1dbc6982b27a178769e3bc29cec8f4474a5de11 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{Db, SerializedWorkspace, WorkspaceId}; +use db::{model::SerializedWorkspace, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1073,7 +1073,7 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - _db_id: WorkspaceId, + // _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1120,7 +1120,7 @@ enum FollowerItem { impl Workspace { pub fn new( - serialized_workspace: SerializedWorkspace, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: 
DefaultItemFactory, cx: &mut ViewContext, @@ -1217,7 +1217,7 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - _db_id: serialized_workspace.workspace_id, + // _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array From 01862894208f931a80be097ebd0e62bd89ecf949 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 2022 13:22:35 -0700 Subject: [PATCH 39/86] Refined sqlez, implemented 60% of workspace serialization sql --- crates/db/src/db.rs | 52 ++--- crates/db/src/migrations.rs | 14 -- crates/db/src/workspace.rs | 337 ++++++++++++------------------- crates/db/src/workspace/items.rs | 118 ++++++----- crates/db/src/workspace/model.rs | 146 ++++++++++++- crates/db/src/workspace/pane.rs | 164 +++++++-------- crates/sqlez/src/bindable.rs | 25 ++- crates/sqlez/src/connection.rs | 78 ++++--- crates/sqlez/src/savepoint.rs | 38 ++-- crates/sqlez/src/statement.rs | 54 ++++- crates/util/src/lib.rs | 10 + 11 files changed, 586 insertions(+), 450 deletions(-) delete mode 100644 crates/db/src/migrations.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 07670e309ae15ed45d58998e225409769f1adbd2..97dfce0e1901167fa388b5b07a40087f8ee46c63 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,5 +1,4 @@ pub mod kvp; -mod migrations; pub mod workspace; use std::fs; @@ -11,8 +10,9 @@ use indoc::indoc; use kvp::KVP_MIGRATION; use sqlez::connection::Connection; use sqlez::thread_safe_connection::ThreadSafeConnection; - +use workspace::items::ITEM_MIGRATIONS; use workspace::pane::PANE_MIGRATIONS; + pub use workspace::*; #[derive(Clone)] @@ -35,32 +35,21 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Db( - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(indoc! 
{" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]), - ) - } - - pub fn persisting(&self) -> bool { - self.persistent() + Db(initialize_connection(ThreadSafeConnection::new( + db_path.to_string_lossy().as_ref(), + true, + ))) } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory(db_name: &str) -> Self { - Db(ThreadSafeConnection::new(db_name, false) - .with_initialize_query(indoc! {" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS])) + Db(initialize_connection(ThreadSafeConnection::new( + db_name, false, + ))) + } + + pub fn persisting(&self) -> bool { + self.persistent() } pub fn write_to>(&self, dest: P) -> Result<()> { @@ -68,3 +57,18 @@ impl Db { self.backup_main(&destination) } } + +fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection { + conn.with_initialize_query(indoc! {" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}) + .with_migrations(&[ + KVP_MIGRATION, + WORKSPACES_MIGRATION, + PANE_MIGRATIONS, + ITEM_MIGRATIONS, + ]) +} diff --git a/crates/db/src/migrations.rs b/crates/db/src/migrations.rs deleted file mode 100644 index a95654f420fa418d4c82e3703cf1328e000f5e20..0000000000000000000000000000000000000000 --- a/crates/db/src/migrations.rs +++ /dev/null @@ -1,14 +0,0 @@ -// // use crate::items::ITEMS_M_1; -// use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACES_MIGRATION}; - -// // This must be ordered by development time! Only ever add new migrations to the end!! -// // Bad things will probably happen if you don't monotonically edit this vec!!!! -// // And no re-ordering ever!!!!!!!!!! 
The results of these migrations are on the user's -// // file system and so everything we do here is locked in _f_o_r_e_v_e_r_. -// lazy_static::lazy_static! { -// pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![ -// M::up(KVP_M_1), -// M::up(WORKSPACE_M_1), -// M::up(PANE_M_1) -// ]); -// } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index cf09bdd06eb137e01f428452ae59f1522a73be3c..b1d139066f3f5f5f19495c83354e5089aa80ca45 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,4 +1,4 @@ -mod items; +pub(crate) mod items; pub mod model; pub(crate) mod pane; @@ -58,8 +58,14 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self.get_dock_pane(&workspace_id)?, - center_group: self.get_center_group(&workspace_id), + dock_pane: self + .get_dock_pane(&workspace_id) + .context("Getting dock pane") + .log_err()?, + center_group: self + .get_center_group(&workspace_id) + .context("Getting center group") + .log_err()?, dock_anchor, dock_visible, }) @@ -70,231 +76,152 @@ impl Db { pub fn save_workspace>( &self, worktree_roots: &[P], - workspace: SerializedWorkspace, + old_roots: Option<&[P]>, + workspace: &SerializedWorkspace, ) { let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", |conn| { + self.with_savepoint("update_worktrees", || { + if let Some(old_roots) = old_roots { + let old_id: WorkspaceId = old_roots.into(); + + self.prepare("DELETE FROM WORKSPACES WHERE workspace_id = ?")? + .with_bindings(&old_id)? + .exec()?; + } + // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - self.prepare(indoc!{" - DELETE FROM workspaces WHERE workspace_id = ?1; - INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?1, ?, ?)"})? + self.prepare("DELETE FROM workspaces WHERE workspace_id = ?;")? 
+ .with_bindings(&workspace_id)? + .exec()?; + + self.prepare( + "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", + )? .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? .exec()?; - + // Save center pane group and dock pane - Self::save_center_group(&workspace_id, &workspace.center_group, conn)?; - Self::save_dock_pane(&workspace_id, &workspace.dock_pane, conn)?; + self.save_center_group(&workspace_id, &workspace.center_group)?; + self.save_dock_pane(&workspace_id, &workspace.dock_pane)?; Ok(()) }) - .with_context(|| format!("Update workspace with roots {:?}", worktree_roots.iter().map(|p| p.as_ref()).collect::>())) + .with_context(|| { + format!( + "Update workspace with roots {:?}", + worktree_roots + .iter() + .map(|p| p.as_ref()) + .collect::>() + ) + }) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec> { iife!({ - Ok::<_, anyhow::Error>(self.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")? + // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html + Ok::<_, anyhow::Error>( + self.prepare( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )? .with_bindings(limit)? .rows::()? 
- .into_iter().map(|id| id.0) - .collect::>>()) - - }).log_err().unwrap_or_default() + .into_iter() + .map(|id| id.paths()) + .collect::>>(), + ) + }) + .log_err() + .unwrap_or_default() } } #[cfg(test)] mod tests { - - // use std::{path::PathBuf, thread::sleep, time::Duration}; - - // use crate::Db; - - // use super::WorkspaceId; - - // #[test] - // fn test_workspace_saving() { - // env_logger::init(); - // let db = Db::open_in_memory("test_new_worktrees_for_roots"); - - // // Test nothing returned with no roots at first - // assert_eq!(db.workspace_for_roots::(&[]), None); - - // // Test creation - // let workspace_1 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_1.workspace_id, WorkspaceId(1)); - - // // Ensure the timestamps are different - // sleep(Duration::from_secs(1)); - // db.make_new_workspace::(&[]); - - // // Test pulling another value from recent workspaces - // let workspace_2 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_2.workspace_id, WorkspaceId(2)); - - // // Ensure the timestamps are different - // sleep(Duration::from_secs(1)); - - // // Test creating a new workspace that doesn't exist already - // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - - // // Make sure it's in the recent workspaces.... 
- // let workspace_3 = db.workspace_for_roots::(&[]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - - // // And that it can be pulled out again - // let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // assert_eq!(workspace_3.workspace_id, WorkspaceId(3)); - // } - - // #[test] - // fn test_empty_worktrees() { - // let db = Db::open_in_memory("test_empty_worktrees"); - - // assert_eq!(None, db.workspace::(&[])); - - // db.make_new_workspace::(&[]); //ID 1 - // db.make_new_workspace::(&[]); //ID 2 - // db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]); - - // // Sanity check - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1)); - - // db.update_worktrees::(&WorkspaceId(1), &[]); - - // // Make sure 'no worktrees' fails correctly. returning [1, 2] from this - // // call would be semantically correct (as those are the workspaces that - // // don't have roots) but I'd prefer that this API to either return exactly one - // // workspace, and None otherwise - // assert_eq!(db.workspace::(&[]), None,); - - // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(1)); - - // assert_eq!( - // db.recent_workspaces(2), - // vec![Vec::::new(), Vec::::new()], - // ) - // } - - // #[test] - // fn test_more_workspace_ids() { - // let data = &[ - // (WorkspaceId(1), vec!["/tmp1"]), - // (WorkspaceId(2), vec!["/tmp1", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]), - // (WorkspaceId(4), vec!["/tmp2", "/tmp3"]), - // (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]), - // (WorkspaceId(6), vec!["/tmp2", "/tmp4"]), - // (WorkspaceId(7), vec!["/tmp2"]), - // ]; - - // let db = Db::open_in_memory("test_more_workspace_ids"); - - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0); - // assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2)); - // 
assert_eq!( - // db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0, - // WorkspaceId(3) - // ); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4)); - // assert_eq!( - // db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0, - // WorkspaceId(5) - // ); - // assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6)); - // assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7)); - - // assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None); - // assert_eq!(db.workspace(&["/tmp5"]), None); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None); - // } - - // #[test] - // fn test_detect_workspace_id() { - // let data = &[ - // (WorkspaceId(1), vec!["/tmp"]), - // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]), - // ]; - - // let db = Db::open_in_memory("test_detect_workspace_id"); - - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // assert_eq!(db.workspace(&["/tmp2"]), None); - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None); - // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2)); - // assert_eq!( - // db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0, - // WorkspaceId(3) - // ); - // } - - // #[test] - // fn test_tricky_overlapping_updates() { - // // DB state: - // // (/tree) -> ID: 1 - // // (/tree, /tree2) -> ID: 2 - // // (/tree2, /tree3) -> ID: 3 - - // // -> User updates 2 to: (/tree2, /tree3) - - // // DB state: - // // (/tree) -> ID: 1 - // // (/tree2, /tree3) -> ID: 2 - // // Get rid of 3 for garbage collection - - // let data = &[ - // (WorkspaceId(1), vec!["/tmp"]), - // (WorkspaceId(2), vec!["/tmp", "/tmp2"]), - // (WorkspaceId(3), vec!["/tmp2", "/tmp3"]), - // ]; - - // let db = Db::open_in_memory("test_tricky_overlapping_update"); - - // // Load in 
the test data - // for (workspace_id, entries) in data { - // db.make_new_workspace::(&[]); - // db.update_worktrees(workspace_id, entries); - // } - - // sleep(Duration::from_secs(1)); - // // Execute the update - // db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]); - - // // Make sure that workspace 3 doesn't exist - // assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2)); - - // // And that workspace 1 was untouched - // assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1)); - - // // And that workspace 2 is no longer registered under these roots - // assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None); - - // assert_eq!(db.last_workspace().unwrap().0, WorkspaceId(2)); - - // let recent_workspaces = db.recent_workspaces(10); - // assert_eq!( - // recent_workspaces.get(0).unwrap(), - // &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")] - // ); - // assert_eq!( - // recent_workspaces.get(1).unwrap(), - // &vec![PathBuf::from("/tmp")] - // ); - // } + use crate::{ + model::{ + DockAnchor::{Bottom, Expanded, Right}, + SerializedWorkspace, + }, + Db, + }; + + #[test] + fn test_basic_functionality() { + env_logger::init(); + + let db = Db::open_in_memory("test_basic_functionality"); + + let workspace_1 = SerializedWorkspace { + dock_anchor: Bottom, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_2 = SerializedWorkspace { + dock_anchor: Expanded, + dock_visible: false, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_3 = SerializedWorkspace { + dock_anchor: Right, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); + db.save_workspace(&["/tmp"], None, &workspace_2); + + db.write_to("test.db").unwrap(); + + // Test that paths are treated as a set + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + 
workspace_1 + ); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + workspace_1 + ); + + // Make sure that other keys work + assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); + assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); + + // Test 'mutate' case of updating a pre-existing id + db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_2 + ); + + // Test other mechanism for mutating + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_3 + ); + + // Make sure that updating paths differently also works + db.save_workspace( + &["/tmp3", "/tmp4", "/tmp2"], + Some(&["/tmp", "/tmp2"]), + &workspace_3, + ); + assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(), + workspace_3 + ); + } } diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index c3405974d5dbb25e5dfe19cb1d47b1d72471025f..87437ccf730433861fba1458c04c0abfa8f85383 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -1,13 +1,11 @@ -// use std::{ -// ffi::OsStr, -// fmt::Display, -// hash::Hash, -// os::unix::prelude::OsStrExt, -// path::{Path, PathBuf}, -// sync::Arc, -// }; - -// use anyhow::Result; +use anyhow::{Context, Result}; +use indoc::indoc; +use sqlez::migrations::Migration; + +use crate::{ + model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, + Db, +}; // use collections::HashSet; // use rusqlite::{named_params, params, types::FromSql}; @@ -65,45 +63,61 @@ // ) STRICT; // "; -// enum SerializedItemKind { -// Editor, -// Diagnostics, -// ProjectSearch, -// Terminal, -// } - -// struct SerializedItemRow { -// kind: SerializedItemKind, -// item_id: usize, -// path: Option>, -// 
query: Option, -// } - -// #[derive(Debug, PartialEq, Eq)] -// pub enum SerializedItem { -// Editor { item_id: usize, path: Arc }, -// Diagnostics { item_id: usize }, -// ProjectSearch { item_id: usize, query: String }, -// Terminal { item_id: usize }, -// } - -// impl SerializedItem { -// pub fn item_id(&self) -> usize { -// match self { -// SerializedItem::Editor { item_id, .. } => *item_id, -// SerializedItem::Diagnostics { item_id } => *item_id, -// SerializedItem::ProjectSearch { item_id, .. } => *item_id, -// SerializedItem::Terminal { item_id } => *item_id, -// } -// } -// } - -// impl Db { -// pub fn get_item(&self, item_id: ItemId) -> SerializedItem { -// unimplemented!() -// } - -// pub fn save_item(&self, workspace_id: WorkspaceId, item: &SerializedItem) {} - -// pub fn close_item(&self, item_id: ItemId) {} -// } +pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( + "item", + &[indoc! {" + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); + +impl Db { + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self + .prepare(indoc! {" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})? + .with_bindings(pane_id)? + .rows::<(ItemId, SerializedItemKind)>()? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } + + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .prepare("DELETE FROM items WHERE workspace_id = ? 
AND pane_id = ? AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.prepare( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old + .with_bindings((workspace_id, pane_id, item.item_id()))? + .exec()?; + + insert_new + .with_bindings((item.item_id(), workspace_id, pane_id, item.kind(), position))? + .exec()?; + } + + Ok(()) + } +} diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 1a6b4ee41f2868eee22da3874f53bc76f24e4b6c..a2bb0c1cd293afc9fc1b69f57269ca53b98e2787 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -1,4 +1,7 @@ -use std::path::{Path, PathBuf}; +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; use anyhow::{bail, Result}; @@ -8,8 +11,14 @@ use sqlez::{ statement::Statement, }; -#[derive(Debug, PartialEq, Eq, Clone)] -pub(crate) struct WorkspaceId(pub(crate) Vec); +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct WorkspaceId(Vec); + +impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } +} impl, T: IntoIterator> From for WorkspaceId { fn from(iterator: T) -> Self { @@ -74,7 +83,7 @@ impl Column for DockAnchor { pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { pub dock_anchor: DockAnchor, pub dock_visible: bool, @@ -82,19 +91,134 @@ pub struct SerializedWorkspace { pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Default)] pub struct SerializedPaneGroup { axis: Axis, children: Vec, } -#[derive(Debug)] +impl SerializedPaneGroup { + pub fn new() -> Self { + SerializedPaneGroup { + axis: Axis::Horizontal, + children: Vec::new(), + } + } +} + +#[derive(Debug, PartialEq, Eq, Default)] pub struct SerializedPane { - _children: Vec, + pub(crate) children: 
Vec, +} + +impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } +} + +pub type GroupId = i64; +pub type PaneId = i64; +pub type ItemId = usize; + +pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics", + SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } +} + +impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } } -#[derive(Debug)] -pub enum SerializedItemKind {} +#[derive(Debug, PartialEq, Eq)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, +} -#[derive(Debug)] -pub enum SerializedItem {} +impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. 
} => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } +} + +#[cfg(test)] +mod tests { + use sqlez::connection::Connection; + + use crate::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! {" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.prepare("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap() + .with_bindings((&workspace_id, DockAnchor::Bottom)) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + db.prepare("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap() + .row::<(WorkspaceId, DockAnchor)>() + .unwrap(), + (WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom) + ); + } +} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 73306707cfce0dc460a2326a89dd546bc8b48595..f2b7fc8ef071327a3b232fa2d8812af628a0b9ad 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,8 +1,9 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use indoc::indoc; -use sqlez::{connection::Connection, migrations::Migration}; +use sqlez::migrations::Migration; +use util::unzip_option; -use crate::model::SerializedPane; +use crate::model::{GroupId, PaneId, SerializedPane}; use super::{ model::{SerializedPaneGroup, WorkspaceId}, @@ -19,79 +20,31 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE - PRIMARY KEY(group_id, workspace_id) ) 
STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, group_id INTEGER, -- If null, this is a dock pane - idx INTEGER NOT NULL, + position INTEGER, -- If null, this is a dock pane FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - PRIMARY KEY(pane_id, workspace_id) - ) STRICT; - - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - pane_id INTEGER NOT NULL, - workspace_id BLOB NOT NULL, - kind TEXT NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], ); impl Db { - pub(crate) fn get_center_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { - unimplemented!() - } - - pub(crate) fn _get_pane_group(&self, _workspace: &WorkspaceId) -> SerializedPaneGroup { - unimplemented!() - // let axis = self.get_pane_group_axis(pane_group_id); - // let mut children: Vec<(usize, PaneGroupChild)> = Vec::new(); - // for child_row in self.get_pane_group_children(pane_group_id) { - // if let Some(child_pane_id) = child_row.child_pane_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Pane(self.get_pane(PaneId { - // workspace_id: pane_group_id.workspace_id, - // pane_id: child_pane_id, - // })), - // )); - // } else if let Some(child_group_id) = child_row.child_group_id { - // children.push(( - // child_row.index, - // PaneGroupChild::Group(self.get_pane_group(PaneGroupId { - // workspace_id: pane_group_id.workspace_id, - // group_id: child_group_id, - // })), - // )); - // } - // } - // children.sort_by_key(|(index, _)| *index); - - // SerializedPaneGroup { - // group_id: pane_group_id, - // axis, - // children: children.into_iter().map(|(_, child)| child).collect(), - // } + pub(crate) fn get_center_group( + &self, 
+ _workspace_id: &WorkspaceId, + ) -> Result { + Ok(SerializedPaneGroup::new()) } - // fn _get_pane_group_children( - // &self, - // _pane_group_id: PaneGroupId, - // ) -> impl Iterator { - // Vec::new().into_iter() - // } - pub(crate) fn save_center_group( - _workspace: &WorkspaceId, + &self, + _workspace_id: &WorkspaceId, _center_pane_group: &SerializedPaneGroup, - _connection: &Connection, ) -> Result<()> { // Delete the center pane group for this workspace and any of its children // Generate new pane group IDs as we go through @@ -99,51 +52,86 @@ impl Db { Ok(()) } - pub(crate) fn get_dock_pane(&self, _workspace: &WorkspaceId) -> Option { - unimplemented!() + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self + .prepare(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND group_id IS NULL AND position IS NULL"})? + .with_bindings(workspace_id)? + .row::()?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) } pub(crate) fn save_dock_pane( - _workspace: &WorkspaceId, - _dock_pane: &SerializedPane, - _connection: &Connection, + &self, + workspace: &WorkspaceId, + dock_pane: &SerializedPane, ) -> Result<()> { - // iife!({ - // self.prepare( - // "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);", - // )? - // .with_bindings(dock_pane.to_row(workspace))? - // .insert() - // }) - // .log_err(); - Ok(()) + self.save_pane(workspace, &dock_pane, None) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self + .prepare("INSERT INTO panes(workspace_id, group_id, position) VALUES (?, ?, ?)")? + .with_bindings((workspace_id, parent_id, order))? + .insert()? 
as PaneId; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") } } #[cfg(test)] mod tests { - // use crate::{items::ItemId, pane::SerializedPane, Db, DockAnchor}; - - // use super::{PaneGroupChild, SerializedDockPane, SerializedPaneGroup}; + use crate::{ + model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace}, + Db, + }; + + fn default_workspace( + dock_pane: SerializedPane, + center_group: SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: crate::model::DockAnchor::Right, + dock_visible: false, + center_group, + dock_pane, + } + } - // #[test] - // fn test_basic_dock_pane() { - // let db = Db::open_in_memory("basic_dock_pane"); + #[test] + fn test_basic_dock_pane() { + let db = Db::open_in_memory("basic_dock_pane"); - // let workspace = db.workspace_for_roots(&["/tmp"]); + let dock_pane = crate::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; - // let dock_pane = SerializedDockPane { - // anchor_position: DockAnchor::Expanded, - // visible: true, - // }; + let workspace = default_workspace(dock_pane, SerializedPaneGroup::new()); - // db.save_dock_pane(&workspace.workspace_id, &dock_pane); + db.save_workspace(&["/tmp"], None, &workspace); - // let new_workspace = db.workspace_for_roots(&["/tmp"]); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); - // assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane); - // } + assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } // #[test] // fn test_dock_simple_split() { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 1ce350a550b05f6d38bb20578e46d5aabe02bdf5..7a3483bcea176bb4d7577896f6e694ed6feb8721 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -178,8 +178,29 @@ 
impl Column for (T1, T2, T3, T4) let (first, next_index) = T1::column(statement, start_index)?; let (second, next_index) = T2::column(statement, next_index)?; let (third, next_index) = T3::column(statement, next_index)?; - let (forth, next_index) = T4::column(statement, next_index)?; - Ok(((first, second, third, forth), next_index)) + let (fourth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, fourth), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4, T5) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + let next_index = self.3.bind(statement, next_index)?; + self.4.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4, T5) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + let (fifth, next_index) = T5::column(statement, next_index)?; + Ok(((first, second, third, fourth, fifth), next_index)) } } diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index fcc180a48dc0c0052ed1db91b285d308a43d52aa..04a12cfc97dba09f42161fe7abcf1b3e5f41f082 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -99,7 +99,29 @@ impl Connection { } pub(crate) fn last_error(&self) -> Result<()> { - unsafe { error_to_result(sqlite3_errcode(self.sqlite3)) } + unsafe { + let code = sqlite3_errcode(self.sqlite3); + const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; + if NON_ERROR_CODES.contains(&code) { + return Ok(()); + } + + let message = sqlite3_errmsg(self.sqlite3); + let message = if message.is_null() 
{ + None + } else { + Some( + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(), + ) + }; + + Err(anyhow!( + "Sqlite call failed with code {} and message: {:?}", + code as isize, + message + )) + } } } @@ -109,31 +131,6 @@ impl Drop for Connection { } } -pub(crate) fn error_to_result(code: std::os::raw::c_int) -> Result<()> { - const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW]; - unsafe { - if NON_ERROR_CODES.contains(&code) { - return Ok(()); - } - - let message = sqlite3_errstr(code); - let message = if message.is_null() { - None - } else { - Some( - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(), - ) - }; - - Err(anyhow!( - "Sqlite call failed with code {} and message: {:?}", - code as isize, - message - )) - } -} - #[cfg(test)] mod test { use anyhow::Result; @@ -213,6 +210,35 @@ mod test { ); } + #[test] + fn bool_round_trips() { + let connection = Connection::open_memory("bool_round_trips"); + connection + .exec(indoc! {" + CREATE TABLE bools ( + t INTEGER, + f INTEGER + );"}) + .unwrap(); + + connection + .prepare("INSERT INTO bools(t, f) VALUES (?, ?);") + .unwrap() + .with_bindings((true, false)) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + &connection + .prepare("SELECT * FROM bools;") + .unwrap() + .row::<(bool, bool)>() + .unwrap(), + &(true, false) + ); + } + #[test] fn backup_works() { let connection1 = Connection::open_memory("backup_works"); diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 3d7830dd91f623894b1062cafbc99043d63325eb..ba4b1e774b03852f1abd229729316a4c86ec09ab 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -8,11 +8,11 @@ impl Connection { // point is released. 
pub fn with_savepoint(&self, name: impl AsRef, f: F) -> Result where - F: FnOnce(&Connection) -> Result, + F: FnOnce() -> Result, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; - let result = f(self); + let result = f(); match result { Ok(_) => { self.exec(format!("RELEASE {}", name))?; @@ -30,11 +30,11 @@ impl Connection { // point is released. pub fn with_savepoint_rollback(&self, name: impl AsRef, f: F) -> Result> where - F: FnOnce(&Connection) -> Result>, + F: FnOnce() -> Result>, { let name = name.as_ref().to_owned(); self.exec(format!("SAVEPOINT {}", &name))?; - let result = f(self); + let result = f(); match result { Ok(Some(_)) => { self.exec(format!("RELEASE {}", name))?; @@ -69,21 +69,21 @@ mod tests { let save1_text = "test save1"; let save2_text = "test save2"; - connection.with_savepoint("first", |save1| { - save1 + connection.with_savepoint("first", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save1_text, 1))? .exec()?; - assert!(save1 - .with_savepoint("second", |save2| -> Result, anyhow::Error> { - save2 + assert!(connection + .with_savepoint("second", || -> Result, anyhow::Error> { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], @@ -95,20 +95,20 @@ mod tests { .is_some()); assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text], ); - save1.with_savepoint_rollback::<(), _>("second", |save2| { - save2 + connection.with_savepoint_rollback::<(), _>("second", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
.rows::()?, vec![save1_text, save2_text], @@ -118,20 +118,20 @@ mod tests { })?; assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text], ); - save1.with_savepoint_rollback("second", |save2| { - save2 + connection.with_savepoint_rollback("second", || { + connection .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? .with_bindings((save2_text, 2))? .exec()?; assert_eq!( - save2 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], @@ -141,7 +141,7 @@ mod tests { })?; assert_eq!( - save1 + connection .prepare("SELECT text FROM text ORDER BY text.idx ASC")? .rows::()?, vec![save1_text, save2_text], diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index e2b59d86f1c234787fa244023f6a53509c2f8180..f0de8703aba92592c64c310a53b314aa3153d897 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; -use crate::connection::{error_to_result, Connection}; +use crate::connection::Connection; pub struct Statement<'a> { raw_statement: *mut sqlite3_stmt, @@ -48,7 +48,9 @@ impl<'a> Statement<'a> { 0 as *mut _, ); - connection.last_error().context("Prepare call failed.")?; + connection + .last_error() + .with_context(|| format!("Prepare call failed for query:\n{}", query.as_ref()))?; } Ok(statement) @@ -309,10 +311,7 @@ impl<'a> Statement<'a> { impl<'a> Drop for Statement<'a> { fn drop(&mut self) { - unsafe { - let error = sqlite3_finalize(self.raw_statement); - error_to_result(error).expect("failed error"); - }; + unsafe { sqlite3_finalize(self.raw_statement) }; } } @@ -327,9 +326,9 @@ mod test { let connection1 = Connection::open_memory("blob_round_trips"); connection1 .exec(indoc! 
{" - CREATE TABLE blobs ( - data BLOB - );"}) + CREATE TABLE blobs ( + data BLOB + );"}) .unwrap(); let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; @@ -352,4 +351,41 @@ mod test { let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Done); } + + #[test] + pub fn maybe_returns_options() { + let connection = Connection::open_memory("maybe_returns_options"); + connection + .exec(indoc! {" + CREATE TABLE texts ( + text TEXT + );"}) + .unwrap(); + + assert!(connection + .prepare("SELECT text FROM texts") + .unwrap() + .maybe_row::() + .unwrap() + .is_none()); + + let text_to_insert = "This is a test"; + + connection + .prepare("INSERT INTO texts VALUES (?)") + .unwrap() + .with_bindings(text_to_insert) + .unwrap() + .exec() + .unwrap(); + + assert_eq!( + connection + .prepare("SELECT text FROM texts") + .unwrap() + .maybe_row::() + .unwrap(), + Some(text_to_insert.to_string()) + ); + } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 3757da5854f0c9c0e7f4d4d6203ba4a5f989f64e..19d17c1190a8e73851d9a7c00bc60e411b58d360 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -204,6 +204,16 @@ impl Iterator for RandomCharIter { } } +// copy unstable standard feature option unzip +// https://github.com/rust-lang/rust/issues/87800 +// Remove when this ship in Rust 1.66 or 1.67 +pub fn unzip_option(option: Option<(T, U)>) -> (Option, Option) { + match option { + Some((a, b)) => (Some(a), Some(b)), + None => (None, None), + } +} + #[macro_export] macro_rules! iife { ($block:block) => { From f27a9d77d18cc6f1c9ab7efcf6929dd1eadf7c98 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 4 Nov 2022 17:48:29 -0700 Subject: [PATCH 40/86] Finished the bulk of workspace serialization. Just items and wiring it all through. 
Co-Authored-By: kay@zed.dev --- crates/db/src/workspace.rs | 10 +- crates/db/src/workspace/items.rs | 60 +---------- crates/db/src/workspace/model.rs | 64 +++++++++--- crates/db/src/workspace/pane.rs | 174 ++++++++++++++++++++++--------- crates/db/test.db | Bin 0 -> 40960 bytes 5 files changed, 187 insertions(+), 121 deletions(-) create mode 100644 crates/db/test.db diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index b1d139066f3f5f5f19495c83354e5089aa80ca45..9b2d9e45631ae0ec2feff6cbac6cae9e3be3da04 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -63,7 +63,7 @@ impl Db { .context("Getting dock pane") .log_err()?, center_group: self - .get_center_group(&workspace_id) + .get_center_pane_group(&workspace_id) .context("Getting center group") .log_err()?, dock_anchor, @@ -104,8 +104,8 @@ impl Db { .exec()?; // Save center pane group and dock pane - self.save_center_group(&workspace_id, &workspace.center_group)?; - self.save_dock_pane(&workspace_id, &workspace.dock_pane)?; + self.save_pane_group(&workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace_id, &workspace.dock_pane, None)?; Ok(()) }) @@ -152,8 +152,8 @@ mod tests { }; #[test] - fn test_basic_functionality() { - env_logger::init(); + fn test_workspace_assignment() { + env_logger::try_init().ok(); let db = Db::open_in_memory("test_basic_functionality"); diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index 87437ccf730433861fba1458c04c0abfa8f85383..25873a7f9b03b2c2bb75ede3d1b12361c2f7690e 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -6,63 +6,11 @@ use crate::{ model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, Db, }; -// use collections::HashSet; -// use rusqlite::{named_params, params, types::FromSql}; - -// use crate::workspace::WorkspaceId; - -// use super::Db; - -// /// Current design makes the cut at the item level, -// /// - Maybe A little 
more bottom up, serialize 'Terminals' and 'Editors' directly, and then make a seperate -// /// - items table, with a kind, and an integer that acts as a key to one of these other tables -// /// This column is a foreign key to ONE OF: editors, terminals, searches -// /// - - -// // (workspace_id, item_id) -// // kind -> ::Editor:: - -// // -> -// // At the workspace level -// // -> (Workspace_ID, item_id) -// // -> One shot, big query, load everything up: - -// // -> SerializedWorkspace::deserialize(tx, itemKey) -// // -> SerializedEditor::deserialize(tx, itemKey) - -// // -> -// // -> Workspace::new(SerializedWorkspace) -// // -> Editor::new(serialized_workspace[???]serializedEditor) - -// // //Pros: Keeps sql out of every body elese, makes changing it easier (e.g. for loading from a network or RocksDB) -// // //Cons: DB has to know the internals of the entire rest of the app - -// // Workspace -// // Worktree roots -// // Pane groups -// // Dock -// // Items -// // Sidebars - -// // Things I'm doing: finding about nullability for foreign keys -// pub(crate) const ITEMS_M_1: &str = " -// CREATE TABLE project_searches( -// workspace_id INTEGER, -// item_id INTEGER, -// query TEXT, -// PRIMARY KEY (workspace_id, item_id) -// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -// ) STRICT; - -// CREATE TABLE editors( -// workspace_id INTEGER, -// item_id INTEGER, -// path BLOB NOT NULL, -// PRIMARY KEY (workspace_id, item_id) -// FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id) -// ) STRICT; -// "; +// 1) Move all of this into Workspace crate +// 2) Deserialize items fully +// 3) Typed prepares (including how you expect to pull data out) +// 4) Investigate Tree column impls pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( "item", &[indoc! 
{" diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index a2bb0c1cd293afc9fc1b69f57269ca53b98e2787..1d9065f6d95e7cc86641c817b583cb920eff06e6 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -5,7 +5,6 @@ use std::{ use anyhow::{bail, Result}; -use gpui::Axis; use sqlez::{ bindable::{Bind, Column}, statement::Statement, @@ -91,22 +90,61 @@ pub struct SerializedWorkspace { pub dock_pane: SerializedPane, } -#[derive(Debug, PartialEq, Eq, Default)] -pub struct SerializedPaneGroup { - axis: Axis, - children: Vec, +#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] +pub enum Axis { + #[default] + Horizontal, + Vertical, +} + +impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, start_index) + } +} + +impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match axis_text.as_str() { + "Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } } -impl SerializedPaneGroup { - pub fn new() -> Self { - SerializedPaneGroup { +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), +} + +// Dock panes, and grouped panes combined? 
+// AND we're collapsing PaneGroup::Pane +// In the case where + +impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { axis: Axis::Horizontal, - children: Vec::new(), + children: vec![Self::Pane(Default::default())], } } } -#[derive(Debug, PartialEq, Eq, Default)] +#[derive(Debug, PartialEq, Eq, Default, Clone)] pub struct SerializedPane { pub(crate) children: Vec, } @@ -142,9 +180,9 @@ impl Bind for SerializedItemKind { impl Column for SerializedItemKind { fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { Ok(( - match anchor_text.as_ref() { + match kind_text.as_ref() { "Editor" => SerializedItemKind::Editor, "Diagnostics" => SerializedItemKind::Diagnostics, "ProjectSearch" => SerializedItemKind::ProjectSearch, @@ -157,7 +195,7 @@ impl Column for SerializedItemKind { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub enum SerializedItem { Editor { item_id: usize, path: Arc }, Diagnostics { item_id: usize }, diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index f2b7fc8ef071327a3b232fa2d8812af628a0b9ad..7fef2d6b75308fcbb2662171a5c8af2c9649cdb9 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,9 +1,9 @@ -use anyhow::{Context, Result}; +use anyhow::{bail, Context, Result}; use indoc::indoc; use sqlez::migrations::Migration; use util::unzip_option; -use crate::model::{GroupId, PaneId, SerializedPane}; +use crate::model::{Axis, GroupId, PaneId, SerializedPane}; use super::{ model::{SerializedPaneGroup, WorkspaceId}, @@ -16,47 +16,107 @@ pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group INTEGER, -- NULL indicates that this is a root node + 
parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - group_id INTEGER, -- If null, this is a dock pane - position INTEGER, -- If null, this is a dock pane + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; "}], ); impl Db { - pub(crate) fn get_center_group( + pub(crate) fn get_center_pane_group( &self, - _workspace_id: &WorkspaceId, + workspace_id: &WorkspaceId, ) -> Result { - Ok(SerializedPaneGroup::new()) + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") } - pub(crate) fn save_center_group( + fn get_pane_group_children( &self, - _workspace_id: &WorkspaceId, - _center_pane_group: &SerializedPaneGroup, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + let children = self + .prepare(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})? 
+ .with_bindings((group_id, workspace_id))? + .rows::<(Option, Option, Option)>()?; + + children + .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children(workspace_id, Some(group_id))?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items(pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group or a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, ) -> Result<()> { - // Delete the center pane group for this workspace and any of its children - // Generate new pane group IDs as we go through - // insert them - Ok(()) + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.prepare("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + .with_bindings((workspace_id, parent_id, position, *axis))? + .insert()? as GroupId; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { let pane_id = self .prepare(indoc! {" SELECT pane_id FROM panes - WHERE workspace_id = ? AND group_id IS NULL AND position IS NULL"})? + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})? .with_bindings(workspace_id)? 
.row::()?; @@ -65,14 +125,6 @@ impl Db { )) } - pub(crate) fn save_dock_pane( - &self, - workspace: &WorkspaceId, - dock_pane: &SerializedPane, - ) -> Result<()> { - self.save_pane(workspace, &dock_pane, None) - } - pub(crate) fn save_pane( &self, workspace_id: &WorkspaceId, @@ -82,7 +134,7 @@ impl Db { let (parent_id, order) = unzip_option(parent); let pane_id = self - .prepare("INSERT INTO panes(workspace_id, group_id, position) VALUES (?, ?, ?)")? + .prepare("INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)")? .with_bindings((workspace_id, parent_id, order))? .insert()? as PaneId; @@ -101,18 +153,20 @@ mod tests { fn default_workspace( dock_pane: SerializedPane, - center_group: SerializedPaneGroup, + center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { dock_anchor: crate::model::DockAnchor::Right, dock_visible: false, - center_group, + center_group: center_group.clone(), dock_pane, } } #[test] fn test_basic_dock_pane() { + env_logger::try_init().ok(); + let db = Db::open_in_memory("basic_dock_pane"); let dock_pane = crate::model::SerializedPane { @@ -124,7 +178,7 @@ mod tests { ], }; - let workspace = default_workspace(dock_pane, SerializedPaneGroup::new()); + let workspace = default_workspace(dock_pane, &Default::default()); db.save_workspace(&["/tmp"], None, &workspace); @@ -133,24 +187,50 @@ mod tests { assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } - // #[test] - // fn test_dock_simple_split() { - // let db = Db::open_in_memory("simple_split"); - - // let workspace = db.workspace_for_roots(&["/tmp"]); - - // // Pane group -> Pane -> 10 , 20 - // let center_pane = SerializedPaneGroup { - // axis: gpui::Axis::Horizontal, - // children: vec![PaneGroupChild::Pane(SerializedPane { - // items: vec![ItemId { item_id: 10 }, ItemId { item_id: 20 }], - // })], - // }; + #[test] + fn test_simple_split() { + env_logger::try_init().ok(); + + let db = Db::open_in_memory("simple_split"); + + // 
----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: crate::model::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: crate::model::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; - // db.save_pane_splits(&workspace.workspace_id, ¢er_pane); + let workspace = default_workspace(Default::default(), ¢er_pane); - // // let new_workspace = db.workspace_for_roots(&["/tmp"]); + db.save_workspace(&["/tmp"], None, &workspace); - // // assert_eq!(new_workspace.center_group, center_pane); - // } + assert_eq!(workspace.center_group, center_pane); + } } diff --git a/crates/db/test.db b/crates/db/test.db new file mode 100644 index 0000000000000000000000000000000000000000..09a0bc8f1126715177b2517c92827685e8b0fb7c GIT binary patch literal 40960 zcmeI*|7+u990%~EUB6{)5k%@844-kwb%o8X>-2{ePrELg#jd++GjS+{xW+AXO?pYX z+xd%(>O|1r{15!$Ktx3RUAW)-lO}D}j*hc~Io{#cK6#$x$&=UTNxJ9h zk<}kpbVnnfnN~}6NS=Fz<9Y5qLO70_rauDx@fUY}%3q=H{6PA$?KvINkv&Aa&e;~5tg+e5W2WYMQ@ODRhG7;8maEeq`FtBERv-qA|L5Cq5qsCjcu5* z$%(E#C3<6#*e1>PiVf2t9YcT8(O8vFO{;0Q)dS6~ccZeq(XW)1w^f4`4IPq|Hg%&x zWa%T?48JMfUM4*g-86<<%8IvkYf^c;N!sqRG*oA=(V^g&|V8YzYFRE?OJ3T1UQ$5 z&C(1+(DUO#T#PCSvmcaQ%%7X+iXyGzhZCWZpzAA%YA#|gUk#N$I~H_{JOM{}=dI=1LX z%QEQ&Y?zHP(Yk5{>M6Y~gs))tW+EA#);b}X4>il7?KsYmWs-T{wDiwS!=bB;VK794 z9WoT3pkVA}e7$i$`(i4Qo}cHtWfu2-v-+u6DC+%u@8%n<2bbJX)ZHsdQH_Hm(_*IV z+VLne8>Zu!E#_F$504y{l1^!-YiJG6lkI+@+dBO;@VgePV#73#HPv9~j^5JfeWca) 
zqP8h*756GKDecwheN$HC@{UxOi#v~kJ92Qx%E#jA?fJ{@Si$MpO0J)d8<9kMZjL`& zW@^n7#db_f^WUQ(F+bPyYHH8CCJr&3sK*^G-06ZwE=S|((%fZ@^pBwA{FFPUPP(7* z7SF=lYiLV9v{d&K$nK{ip~m*}iPzqS*-~}GiyM=+9jzUNf#n4Iu&spb3_b2t>PJre zVfNSQPiO)Q1Rwwb2tWV=5P-l95$G;Y#a>IE@4Ex?(651PgpS!2e+p>S$l0BF8XW4e zaWb6>8Jw(f^CmgFQV`DTQ5v!D=G~e@5o3pEBXHY`8%E>*ztMO>5Qc)<3umu;`6Dkf zG<*z3r@=t{!sKIkw#cvO3KLoFTR!(y5YCK9?2Tl1Col{}A;|58B3KWkhk|jlr#uS6 z$9(M7atX z@&7H3Fis5u5P$##AOHafKmY;|fB*y_z;W*P{}J&UPW+v|V1WPxAOHafKmY;|fB*y_ z009U<;CcntBC)l(WZr+Mp8ue|WvA8lAF;5yvbwsI%Ppw#Js|)A2tWV=5P$##AOHafKmY;|n1I0j zNSs^aIQpj#yk+o<{{G*z$nkNmO1Bz2>hJHup8qdC Date: Fri, 4 Nov 2022 17:56:47 -0700 Subject: [PATCH 41/86] Re-use big union statement for get_center_pane --- crates/db/src/workspace/pane.rs | 44 +++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 7fef2d6b75308fcbb2662171a5c8af2c9649cdb9..8528acb8af66bdd0dc67aa4a1b96a98d94d8838d 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Context, Result}; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{migrations::Migration, statement::Statement}; use util::unzip_option; use crate::model::{Axis, GroupId, PaneId, SerializedPane}; @@ -39,19 +39,7 @@ impl Db { &self, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? - .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children( - &self, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - let children = self - .prepare(indoc! {" + let mut query = self.prepare(indoc! {" SELECT group_id, axis, pane_id FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id FROM pane_groups @@ -62,9 +50,25 @@ impl Db { WHERE parent_group_id IS NOT NULL and position IS NOT NULL) WHERE parent_group_id IS ? AND workspace_id = ? 
ORDER BY position - "})? - .with_bindings((group_id, workspace_id))? - .rows::<(Option, Option, Option)>()?; + "})?; + + self.get_pane_group_children(workspace_id, None, &mut query)? + .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + query: &mut Statement, + ) -> Result> { + let children = query.with_bindings((group_id, workspace_id))?.rows::<( + Option, + Option, + Option, + )>()?; children .into_iter() @@ -72,7 +76,11 @@ impl Db { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children(workspace_id, Some(group_id))?, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + query, + )?, }) } else if let Some(pane_id) = pane_id { Ok(SerializedPaneGroup::Pane(SerializedPane { From 4a00f0b062c0f55d178c72f6b5e3c3cae20d6308 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Sun, 6 Nov 2022 17:00:34 -0800 Subject: [PATCH 42/86] Add typed statements --- .gitignore | 3 - crates/db/src/kvp.rs | 19 +-- crates/db/src/workspace.rs | 36 ++--- crates/db/src/workspace/items.rs | 34 ++-- crates/db/src/workspace/model.rs | 23 ++- crates/db/src/workspace/pane.rs | 62 +++---- crates/db/test.db | Bin 40960 -> 40960 bytes crates/sqlez/src/connection.rs | 163 +++++++------------ crates/sqlez/src/lib.rs | 1 + crates/sqlez/src/migrations.rs | 116 ++++++------- crates/sqlez/src/savepoint.rs | 80 ++++----- crates/sqlez/src/statement.rs | 179 ++++++++++++--------- crates/sqlez/src/thread_safe_connection.rs | 3 +- crates/sqlez/src/typed_statements.rs | 67 ++++++++ 14 files changed, 390 insertions(+), 396 deletions(-) create mode 100644 crates/sqlez/src/typed_statements.rs diff --git a/.gitignore b/.gitignore index da1950f2b386e5655a3d1c3884b42caecf7203e8..e2d90adbb153879e0438f72ce9cfcc526917a794 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ /assets/themes/Internal/*.json 
/assets/themes/Experiments/*.json **/venv -<<<<<<< HEAD .build Packages *.xcodeproj @@ -19,6 +18,4 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc -======= crates/db/test-db.db ->>>>>>> 9d9ad38ce (Successfully detecting workplace IDs :D) diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 93be5e10c0ea21861d300aebe8e11a48af462458..6f1230f7b81f23ccaf825ad3b2987a69bad33725 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -15,24 +15,19 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( impl Db { pub fn read_kvp(&self, key: &str) -> Result> { - self.0 - .prepare("SELECT value FROM kv_store WHERE key = (?)")? - .with_bindings(key)? - .maybe_row() + self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { - self.0 - .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")? - .with_bindings((key, value))? - .exec() + self.exec_bound("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?(( + key, value, + ))?; + + Ok(()) } pub fn delete_kvp(&self, key: &str) -> Result<()> { - self.0 - .prepare("DELETE FROM kv_store WHERE key = (?)")? - .with_bindings(key)? 
- .exec() + self.exec_bound("DELETE FROM kv_store WHERE key = (?)")?(key) } } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index 9b2d9e45631ae0ec2feff6cbac6cae9e3be3da04..c4e4873dce5cfea774e5df3b051db65609b43ddb 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -22,7 +22,7 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "}], ); -use self::model::{SerializedWorkspace, WorkspaceId, WorkspaceRow}; +use self::model::{SerializedWorkspace, WorkspaceId}; use super::Db; @@ -40,21 +40,19 @@ impl Db { // and we've grabbed the most recent workspace let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { - self.prepare(indoc! {" + self.select_row(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})? - .maybe_row::() + ORDER BY timestamp DESC LIMIT 1"})?()? } else { - self.prepare(indoc! {" + self.select_row_bound(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces - WHERE workspace_id = ?"})? - .with_bindings(&workspace_id)? - .maybe_row::() + WHERE workspace_id = ?"})?(&workspace_id)? } + .context("No workspaces found") }) - .log_err() + .warn_on_err() .flatten()?; Some(SerializedWorkspace { @@ -85,23 +83,17 @@ impl Db { if let Some(old_roots) = old_roots { let old_id: WorkspaceId = old_roots.into(); - self.prepare("DELETE FROM WORKSPACES WHERE workspace_id = ?")? - .with_bindings(&old_id)? - .exec()?; + self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; } // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. // Insert new workspace into workspaces table if none were found - self.prepare("DELETE FROM workspaces WHERE workspace_id = ?;")? - .with_bindings(&workspace_id)? 
- .exec()?; + self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - self.prepare( + self.exec_bound( "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )? - .with_bindings((&workspace_id, workspace.dock_anchor, workspace.dock_visible))? - .exec()?; + )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; // Save center pane group and dock pane self.save_pane_group(&workspace_id, &workspace.center_group, None)?; @@ -126,11 +118,9 @@ impl Db { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.prepare( + self.select_bound::( "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )? - .with_bindings(limit)? - .rows::()? + )?(limit)? .into_iter() .map(|id| id.paths()) .collect::>>(), diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs index 25873a7f9b03b2c2bb75ede3d1b12361c2f7690e..9e859ffdad4a5f5718db231cc74a2e40d93db793 100644 --- a/crates/db/src/workspace/items.rs +++ b/crates/db/src/workspace/items.rs @@ -3,7 +3,7 @@ use indoc::indoc; use sqlez::migrations::Migration; use crate::{ - model::{ItemId, PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, + model::{PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, Db, }; @@ -29,19 +29,16 @@ pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( impl Db { pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self - .prepare(indoc! {" + Ok(self.select_bound(indoc! {" SELECT item_id, kind FROM items WHERE pane_id = ? - ORDER BY position"})? - .with_bindings(pane_id)? - .rows::<(ItemId, SerializedItemKind)>()? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) + ORDER BY position"})?(pane_id)? 
+ .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) } pub(crate) fn save_items( @@ -51,19 +48,14 @@ impl Db { items: &[SerializedItem], ) -> Result<()> { let mut delete_old = self - .prepare("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") .context("Preparing deletion")?; - let mut insert_new = self.prepare( + let mut insert_new = self.exec_bound( "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - delete_old - .with_bindings((workspace_id, pane_id, item.item_id()))? - .exec()?; - - insert_new - .with_bindings((item.item_id(), workspace_id, pane_id, item.kind(), position))? - .exec()?; + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; } Ok(()) diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs index 1d9065f6d95e7cc86641c817b583cb920eff06e6..36099f66e65faf284667c86302125bfc2fcdbc3e 100644 --- a/crates/db/src/workspace/model.rs +++ b/crates/db/src/workspace/model.rs @@ -80,8 +80,6 @@ impl Column for DockAnchor { } } -pub(crate) type WorkspaceRow = (WorkspaceId, DockAnchor, bool); - #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { pub dock_anchor: DockAnchor, @@ -240,23 +238,20 @@ mod tests { workspace_id BLOB, dock_anchor TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); - db.prepare("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") - .unwrap() - .with_bindings((&workspace_id, DockAnchor::Bottom)) - .unwrap() - .exec() - .unwrap(); + db.exec_bound("INSERT INTO 
workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); assert_eq!( - db.prepare("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") - .unwrap() - .row::<(WorkspaceId, DockAnchor)>() - .unwrap(), - (WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom) + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) ); } } diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs index 8528acb8af66bdd0dc67aa4a1b96a98d94d8838d..24d6a3f938f75499a3496661cca6f4bf30cc1bb8 100644 --- a/crates/db/src/workspace/pane.rs +++ b/crates/db/src/workspace/pane.rs @@ -1,6 +1,6 @@ use anyhow::{bail, Context, Result}; use indoc::indoc; -use sqlez::{migrations::Migration, statement::Statement}; +use sqlez::migrations::Migration; use util::unzip_option; use crate::model::{Axis, GroupId, PaneId, SerializedPane}; @@ -39,38 +39,29 @@ impl Db { &self, workspace_id: &WorkspaceId, ) -> Result { - let mut query = self.prepare(indoc! {" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?; - - self.get_pane_group_children(workspace_id, None, &mut query)? + self.get_pane_group_children(workspace_id, None)? 
.into_iter() .next() .context("No center pane group") } - fn get_pane_group_children( + fn get_pane_group_children<'a>( &self, workspace_id: &WorkspaceId, group_id: Option, - query: &mut Statement, ) -> Result> { - let children = query.with_bindings((group_id, workspace_id))?.rows::<( - Option, - Option, - Option, - )>()?; - - children + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})?((group_id, workspace_id))? .into_iter() .map(|(group_id, axis, pane_id)| { if let Some((group_id, axis)) = group_id.zip(axis) { @@ -79,7 +70,6 @@ impl Db { children: self.get_pane_group_children( workspace_id, Some(group_id), - query, )?, }) } else if let Some(pane_id) = pane_id { @@ -107,9 +97,8 @@ impl Db { match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.prepare("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - .with_bindings((workspace_id, parent_id, position, *axis))? - .insert()? as GroupId; + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; for (position, group) in children.iter().enumerate() { self.save_pane_group(workspace_id, group, Some((parent_id, position)))? @@ -121,12 +110,12 @@ impl Db { } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self - .prepare(indoc! {" + let pane_id = self.select_row_bound(indoc! {" SELECT pane_id FROM panes - WHERE workspace_id = ? 
AND parent_group_id IS NULL AND position IS NULL"})? - .with_bindings(workspace_id)? - .row::()?; + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? + .context("No dock pane for workspace")?; Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, @@ -141,10 +130,9 @@ impl Db { ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - let pane_id = self - .prepare("INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)")? - .with_bindings((workspace_id, parent_id, order))? - .insert()? as PaneId; + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items") diff --git a/crates/db/test.db b/crates/db/test.db index 09a0bc8f1126715177b2517c92827685e8b0fb7c..cedefe5f832586d90e62e9a50c9e8c7506cf81e7 100644 GIT binary patch delta 64 zcmZoTz|?SnX@ayM9|Hpe7ZAe$-$Wf_M!t;+q5fQE3Wf$&h9*{qrjz&knQ4AW|Q~(n) -> Result { - self.exec(query)?; - Ok(self.last_insert_id()) - } - - pub fn exec(&self, query: impl AsRef) -> Result<()> { - unsafe { - sqlite3_exec( - self.sqlite3, - CString::new(query.as_ref())?.as_ptr(), - None, - 0 as *mut _, - 0 as *mut _, - ); - sqlite3_errcode(self.sqlite3); - self.last_error()?; - } - Ok(()) - } - - pub fn prepare>(&self, query: T) -> Result { - Statement::prepare(&self, query) - } - pub fn backup_main(&self, destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( @@ -136,7 +110,7 @@ mod test { use anyhow::Result; use indoc::indoc; - use crate::{connection::Connection, migrations::Migration}; + use crate::connection::Connection; #[test] fn string_round_trips() -> Result<()> { @@ -146,25 +120,19 @@ mod test { CREATE TABLE text ( text TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let text = "Some test text"; connection - .prepare("INSERT INTO text 
(text) VALUES (?);") - .unwrap() - .with_bindings(text) - .unwrap() - .exec() - .unwrap(); + .insert_bound("INSERT INTO text (text) VALUES (?);") + .unwrap()(text) + .unwrap(); assert_eq!( - &connection - .prepare("SELECT text FROM text;") - .unwrap() - .row::() - .unwrap(), - text + connection.select_row("SELECT text FROM text;").unwrap()().unwrap(), + Some(text.to_string()) ); Ok(()) @@ -180,32 +148,26 @@ mod test { integer INTEGER, blob BLOB );"}) - .unwrap(); + .unwrap()() + .unwrap(); let tuple1 = ("test".to_string(), 64, vec![0, 1, 2, 4, 8, 16, 32, 64]); let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); let mut insert = connection - .prepare("INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)") + .insert_bound::<(String, usize, Vec)>( + "INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)", + ) .unwrap(); - insert - .with_bindings(tuple1.clone()) - .unwrap() - .exec() - .unwrap(); - insert - .with_bindings(tuple2.clone()) - .unwrap() - .exec() - .unwrap(); + insert(tuple1.clone()).unwrap(); + insert(tuple2.clone()).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test") - .unwrap() - .rows::<(String, usize, Vec)>() - .unwrap(), + .select::<(String, usize, Vec)>("SELECT * FROM test") + .unwrap()() + .unwrap(), vec![tuple1, tuple2] ); } @@ -219,23 +181,20 @@ mod test { t INTEGER, f INTEGER );"}) - .unwrap(); + .unwrap()() + .unwrap(); connection - .prepare("INSERT INTO bools(t, f) VALUES (?, ?);") - .unwrap() - .with_bindings((true, false)) - .unwrap() - .exec() - .unwrap(); + .insert_bound("INSERT INTO bools(t, f) VALUES (?, ?);") + .unwrap()((true, false)) + .unwrap(); assert_eq!( - &connection - .prepare("SELECT * FROM bools;") - .unwrap() - .row::<(bool, bool)>() - .unwrap(), - &(true, false) + connection + .select_row::<(bool, bool)>("SELECT * FROM bools;") + .unwrap()() + .unwrap(), + Some((true, false)) ); } @@ -247,13 +206,13 @@ mod test { CREATE TABLE blobs ( data BLOB );"}) - .unwrap(); - let blob = &[0, 1, 2, 
4, 8, 16, 32, 64]; - let mut write = connection1 - .prepare("INSERT INTO blobs (data) VALUES (?);") - .unwrap(); - write.bind_blob(1, blob).unwrap(); - write.exec().unwrap(); + .unwrap()() + .unwrap(); + let blob = vec![0, 1, 2, 4, 8, 16, 32, 64]; + connection1 + .insert_bound::>("INSERT INTO blobs (data) VALUES (?);") + .unwrap()(blob.clone()) + .unwrap(); // Backup connection1 to connection2 let connection2 = Connection::open_memory("backup_works_other"); @@ -261,40 +220,36 @@ mod test { // Delete the added blob and verify its deleted on the other side let read_blobs = connection1 - .prepare("SELECT * FROM blobs;") - .unwrap() - .rows::>() - .unwrap(); + .select::>("SELECT * FROM blobs;") + .unwrap()() + .unwrap(); assert_eq!(read_blobs, vec![blob]); } #[test] - fn test_kv_store() -> anyhow::Result<()> { - let connection = Connection::open_memory("kv_store"); - - Migration::new( - "kv", - &["CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT;"], - ) - .run(&connection) - .unwrap(); - - let mut stmt = connection.prepare("INSERT INTO kv_store(key, value) VALUES(?, ?)")?; - stmt.bind_text(1, "a").unwrap(); - stmt.bind_text(2, "b").unwrap(); - stmt.exec().unwrap(); - let id = connection.last_insert_id(); + fn multi_step_statement_works() { + let connection = Connection::open_memory("multi_step_statement_works"); - let res = connection - .prepare("SELECT key, value FROM kv_store WHERE rowid = ?")? - .with_bindings(id)? - .row::<(String, String)>()?; + connection + .exec(indoc! {" + CREATE TABLE test ( + col INTEGER + )"}) + .unwrap()() + .unwrap(); - assert_eq!(res, ("a".to_string(), "b".to_string())); + connection + .exec(indoc! 
{" + INSERT INTO test(col) VALUES (2)"}) + .unwrap()() + .unwrap(); - Ok(()) + assert_eq!( + connection + .select_row::("SELECt * FROM test") + .unwrap()() + .unwrap(), + Some(2) + ); } } diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index 3bed7a06cbd60507c955d1becfdd291bc4f91f02..155fb28901dddbf524f9c76b97901bd5346e0ce9 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -4,3 +4,4 @@ pub mod migrations; pub mod savepoint; pub mod statement; pub mod thread_safe_connection; +pub mod typed_statements; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 9f3bd333cae325dcd3a29a0778425d02a131c697..89eaebb4942175dc5b8561c0f0084a472f03b074 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -18,7 +18,7 @@ const MIGRATIONS_MIGRATION: Migration = Migration::new( domain TEXT, step INTEGER, migration TEXT - ); + ) "}], ); @@ -34,24 +34,26 @@ impl Migration { } fn run_unchecked(&self, connection: &Connection) -> Result<()> { - connection.exec(self.migrations.join(";\n")) + for migration in self.migrations { + connection.exec(migration)?()?; + } + + Ok(()) } pub fn run(&self, connection: &Connection) -> Result<()> { // Setup the migrations table unconditionally MIGRATIONS_MIGRATION.run_unchecked(connection)?; - let completed_migrations = connection - .prepare(indoc! {" - SELECT domain, step, migration FROM migrations - WHERE domain = ? - ORDER BY step - "})? - .with_bindings(self.domain)? - .rows::<(String, usize, String)>()?; + let completed_migrations = + connection.select_bound::<&str, (String, usize, String)>(indoc! {" + SELECT domain, step, migration FROM migrations + WHERE domain = ? 
+ ORDER BY step + "})?(self.domain)?; let mut store_completed_migration = connection - .prepare("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + .insert_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; for (index, migration) in self.migrations.iter().enumerate() { if let Some((_, _, completed_migration)) = completed_migrations.get(index) { @@ -70,10 +72,8 @@ impl Migration { } } - connection.exec(migration)?; - store_completed_migration - .with_bindings((self.domain, index, *migration))? - .exec()?; + connection.exec(migration)?()?; + store_completed_migration((self.domain, index, *migration))?; } Ok(()) @@ -97,17 +97,16 @@ mod test { CREATE TABLE test1 ( a TEXT, b TEXT - );"}], + )"}], ); migration.run(&connection).unwrap(); // Verify it got added to the migrations table assert_eq!( &connection - .prepare("SELECT (migration) FROM migrations") - .unwrap() - .rows::() - .unwrap()[..], + .select::("SELECT (migration) FROM migrations") + .unwrap()() + .unwrap()[..], migration.migrations ); @@ -117,22 +116,21 @@ mod test { CREATE TABLE test1 ( a TEXT, b TEXT - );"}, + )"}, indoc! {" CREATE TABLE test2 ( c TEXT, d TEXT - );"}, + )"}, ]; migration.run(&connection).unwrap(); // Verify it is also added to the migrations table assert_eq!( &connection - .prepare("SELECT (migration) FROM migrations") - .unwrap() - .rows::() - .unwrap()[..], + .select::("SELECT (migration) FROM migrations") + .unwrap()() + .unwrap()[..], migration.migrations ); } @@ -142,15 +140,17 @@ mod test { let connection = Connection::open_memory("migration_setup_works"); connection - .exec(indoc! {"CREATE TABLE IF NOT EXISTS migrations ( + .exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( domain TEXT, step INTEGER, migration TEXT );"}) - .unwrap(); + .unwrap()() + .unwrap(); let mut store_completed_migration = connection - .prepare(indoc! {" + .insert_bound::<(&str, usize, String)>(indoc! 
{" INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)"}) .unwrap(); @@ -159,14 +159,11 @@ mod test { for i in 0..5 { // Create a table forcing a schema change connection - .exec(format!("CREATE TABLE table{} ( test TEXT );", i)) - .unwrap(); - - store_completed_migration - .with_bindings((domain, i, i.to_string())) - .unwrap() - .exec() - .unwrap(); + .exec(&format!("CREATE TABLE table{} ( test TEXT );", i)) + .unwrap()() + .unwrap(); + + store_completed_migration((domain, i, i.to_string())).unwrap(); } } @@ -180,46 +177,49 @@ mod test { // Manually create the table for that migration with a row connection .exec(indoc! {" - CREATE TABLE test_table ( - test_column INTEGER - ); - INSERT INTO test_table (test_column) VALUES (1)"}) - .unwrap(); + CREATE TABLE test_table ( + test_column INTEGER + );"}) + .unwrap()() + .unwrap(); + connection + .exec(indoc! {" + INSERT INTO test_table (test_column) VALUES (1);"}) + .unwrap()() + .unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .row::() - .unwrap(), - 1 + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + Some(1) ); // Run the migration verifying that the row got dropped migration.run(&connection).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .rows::() - .unwrap(), - Vec::new() + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + None ); // Recreate the dropped row connection .exec("INSERT INTO test_table (test_column) VALUES (2)") - .unwrap(); + .unwrap()() + .unwrap(); // Run the same migration again and verify that the table was left unchanged migration.run(&connection).unwrap(); assert_eq!( connection - .prepare("SELECT * FROM test_table") - .unwrap() - .row::() - .unwrap(), - 2 + .select_row::("SELECT * FROM test_table") + .unwrap()() + .unwrap(), + Some(2) ); } diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 
ba4b1e774b03852f1abd229729316a4c86ec09ab..b78358deb9008f085b8e195e68e283c7cbcfd863 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use indoc::{formatdoc, indoc}; use crate::connection::Connection; @@ -10,16 +11,17 @@ impl Connection { where F: FnOnce() -> Result, { - let name = name.as_ref().to_owned(); - self.exec(format!("SAVEPOINT {}", &name))?; + let name = name.as_ref(); + self.exec(&format!("SAVEPOINT {name}"))?()?; let result = f(); match result { Ok(_) => { - self.exec(format!("RELEASE {}", name))?; + self.exec(&format!("RELEASE {name}"))?()?; } Err(_) => { - self.exec(format!("ROLLBACK TO {}", name))?; - self.exec(format!("RELEASE {}", name))?; + self.exec(&formatdoc! {" + ROLLBACK TO {name}; + RELEASE {name}"})?()?; } } result @@ -32,16 +34,17 @@ impl Connection { where F: FnOnce() -> Result>, { - let name = name.as_ref().to_owned(); - self.exec(format!("SAVEPOINT {}", &name))?; + let name = name.as_ref(); + self.exec(&format!("SAVEPOINT {name}"))?()?; let result = f(); match result { Ok(Some(_)) => { - self.exec(format!("RELEASE {}", name))?; + self.exec(&format!("RELEASE {name}"))?()?; } Ok(None) | Err(_) => { - self.exec(format!("ROLLBACK TO {}", name))?; - self.exec(format!("RELEASE {}", name))?; + self.exec(&formatdoc! {" + ROLLBACK TO {name}; + RELEASE {name}"})?()?; } } result @@ -64,28 +67,25 @@ mod tests { text TEXT, idx INTEGER );"}) - .unwrap(); + .unwrap()() + .unwrap(); let save1_text = "test save1"; let save2_text = "test save2"; connection.with_savepoint("first", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save1_text, 1))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?((save1_text, 1))?; assert!(connection .with_savepoint("second", || -> Result, anyhow::Error> { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? 
- .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + .select::("SELECT text FROM text ORDER BY text.idx ASC")?( + )?, vec![save1_text, save2_text], ); @@ -95,22 +95,17 @@ mod tests { .is_some()); assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text], ); connection.with_savepoint_rollback::<(), _>("second", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -118,22 +113,17 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text], ); connection.with_savepoint_rollback("second", || { - connection - .prepare("INSERT INTO text(text, idx) VALUES (?, ?)")? - .with_bindings((save2_text, 2))? - .exec()?; + connection.exec_bound("INSERT INTO text(text, idx) VALUES (?, ?)")?(( + save2_text, 2, + ))?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -141,9 +131,7 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? 
- .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); @@ -151,9 +139,7 @@ mod tests { })?; assert_eq!( - connection - .prepare("SELECT text FROM text ORDER BY text.idx ASC")? - .rows::()?, + connection.select::("SELECT text FROM text ORDER BY text.idx ASC")?()?, vec![save1_text, save2_text], ); diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f0de8703aba92592c64c310a53b314aa3153d897..e0b284e62829bd26c4f1a976e491594cb69ddccd 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -1,6 +1,6 @@ -use std::ffi::{c_int, CString}; +use std::ffi::{c_int, CStr, CString}; use std::marker::PhantomData; -use std::{slice, str}; +use std::{ptr, slice, str}; use anyhow::{anyhow, Context, Result}; use libsqlite3_sys::*; @@ -9,7 +9,8 @@ use crate::bindable::{Bind, Column}; use crate::connection::Connection; pub struct Statement<'a> { - raw_statement: *mut sqlite3_stmt, + raw_statements: Vec<*mut sqlite3_stmt>, + current_statement: usize, connection: &'a Connection, phantom: PhantomData, } @@ -34,19 +35,31 @@ pub enum SqlType { impl<'a> Statement<'a> { pub fn prepare>(connection: &'a Connection, query: T) -> Result { let mut statement = Self { - raw_statement: 0 as *mut _, + raw_statements: Default::default(), + current_statement: 0, connection, phantom: PhantomData, }; unsafe { - sqlite3_prepare_v2( - connection.sqlite3, - CString::new(query.as_ref())?.as_ptr(), - -1, - &mut statement.raw_statement, - 0 as *mut _, - ); + let sql = CString::new(query.as_ref())?; + let mut remaining_sql = sql.as_c_str(); + while { + let remaining_sql_str = remaining_sql.to_str()?; + remaining_sql_str.trim() != ";" && !remaining_sql_str.is_empty() + } { + let mut raw_statement = 0 as *mut sqlite3_stmt; + let mut remaining_sql_ptr = ptr::null(); + sqlite3_prepare_v2( + connection.sqlite3, + remaining_sql.as_ptr(), + -1, + &mut raw_statement, + &mut remaining_sql_ptr, + ); + 
remaining_sql = CStr::from_ptr(remaining_sql_ptr); + statement.raw_statements.push(raw_statement); + } connection .last_error() @@ -56,131 +69,138 @@ impl<'a> Statement<'a> { Ok(statement) } + fn current_statement(&self) -> *mut sqlite3_stmt { + *self.raw_statements.get(self.current_statement).unwrap() + } + pub fn reset(&mut self) { unsafe { - sqlite3_reset(self.raw_statement); + for raw_statement in self.raw_statements.iter() { + sqlite3_reset(*raw_statement); + } } + self.current_statement = 0; } pub fn parameter_count(&self) -> i32 { - unsafe { sqlite3_bind_parameter_count(self.raw_statement) } + unsafe { + self.raw_statements + .iter() + .map(|raw_statement| sqlite3_bind_parameter_count(*raw_statement)) + .max() + .unwrap_or(0) + } } pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { - // dbg!("bind blob", index); let index = index as c_int; let blob_pointer = blob.as_ptr() as *const _; let len = blob.len() as c_int; unsafe { - sqlite3_bind_blob( - self.raw_statement, - index, - blob_pointer, - len, - SQLITE_TRANSIENT(), - ); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); + } } self.connection.last_error() } pub fn column_blob<'b>(&'b mut self, index: i32) -> Result<&'b [u8]> { let index = index as c_int; - let pointer = unsafe { sqlite3_column_blob(self.raw_statement, index) }; + let pointer = unsafe { sqlite3_column_blob(self.current_statement(), index) }; self.connection.last_error()?; if pointer.is_null() { return Ok(&[]); } - let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; self.connection.last_error()?; unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { - // dbg!("bind double", index); let index = index as c_int; unsafe { - 
sqlite3_bind_double(self.raw_statement, index, double); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_double(*raw_statement, index, double); + } } self.connection.last_error() } pub fn column_double(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_double(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_double(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { - // dbg!("bind int", index); let index = index as c_int; unsafe { - sqlite3_bind_int(self.raw_statement, index, int); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_int(*raw_statement, index, int); + } }; self.connection.last_error() } pub fn column_int(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_int(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_int(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { - // dbg!("bind int64", index); let index = index as c_int; unsafe { - sqlite3_bind_int64(self.raw_statement, index, int); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_int64(*raw_statement, index, int); + } } self.connection.last_error() } pub fn column_int64(&self, index: i32) -> Result { let index = index as c_int; - let result = unsafe { sqlite3_column_int64(self.raw_statement, index) }; + let result = unsafe { sqlite3_column_int64(self.current_statement(), index) }; self.connection.last_error()?; Ok(result) } pub fn bind_null(&self, index: i32) -> Result<()> { - // dbg!("bind null", index); let index = index as c_int; unsafe { - sqlite3_bind_null(self.raw_statement, index); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_null(*raw_statement, index); + } } self.connection.last_error() } pub fn 
bind_text(&self, index: i32, text: &str) -> Result<()> { - // dbg!("bind text", index, text); let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; unsafe { - sqlite3_bind_text( - self.raw_statement, - index, - text_pointer, - len, - SQLITE_TRANSIENT(), - ); + for raw_statement in self.raw_statements.iter() { + sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); + } } self.connection.last_error() } pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { let index = index as c_int; - let pointer = unsafe { sqlite3_column_text(self.raw_statement, index) }; + let pointer = unsafe { sqlite3_column_text(self.current_statement(), index) }; self.connection.last_error()?; if pointer.is_null() { return Ok(""); } - let len = unsafe { sqlite3_column_bytes(self.raw_statement, index) as usize }; + let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; self.connection.last_error()?; let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; @@ -198,7 +218,7 @@ impl<'a> Statement<'a> { } pub fn column_type(&mut self, index: i32) -> Result { - let result = unsafe { sqlite3_column_type(self.raw_statement, index) }; // SELECT FROM TABLE + let result = unsafe { sqlite3_column_type(self.current_statement(), index) }; self.connection.last_error()?; match result { SQLITE_INTEGER => Ok(SqlType::Integer), @@ -217,9 +237,16 @@ impl<'a> Statement<'a> { fn step(&mut self) -> Result { unsafe { - match sqlite3_step(self.raw_statement) { + match sqlite3_step(self.current_statement()) { SQLITE_ROW => Ok(StepResult::Row), - SQLITE_DONE => Ok(StepResult::Done), + SQLITE_DONE => { + if self.current_statement >= self.raw_statements.len() - 1 { + Ok(StepResult::Done) + } else { + self.current_statement += 1; + self.step() + } + } SQLITE_MISUSE => Ok(StepResult::Misuse), other => self .connection @@ -311,7 +338,11 @@ impl<'a> Statement<'a> { impl<'a> Drop for 
Statement<'a> { fn drop(&mut self) { - unsafe { sqlite3_finalize(self.raw_statement) }; + unsafe { + for raw_statement in self.raw_statements.iter() { + sqlite3_finalize(*raw_statement); + } + } } } @@ -319,7 +350,10 @@ impl<'a> Drop for Statement<'a> { mod test { use indoc::indoc; - use crate::{connection::Connection, statement::StepResult}; + use crate::{ + connection::Connection, + statement::{Statement, StepResult}, + }; #[test] fn blob_round_trips() { @@ -327,28 +361,28 @@ mod test { connection1 .exec(indoc! {" CREATE TABLE blobs ( - data BLOB - );"}) - .unwrap(); + data BLOB + )"}) + .unwrap()() + .unwrap(); let blob = &[0, 1, 2, 4, 8, 16, 32, 64]; - let mut write = connection1 - .prepare("INSERT INTO blobs (data) VALUES (?);") - .unwrap(); + let mut write = + Statement::prepare(&connection1, "INSERT INTO blobs (data) VALUES (?)").unwrap(); write.bind_blob(1, blob).unwrap(); assert_eq!(write.step().unwrap(), StepResult::Done); // Read the blob from the let connection2 = Connection::open_memory("blob_round_trips"); - let mut read = connection2.prepare("SELECT * FROM blobs;").unwrap(); + let mut read = Statement::prepare(&connection2, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Row); assert_eq!(read.column_blob(0).unwrap(), blob); assert_eq!(read.step().unwrap(), StepResult::Done); // Delete the added blob and verify its deleted on the other side - connection2.exec("DELETE FROM blobs;").unwrap(); - let mut read = connection1.prepare("SELECT * FROM blobs;").unwrap(); + connection2.exec("DELETE FROM blobs").unwrap()().unwrap(); + let mut read = Statement::prepare(&connection1, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Done); } @@ -359,32 +393,25 @@ mod test { .exec(indoc! 
{" CREATE TABLE texts ( text TEXT - );"}) - .unwrap(); + )"}) + .unwrap()() + .unwrap(); assert!(connection - .prepare("SELECT text FROM texts") - .unwrap() - .maybe_row::() - .unwrap() - .is_none()); + .select_row::("SELECT text FROM texts") + .unwrap()() + .unwrap() + .is_none()); let text_to_insert = "This is a test"; connection - .prepare("INSERT INTO texts VALUES (?)") - .unwrap() - .with_bindings(text_to_insert) - .unwrap() - .exec() - .unwrap(); + .exec_bound("INSERT INTO texts VALUES (?)") + .unwrap()(text_to_insert) + .unwrap(); assert_eq!( - connection - .prepare("SELECT text FROM texts") - .unwrap() - .maybe_row::() - .unwrap(), + connection.select_row("SELECT text FROM texts").unwrap()().unwrap(), Some(text_to_insert.to_string()) ); } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index f4f759cd6c42e93b6f3b99744e2419f0e53acf6b..45e22e4b3fd77a3db478350e74bdf081b7b714fd 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -79,7 +79,8 @@ impl Deref for ThreadSafeConnection { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query - )); + ))() + .unwrap(); } if let Some(migrations) = self.migrations { diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs new file mode 100644 index 0000000000000000000000000000000000000000..f2d66a781f09f05adb53f3bf05b1667cf14629d2 --- /dev/null +++ b/crates/sqlez/src/typed_statements.rs @@ -0,0 +1,67 @@ +use anyhow::Result; + +use crate::{ + bindable::{Bind, Column}, + connection::Connection, + statement::Statement, +}; + +impl Connection { + pub fn exec<'a>(&'a self, query: &str) -> Result Result<()>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.exec()) + } + + pub fn exec_bound<'a, B: Bind>( + &'a self, + query: &str, + ) -> Result Result<()>> { + let mut statement = Statement::prepare(&self, 
query)?; + Ok(move |bindings| statement.with_bindings(bindings)?.exec()) + } + + pub fn insert<'a>(&'a self, query: &str) -> Result Result> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.insert()) + } + + pub fn insert_bound<'a, B: Bind>( + &'a self, + query: &str, + ) -> Result Result> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move |bindings| statement.with_bindings(bindings)?.insert()) + } + + pub fn select<'a, C: Column>( + &'a self, + query: &str, + ) -> Result Result>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.rows::()) + } + + pub fn select_bound<'a, B: Bind, C: Column>( + &'a self, + query: &str, + ) -> Result Result>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move |bindings| statement.with_bindings(bindings)?.rows::()) + } + + pub fn select_row<'a, C: Column>( + &'a self, + query: &str, + ) -> Result Result>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move || statement.maybe_row::()) + } + + pub fn select_row_bound<'a, B: Bind, C: Column>( + &'a self, + query: &str, + ) -> Result Result>> { + let mut statement = Statement::prepare(&self, query)?; + Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) + } +} From c84201fc9fda59f0d71a6fe49c519902135252af Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 10 Nov 2022 15:29:29 -0800 Subject: [PATCH 43/86] Done first draft of strongly typed migrations --- crates/auto_update/src/auto_update.rs | 8 +- crates/client/src/client.rs | 4 +- crates/client/src/telemetry.rs | 4 +- crates/db/examples/serialize-pane.rs | 46 ---- crates/db/examples/serialize_workspace.rs | 30 --- crates/db/src/db.rs | 46 ++-- crates/db/src/kvp.rs | 13 +- crates/db/src/workspace.rs | 294 ++++++++++++++++++++- crates/db/src/workspace/items.rs | 63 ----- crates/db/src/workspace/pane.rs | 232 ---------------- crates/project/src/project.rs | 2 +- crates/sqlez/src/domain.rs | 39 +++ 
crates/sqlez/src/lib.rs | 1 + crates/sqlez/src/savepoint.rs | 2 +- crates/sqlez/src/thread_safe_connection.rs | 45 ++-- crates/workspace/src/pane.rs | 2 +- crates/workspace/src/workspace.rs | 5 +- crates/zed/src/main.rs | 6 +- 18 files changed, 395 insertions(+), 447 deletions(-) delete mode 100644 crates/db/examples/serialize-pane.rs delete mode 100644 crates/db/examples/serialize_workspace.rs delete mode 100644 crates/db/src/workspace/items.rs delete mode 100644 crates/db/src/workspace/pane.rs create mode 100644 crates/sqlez/src/domain.rs diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 1baf609268abd5c41905d52f7e3ea22723d453e9..d6eaaab82619fe9bfb6bac221019a0a61cd77d16 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -2,7 +2,7 @@ mod update_notification; use anyhow::{anyhow, Context, Result}; use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN}; -use db::Db; +use db::{kvp::KeyValue, Db}; use gpui::{ actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakViewHandle, @@ -42,7 +42,7 @@ pub struct AutoUpdater { current_version: AppVersion, http_client: Arc, pending_poll: Option>, - db: project::Db, + db: project::Db, server_url: String, } @@ -57,7 +57,7 @@ impl Entity for AutoUpdater { } pub fn init( - db: Db, + db: Db, http_client: Arc, server_url: String, cx: &mut MutableAppContext, @@ -126,7 +126,7 @@ impl AutoUpdater { fn new( current_version: AppVersion, - db: project::Db, + db: project::Db, http_client: Arc, server_url: String, ) -> Self { diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index c943b274172c8264ee311270d4575973f945e6cc..907f7e80f1ad8f5178b3970ac41a9089fb9e9984 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -11,7 +11,7 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use 
db::Db; +use db::{kvp::KeyValue, Db}; use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt}; use gpui::{ actions, @@ -1218,7 +1218,7 @@ impl Client { self.peer.respond_with_error(receipt, error) } - pub fn start_telemetry(&self, db: Db) { + pub fn start_telemetry(&self, db: Db) { self.telemetry.start(db.clone()); } diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index f8e7d161c33bd8992b0098575ff4a951c6e74934..16a7c1cc82e461413c59f9e337381d2f8e0c2bf1 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -1,5 +1,5 @@ use crate::http::HttpClient; -use db::Db; +use db::{kvp::KeyValue, Db}; use gpui::{ executor::Background, serde_json::{self, value::Map, Value}, @@ -148,7 +148,7 @@ impl Telemetry { Some(self.state.lock().log_file.as_ref()?.path().to_path_buf()) } - pub fn start(self: &Arc, db: Db) { + pub fn start(self: &Arc, db: Db) { let this = self.clone(); self.executor .spawn( diff --git a/crates/db/examples/serialize-pane.rs b/crates/db/examples/serialize-pane.rs deleted file mode 100644 index 2f362fb99724887b21a0361f19641421d37956a0..0000000000000000000000000000000000000000 --- a/crates/db/examples/serialize-pane.rs +++ /dev/null @@ -1,46 +0,0 @@ -use std::{fs::File, path::Path}; - -const TEST_FILE: &'static str = "test-db.db"; - -fn main() -> anyhow::Result<()> { - env_logger::init(); - - let db = db::Db::open_in_memory("db"); - - let file = Path::new(TEST_FILE); - - let f = File::create(file)?; - drop(f); - - // let workspace_1 = db.workspace_for_roots(&["/tmp"]); - // let workspace_2 = db.workspace_for_roots(&["/tmp", "/tmp2"]); - // let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]); - - // db.save_dock_pane( - // &workspace_1.workspace_id, - // &SerializedDockPane { - // anchor_position: DockAnchor::Expanded, - // visible: true, - // }, - // ); - // db.save_dock_pane( - // &workspace_2.workspace_id, - // &SerializedDockPane { - // anchor_position: 
DockAnchor::Bottom, - // visible: true, - // }, - // ); - // db.save_dock_pane( - // &workspace_3.workspace_id, - // &SerializedDockPane { - // anchor_position: DockAnchor::Right, - // visible: false, - // }, - // ); - - db.write_to(file).ok(); - - println!("Wrote database!"); - - Ok(()) -} diff --git a/crates/db/examples/serialize_workspace.rs b/crates/db/examples/serialize_workspace.rs deleted file mode 100644 index 9b6082ce534c6038e4c1a7bd8e23a4469049b3fa..0000000000000000000000000000000000000000 --- a/crates/db/examples/serialize_workspace.rs +++ /dev/null @@ -1,30 +0,0 @@ -use std::{fs::File, path::Path}; - -const TEST_FILE: &'static str = "test-db.db"; - -fn main() -> anyhow::Result<()> { - env_logger::init(); - let db = db::Db::open_in_memory("db"); - - let file = Path::new(TEST_FILE); - - let f = File::create(file)?; - drop(f); - - db.write_kvp("test", "1")?; - db.write_kvp("test-2", "2")?; - - db.workspace_for_roots(&["/tmp1"]); - db.workspace_for_roots(&["/tmp1", "/tmp2"]); - db.workspace_for_roots(&["/tmp1", "/tmp2", "/tmp3"]); - db.workspace_for_roots(&["/tmp2", "/tmp3"]); - db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]); - db.workspace_for_roots(&["/tmp2", "/tmp4"]); - db.workspace_for_roots(&["/tmp2"]); - - db.write_to(file).ok(); - - println!("Wrote database!"); - - Ok(()) -} diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 97dfce0e1901167fa388b5b07a40087f8ee46c63..4e348b5614d4132d90c6fad61b50e59711d02913 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -7,18 +7,23 @@ use std::path::Path; use anyhow::Result; use indoc::indoc; -use kvp::KVP_MIGRATION; use sqlez::connection::Connection; +use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; -use workspace::items::ITEM_MIGRATIONS; -use workspace::pane::PANE_MIGRATIONS; pub use workspace::*; +const INITIALIZE_QUERY: &'static str = indoc! 
{" + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; +"}; + #[derive(Clone)] -pub struct Db(ThreadSafeConnection); +pub struct Db(ThreadSafeConnection); -impl Deref for Db { +impl Deref for Db { type Target = sqlez::connection::Connection; fn deref(&self) -> &Self::Target { @@ -26,7 +31,7 @@ impl Deref for Db { } } -impl Db { +impl Db { /// Open or create a database at the given directory path. pub fn open(db_dir: &Path, channel: &'static str) -> Self { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -35,17 +40,15 @@ impl Db { .expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - Db(initialize_connection(ThreadSafeConnection::new( - db_path.to_string_lossy().as_ref(), - true, - ))) + Db( + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(INITIALIZE_QUERY), + ) } /// Open a in memory database for testing and as a fallback. pub fn open_in_memory(db_name: &str) -> Self { - Db(initialize_connection(ThreadSafeConnection::new( - db_name, false, - ))) + Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY)) } pub fn persisting(&self) -> bool { @@ -56,19 +59,8 @@ impl Db { let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); self.backup_main(&destination) } -} -fn initialize_connection(conn: ThreadSafeConnection) -> ThreadSafeConnection { - conn.with_initialize_query(indoc! 
{" - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - "}) - .with_migrations(&[ - KVP_MIGRATION, - WORKSPACES_MIGRATION, - PANE_MIGRATIONS, - ITEM_MIGRATIONS, - ]) + pub fn open_as(&self) -> Db { + Db(self.0.for_domain()) + } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 6f1230f7b81f23ccaf825ad3b2987a69bad33725..c5c9c1c5b5e50782d9c8f9fc1c2249efa7ad7dee 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,7 +1,7 @@ use super::Db; use anyhow::Result; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; pub(crate) const KVP_MIGRATION: Migration = Migration::new( "kvp", @@ -13,7 +13,16 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( "}], ); -impl Db { +#[derive(Clone)] +pub enum KeyValue {} + +impl Domain for KeyValue { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + KVP_MIGRATION.run(conn) + } +} + +impl Db { pub fn read_kvp(&self, key: &str) -> Result> { self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } diff --git a/crates/db/src/workspace.rs b/crates/db/src/workspace.rs index c4e4873dce5cfea774e5df3b051db65609b43ddb..17ff9cf22cb5c77308763c7ca0fe7532a7b9a7b6 100644 --- a/crates/db/src/workspace.rs +++ b/crates/db/src/workspace.rs @@ -1,14 +1,24 @@ -pub(crate) mod items; pub mod model; -pub(crate) mod pane; -use anyhow::Context; -use util::{iife, ResultExt}; +use anyhow::{bail, Context, Result}; +use util::{iife, unzip_option, ResultExt}; use std::path::{Path, PathBuf}; use indoc::indoc; -use sqlez::migrations::Migration; +use sqlez::{domain::Domain, migrations::Migration}; + +use self::model::{ + Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + SerializedWorkspace, WorkspaceId, +}; + +use super::Db; + +// 1) Move all of this into Workspace crate +// 2) Deserialize items fully +// 3) Typed 
prepares (including how you expect to pull data out) +// 4) Investigate Tree column impls pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "workspace", @@ -22,11 +32,58 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( "}], ); -use self::model::{SerializedWorkspace, WorkspaceId}; +pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( + "pane", + &[indoc! {" + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + "}], +); + +pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( + "item", + &[indoc! 
{" + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); -use super::Db; +#[derive(Clone)] +pub enum Workspace {} + +impl Domain for Workspace { + fn migrate(conn: &sqlez::connection::Connection) -> anyhow::Result<()> { + WORKSPACES_MIGRATION.run(&conn)?; + PANE_MIGRATIONS.run(&conn)?; + ITEM_MIGRATIONS.run(&conn) + } +} -impl Db { +impl Db { /// Returns a serialized workspace for the given worktree_roots. If the passed array /// is empty, the most recent workspace is returned instead. If no workspace for the /// passed roots is stored, returns none. @@ -129,6 +186,142 @@ impl Db { .log_err() .unwrap_or_default() } + + pub(crate) fn get_center_pane_group( + &self, + workspace_id: &WorkspaceId, + ) -> Result { + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children<'a>( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? + ORDER BY position + "})?((group_id, workspace_id))? 
+ .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + )?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items(pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group or a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } + } + + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self.select_row_bound(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? 
+ .context("No dock pane for workspace")?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") + } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self.select_bound(indoc! {" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})?(pane_id)? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } + + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.exec_bound( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + } + + Ok(()) + } } #[cfg(test)] @@ -214,4 +407,89 @@ mod tests { workspace_3 ); } + + use crate::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + + fn default_workspace( + dock_pane: SerializedPane, + center_group: &SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: crate::model::DockAnchor::Right, + dock_visible: false, + center_group: center_group.clone(), + dock_pane, + } + } + + #[test] + fn test_basic_dock_pane() { + env_logger::try_init().ok(); + + let db = Db::open_in_memory("basic_dock_pane"); + + let dock_pane = crate::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; + + let workspace = default_workspace(dock_pane, &Default::default()); + + db.save_workspace(&["/tmp"], None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } + + #[test] + fn test_simple_split() { + env_logger::try_init().ok(); + + let db = Db::open_in_memory("simple_split"); + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: crate::model::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: crate::model::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, 
+ SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; + + let workspace = default_workspace(Default::default(), ¢er_pane); + + db.save_workspace(&["/tmp"], None, &workspace); + + assert_eq!(workspace.center_group, center_pane); + } } diff --git a/crates/db/src/workspace/items.rs b/crates/db/src/workspace/items.rs deleted file mode 100644 index 9e859ffdad4a5f5718db231cc74a2e40d93db793..0000000000000000000000000000000000000000 --- a/crates/db/src/workspace/items.rs +++ /dev/null @@ -1,63 +0,0 @@ -use anyhow::{Context, Result}; -use indoc::indoc; -use sqlez::migrations::Migration; - -use crate::{ - model::{PaneId, SerializedItem, SerializedItemKind, WorkspaceId}, - Db, -}; - -// 1) Move all of this into Workspace crate -// 2) Deserialize items fully -// 3) Typed prepares (including how you expect to pull data out) -// 4) Investigate Tree column impls -pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( - "item", - &[indoc! {" - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); - -impl Db { - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" - SELECT item_id, kind FROM items - WHERE pane_id = ? - ORDER BY position"})?(pane_id)? 
- .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) - } - - pub(crate) fn save_items( - &self, - workspace_id: &WorkspaceId, - pane_id: PaneId, - items: &[SerializedItem], - ) -> Result<()> { - let mut delete_old = self - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; - for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; - } - - Ok(()) - } -} diff --git a/crates/db/src/workspace/pane.rs b/crates/db/src/workspace/pane.rs deleted file mode 100644 index 24d6a3f938f75499a3496661cca6f4bf30cc1bb8..0000000000000000000000000000000000000000 --- a/crates/db/src/workspace/pane.rs +++ /dev/null @@ -1,232 +0,0 @@ -use anyhow::{bail, Context, Result}; -use indoc::indoc; -use sqlez::migrations::Migration; -use util::unzip_option; - -use crate::model::{Axis, GroupId, PaneId, SerializedPane}; - -use super::{ - model::{SerializedPaneGroup, WorkspaceId}, - Db, -}; - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! 
{" - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - "}], -); - -impl Db { - pub(crate) fn get_center_pane_group( - &self, - workspace_id: &WorkspaceId, - ) -> Result { - self.get_pane_group_children(workspace_id, None)? - .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children<'a>( - &self, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?((group_id, workspace_id))? 
- .into_iter() - .map(|(group_id, axis, pane_id)| { - if let Some((group_id, axis)) = group_id.zip(axis) { - Ok(SerializedPaneGroup::Group { - axis, - children: self.get_pane_group_children( - workspace_id, - Some(group_id), - )?, - }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items(pane_id)?, - })) - } else { - bail!("Pane Group Child was neither a pane group or a pane"); - } - }) - .collect::>() - } - - pub(crate) fn save_pane_group( - &self, - workspace_id: &WorkspaceId, - pane_group: &SerializedPaneGroup, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } - - let (parent_id, position) = unzip_option(parent); - - match pane_group { - SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; - - for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? - } - Ok(()) - } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), - } - } - - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( - workspace_id, - )? 
- .context("No dock pane for workspace")?; - - Ok(SerializedPane::new( - self.get_items(pane_id).context("Reading items")?, - )) - } - - pub(crate) fn save_pane( - &self, - workspace_id: &WorkspaceId, - pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - - let pane_id = self.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; - - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") - } -} - -#[cfg(test)] -mod tests { - - use crate::{ - model::{SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace}, - Db, - }; - - fn default_workspace( - dock_pane: SerializedPane, - center_group: &SerializedPaneGroup, - ) -> SerializedWorkspace { - SerializedWorkspace { - dock_anchor: crate::model::DockAnchor::Right, - dock_visible: false, - center_group: center_group.clone(), - dock_pane, - } - } - - #[test] - fn test_basic_dock_pane() { - env_logger::try_init().ok(); - - let db = Db::open_in_memory("basic_dock_pane"); - - let dock_pane = crate::model::SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }; - - let workspace = default_workspace(dock_pane, &Default::default()); - - db.save_workspace(&["/tmp"], None, &workspace); - - let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); - - assert_eq!(workspace.dock_pane, new_workspace.dock_pane); - } - - #[test] - fn test_simple_split() { - env_logger::try_init().ok(); - - let db = Db::open_in_memory("simple_split"); - - // ----------------- - // | 1,2 | 5,6 | - // | - - - | | - // | 3,4 | | - // ----------------- - let center_pane = SerializedPaneGroup::Group { - axis: crate::model::Axis::Horizontal, - children: vec![ - SerializedPaneGroup::Group { - axis: 
crate::model::Axis::Vertical, - children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }), - ], - }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, - ], - }), - ], - }; - - let workspace = default_workspace(Default::default(), ¢er_pane); - - db.save_workspace(&["/tmp"], None, &workspace); - - assert_eq!(workspace.center_group, center_pane); - } -} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 08714d6cd315277480c6897e3742deeecb95a79a..d1d8c96ce2d2e970cdb96f564680b05e064b5aab 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -63,7 +63,7 @@ use std::{ use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; -pub use db::Db; +pub use db::{kvp::KeyValue, Db}; pub use fs::*; pub use worktree::*; diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs new file mode 100644 index 0000000000000000000000000000000000000000..01b17eea31a44085f8688da42de53ff4a7dbe804 --- /dev/null +++ b/crates/sqlez/src/domain.rs @@ -0,0 +1,39 @@ +use crate::connection::Connection; + +pub trait Domain: Send + Sync + Clone { + fn migrate(conn: &Connection) -> anyhow::Result<()>; +} + +impl Domain for (D1, D2) { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + D1::migrate(conn)?; + D2::migrate(conn) + } +} + +impl Domain for (D1, D2, D3) { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + D1::migrate(conn)?; + D2::migrate(conn)?; + D3::migrate(conn) + } +} + +impl Domain for (D1, D2, D3, D4) { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + D1::migrate(conn)?; + D2::migrate(conn)?; + 
D3::migrate(conn)?; + D4::migrate(conn) + } +} + +impl Domain for (D1, D2, D3, D4, D5) { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + D1::migrate(conn)?; + D2::migrate(conn)?; + D3::migrate(conn)?; + D4::migrate(conn)?; + D5::migrate(conn) + } +} diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index 155fb28901dddbf524f9c76b97901bd5346e0ce9..ecebbd264301040aa5c40a9e9daa4d52184081cc 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -1,5 +1,6 @@ pub mod bindable; pub mod connection; +pub mod domain; pub mod migrations; pub mod savepoint; pub mod statement; diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index b78358deb9008f085b8e195e68e283c7cbcfd863..9751aac51d90966c8c6aaa5386d2bc9f3da9573e 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -1,5 +1,5 @@ use anyhow::Result; -use indoc::{formatdoc, indoc}; +use indoc::formatdoc; use crate::connection::Connection; diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 45e22e4b3fd77a3db478350e74bdf081b7b714fd..1081101f6a9ec32cd3a66a93a5812b1884c00076 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,26 +1,26 @@ -use std::{ops::Deref, sync::Arc}; +use std::{marker::PhantomData, ops::Deref, sync::Arc}; use connection::Connection; use thread_local::ThreadLocal; -use crate::{connection, migrations::Migration}; +use crate::{connection, domain::Domain}; -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, - migrations: Option<&'static [Migration]>, connection: Arc>, + _pd: PhantomData, } -impl ThreadSafeConnection { +impl ThreadSafeConnection { pub fn new(uri: &str, persistent: bool) -> Self { Self { uri: Arc::from(uri), persistent, initialize_query: None, - migrations: None, connection: Default::default(), + _pd: PhantomData, } 
} @@ -31,13 +31,6 @@ impl ThreadSafeConnection { self } - /// Migrations have to be run per connection because we fallback to memory - /// so this needs - pub fn with_migrations(mut self, migrations: &'static [Migration]) -> Self { - self.migrations = Some(migrations); - self - } - /// Opens a new db connection with the initialized file path. This is internal and only /// called from the deref function. /// If opening fails, the connection falls back to a shared memory connection @@ -50,21 +43,33 @@ impl ThreadSafeConnection { fn open_shared_memory(&self) -> Connection { Connection::open_memory(self.uri.as_ref()) } + + // Open a new connection for the given domain, leaving this + // connection intact. + pub fn for_domain(&self) -> ThreadSafeConnection { + ThreadSafeConnection { + uri: self.uri.clone(), + persistent: self.persistent, + initialize_query: self.initialize_query, + connection: Default::default(), + _pd: PhantomData, + } + } } -impl Clone for ThreadSafeConnection { +impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), - migrations: self.migrations.clone(), connection: self.connection.clone(), + _pd: PhantomData, } } } -impl Deref for ThreadSafeConnection { +impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { @@ -83,13 +88,7 @@ impl Deref for ThreadSafeConnection { .unwrap(); } - if let Some(migrations) = self.migrations { - for migration in migrations { - migration - .run(&connection) - .expect(&format!("Migrations failed to execute: {:?}", migration)); - } - } + D::migrate(&connection).expect("Migrations failed"); connection }) diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 01313f2046d5c707259369e732079662b057afdd..644fa9481e4da782efd99c52fad0e3362cf12b0e 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1925,7 +1925,7 @@ mod 
tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = - cx.add_window(|cx| Workspace::new(project, |_, _| unimplemented!(), cx)); + cx.add_window(|cx| Workspace::new(None, project, |_, _| unimplemented!(), cx)); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); add_labled_item(&workspace, &pane, "A", cx); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index d1dbc6982b27a178769e3bc29cec8f4474a5de11..990f7142eef03ed9b4fa3722204ebf26da5c988f 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -15,7 +15,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{model::SerializedWorkspace, Db}; +use db::{kvp::KeyValue, model::SerializedWorkspace, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -1288,7 +1288,8 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { - cx.global::() + cx.global::>() + .open_as::() .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 84d18ba22f7a8b7d11322d50e2eb706802139e86..359648b7d7048c62f03d4c543f7bf1da286fd471 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -57,7 +57,7 @@ fn main() { init_panic_hook(app_version, http.clone(), app.background()); let db = app.background().spawn(async move { - project::Db::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) + project::Db::::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) }); load_embedded_fonts(&app); @@ -150,7 +150,7 @@ fn main() { let db = cx.background().block(db); cx.set_global(db); - client.start_telemetry(cx.global::().clone()); + 
client.start_telemetry(cx.global::>().clone()); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -165,7 +165,7 @@ fn main() { default_item_factory, }); auto_update::init( - cx.global::().clone(), + cx.global::>().clone(), http, client::ZED_SERVER_URL.clone(), cx, From e578f2530e1c2b6c54dc00416234d16e401a4622 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 10 Nov 2022 16:40:35 -0800 Subject: [PATCH 44/86] WIP commit, migrating workspace serialization code into the workspace --- crates/db/src/db.rs | 3 - crates/db/src/workspace/model.rs | 257 ----------------- crates/workspace/src/workspace.rs | 9 +- .../src/workspace_db.rs} | 262 +++++++++++++++++- 4 files changed, 266 insertions(+), 265 deletions(-) delete mode 100644 crates/db/src/workspace/model.rs rename crates/{db/src/workspace.rs => workspace/src/workspace_db.rs} (68%) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 4e348b5614d4132d90c6fad61b50e59711d02913..02fc51ee8d40c73592632ea708271942a8b491ae 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,5 +1,4 @@ pub mod kvp; -pub mod workspace; use std::fs; use std::ops::Deref; @@ -11,8 +10,6 @@ use sqlez::connection::Connection; use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; -pub use workspace::*; - const INITIALIZE_QUERY: &'static str = indoc! 
{" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; diff --git a/crates/db/src/workspace/model.rs b/crates/db/src/workspace/model.rs deleted file mode 100644 index 36099f66e65faf284667c86302125bfc2fcdbc3e..0000000000000000000000000000000000000000 --- a/crates/db/src/workspace/model.rs +++ /dev/null @@ -1,257 +0,0 @@ -use std::{ - path::{Path, PathBuf}, - sync::Arc, -}; - -use anyhow::{bail, Result}; - -use sqlez::{ - bindable::{Bind, Column}, - statement::Statement, -}; - -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Vec); - -impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 - } -} - -impl, T: IntoIterator> From for WorkspaceId { - fn from(iterator: T) -> Self { - let mut roots = iterator - .into_iter() - .map(|p| p.as_ref().to_path_buf()) - .collect::>(); - roots.sort(); - Self(roots) - } -} - -impl Bind for &WorkspaceId { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - bincode::serialize(&self.0) - .expect("Bincode serialization of paths should not fail") - .bind(statement, start_index) - } -} - -impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) - } -} - -#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] -pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, -} - -impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } -} - -impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => 
DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq)] -pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub center_group: SerializedPaneGroup, - pub dock_pane: SerializedPane, -} - -#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] -pub enum Axis { - #[default] - Horizontal, - Vertical, -} - -impl Bind for Axis { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - Axis::Horizontal => "Horizontal", - Axis::Vertical => "Vertical", - } - .bind(statement, start_index) - } -} - -impl Column for Axis { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(axis_text, next_index)| { - Ok(( - match axis_text.as_str() { - "Horizontal" => Axis::Horizontal, - "Vertical" => Axis::Vertical, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedPaneGroup { - Group { - axis: Axis, - children: Vec, - }, - Pane(SerializedPane), -} - -// Dock panes, and grouped panes combined? 
-// AND we're collapsing PaneGroup::Pane -// In the case where - -impl Default for SerializedPaneGroup { - fn default() -> Self { - Self::Group { - axis: Axis::Horizontal, - children: vec![Self::Pane(Default::default())], - } - } -} - -#[derive(Debug, PartialEq, Eq, Default, Clone)] -pub struct SerializedPane { - pub(crate) children: Vec, -} - -impl SerializedPane { - pub fn new(children: Vec) -> Self { - SerializedPane { children } - } -} - -pub type GroupId = i64; -pub type PaneId = i64; -pub type ItemId = usize; - -pub(crate) enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, -} - -impl Bind for SerializedItemKind { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - SerializedItemKind::Editor => "Editor", - SerializedItemKind::Diagnostics => "Diagnostics", - SerializedItemKind::ProjectSearch => "ProjectSearch", - SerializedItemKind::Terminal => "Terminal", - } - .bind(statement, start_index) - } -} - -impl Column for SerializedItemKind { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(kind_text, next_index)| { - Ok(( - match kind_text.as_ref() { - "Editor" => SerializedItemKind::Editor, - "Diagnostics" => SerializedItemKind::Diagnostics, - "ProjectSearch" => SerializedItemKind::ProjectSearch, - "Terminal" => SerializedItemKind::Terminal, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } -} - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, -} - -impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. 
} => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } - - pub(crate) fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor { .. } => SerializedItemKind::Editor, - SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, - SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, - SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, - } - } -} - -#[cfg(test)] -mod tests { - use sqlez::connection::Connection; - - use crate::model::DockAnchor; - - use super::WorkspaceId; - - #[test] - fn test_workspace_round_trips() { - let db = Connection::open_memory("workspace_id_round_trips"); - - db.exec(indoc::indoc! {" - CREATE TABLE workspace_id_test( - workspace_id BLOB, - dock_anchor TEXT - );"}) - .unwrap()() - .unwrap(); - - let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); - - db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") - .unwrap()((&workspace_id, DockAnchor::Bottom)) - .unwrap(); - - assert_eq!( - db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") - .unwrap()() - .unwrap(), - Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) - ); - } -} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 990f7142eef03ed9b4fa3722204ebf26da5c988f..a994b8a8330df8ada540fbb99a4c335dd77796e9 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -10,12 +10,13 @@ pub mod shared_screen; pub mod sidebar; mod status_bar; mod toolbar; +mod workspace_db; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{kvp::KeyValue, model::SerializedWorkspace, Db}; +use db::{kvp::KeyValue, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -61,6 
+62,8 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; +use crate::workspace_db::model; + type ProjectItemBuilders = HashMap< TypeId, fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, @@ -1120,7 +1123,7 @@ enum FollowerItem { impl Workspace { pub fn new( - _serialized_workspace: Option, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1289,7 +1292,7 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { cx.global::>() - .open_as::() + .open_as::() .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) }); diff --git a/crates/db/src/workspace.rs b/crates/workspace/src/workspace_db.rs similarity index 68% rename from crates/db/src/workspace.rs rename to crates/workspace/src/workspace_db.rs index 17ff9cf22cb5c77308763c7ca0fe7532a7b9a7b6..3e10b06f853cc47689b81fd67a23beb100b1abd5 100644 --- a/crates/db/src/workspace.rs +++ b/crates/workspace/src/workspace_db.rs @@ -1,5 +1,3 @@ -pub mod model; - use anyhow::{bail, Context, Result}; use util::{iife, unzip_option, ResultExt}; @@ -493,3 +491,263 @@ mod tests { assert_eq!(workspace.center_group, center_pane); } } + +pub mod model { + use std::{ + path::{Path, PathBuf}, + sync::Arc, + }; + + use anyhow::{bail, Result}; + + use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, + }; + + #[derive(Debug, Clone, PartialEq, Eq)] + pub(crate) struct WorkspaceId(Vec); + + impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } + } + + impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } + } + + impl Bind for &WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + 
bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) + } + } + + impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + } + } + + #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)] + pub enum DockAnchor { + #[default] + Bottom, + Right, + Expanded, + } + + impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } + } + + impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq)] + pub struct SerializedWorkspace { + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, + } + + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] + pub enum Axis { + #[default] + Horizontal, + Vertical, + } + + impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, start_index) + } + } + + impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match axis_text.as_str() { + 
"Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone)] + pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), + } + + // Dock panes, and grouped panes combined? + // AND we're collapsing PaneGroup::Pane + // In the case where + + impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { + axis: Axis::Horizontal, + children: vec![Self::Pane(Default::default())], + } + } + } + + #[derive(Debug, PartialEq, Eq, Default, Clone)] + pub struct SerializedPane { + pub(crate) children: Vec, + } + + impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } + } + + pub type GroupId = i64; + pub type PaneId = i64; + pub type ItemId = usize; + + pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, + } + + impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics", + SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } + } + + impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { + Ok(( + match kind_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone)] + pub enum SerializedItem { + Editor { item_id: usize, 
path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String }, + Terminal { item_id: usize }, + } + + impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } + } + + #[cfg(test)] + mod tests { + use sqlez::connection::Connection; + + use crate::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! {" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap()() + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); + + assert_eq!( + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + ); + } + } +} From a5edac312e4b03fb9a5c30ac80943278f8e9307a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 10 Nov 2022 21:08:20 -0800 Subject: [PATCH 45/86] Moved to workspaces crate... 
don't feel great about it --- Cargo.lock | 5 +- crates/collab/Cargo.toml | 1 + crates/collab/src/integration_tests.rs | 10 +- crates/collab/src/main.rs | 2 +- crates/command_palette/src/command_palette.rs | 4 +- crates/db/Cargo.toml | 1 - crates/sqlez/src/domain.rs | 2 +- crates/workspace/Cargo.toml | 4 + crates/workspace/src/workspace.rs | 16 +- crates/workspace/src/workspace_db.rs | 170 ++++++++++-------- crates/zed/src/main.rs | 6 +- crates/zed/src/zed.rs | 4 +- 12 files changed, 128 insertions(+), 97 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fa755b1614a93faaecdba1ee005c66ab350dfa2..90482254742412a2221dbdd466e076c32d7cb4f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1040,6 +1040,7 @@ dependencies = [ "client", "collections", "ctor", + "db", "editor", "env_logger", "envy", @@ -1550,7 +1551,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bincode", "collections", "env_logger", "gpui", @@ -7620,6 +7620,7 @@ name = "workspace" version = "0.1.0" dependencies = [ "anyhow", + "bincode", "call", "client", "collections", @@ -7629,6 +7630,7 @@ dependencies = [ "fs", "futures 0.3.25", "gpui", + "indoc", "language", "log", "menu", @@ -7639,6 +7641,7 @@ dependencies = [ "serde_json", "settings", "smallvec", + "sqlez", "theme", "util", ] diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 09f379526eec23d44f2057e48b2fb7d7b27e2d17..1722d3374a4f223e43aa3d2331a224f4327c61ba 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -18,6 +18,7 @@ live_kit_server = { path = "../live_kit_server" } rpc = { path = "../rpc" } util = { path = "../util" } +db = { path = "../db" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 5de28f1c65480ab495647769408ffe0ff611ad32..bfc14618eab756d15a790b6976a186ce5d7e5d26 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs 
@@ -1,8 +1,9 @@ use crate::{ - db::{NewUserParams, ProjectId, SqliteTestDb as TestDb, UserId}, + db::{Db, NewUserParams, ProjectId, UserId}, rpc::{Executor, Server}, AppState, }; + use ::rpc::Peer; use anyhow::anyhow; use call::{room, ActiveCall, ParticipantLocation, Room}; @@ -11,6 +12,7 @@ use client::{ User, UserStore, RECEIVE_TIMEOUT, }; use collections::{BTreeMap, HashMap, HashSet}; +use db as SqliteDb; use editor::{ self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Redo, Rename, ToOffset, ToggleCodeActions, Undo, @@ -5836,7 +5838,11 @@ impl TestServer { Project::init(&client); cx.update(|cx| { - workspace::init(app_state.clone(), cx); + workspace::init( + app_state.clone(), + cx, + SqliteDb::open_in_memory("integration tests"), + ); call::init(client.clone(), user_store.clone(), cx); }); diff --git a/crates/collab/src/main.rs b/crates/collab/src/main.rs index dc98a2ee6855c072f5adc9ed95dbad38626eca48..d26ea1a0fa8edbb92113d7cc2f812b4bc28c0d19 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -9,11 +9,11 @@ mod db_tests; #[cfg(test)] mod integration_tests; +use crate::db::{Db, PostgresDb}; use crate::rpc::ResultExt as _; use anyhow::anyhow; use axum::{routing::get, Router}; use collab::{Error, Result}; -use db::DefaultDb as Db; use serde::Deserialize; use std::{ env::args, diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index 5af23b45d720ecfad4ed9faa3dd777d1238f2022..f2542c9bc89a509721ee648de2653ed92a69b5a3 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -320,7 +320,7 @@ mod tests { use super::*; use editor::Editor; use gpui::TestAppContext; - use project::Project; + use project::{Db, Project}; use workspace::{AppState, Workspace}; #[test] @@ -345,7 +345,7 @@ mod tests { cx.update(|cx| { editor::init(cx); - workspace::init(app_state.clone(), cx); + workspace::init(app_state.clone(), cx, 
Db::open_in_memory("test")); init(cx); }); diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index b69779c4089ed9691d4dd4aa04e41176477c42a1..27a11bea7bcfad2110770cd0708c63909fcc7d8c 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,7 +22,6 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } -bincode = "1.2.1" [dev-dependencies] diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index 01b17eea31a44085f8688da42de53ff4a7dbe804..f57e89a5c8f453d41d6f1b3cd6d2d5501fa63ba6 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,6 +1,6 @@ use crate::connection::Connection; -pub trait Domain: Send + Sync + Clone { +pub trait Domain { fn migrate(conn: &Connection) -> anyhow::Result<()>; } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index c481792f7cc1924befd353d20e92fe9baac1600a..f8bcba5eb7b5041975801eaa38cf5368a221f745 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -30,8 +30,10 @@ language = { path = "../language" } menu = { path = "../menu" } project = { path = "../project" } settings = { path = "../settings" } +sqlez = { path = "../sqlez" } theme = { path = "../theme" } util = { path = "../util" } +bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" log = { version = "0.4.16", features = ["kv_unstable_serde"] } @@ -40,6 +42,8 @@ postage = { version = "0.4.1", features = ["futures-traits"] } serde = { version = "1.0", features = ["derive", "rc"] } serde_json = { version = "1.0", features = ["preserve_order"] } smallvec = { version = "1.6", features = ["union"] } +indoc = "1.0.4" + [dev-dependencies] call = { path = "../call", features = ["test-support"] } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index a994b8a8330df8ada540fbb99a4c335dd77796e9..39843859c0319e20120b503addb08f446209dd96 100644 --- 
a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -12,6 +12,7 @@ mod status_bar; mod toolbar; mod workspace_db; +use crate::workspace_db::model::SerializedWorkspace; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; @@ -62,8 +63,6 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::workspace_db::model; - type ProjectItemBuilders = HashMap< TypeId, fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, @@ -166,7 +165,9 @@ impl_internal_actions!( ); impl_actions!(workspace, [ActivatePane]); -pub fn init(app_state: Arc, cx: &mut MutableAppContext) { +pub fn init(app_state: Arc, cx: &mut MutableAppContext, db: Db) { + cx.set_global(db); + pane::init(cx); dock::init(cx); @@ -1123,7 +1124,7 @@ enum FollowerItem { impl Workspace { pub fn new( - _serialized_workspace: Option, + _serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1291,9 +1292,10 @@ impl Workspace { // Use the resolved worktree roots to get the serialized_db from the database let serialized_workspace = cx.read(|cx| { - cx.global::>() - .open_as::() - .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]) + Workspace::workspace_for_roots( + cx.global::>(), + &Vec::from_iter(worktree_roots.into_iter())[..], + ) }); // Use the serialized workspace to construct the new window diff --git a/crates/workspace/src/workspace_db.rs b/crates/workspace/src/workspace_db.rs index 3e10b06f853cc47689b81fd67a23beb100b1abd5..e896dd6c275c8f82438a5588fdced2aa6bd2333b 100644 --- a/crates/workspace/src/workspace_db.rs +++ b/crates/workspace/src/workspace_db.rs @@ -1,18 +1,20 @@ use anyhow::{bail, Context, Result}; + +use db::Db; use util::{iife, unzip_option, ResultExt}; use std::path::{Path, PathBuf}; use indoc::indoc; -use sqlez::{domain::Domain, 
migrations::Migration}; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use super::Workspace; use self::model::{ Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceId, }; -use super::Db; - // 1) Move all of this into Workspace crate // 2) Deserialize items fully // 3) Typed prepares (including how you expect to pull data out) @@ -70,23 +72,20 @@ pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( "}], ); -#[derive(Clone)] -pub enum Workspace {} - impl Domain for Workspace { - fn migrate(conn: &sqlez::connection::Connection) -> anyhow::Result<()> { + fn migrate(conn: &Connection) -> anyhow::Result<()> { WORKSPACES_MIGRATION.run(&conn)?; PANE_MIGRATIONS.run(&conn)?; ITEM_MIGRATIONS.run(&conn) } } -impl Db { +impl Workspace { /// Returns a serialized workspace for the given worktree_roots. If the passed array /// is empty, the most recent workspace is returned instead. If no workspace for the /// passed roots is stored, returns none. pub fn workspace_for_roots>( - &self, + db: &Db, worktree_roots: &[P], ) -> Option { let workspace_id: WorkspaceId = worktree_roots.into(); @@ -95,12 +94,12 @@ impl Db { // and we've grabbed the most recent workspace let (workspace_id, dock_anchor, dock_visible) = iife!({ if worktree_roots.len() == 0 { - self.select_row(indoc! {" + db.select_row(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { - self.select_row_bound(indoc! {" + db.select_row_bound(indoc! {" SELECT workspace_id, dock_anchor, dock_visible FROM workspaces WHERE workspace_id = ?"})?(&workspace_id)? 
@@ -111,12 +110,10 @@ impl Db { .flatten()?; Some(SerializedWorkspace { - dock_pane: self - .get_dock_pane(&workspace_id) + dock_pane: Workspace::get_dock_pane(&db, &workspace_id) .context("Getting dock pane") .log_err()?, - center_group: self - .get_center_pane_group(&workspace_id) + center_group: Workspace::get_center_pane_group(&db, &workspace_id) .context("Getting center group") .log_err()?, dock_anchor, @@ -127,32 +124,32 @@ impl Db { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously pub fn save_workspace>( - &self, + db: &Db, worktree_roots: &[P], old_roots: Option<&[P]>, workspace: &SerializedWorkspace, ) { let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", || { + db.with_savepoint("update_worktrees", || { if let Some(old_roots) = old_roots { let old_id: WorkspaceId = old_roots.into(); - self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + db.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; } // Delete any previous workspaces with the same roots. This cascades to all // other tables that are based on the same roots set. 
// Insert new workspace into workspaces table if none were found - self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; + db.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - self.exec_bound( + db.exec_bound( "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; // Save center pane group and dock pane - self.save_pane_group(&workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + Workspace::save_pane_group(db, &workspace_id, &workspace.center_group, None)?; + Workspace::save_pane(db, &workspace_id, &workspace.dock_pane, None)?; Ok(()) }) @@ -169,11 +166,11 @@ impl Db { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec> { + pub fn recent_workspaces(conn: &Connection, limit: usize) -> Vec> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.select_bound::( + conn.select_bound::( "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", )?(limit)? .into_iter() @@ -186,21 +183,21 @@ impl Db { } pub(crate) fn get_center_pane_group( - &self, + db: &Db, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? + Workspace::get_pane_group_children(&db, workspace_id, None)? .into_iter() .next() .context("No center pane group") } fn get_pane_group_children<'a>( - &self, + db: &Db, workspace_id: &WorkspaceId, group_id: Option, ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + db.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! 
{" SELECT group_id, axis, pane_id FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id FROM pane_groups @@ -217,14 +214,15 @@ impl Db { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children( + children: Workspace::get_pane_group_children( + db, workspace_id, Some(group_id), )?, }) } else if let Some(pane_id) = pane_id { Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items(pane_id)?, + children: Workspace::get_items(db, pane_id)?, })) } else { bail!("Pane Group Child was neither a pane group or a pane"); @@ -234,7 +232,7 @@ impl Db { } pub(crate) fn save_pane_group( - &self, + db: &Db, workspace_id: &WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, @@ -247,20 +245,28 @@ impl Db { match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + let parent_id = db.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? ((workspace_id, parent_id, position, *axis))?; for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + Workspace::save_pane_group( + db, + workspace_id, + group, + Some((parent_id, position)), + )? } Ok(()) } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + SerializedPaneGroup::Pane(pane) => Workspace::save_pane(db, workspace_id, pane, parent), } } - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" + pub(crate) fn get_dock_pane( + db: &Db, + workspace_id: &WorkspaceId, + ) -> Result { + let pane_id = db.select_row_bound(indoc! {" SELECT pane_id FROM panes WHERE workspace_id = ? 
AND parent_group_id IS NULL AND position IS NULL"})?( workspace_id, @@ -268,28 +274,27 @@ impl Db { .context("No dock pane for workspace")?; Ok(SerializedPane::new( - self.get_items(pane_id).context("Reading items")?, + Workspace::get_items(db, pane_id).context("Reading items")?, )) } pub(crate) fn save_pane( - &self, + db: &Db, workspace_id: &WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - let pane_id = self.insert_bound( + let pane_id = db.insert_bound( "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", )?((workspace_id, parent_id, order))?; - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") + Workspace::save_items(db, workspace_id, pane_id, &pane.children).context("Saving items") } - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" + pub(crate) fn get_items(db: &Db, pane_id: PaneId) -> Result> { + Ok(db.select_bound(indoc! {" SELECT item_id, kind FROM items WHERE pane_id = ? ORDER BY position"})?(pane_id)? @@ -302,15 +307,15 @@ impl Db { } pub(crate) fn save_items( - &self, + db: &Db, workspace_id: &WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut delete_old = self + let mut delete_old = db .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( + let mut insert_new = db.exec_bound( "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { @@ -324,17 +329,12 @@ impl Db { #[cfg(test)] mod tests { - use crate::{ - model::{ - DockAnchor::{Bottom, Expanded, Right}, - SerializedWorkspace, - }, - Db, - }; + use crate::workspace_db::model::DockAnchor::{Bottom, Expanded, Right}; + use crate::{Db, Workspace}; #[test] fn test_workspace_assignment() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("test_basic_functionality"); @@ -359,61 +359,73 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); - db.save_workspace(&["/tmp"], None, &workspace_2); + Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_1); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace_2); db.write_to("test.db").unwrap(); // Test that paths are treated as a set assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_1 ); assert_eq!( - db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp"]).unwrap(), workspace_1 ); // Make sure that other keys work - assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); - assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); + assert_eq!( + Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(), + workspace_2 + ); + assert_eq!( + Workspace::workspace_for_roots(&db, &["/tmp3", "/tmp2", "/tmp4"]), + None + ); // Test 'mutate' case of updating a pre-existing id - db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + Workspace::save_workspace( + &db, + &["/tmp", "/tmp2"], + 
Some(&["/tmp", "/tmp2"]), + &workspace_2, + ); assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_2 ); // Test other mechanism for mutating - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_3); assert_eq!( - db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works - db.save_workspace( + Workspace::save_workspace( + &db, &["/tmp3", "/tmp4", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_3, ); - assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!(Workspace::workspace_for_roots(&db, &["/tmp2", "tmp"]), None); assert_eq!( - db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) - .unwrap(), + Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp3", "/tmp4"]).unwrap(), workspace_3 ); } - use crate::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + use crate::workspace_db::model::SerializedWorkspace; + use crate::workspace_db::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; fn default_workspace( dock_pane: SerializedPane, center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - dock_anchor: crate::model::DockAnchor::Right, + dock_anchor: crate::workspace_db::model::DockAnchor::Right, dock_visible: false, center_group: center_group.clone(), dock_pane, @@ -422,11 +434,11 @@ mod tests { #[test] fn test_basic_dock_pane() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("basic_dock_pane"); - let dock_pane = crate::model::SerializedPane { + let dock_pane = crate::workspace_db::model::SerializedPane { children: vec![ SerializedItem::Terminal { item_id: 1 }, SerializedItem::Terminal { item_id: 4 }, @@ -437,16 +449,16 @@ mod tests { let workspace = 
default_workspace(dock_pane, &Default::default()); - db.save_workspace(&["/tmp"], None, &workspace); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } #[test] fn test_simple_split() { - env_logger::try_init().ok(); + // env_logger::try_init().ok(); let db = Db::open_in_memory("simple_split"); @@ -456,10 +468,10 @@ mod tests { // | 3,4 | | // ----------------- let center_pane = SerializedPaneGroup::Group { - axis: crate::model::Axis::Horizontal, + axis: crate::workspace_db::model::Axis::Horizontal, children: vec![ SerializedPaneGroup::Group { - axis: crate::model::Axis::Vertical, + axis: crate::workspace_db::model::Axis::Vertical, children: vec![ SerializedPaneGroup::Pane(SerializedPane { children: vec![ @@ -486,7 +498,7 @@ mod tests { let workspace = default_workspace(Default::default(), ¢er_pane); - db.save_workspace(&["/tmp"], None, &workspace); + Workspace::save_workspace(&db, &["/tmp"], None, &workspace); assert_eq!(workspace.center_group, center_pane); } @@ -720,7 +732,7 @@ pub mod model { mod tests { use sqlez::connection::Connection; - use crate::model::DockAnchor; + use crate::workspace_db::model::DockAnchor; use super::WorkspaceId; diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 359648b7d7048c62f03d4c543f7bf1da286fd471..05f5b8871fb06c484d10d4365825d915429fffa0 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -170,7 +170,11 @@ fn main() { client::ZED_SERVER_URL.clone(), cx, ); - workspace::init(app_state.clone(), cx); + + let workspace_db = cx.global::>().open_as::(); + + workspace::init(app_state.clone(), cx, workspace_db); + journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); zed::init(&app_state, cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 
de785ca9783f3a6f637a434c744b42d4ccb6bf41..d6106d78e486dc0f386bd3fc159124303be4898c 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -630,7 +630,7 @@ mod tests { use gpui::{ executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle, }; - use project::{Project, ProjectPath}; + use project::{Db, Project, ProjectPath}; use serde_json::json; use std::{ collections::HashSet, @@ -1817,7 +1817,7 @@ mod tests { state.initialize_workspace = initialize_workspace; state.build_window_options = build_window_options; call::init(app_state.client.clone(), app_state.user_store.clone(), cx); - workspace::init(app_state.clone(), cx); + workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); editor::init(cx); pane::init(cx); app_state From 2a5565ca93bfa41879159cfad3e576744259b568 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 14 Nov 2022 11:25:11 -0800 Subject: [PATCH 46/86] WIP --- Cargo.lock | 2 +- crates/util/Cargo.toml | 1 + crates/util/src/lib.rs | 1 + crates/{zed => util}/src/paths.rs | 0 crates/zed/Cargo.toml | 1 - crates/zed/src/main.rs | 25 ++++++++++++------------- crates/zed/src/zed.rs | 3 +-- 7 files changed, 16 insertions(+), 17 deletions(-) rename crates/{zed => util}/src/paths.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 90482254742412a2221dbdd466e076c32d7cb4f7..74860439dd8d953f6a5c4f84cc9162ee20444bf0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6793,6 +6793,7 @@ version = "0.1.0" dependencies = [ "anyhow", "backtrace", + "dirs 3.0.2", "futures 0.3.25", "git2", "lazy_static", @@ -7707,7 +7708,6 @@ dependencies = [ "context_menu", "ctor", "diagnostics", - "dirs 3.0.2", "easy-parallel", "editor", "env_logger", diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml index fc16eeb53c3751006917e181b2f4ae184d9d2940..0a0bacf53c0b5dc75ad59091c55319ca06f1c654 100644 --- a/crates/util/Cargo.toml +++ b/crates/util/Cargo.toml @@ -19,6 +19,7 @@ rand = { workspace = true } tempdir = { version = "0.3.7", 
optional = true } serde_json = { version = "1.0", features = ["preserve_order"], optional = true } git2 = { version = "0.15", default-features = false, optional = true } +dirs = "3.0" [dev-dependencies] diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 19d17c1190a8e73851d9a7c00bc60e411b58d360..b03bc21210f2d6f9f1685367d7b9a91a2b6d3904 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -1,3 +1,4 @@ +pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; diff --git a/crates/zed/src/paths.rs b/crates/util/src/paths.rs similarity index 100% rename from crates/zed/src/paths.rs rename to crates/util/src/paths.rs diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index a3023918e378fbc8845ab2a6dcc7d5b22fbcdd8f..7fef0aafcf8adf7b1e57513f3f0e52aad2d9cca5 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -62,7 +62,6 @@ async-trait = "0.1" backtrace = "0.3" chrono = "0.4" ctor = "0.1.20" -dirs = "3.0" easy-parallel = "3.1.0" env_logger = "0.9" futures = "0.3" diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 05f5b8871fb06c484d10d4365825d915429fffa0..6e7aaba3c6e064f924814de4f3bf8d941d228241 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -37,7 +37,7 @@ use terminal::terminal_container_view::{get_working_directory, TerminalContainer use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; use theme::ThemeRegistry; -use util::{ResultExt, TryFutureExt}; +use util::{paths, ResultExt, TryFutureExt}; use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace}; use zed::{ self, build_window_options, initialize_workspace, languages, menus, RELEASE_CHANNEL, @@ -57,7 +57,7 @@ fn main() { init_panic_hook(app_version, http.clone(), app.background()); let db = app.background().spawn(async move { - project::Db::::open(&*zed::paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) + project::Db::::open(&*paths::DB_DIR, 
RELEASE_CHANNEL_NAME.as_str()) }); load_embedded_fonts(&app); @@ -91,11 +91,11 @@ fn main() { app.run(move |cx| { cx.set_global(*RELEASE_CHANNEL); - cx.set_global(HomeDir(zed::paths::HOME.to_path_buf())); + cx.set_global(HomeDir(paths::HOME.to_path_buf())); let client = client::Client::new(http.clone(), cx); let mut languages = LanguageRegistry::new(login_shell_env_loaded); - languages.set_language_server_download_dir(zed::paths::LANGUAGES_DIR.clone()); + languages.set_language_server_download_dir(paths::LANGUAGES_DIR.clone()); let languages = Arc::new(languages); let init_languages = cx .background() @@ -106,7 +106,7 @@ fn main() { //Setup settings global before binding actions cx.set_global(SettingsFile::new( - &*zed::paths::SETTINGS, + &*paths::SETTINGS, settings_file_content.clone(), fs.clone(), )); @@ -236,16 +236,15 @@ fn init_logger() { const KIB: u64 = 1024; const MIB: u64 = 1024 * KIB; const MAX_LOG_BYTES: u64 = MIB; - if std::fs::metadata(&*zed::paths::LOG) - .map_or(false, |metadata| metadata.len() > MAX_LOG_BYTES) + if std::fs::metadata(&*paths::LOG).map_or(false, |metadata| metadata.len() > MAX_LOG_BYTES) { - let _ = std::fs::rename(&*zed::paths::LOG, &*zed::paths::OLD_LOG); + let _ = std::fs::rename(&*paths::LOG, &*paths::OLD_LOG); } let log_file = OpenOptions::new() .create(true) .append(true) - .open(&*zed::paths::LOG) + .open(&*paths::LOG) .expect("could not open logfile"); simplelog::WriteLogger::init(level, simplelog::Config::default(), log_file) .expect("could not initialize logger"); @@ -257,7 +256,7 @@ fn init_panic_hook(app_version: String, http: Arc, background: A .spawn({ async move { let panic_report_url = format!("{}/api/panic", &*client::ZED_SERVER_URL); - let mut children = smol::fs::read_dir(&*zed::paths::LOGS_DIR).await?; + let mut children = smol::fs::read_dir(&*paths::LOGS_DIR).await?; while let Some(child) = children.next().await { let child = child?; let child_path = child.path(); @@ -345,7 +344,7 @@ fn 
init_panic_hook(app_version: String, http: Arc, background: A let panic_filename = chrono::Utc::now().format("%Y_%m_%d %H_%M_%S").to_string(); std::fs::write( - zed::paths::LOGS_DIR.join(format!("zed-{}-{}.panic", app_version, panic_filename)), + paths::LOGS_DIR.join(format!("zed-{}-{}.panic", app_version, panic_filename)), &message, ) .context("error writing panic to disk") @@ -479,8 +478,8 @@ fn load_config_files( .clone() .spawn(async move { let settings_file = - WatchedJsonFile::new(fs.clone(), &executor, zed::paths::SETTINGS.clone()).await; - let keymap_file = WatchedJsonFile::new(fs, &executor, zed::paths::KEYMAP.clone()).await; + WatchedJsonFile::new(fs.clone(), &executor, paths::SETTINGS.clone()).await; + let keymap_file = WatchedJsonFile::new(fs, &executor, paths::KEYMAP.clone()).await; tx.send((settings_file, keymap_file)).ok() }) .detach(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index d6106d78e486dc0f386bd3fc159124303be4898c..a8ec71bd4b5fa918ddb568c6350471b0be5c8756 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -1,7 +1,6 @@ mod feedback; pub mod languages; pub mod menus; -pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; @@ -31,7 +30,7 @@ use serde::Deserialize; use serde_json::to_string_pretty; use settings::{keymap_file_json_schema, settings_file_json_schema, ReleaseChannel, Settings}; use std::{env, path::Path, str, sync::Arc}; -use util::ResultExt; +use util::{paths, ResultExt}; pub use workspace; use workspace::{sidebar::SidebarSide, AppState, Workspace}; From 479816111815c5eaadea28a3b027cd6a9596018b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 14 Nov 2022 13:18:44 -0800 Subject: [PATCH 47/86] Distributed database pattern built. 
Co-Authored-By: kay@zed.dev --- Cargo.lock | 4 +- crates/auto_update/src/auto_update.rs | 29 +- crates/auto_update/src/update_notification.rs | 3 +- crates/client/src/client.rs | 7 +- crates/client/src/telemetry.rs | 22 +- crates/collab/Cargo.toml | 1 - crates/collab/src/integration_tests.rs | 11 +- crates/command_palette/src/command_palette.rs | 4 +- crates/db/src/db.rs | 65 +- crates/db/src/kvp.rs | 31 +- crates/gpui/Cargo.toml | 1 + .../bindings/node/binding.cc | 12 +- crates/gpui/src/presenter.rs | 30 + crates/project/src/project.rs | 1 - crates/settings/Cargo.toml | 1 + crates/settings/src/settings.rs | 51 +- crates/sqlez/src/statement.rs | 4 +- crates/sqlez/src/thread_safe_connection.rs | 3 + crates/util/src/channel.rs | 32 + crates/util/src/lib.rs | 1 + crates/workspace/Cargo.toml | 1 + crates/workspace/src/persistence.rs | 494 +++++++++++ crates/workspace/src/persistence/model.rs | 188 +++++ crates/workspace/src/workspace.rs | 17 +- crates/workspace/src/workspace_db.rs | 765 ------------------ crates/workspace/test.db | Bin 0 -> 32768 bytes crates/zed/src/main.rs | 28 +- crates/zed/src/zed.rs | 22 +- 28 files changed, 892 insertions(+), 936 deletions(-) create mode 100644 crates/util/src/channel.rs create mode 100644 crates/workspace/src/persistence.rs create mode 100644 crates/workspace/src/persistence/model.rs delete mode 100644 crates/workspace/src/workspace_db.rs create mode 100644 crates/workspace/test.db diff --git a/Cargo.lock b/Cargo.lock index 74860439dd8d953f6a5c4f84cc9162ee20444bf0..bad036a05d4883b8f3fb7f91d6cc12c0bc2fdb17 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1040,7 +1040,6 @@ dependencies = [ "client", "collections", "ctor", - "db", "editor", "env_logger", "envy", @@ -2428,6 +2427,7 @@ dependencies = [ "simplelog", "smallvec", "smol", + "sqlez", "sum_tree", "time 0.3.17", "tiny-skia", @@ -5307,6 +5307,7 @@ dependencies = [ "serde", "serde_json", "serde_path_to_error", + "sqlez", "theme", "toml", "tree-sitter", @@ -7633,6 +7634,7 @@ 
dependencies = [ "gpui", "indoc", "language", + "lazy_static", "log", "menu", "parking_lot 0.11.2", diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index d6eaaab82619fe9bfb6bac221019a0a61cd77d16..2a8d2fcf05bb650f31581baa384d5f6e9860941a 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -2,17 +2,17 @@ mod update_notification; use anyhow::{anyhow, Context, Result}; use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN}; -use db::{kvp::KeyValue, Db}; +use db::kvp::KEY_VALUE_STORE; use gpui::{ actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, MutableAppContext, Task, WeakViewHandle, }; use lazy_static::lazy_static; use serde::Deserialize; -use settings::ReleaseChannel; use smol::{fs::File, io::AsyncReadExt, process::Command}; use std::{env, ffi::OsString, path::PathBuf, sync::Arc, time::Duration}; use update_notification::UpdateNotification; +use util::channel::ReleaseChannel; use workspace::Workspace; const SHOULD_SHOW_UPDATE_NOTIFICATION_KEY: &str = "auto-updater-should-show-updated-notification"; @@ -42,7 +42,6 @@ pub struct AutoUpdater { current_version: AppVersion, http_client: Arc, pending_poll: Option>, - db: project::Db, server_url: String, } @@ -56,16 +55,11 @@ impl Entity for AutoUpdater { type Event = (); } -pub fn init( - db: Db, - http_client: Arc, - server_url: String, - cx: &mut MutableAppContext, -) { +pub fn init(http_client: Arc, server_url: String, cx: &mut MutableAppContext) { if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) { let server_url = server_url; let auto_updater = cx.add_model(|cx| { - let updater = AutoUpdater::new(version, db, http_client, server_url.clone()); + let updater = AutoUpdater::new(version, http_client, server_url.clone()); updater.start_polling(cx).detach(); updater }); @@ -126,14 +120,12 @@ impl AutoUpdater { fn new( current_version: AppVersion, - db: 
project::Db, http_client: Arc, server_url: String, ) -> Self { Self { status: AutoUpdateStatus::Idle, current_version, - db, http_client, server_url, pending_poll: None, @@ -303,20 +295,21 @@ impl AutoUpdater { should_show: bool, cx: &AppContext, ) -> Task> { - let db = self.db.clone(); cx.background().spawn(async move { if should_show { - db.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?; + KEY_VALUE_STORE.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?; } else { - db.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?; + KEY_VALUE_STORE.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?; } Ok(()) }) } fn should_show_update_notification(&self, cx: &AppContext) -> Task> { - let db = self.db.clone(); - cx.background() - .spawn(async move { Ok(db.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?.is_some()) }) + cx.background().spawn(async move { + Ok(KEY_VALUE_STORE + .read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)? + .is_some()) + }) } } diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 133a197f153909b86a7981514aab2aee682dc7ca..9963ae65b80f3dec9677aac88e7ac888f23d980e 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -5,7 +5,8 @@ use gpui::{ Element, Entity, MouseButton, View, ViewContext, }; use menu::Cancel; -use settings::{ReleaseChannel, Settings}; +use settings::Settings; +use util::channel::ReleaseChannel; use workspace::Notification; pub struct UpdateNotification { diff --git a/crates/client/src/client.rs b/crates/client/src/client.rs index 907f7e80f1ad8f5178b3970ac41a9089fb9e9984..f9b3a88545e1a611fe611e14a86e55c07a6be371 100644 --- a/crates/client/src/client.rs +++ b/crates/client/src/client.rs @@ -11,7 +11,6 @@ use async_tungstenite::tungstenite::{ error::Error as WebsocketError, http::{Request, StatusCode}, }; -use db::{kvp::KeyValue, Db}; use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, 
TryStreamExt}; use gpui::{ actions, @@ -27,7 +26,6 @@ use postage::watch; use rand::prelude::*; use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage}; use serde::Deserialize; -use settings::ReleaseChannel; use std::{ any::TypeId, collections::HashMap, @@ -41,6 +39,7 @@ use std::{ use telemetry::Telemetry; use thiserror::Error; use url::Url; +use util::channel::ReleaseChannel; use util::{ResultExt, TryFutureExt}; pub use rpc::*; @@ -1218,8 +1217,8 @@ impl Client { self.peer.respond_with_error(receipt, error) } - pub fn start_telemetry(&self, db: Db) { - self.telemetry.start(db.clone()); + pub fn start_telemetry(&self) { + self.telemetry.start(); } pub fn report_event(&self, kind: &str, properties: Value) { diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index 16a7c1cc82e461413c59f9e337381d2f8e0c2bf1..0ce1a07f1b2fd231315c18a999670391405c1fad 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -1,5 +1,5 @@ use crate::http::HttpClient; -use db::{kvp::KeyValue, Db}; +use db::kvp::KEY_VALUE_STORE; use gpui::{ executor::Background, serde_json::{self, value::Map, Value}, @@ -10,7 +10,6 @@ use lazy_static::lazy_static; use parking_lot::Mutex; use serde::Serialize; use serde_json::json; -use settings::ReleaseChannel; use std::{ io::Write, mem, @@ -19,7 +18,7 @@ use std::{ time::{Duration, SystemTime, UNIX_EPOCH}, }; use tempfile::NamedTempFile; -use util::{post_inc, ResultExt, TryFutureExt}; +use util::{channel::ReleaseChannel, post_inc, ResultExt, TryFutureExt}; use uuid::Uuid; pub struct Telemetry { @@ -148,18 +147,19 @@ impl Telemetry { Some(self.state.lock().log_file.as_ref()?.path().to_path_buf()) } - pub fn start(self: &Arc, db: Db) { + pub fn start(self: &Arc) { let this = self.clone(); self.executor .spawn( async move { - let device_id = if let Ok(Some(device_id)) = db.read_kvp("device_id") { - device_id - } else { - let device_id = Uuid::new_v4().to_string(); - 
db.write_kvp("device_id", &device_id)?; - device_id - }; + let device_id = + if let Ok(Some(device_id)) = KEY_VALUE_STORE.read_kvp("device_id") { + device_id + } else { + let device_id = Uuid::new_v4().to_string(); + KEY_VALUE_STORE.write_kvp("device_id", &device_id)?; + device_id + }; let device_id: Arc = device_id.into(); let mut state = this.state.lock(); diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index 1722d3374a4f223e43aa3d2331a224f4327c61ba..09f379526eec23d44f2057e48b2fb7d7b27e2d17 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -18,7 +18,6 @@ live_kit_server = { path = "../live_kit_server" } rpc = { path = "../rpc" } util = { path = "../util" } -db = { path = "../db" } anyhow = "1.0.40" async-trait = "0.1.50" async-tungstenite = "0.16" diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index bfc14618eab756d15a790b6976a186ce5d7e5d26..ade4e102806c70252fbae4b7478019d22bcb02ee 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,6 +1,6 @@ use crate::{ - db::{Db, NewUserParams, ProjectId, UserId}, - rpc::{Executor, Server}, + db::{NewUserParams, ProjectId, TestDb, UserId}, + rpc::{Executor, Server, Store}, AppState, }; @@ -12,7 +12,6 @@ use client::{ User, UserStore, RECEIVE_TIMEOUT, }; use collections::{BTreeMap, HashMap, HashSet}; -use db as SqliteDb; use editor::{ self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Redo, Rename, ToOffset, ToggleCodeActions, Undo, @@ -5838,11 +5837,7 @@ impl TestServer { Project::init(&client); cx.update(|cx| { - workspace::init( - app_state.clone(), - cx, - SqliteDb::open_in_memory("integration tests"), - ); + workspace::init(app_state.clone(), cx); call::init(client.clone(), user_store.clone(), cx); }); diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index 
f2542c9bc89a509721ee648de2653ed92a69b5a3..5af23b45d720ecfad4ed9faa3dd777d1238f2022 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -320,7 +320,7 @@ mod tests { use super::*; use editor::Editor; use gpui::TestAppContext; - use project::{Db, Project}; + use project::Project; use workspace::{AppState, Workspace}; #[test] @@ -345,7 +345,7 @@ mod tests { cx.update(|cx| { editor::init(cx); - workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); + workspace::init(app_state.clone(), cx); init(cx); }); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 02fc51ee8d40c73592632ea708271942a8b491ae..56fc79f475b2f3bf64caf755189949804a6a41a6 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,11 +1,12 @@ pub mod kvp; use std::fs; -use std::ops::Deref; use std::path::Path; +#[cfg(any(test, feature = "test-support"))] use anyhow::Result; use indoc::indoc; +#[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; use sqlez::domain::Domain; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -17,47 +18,29 @@ const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA case_sensitive_like=TRUE; "}; -#[derive(Clone)] -pub struct Db(ThreadSafeConnection); - -impl Deref for Db { - type Target = sqlez::connection::Connection; - - fn deref(&self) -> &Self::Target { - &self.0.deref() - } +/// Open or create a database at the given directory path. +pub fn open_file_db() -> ThreadSafeConnection { + // Use 0 for now. 
Will implement incrementing and clearing of old db files soon TM + let current_db_dir = (*util::paths::DB_DIR).join(Path::new(&format!( + "0-{}", + *util::channel::RELEASE_CHANNEL_NAME + ))); + fs::create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); + let db_path = current_db_dir.join(Path::new("db.sqlite")); + + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + .with_initialize_query(INITIALIZE_QUERY) } -impl Db { - /// Open or create a database at the given directory path. - pub fn open(db_dir: &Path, channel: &'static str) -> Self { - // Use 0 for now. Will implement incrementing and clearing of old db files soon TM - let current_db_dir = db_dir.join(Path::new(&format!("0-{}", channel))); - fs::create_dir_all(¤t_db_dir) - .expect("Should be able to create the database directory"); - let db_path = current_db_dir.join(Path::new("db.sqlite")); - - Db( - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(INITIALIZE_QUERY), - ) - } - - /// Open a in memory database for testing and as a fallback. 
- pub fn open_in_memory(db_name: &str) -> Self { - Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY)) - } - - pub fn persisting(&self) -> bool { - self.persistent() - } - - pub fn write_to>(&self, dest: P) -> Result<()> { - let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - self.backup_main(&destination) - } +pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { + ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) +} - pub fn open_as(&self) -> Db { - Db(self.0.for_domain()) - } +#[cfg(any(test, feature = "test-support"))] +pub fn write_db_to>( + conn: &ThreadSafeConnection, + dest: P, +) -> Result<()> { + let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); + conn.backup_main(&destination) } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index c5c9c1c5b5e50782d9c8f9fc1c2249efa7ad7dee..1dd1cf69b7a626ec68dabfc7d08508f9e581099c 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,7 +1,11 @@ -use super::Db; use anyhow::Result; use indoc::indoc; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use sqlez::{ + connection::Connection, domain::Domain, migrations::Migration, + thread_safe_connection::ThreadSafeConnection, +}; +use std::ops::Deref; pub(crate) const KVP_MIGRATION: Migration = Migration::new( "kvp", @@ -13,16 +17,29 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new( "}], ); +lazy_static::lazy_static! 
{ + pub static ref KEY_VALUE_STORE: KeyValueStore = + KeyValueStore(crate::open_file_db()); +} + #[derive(Clone)] -pub enum KeyValue {} +pub struct KeyValueStore(ThreadSafeConnection); -impl Domain for KeyValue { +impl Domain for KeyValueStore { fn migrate(conn: &Connection) -> anyhow::Result<()> { KVP_MIGRATION.run(conn) } } -impl Db { +impl Deref for KeyValueStore { + type Target = ThreadSafeConnection; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl KeyValueStore { pub fn read_kvp(&self, key: &str) -> Result> { self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) } @@ -44,11 +61,11 @@ impl Db { mod tests { use anyhow::Result; - use super::*; + use crate::kvp::KeyValueStore; #[test] fn test_kvp() -> Result<()> { - let db = Db::open_in_memory("test_kvp"); + let db = KeyValueStore(crate::open_memory_db("test_kvp")); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/gpui/Cargo.toml b/crates/gpui/Cargo.toml index 54fe5e46a2f9068d5bb13b74435d749d9600b425..683e3bdfcd05f7794f326cafd1b20baea8462e99 100644 --- a/crates/gpui/Cargo.toml +++ b/crates/gpui/Cargo.toml @@ -17,6 +17,7 @@ collections = { path = "../collections" } gpui_macros = { path = "../gpui_macros" } util = { path = "../util" } sum_tree = { path = "../sum_tree" } +sqlez = { path = "../sqlez" } async-task = "4.0.3" backtrace = { version = "0.3", optional = true } ctor = "0.1" diff --git a/crates/gpui/grammars/context-predicate/bindings/node/binding.cc b/crates/gpui/grammars/context-predicate/bindings/node/binding.cc index 9a3df4b02803da5b13253c55325dbd9f4a05b9b7..1264f491009a4cbf9bc11e3d31c311ca0e653fd8 100644 --- a/crates/gpui/grammars/context-predicate/bindings/node/binding.cc +++ b/crates/gpui/grammars/context-predicate/bindings/node/binding.cc @@ -1,10 +1,10 @@ +#include "nan.h" #include "tree_sitter/parser.h" #include -#include "nan.h" using namespace v8; -extern "C" TSLanguage * tree_sitter_context_predicate(); +extern "C" TSLanguage 
*tree_sitter_context_predicate(); namespace { @@ -16,13 +16,15 @@ void Init(Local exports, Local module) { tpl->InstanceTemplate()->SetInternalFieldCount(1); Local constructor = Nan::GetFunction(tpl).ToLocalChecked(); - Local instance = constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked(); + Local instance = + constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked(); Nan::SetInternalFieldPointer(instance, 0, tree_sitter_context_predicate()); - Nan::Set(instance, Nan::New("name").ToLocalChecked(), Nan::New("context_predicate").ToLocalChecked()); + Nan::Set(instance, Nan::New("name").ToLocalChecked(), + Nan::New("context_predicate").ToLocalChecked()); Nan::Set(module, Nan::New("exports").ToLocalChecked(), instance); } NODE_MODULE(tree_sitter_context_predicate_binding, Init) -} // namespace +} // namespace diff --git a/crates/gpui/src/presenter.rs b/crates/gpui/src/presenter.rs index 27cd2a1347cffd2e192f34885cf17a112fcddd3f..eb7554a39cada1790dc856b1bd09a79829335d1a 100644 --- a/crates/gpui/src/presenter.rs +++ b/crates/gpui/src/presenter.rs @@ -17,10 +17,15 @@ use crate::{ SceneBuilder, UpgradeModelHandle, UpgradeViewHandle, View, ViewHandle, WeakModelHandle, WeakViewHandle, }; +use anyhow::bail; use collections::{HashMap, HashSet}; use pathfinder_geometry::vector::{vec2f, Vector2F}; use serde_json::json; use smallvec::SmallVec; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; use std::{ marker::PhantomData, ops::{Deref, DerefMut, Range}, @@ -895,6 +900,31 @@ impl ToJson for Axis { } } +impl Bind for Axis { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + Axis::Horizontal => "Horizontal", + Axis::Vertical => "Vertical", + } + .bind(statement, start_index) + } +} + +impl Column for Axis { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(axis_text, next_index)| { + Ok(( + match 
axis_text.as_str() { + "Horizontal" => Axis::Horizontal, + "Vertical" => Axis::Vertical, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } +} + pub trait Vector2FExt { fn along(self, axis: Axis) -> f32; } diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index d1d8c96ce2d2e970cdb96f564680b05e064b5aab..94558fee3e27239c8c8957f2f6a25bd58307f420 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -63,7 +63,6 @@ use std::{ use thiserror::Error; use util::{defer, post_inc, ResultExt, TryFutureExt as _}; -pub use db::{kvp::KeyValue, Db}; pub use fs::*; pub use worktree::*; diff --git a/crates/settings/Cargo.toml b/crates/settings/Cargo.toml index ad184ad313fa134bcbbfcfdbf0fb550c014e1d82..a292358e7501bc2ca8e820d7c5696997bfe5993b 100644 --- a/crates/settings/Cargo.toml +++ b/crates/settings/Cargo.toml @@ -14,6 +14,7 @@ test-support = [] assets = { path = "../assets" } collections = { path = "../collections" } gpui = { path = "../gpui" } +sqlez = { path = "../sqlez" } fs = { path = "../fs" } anyhow = "1.0.38" futures = "0.3" diff --git a/crates/settings/src/settings.rs b/crates/settings/src/settings.rs index cb83c2c37086ecc9cda3b1b765f29739a4795cea..5137751579e90fc52aa8efbd0f049975fd79cc54 100644 --- a/crates/settings/src/settings.rs +++ b/crates/settings/src/settings.rs @@ -2,7 +2,7 @@ mod keymap_file; pub mod settings_file; pub mod watched_json; -use anyhow::Result; +use anyhow::{bail, Result}; use gpui::{ font_cache::{FamilyId, FontCache}, AssetSource, @@ -14,6 +14,10 @@ use schemars::{ }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use serde_json::Value; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; use std::{collections::HashMap, fmt::Write as _, num::NonZeroU32, str, sync::Arc}; use theme::{Theme, ThemeRegistry}; use tree_sitter::Query; @@ -55,24 +59,6 @@ pub struct FeatureFlags { pub experimental_themes: bool, } -#[derive(Copy, Clone, 
PartialEq, Eq, Default)] -pub enum ReleaseChannel { - #[default] - Dev, - Preview, - Stable, -} - -impl ReleaseChannel { - pub fn name(&self) -> &'static str { - match self { - ReleaseChannel::Dev => "Zed Dev", - ReleaseChannel::Preview => "Zed Preview", - ReleaseChannel::Stable => "Zed", - } - } -} - impl FeatureFlags { pub fn keymap_files(&self) -> Vec<&'static str> { vec![] @@ -244,6 +230,33 @@ pub enum DockAnchor { Expanded, } +impl Bind for DockAnchor { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + DockAnchor::Bottom => "Bottom", + DockAnchor::Right => "Right", + DockAnchor::Expanded => "Expanded", + } + .bind(statement, start_index) + } +} + +impl Column for DockAnchor { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(anchor_text, next_index)| { + Ok(( + match anchor_text.as_ref() { + "Bottom" => DockAnchor::Bottom, + "Right" => DockAnchor::Right, + "Expanded" => DockAnchor::Expanded, + _ => bail!("Stored dock anchor is incorrect"), + }, + next_index, + )) + }) + } +} + #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)] pub struct SettingsFileContent { pub experiments: Option, diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index e0b284e62829bd26c4f1a976e491594cb69ddccd..b04f5bb82ffe196e8a0440a7cbd56698908c9a83 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -45,8 +45,8 @@ impl<'a> Statement<'a> { let sql = CString::new(query.as_ref())?; let mut remaining_sql = sql.as_c_str(); while { - let remaining_sql_str = remaining_sql.to_str()?; - remaining_sql_str.trim() != ";" && !remaining_sql_str.is_empty() + let remaining_sql_str = remaining_sql.to_str()?.trim(); + remaining_sql_str != ";" && !remaining_sql_str.is_empty() } { let mut raw_statement = 0 as *mut sqlite3_stmt; let mut remaining_sql_ptr = ptr::null(); diff --git 
a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 1081101f6a9ec32cd3a66a93a5812b1884c00076..b9bb1657ea2491217d624e1982219343b37d1689 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -13,6 +13,9 @@ pub struct ThreadSafeConnection { _pd: PhantomData, } +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} + impl ThreadSafeConnection { pub fn new(uri: &str, persistent: bool) -> Self { Self { diff --git a/crates/util/src/channel.rs b/crates/util/src/channel.rs new file mode 100644 index 0000000000000000000000000000000000000000..ab5b53b4ab47a3b6b62fec513fb41b8c8631632b --- /dev/null +++ b/crates/util/src/channel.rs @@ -0,0 +1,32 @@ +use std::env; + +use lazy_static::lazy_static; + +lazy_static! { + pub static ref RELEASE_CHANNEL_NAME: String = env::var("ZED_RELEASE_CHANNEL") + .unwrap_or(include_str!("../../zed/RELEASE_CHANNEL").to_string()); + pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() { + "dev" => ReleaseChannel::Dev, + "preview" => ReleaseChannel::Preview, + "stable" => ReleaseChannel::Stable, + _ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME), + }; +} + +#[derive(Copy, Clone, PartialEq, Eq, Default)] +pub enum ReleaseChannel { + #[default] + Dev, + Preview, + Stable, +} + +impl ReleaseChannel { + pub fn name(&self) -> &'static str { + match self { + ReleaseChannel::Dev => "Zed Dev", + ReleaseChannel::Preview => "Zed Preview", + ReleaseChannel::Stable => "Zed", + } + } +} diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index b03bc21210f2d6f9f1685367d7b9a91a2b6d3904..78536f01d07fc9656e4246b33bc186d0412adf22 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -1,3 +1,4 @@ +pub mod channel; pub mod paths; #[cfg(any(test, feature = "test-support"))] pub mod test; diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 
f8bcba5eb7b5041975801eaa38cf5368a221f745..553479b175dcad89af41d438bf86e614cef2d486 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -36,6 +36,7 @@ util = { path = "../util" } bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" +lazy_static = "1.4" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" postage = { version = "0.4.1", features = ["futures-traits"] } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs new file mode 100644 index 0000000000000000000000000000000000000000..8a80dc5a76fb20a737c84c14a8c811baded6bd33 --- /dev/null +++ b/crates/workspace/src/persistence.rs @@ -0,0 +1,494 @@ +#![allow(dead_code)] + +pub mod model; + +use std::ops::Deref; +use std::path::{Path, PathBuf}; + +use anyhow::{bail, Context, Result}; +use db::open_file_db; +use gpui::Axis; +use indoc::indoc; +use lazy_static::lazy_static; + +use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; +use util::{iife, unzip_option, ResultExt}; + +use super::Workspace; + +use model::{ + GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + SerializedWorkspace, WorkspaceId, +}; + +lazy_static! { + pub static ref DB: WorkspaceDb = WorkspaceDb(open_file_db()); +} + +pub struct WorkspaceDb(ThreadSafeConnection); + +impl Deref for WorkspaceDb { + type Target = ThreadSafeConnection; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( + "workspace", + &[indoc! 
{" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_visible INTEGER, -- Boolean + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}], +); + +impl Domain for Workspace { + fn migrate(conn: &Connection) -> anyhow::Result<()> { + WORKSPACES_MIGRATION.run(&conn) + } +} + +impl WorkspaceDb { + /// Returns a serialized workspace for the given worktree_roots. If the passed array + /// is empty, the most recent workspace is returned instead. If no workspace for the + /// passed roots is stored, returns none. 
+ pub fn workspace_for_roots>( + &self, + worktree_roots: &[P], + ) -> Option { + let workspace_id: WorkspaceId = worktree_roots.into(); + + // Note that we re-assign the workspace_id here in case it's empty + // and we've grabbed the most recent workspace + let (workspace_id, dock_anchor, dock_visible) = iife!({ + if worktree_roots.len() == 0 { + self.select_row(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + ORDER BY timestamp DESC LIMIT 1"})?()? + } else { + self.select_row_bound(indoc! {" + SELECT workspace_id, dock_anchor, dock_visible + FROM workspaces + WHERE workspace_id = ?"})?(&workspace_id)? + } + .context("No workspaces found") + }) + .warn_on_err() + .flatten()?; + + Some(SerializedWorkspace { + dock_pane: self + .get_dock_pane(&workspace_id) + .context("Getting dock pane") + .log_err()?, + center_group: self + .get_center_pane_group(&workspace_id) + .context("Getting center group") + .log_err()?, + dock_anchor, + dock_visible, + }) + } + + /// Saves a workspace using the worktree roots. Will garbage collect any workspaces + /// that used this workspace previously + pub fn save_workspace>( + &self, + worktree_roots: &[P], + old_roots: Option<&[P]>, + workspace: &SerializedWorkspace, + ) { + let workspace_id: WorkspaceId = worktree_roots.into(); + + self.with_savepoint("update_worktrees", || { + if let Some(old_roots) = old_roots { + let old_id: WorkspaceId = old_roots.into(); + + self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + } + + // Delete any previous workspaces with the same roots. This cascades to all + // other tables that are based on the same roots set. 
+ // Insert new workspace into workspaces table if none were found + self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; + + self.exec_bound( + "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", + )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; + + // Save center pane group and dock pane + self.save_pane_group(&workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + + Ok(()) + }) + .with_context(|| { + format!( + "Update workspace with roots {:?}", + worktree_roots + .iter() + .map(|p| p.as_ref()) + .collect::>() + ) + }) + .log_err(); + } + + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots + pub fn recent_workspaces(&self, limit: usize) -> Vec> { + iife!({ + // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html + Ok::<_, anyhow::Error>( + self.select_bound::( + "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + )?(limit)? + .into_iter() + .map(|id| id.paths()) + .collect::>>(), + ) + }) + .log_err() + .unwrap_or_default() + } + + pub(crate) fn get_center_pane_group( + &self, + workspace_id: &WorkspaceId, + ) -> Result { + self.get_pane_group_children(workspace_id, None)? + .into_iter() + .next() + .context("No center pane group") + } + + fn get_pane_group_children<'a>( + &self, + workspace_id: &WorkspaceId, + group_id: Option, + ) -> Result> { + self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" + SELECT group_id, axis, pane_id + FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id + FROM pane_groups + UNION + SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id + FROM panes + -- Remove the dock panes from the union + WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + WHERE parent_group_id IS ? AND workspace_id = ? 
+ ORDER BY position + "})?((group_id, workspace_id))? + .into_iter() + .map(|(group_id, axis, pane_id)| { + if let Some((group_id, axis)) = group_id.zip(axis) { + Ok(SerializedPaneGroup::Group { + axis, + children: self.get_pane_group_children( + workspace_id, + Some(group_id), + )?, + }) + } else if let Some(pane_id) = pane_id { + Ok(SerializedPaneGroup::Pane(SerializedPane { + children: self.get_items( pane_id)?, + })) + } else { + bail!("Pane Group Child was neither a pane group or a pane"); + } + }) + .collect::>() + } + + pub(crate) fn save_pane_group( + &self, + workspace_id: &WorkspaceId, + pane_group: &SerializedPaneGroup, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { + bail!("Pane groups must have a SerializedPaneGroup::Group at the root") + } + + let (parent_id, position) = unzip_option(parent); + + match pane_group { + SerializedPaneGroup::Group { axis, children } => { + let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? + ((workspace_id, parent_id, position, *axis))?; + + for (position, group) in children.iter().enumerate() { + self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + } + Ok(()) + } + SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + } + } + + pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + let pane_id = self.select_row_bound(indoc! {" + SELECT pane_id FROM panes + WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + workspace_id, + )? 
+ .context("No dock pane for workspace")?; + + Ok(SerializedPane::new( + self.get_items(pane_id).context("Reading items")?, + )) + } + + pub(crate) fn save_pane( + &self, + workspace_id: &WorkspaceId, + pane: &SerializedPane, + parent: Option<(GroupId, usize)>, + ) -> Result<()> { + let (parent_id, order) = unzip_option(parent); + + let pane_id = self.insert_bound( + "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", + )?((workspace_id, parent_id, order))?; + + self.save_items(workspace_id, pane_id, &pane.children) + .context("Saving items") + } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + Ok(self.select_bound(indoc! {" + SELECT item_id, kind FROM items + WHERE pane_id = ? + ORDER BY position"})?(pane_id)? + .into_iter() + .map(|(item_id, kind)| match kind { + SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, + _ => unimplemented!(), + }) + .collect()) + } + + pub(crate) fn save_items( + &self, + workspace_id: &WorkspaceId, + pane_id: PaneId, + items: &[SerializedItem], + ) -> Result<()> { + let mut delete_old = self + .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") + .context("Preparing deletion")?; + let mut insert_new = self.exec_bound( + "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + ).context("Preparing insertion")?; + for (position, item) in items.iter().enumerate() { + delete_old((workspace_id, pane_id, item.item_id()))?; + insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use db::open_memory_db; + use settings::DockAnchor; + + use super::*; + + #[test] + fn test_workspace_assignment() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + + let workspace_1 = SerializedWorkspace { + dock_anchor: DockAnchor::Bottom, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_2 = SerializedWorkspace { + dock_anchor: DockAnchor::Expanded, + dock_visible: false, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let workspace_3 = SerializedWorkspace { + dock_anchor: DockAnchor::Right, + dock_visible: true, + center_group: Default::default(), + dock_pane: Default::default(), + }; + + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); + db.save_workspace(&["/tmp"], None, &workspace_2); + + db::write_db_to(&db, "test.db").unwrap(); + + // Test that paths are treated as a set + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_1 + ); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(), + workspace_1 + ); + + // Make sure that other keys work + assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2); + assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); + + // Test 'mutate' case of updating a pre-existing id + db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_2 + 
); + + // Test other mechanism for mutating + db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + assert_eq!( + db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), + workspace_3 + ); + + // Make sure that updating paths differently also works + db.save_workspace( + &["/tmp3", "/tmp4", "/tmp2"], + Some(&["/tmp", "/tmp2"]), + &workspace_3, + ); + assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); + assert_eq!( + db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) + .unwrap(), + workspace_3 + ); + } + + use crate::persistence::model::SerializedWorkspace; + use crate::persistence::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; + + fn default_workspace( + dock_pane: SerializedPane, + center_group: &SerializedPaneGroup, + ) -> SerializedWorkspace { + SerializedWorkspace { + dock_anchor: DockAnchor::Right, + dock_visible: false, + center_group: center_group.clone(), + dock_pane, + } + } + + #[test] + fn test_basic_dock_pane() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + + let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 2 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }; + + let workspace = default_workspace(dock_pane, &Default::default()); + + db.save_workspace(&["/tmp"], None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + assert_eq!(workspace.dock_pane, new_workspace.dock_pane); + } + + #[test] + fn test_simple_split() { + // env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("simple_split")); + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_pane = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + 
SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 1 }, + SerializedItem::Terminal { item_id: 2 }, + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 4 }, + SerializedItem::Terminal { item_id: 3 }, + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::Terminal { item_id: 5 }, + SerializedItem::Terminal { item_id: 6 }, + ], + }), + ], + }; + + let workspace = default_workspace(Default::default(), ¢er_pane); + + db.save_workspace(&["/tmp"], None, &workspace); + + assert_eq!(workspace.center_group, center_pane); + } +} diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs new file mode 100644 index 0000000000000000000000000000000000000000..824f649f9835db10da8ff10d5269c862082329cb --- /dev/null +++ b/crates/workspace/src/persistence/model.rs @@ -0,0 +1,188 @@ +use std::{ + path::{Path, PathBuf}, + sync::Arc, +}; + +use anyhow::{bail, Result}; + +use gpui::Axis; +use settings::DockAnchor; +use sqlez::{ + bindable::{Bind, Column}, + statement::Statement, +}; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct WorkspaceId(Vec); + +impl WorkspaceId { + pub fn paths(self) -> Vec { + self.0 + } +} + +impl, T: IntoIterator> From for WorkspaceId { + fn from(iterator: T) -> Self { + let mut roots = iterator + .into_iter() + .map(|p| p.as_ref().to_path_buf()) + .collect::>(); + roots.sort(); + Self(roots) + } +} + +impl Bind for &WorkspaceId { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + bincode::serialize(&self.0) + .expect("Bincode serialization of paths should not fail") + .bind(statement, start_index) + } +} + +impl Column for WorkspaceId { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + } +} 
+ +#[derive(Debug, PartialEq, Eq)] +pub struct SerializedWorkspace { + pub dock_anchor: DockAnchor, + pub dock_visible: bool, + pub center_group: SerializedPaneGroup, + pub dock_pane: SerializedPane, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedPaneGroup { + Group { + axis: Axis, + children: Vec, + }, + Pane(SerializedPane), +} + +impl Default for SerializedPaneGroup { + fn default() -> Self { + Self::Group { + axis: Axis::Horizontal, + children: vec![Self::Pane(Default::default())], + } + } +} + +#[derive(Debug, PartialEq, Eq, Default, Clone)] +pub struct SerializedPane { + pub(crate) children: Vec, +} + +impl SerializedPane { + pub fn new(children: Vec) -> Self { + SerializedPane { children } + } +} + +pub type GroupId = i64; +pub type PaneId = i64; +pub type ItemId = usize; + +pub(crate) enum SerializedItemKind { + Editor, + Diagnostics, + ProjectSearch, + Terminal, +} + +impl Bind for SerializedItemKind { + fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { + match self { + SerializedItemKind::Editor => "Editor", + SerializedItemKind::Diagnostics => "Diagnostics", + SerializedItemKind::ProjectSearch => "ProjectSearch", + SerializedItemKind::Terminal => "Terminal", + } + .bind(statement, start_index) + } +} + +impl Column for SerializedItemKind { + fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { + String::column(statement, start_index).and_then(|(kind_text, next_index)| { + Ok(( + match kind_text.as_ref() { + "Editor" => SerializedItemKind::Editor, + "Diagnostics" => SerializedItemKind::Diagnostics, + "ProjectSearch" => SerializedItemKind::ProjectSearch, + "Terminal" => SerializedItemKind::Terminal, + _ => bail!("Stored serialized item kind is incorrect"), + }, + next_index, + )) + }) + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum SerializedItem { + Editor { item_id: usize, path: Arc }, + Diagnostics { item_id: usize }, + ProjectSearch { item_id: usize, query: String 
}, + Terminal { item_id: usize }, +} + +impl SerializedItem { + pub fn item_id(&self) -> usize { + match self { + SerializedItem::Editor { item_id, .. } => *item_id, + SerializedItem::Diagnostics { item_id } => *item_id, + SerializedItem::ProjectSearch { item_id, .. } => *item_id, + SerializedItem::Terminal { item_id } => *item_id, + } + } + + pub(crate) fn kind(&self) -> SerializedItemKind { + match self { + SerializedItem::Editor { .. } => SerializedItemKind::Editor, + SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, + SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, + SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, + } + } +} + +#[cfg(test)] +mod tests { + use sqlez::connection::Connection; + + use crate::persistence::model::DockAnchor; + + use super::WorkspaceId; + + #[test] + fn test_workspace_round_trips() { + let db = Connection::open_memory("workspace_id_round_trips"); + + db.exec(indoc::indoc! {" + CREATE TABLE workspace_id_test( + workspace_id BLOB, + dock_anchor TEXT + );"}) + .unwrap()() + .unwrap(); + + let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + + db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") + .unwrap()((&workspace_id, DockAnchor::Bottom)) + .unwrap(); + + assert_eq!( + db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") + .unwrap()() + .unwrap(), + Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + ); + } +} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 39843859c0319e20120b503addb08f446209dd96..085d9e2eb2d6c007cb759f981f32e3f374e7c135 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -5,19 +5,18 @@ pub mod dock; pub mod pane; pub mod pane_group; +mod persistence; pub mod searchable; pub mod shared_screen; pub mod sidebar; mod status_bar; mod toolbar; -mod workspace_db; -use 
crate::workspace_db::model::SerializedWorkspace; +use crate::persistence::model::SerializedWorkspace; use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::{kvp::KeyValue, Db}; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -165,9 +164,7 @@ impl_internal_actions!( ); impl_actions!(workspace, [ActivatePane]); -pub fn init(app_state: Arc, cx: &mut MutableAppContext, db: Db) { - cx.set_global(db); - +pub fn init(app_state: Arc, cx: &mut MutableAppContext) { pane::init(cx); dock::init(cx); @@ -1291,12 +1288,8 @@ impl Workspace { } // Use the resolved worktree roots to get the serialized_db from the database - let serialized_workspace = cx.read(|cx| { - Workspace::workspace_for_roots( - cx.global::>(), - &Vec::from_iter(worktree_roots.into_iter())[..], - ) - }); + let serialized_workspace = persistence::DB + .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]); // Use the serialized workspace to construct the new window let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { diff --git a/crates/workspace/src/workspace_db.rs b/crates/workspace/src/workspace_db.rs deleted file mode 100644 index e896dd6c275c8f82438a5588fdced2aa6bd2333b..0000000000000000000000000000000000000000 --- a/crates/workspace/src/workspace_db.rs +++ /dev/null @@ -1,765 +0,0 @@ -use anyhow::{bail, Context, Result}; - -use db::Db; -use util::{iife, unzip_option, ResultExt}; - -use std::path::{Path, PathBuf}; - -use indoc::indoc; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; - -use super::Workspace; - -use self::model::{ - Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, - SerializedWorkspace, WorkspaceId, -}; - -// 1) Move all of this into Workspace crate -// 2) Deserialize items fully -// 3) Typed 
prepares (including how you expect to pull data out) -// 4) Investigate Tree column impls - -pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( - "workspace", - &[indoc! {" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER, -- Boolean - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - "}], -); - -pub(crate) const PANE_MIGRATIONS: Migration = Migration::new( - "pane", - &[indoc! {" - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - "}], -); - -pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new( - "item", - &[indoc! 
{" - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); - -impl Domain for Workspace { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - WORKSPACES_MIGRATION.run(&conn)?; - PANE_MIGRATIONS.run(&conn)?; - ITEM_MIGRATIONS.run(&conn) - } -} - -impl Workspace { - /// Returns a serialized workspace for the given worktree_roots. If the passed array - /// is empty, the most recent workspace is returned instead. If no workspace for the - /// passed roots is stored, returns none. - pub fn workspace_for_roots>( - db: &Db, - worktree_roots: &[P], - ) -> Option { - let workspace_id: WorkspaceId = worktree_roots.into(); - - // Note that we re-assign the workspace_id here in case it's empty - // and we've grabbed the most recent workspace - let (workspace_id, dock_anchor, dock_visible) = iife!({ - if worktree_roots.len() == 0 { - db.select_row(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible - FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})?()? - } else { - db.select_row_bound(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible - FROM workspaces - WHERE workspace_id = ?"})?(&workspace_id)? - } - .context("No workspaces found") - }) - .warn_on_err() - .flatten()?; - - Some(SerializedWorkspace { - dock_pane: Workspace::get_dock_pane(&db, &workspace_id) - .context("Getting dock pane") - .log_err()?, - center_group: Workspace::get_center_pane_group(&db, &workspace_id) - .context("Getting center group") - .log_err()?, - dock_anchor, - dock_visible, - }) - } - - /// Saves a workspace using the worktree roots. 
Will garbage collect any workspaces - /// that used this workspace previously - pub fn save_workspace>( - db: &Db, - worktree_roots: &[P], - old_roots: Option<&[P]>, - workspace: &SerializedWorkspace, - ) { - let workspace_id: WorkspaceId = worktree_roots.into(); - - db.with_savepoint("update_worktrees", || { - if let Some(old_roots) = old_roots { - let old_id: WorkspaceId = old_roots.into(); - - db.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; - } - - // Delete any previous workspaces with the same roots. This cascades to all - // other tables that are based on the same roots set. - // Insert new workspace into workspaces table if none were found - db.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - - db.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; - - // Save center pane group and dock pane - Workspace::save_pane_group(db, &workspace_id, &workspace.center_group, None)?; - Workspace::save_pane(db, &workspace_id, &workspace.dock_pane, None)?; - - Ok(()) - }) - .with_context(|| { - format!( - "Update workspace with roots {:?}", - worktree_roots - .iter() - .map(|p| p.as_ref()) - .collect::>() - ) - }) - .log_err(); - } - - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(conn: &Connection, limit: usize) -> Vec> { - iife!({ - // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html - Ok::<_, anyhow::Error>( - conn.select_bound::( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?(limit)? - .into_iter() - .map(|id| id.paths()) - .collect::>>(), - ) - }) - .log_err() - .unwrap_or_default() - } - - pub(crate) fn get_center_pane_group( - db: &Db, - workspace_id: &WorkspaceId, - ) -> Result { - Workspace::get_pane_group_children(&db, workspace_id, None)? 
- .into_iter() - .next() - .context("No center pane group") - } - - fn get_pane_group_children<'a>( - db: &Db, - workspace_id: &WorkspaceId, - group_id: Option, - ) -> Result> { - db.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! {" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) - WHERE parent_group_id IS ? AND workspace_id = ? - ORDER BY position - "})?((group_id, workspace_id))? - .into_iter() - .map(|(group_id, axis, pane_id)| { - if let Some((group_id, axis)) = group_id.zip(axis) { - Ok(SerializedPaneGroup::Group { - axis, - children: Workspace::get_pane_group_children( - db, - workspace_id, - Some(group_id), - )?, - }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: Workspace::get_items(db, pane_id)?, - })) - } else { - bail!("Pane Group Child was neither a pane group or a pane"); - } - }) - .collect::>() - } - - pub(crate) fn save_pane_group( - db: &Db, - workspace_id: &WorkspaceId, - pane_group: &SerializedPaneGroup, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } - - let (parent_id, position) = unzip_option(parent); - - match pane_group { - SerializedPaneGroup::Group { axis, children } => { - let parent_id = db.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; - - for (position, group) in children.iter().enumerate() { - Workspace::save_pane_group( - db, - workspace_id, - group, - Some((parent_id, position)), - )? 
- } - Ok(()) - } - SerializedPaneGroup::Pane(pane) => Workspace::save_pane(db, workspace_id, pane, parent), - } - } - - pub(crate) fn get_dock_pane( - db: &Db, - workspace_id: &WorkspaceId, - ) -> Result { - let pane_id = db.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( - workspace_id, - )? - .context("No dock pane for workspace")?; - - Ok(SerializedPane::new( - Workspace::get_items(db, pane_id).context("Reading items")?, - )) - } - - pub(crate) fn save_pane( - db: &Db, - workspace_id: &WorkspaceId, - pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - - let pane_id = db.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; - - Workspace::save_items(db, workspace_id, pane_id, &pane.children).context("Saving items") - } - - pub(crate) fn get_items(db: &Db, pane_id: PaneId) -> Result> { - Ok(db.select_bound(indoc! {" - SELECT item_id, kind FROM items - WHERE pane_id = ? - ORDER BY position"})?(pane_id)? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) - } - - pub(crate) fn save_items( - db: &Db, - workspace_id: &WorkspaceId, - pane_id: PaneId, - items: &[SerializedItem], - ) -> Result<()> { - let mut delete_old = db - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = db.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; - for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use crate::workspace_db::model::DockAnchor::{Bottom, Expanded, Right}; - use crate::{Db, Workspace}; - - #[test] - fn test_workspace_assignment() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("test_basic_functionality"); - - let workspace_1 = SerializedWorkspace { - dock_anchor: Bottom, - dock_visible: true, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - let workspace_2 = SerializedWorkspace { - dock_anchor: Expanded, - dock_visible: false, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - let workspace_3 = SerializedWorkspace { - dock_anchor: Right, - dock_visible: true, - center_group: Default::default(), - dock_pane: Default::default(), - }; - - Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_1); - Workspace::save_workspace(&db, &["/tmp"], None, &workspace_2); - - db.write_to("test.db").unwrap(); - - // Test that paths are treated as a set - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_1 - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp"]).unwrap(), - workspace_1 - ); - - // Make sure that other keys work - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(), - workspace_2 - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp3", "/tmp2", "/tmp4"]), - None - ); - - // Test 'mutate' case of updating a pre-existing id - Workspace::save_workspace( - &db, - &["/tmp", "/tmp2"], - Some(&["/tmp", "/tmp2"]), - 
&workspace_2, - ); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_2 - ); - - // Test other mechanism for mutating - Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_3); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(), - workspace_3 - ); - - // Make sure that updating paths differently also works - Workspace::save_workspace( - &db, - &["/tmp3", "/tmp4", "/tmp2"], - Some(&["/tmp", "/tmp2"]), - &workspace_3, - ); - assert_eq!(Workspace::workspace_for_roots(&db, &["/tmp2", "tmp"]), None); - assert_eq!( - Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp3", "/tmp4"]).unwrap(), - workspace_3 - ); - } - - use crate::workspace_db::model::SerializedWorkspace; - use crate::workspace_db::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; - - fn default_workspace( - dock_pane: SerializedPane, - center_group: &SerializedPaneGroup, - ) -> SerializedWorkspace { - SerializedWorkspace { - dock_anchor: crate::workspace_db::model::DockAnchor::Right, - dock_visible: false, - center_group: center_group.clone(), - dock_pane, - } - } - - #[test] - fn test_basic_dock_pane() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("basic_dock_pane"); - - let dock_pane = crate::workspace_db::model::SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }; - - let workspace = default_workspace(dock_pane, &Default::default()); - - Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - - let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(); - - assert_eq!(workspace.dock_pane, new_workspace.dock_pane); - } - - #[test] - fn test_simple_split() { - // env_logger::try_init().ok(); - - let db = Db::open_in_memory("simple_split"); - - // ----------------- - // | 1,2 | 5,6 | - // | - - - | | 
- // | 3,4 | | - // ----------------- - let center_pane = SerializedPaneGroup::Group { - axis: crate::workspace_db::model::Axis::Horizontal, - children: vec![ - SerializedPaneGroup::Group { - axis: crate::workspace_db::model::Axis::Vertical, - children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, - ], - }), - ], - }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, - ], - }), - ], - }; - - let workspace = default_workspace(Default::default(), ¢er_pane); - - Workspace::save_workspace(&db, &["/tmp"], None, &workspace); - - assert_eq!(workspace.center_group, center_pane); - } -} - -pub mod model { - use std::{ - path::{Path, PathBuf}, - sync::Arc, - }; - - use anyhow::{bail, Result}; - - use sqlez::{ - bindable::{Bind, Column}, - statement::Statement, - }; - - #[derive(Debug, Clone, PartialEq, Eq)] - pub(crate) struct WorkspaceId(Vec); - - impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 - } - } - - impl, T: IntoIterator> From for WorkspaceId { - fn from(iterator: T) -> Self { - let mut roots = iterator - .into_iter() - .map(|p| p.as_ref().to_path_buf()) - .collect::>(); - roots.sort(); - Self(roots) - } - } - - impl Bind for &WorkspaceId { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - bincode::serialize(&self.0) - .expect("Bincode serialization of paths should not fail") - .bind(statement, start_index) - } - } - - impl Column for WorkspaceId { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) - } - } - - #[derive(Default, 
Debug, PartialEq, Eq, Clone, Copy)] - pub enum DockAnchor { - #[default] - Bottom, - Right, - Expanded, - } - - impl Bind for DockAnchor { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - DockAnchor::Bottom => "Bottom", - DockAnchor::Right => "Right", - DockAnchor::Expanded => "Expanded", - } - .bind(statement, start_index) - } - } - - impl Column for DockAnchor { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(anchor_text, next_index)| { - Ok(( - match anchor_text.as_ref() { - "Bottom" => DockAnchor::Bottom, - "Right" => DockAnchor::Right, - "Expanded" => DockAnchor::Expanded, - _ => bail!("Stored dock anchor is incorrect"), - }, - next_index, - )) - }) - } - } - - #[derive(Debug, PartialEq, Eq)] - pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, - pub center_group: SerializedPaneGroup, - pub dock_pane: SerializedPane, - } - - #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)] - pub enum Axis { - #[default] - Horizontal, - Vertical, - } - - impl Bind for Axis { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - Axis::Horizontal => "Horizontal", - Axis::Vertical => "Vertical", - } - .bind(statement, start_index) - } - } - - impl Column for Axis { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(axis_text, next_index)| { - Ok(( - match axis_text.as_str() { - "Horizontal" => Axis::Horizontal, - "Vertical" => Axis::Vertical, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } - } - - #[derive(Debug, PartialEq, Eq, Clone)] - pub enum SerializedPaneGroup { - Group { - axis: Axis, - children: Vec, - }, - Pane(SerializedPane), - } - - // Dock panes, and grouped panes combined? 
- // AND we're collapsing PaneGroup::Pane - // In the case where - - impl Default for SerializedPaneGroup { - fn default() -> Self { - Self::Group { - axis: Axis::Horizontal, - children: vec![Self::Pane(Default::default())], - } - } - } - - #[derive(Debug, PartialEq, Eq, Default, Clone)] - pub struct SerializedPane { - pub(crate) children: Vec, - } - - impl SerializedPane { - pub fn new(children: Vec) -> Self { - SerializedPane { children } - } - } - - pub type GroupId = i64; - pub type PaneId = i64; - pub type ItemId = usize; - - pub(crate) enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, - } - - impl Bind for SerializedItemKind { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - SerializedItemKind::Editor => "Editor", - SerializedItemKind::Diagnostics => "Diagnostics", - SerializedItemKind::ProjectSearch => "ProjectSearch", - SerializedItemKind::Terminal => "Terminal", - } - .bind(statement, start_index) - } - } - - impl Column for SerializedItemKind { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(kind_text, next_index)| { - Ok(( - match kind_text.as_ref() { - "Editor" => SerializedItemKind::Editor, - "Diagnostics" => SerializedItemKind::Diagnostics, - "ProjectSearch" => SerializedItemKind::ProjectSearch, - "Terminal" => SerializedItemKind::Terminal, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) - } - } - - #[derive(Debug, PartialEq, Eq, Clone)] - pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - Terminal { item_id: usize }, - } - - impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. 
} => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. } => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } - - pub(crate) fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor { .. } => SerializedItemKind::Editor, - SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, - SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, - SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, - } - } - } - - #[cfg(test)] - mod tests { - use sqlez::connection::Connection; - - use crate::workspace_db::model::DockAnchor; - - use super::WorkspaceId; - - #[test] - fn test_workspace_round_trips() { - let db = Connection::open_memory("workspace_id_round_trips"); - - db.exec(indoc::indoc! {" - CREATE TABLE workspace_id_test( - workspace_id BLOB, - dock_anchor TEXT - );"}) - .unwrap()() - .unwrap(); - - let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); - - db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") - .unwrap()((&workspace_id, DockAnchor::Bottom)) - .unwrap(); - - assert_eq!( - db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") - .unwrap()() - .unwrap(), - Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) - ); - } - } -} diff --git a/crates/workspace/test.db b/crates/workspace/test.db new file mode 100644 index 0000000000000000000000000000000000000000..7491ccde3ad6a8c23785f69b92b679e622b1e62c GIT binary patch literal 32768 zcmeI)&2QUe90zdQXKAW7X4ktQQ?CUy#5V_K_?o!QP- zxS*-L2*DAF6X#v|50G}^!X1gj{uQ3*ar~B~$~4jg-PcM}zdnAR&+oD4-_tDJ&zh!Z zkS}d#sC#5vct;RL;bTGsK}gY`9Q_F#HU@%x^qKRPw&qt4LfZ`pcBt!gUK zsik&G<9F?kXBnGtzNUaK{hvuc5J$YzPw|D?^tlj6VrG~%)th6 zZMxduY}pDsh`to1Xb~m zvS~|9+nTK@-@MoEsEz#=`BZ(l9#3RJIl>(yO# zrp!sD6UDK0?s~Xoej)ceje~0EA+Oj3dn2j{B`V5A($zYRx>n3)@*n0!feILB?&DLs 
zdtV=WHec@Zp!G}qSMFR+^FV*+@<8*N@V87N(0&blzbW-ty*Bs`A*oP;Co!me5ROnvekR^xHZ(+R9k)CMHOkDkTy6Zu6i)1#uLEO&A zE7|<5TjCF;Fc_1=8Np;WJrkJV6x=8{PuRVe9v?!xk$I?l2D_eV@)JeNvFQbD*@Feq zdes8-GkRM{Ucu~UqFUqOJwgf(49BDOI4zJZQuxGn%&%?Bql=s&9-?@MOvESLu<$az z+_*pcM#|>b*2MD$k9$+slixGBGOe9A)2_B?yY z9XsaHvB&4sGur6}#=!UFhd(i0lm2TEbS+*)&9+Ys-Qwt;IW*{fWH|DpwyW+{drghh zdmVb;)cRWEK<#SPgZtqfIlNS5$BB>NVik1)rA8C{@?$f3(8;0^VhEfj)VXN zAOHafKmY;|fB*y_009U::open(&*paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str()) - }); - load_embedded_fonts(&app); let fs = Arc::new(RealFs); @@ -147,10 +140,8 @@ fn main() { .detach(); let project_store = cx.add_model(|_| ProjectStore::new()); - let db = cx.background().block(db); - cx.set_global(db); - client.start_telemetry(cx.global::>().clone()); + client.start_telemetry(); client.report_event("start app", Default::default()); let app_state = Arc::new(AppState { @@ -164,16 +155,9 @@ fn main() { initialize_workspace, default_item_factory, }); - auto_update::init( - cx.global::>().clone(), - http, - client::ZED_SERVER_URL.clone(), - cx, - ); - - let workspace_db = cx.global::>().open_as::(); + auto_update::init(http, client::ZED_SERVER_URL.clone(), cx); - workspace::init(app_state.clone(), cx, workspace_db); + workspace::init(app_state.clone(), cx); journal::init(app_state.clone(), cx); theme_selector::init(app_state.clone(), cx); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index a8ec71bd4b5fa918ddb568c6350471b0be5c8756..6b6b65ab3249295163166195e78fd9ccc0ea90c8 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -12,7 +12,6 @@ use collab_ui::{CollabTitlebarItem, ToggleCollaborationMenu}; use collections::VecDeque; pub use editor; use editor::{Editor, MultiBuffer}; -use lazy_static::lazy_static; use gpui::{ actions, @@ -28,9 +27,9 @@ use project_panel::ProjectPanel; use search::{BufferSearchBar, ProjectSearchBar}; use serde::Deserialize; use serde_json::to_string_pretty; -use settings::{keymap_file_json_schema, settings_file_json_schema, 
ReleaseChannel, Settings}; +use settings::{keymap_file_json_schema, settings_file_json_schema, Settings}; use std::{env, path::Path, str, sync::Arc}; -use util::{paths, ResultExt}; +use util::{channel::ReleaseChannel, paths, ResultExt}; pub use workspace; use workspace::{sidebar::SidebarSide, AppState, Workspace}; @@ -69,17 +68,6 @@ actions!( const MIN_FONT_SIZE: f32 = 6.0; -lazy_static! { - pub static ref RELEASE_CHANNEL_NAME: String = - env::var("ZED_RELEASE_CHANNEL").unwrap_or(include_str!("../RELEASE_CHANNEL").to_string()); - pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() { - "dev" => ReleaseChannel::Dev, - "preview" => ReleaseChannel::Preview, - "stable" => ReleaseChannel::Stable, - _ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME), - }; -} - pub fn init(app_state: &Arc, cx: &mut gpui::MutableAppContext) { cx.add_action(about); cx.add_global_action(|_: &Hide, cx: &mut gpui::MutableAppContext| { @@ -629,7 +617,7 @@ mod tests { use gpui::{ executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle, }; - use project::{Db, Project, ProjectPath}; + use project::{Project, ProjectPath}; use serde_json::json; use std::{ collections::HashSet, @@ -774,6 +762,8 @@ mod tests { async fn test_new_empty_workspace(cx: &mut TestAppContext) { let app_state = init(cx); cx.dispatch_global_action(workspace::NewFile); + cx.foreground().run_until_parked(); + let window_id = *cx.window_ids().first().unwrap(); let workspace = cx.root_view::(window_id).unwrap(); let editor = workspace.update(cx, |workspace, cx| { @@ -1816,7 +1806,7 @@ mod tests { state.initialize_workspace = initialize_workspace; state.build_window_options = build_window_options; call::init(app_state.client.clone(), app_state.user_store.clone(), cx); - workspace::init(app_state.clone(), cx, Db::open_in_memory("test")); + workspace::init(app_state.clone(), cx); editor::init(cx); pane::init(cx); app_state From 
c1f79023094ee03ee0f4db350602dd5612b299a8 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Mon, 14 Nov 2022 17:31:12 -0800 Subject: [PATCH 48/86] wip --- .../src/activity_indicator.rs | 2 +- crates/breadcrumbs/src/breadcrumbs.rs | 5 +- crates/collab/src/integration_tests.rs | 2 +- crates/diagnostics/src/diagnostics.rs | 9 +- crates/diagnostics/src/items.rs | 4 +- crates/editor/src/editor_tests.rs | 7 +- crates/editor/src/items.rs | 6 +- crates/search/src/buffer_search.rs | 3 +- crates/search/src/project_search.rs | 6 +- .../terminal/src/terminal_container_view.rs | 5 +- crates/theme_testbench/src/theme_testbench.rs | 7 +- crates/workspace/src/dock.rs | 20 +- crates/workspace/src/item.rs | 876 ++++++++++++++ crates/workspace/src/pane.rs | 5 +- crates/workspace/src/persistence.rs | 45 +- crates/workspace/src/persistence/model.rs | 37 +- crates/workspace/src/searchable.rs | 2 +- crates/workspace/src/shared_screen.rs | 22 +- crates/workspace/src/workspace.rs | 1003 ++--------------- crates/zed/src/feedback.rs | 4 +- crates/zed/src/main.rs | 2 +- crates/zed/src/zed.rs | 3 +- 22 files changed, 1114 insertions(+), 961 deletions(-) create mode 100644 crates/workspace/src/item.rs diff --git a/crates/activity_indicator/src/activity_indicator.rs b/crates/activity_indicator/src/activity_indicator.rs index 775e460a2d454d8ffa15d529ebdf1c18d109de20..8b9eb4b0409af37a947252a51b2de8cfe8d0851b 100644 --- a/crates/activity_indicator/src/activity_indicator.rs +++ b/crates/activity_indicator/src/activity_indicator.rs @@ -11,7 +11,7 @@ use settings::Settings; use smallvec::SmallVec; use std::{cmp::Reverse, fmt::Write, sync::Arc}; use util::ResultExt; -use workspace::{ItemHandle, StatusItemView, Workspace}; +use workspace::{item::ItemHandle, StatusItemView, Workspace}; actions!(lsp_status, [ShowErrorMessage]); diff --git a/crates/breadcrumbs/src/breadcrumbs.rs b/crates/breadcrumbs/src/breadcrumbs.rs index 85f0509caf6f7f73e84312454a321d94d139edcd..278b8f39e29b9f981c051c490ccebb931f6126fa 
100644 --- a/crates/breadcrumbs/src/breadcrumbs.rs +++ b/crates/breadcrumbs/src/breadcrumbs.rs @@ -4,7 +4,10 @@ use gpui::{ use itertools::Itertools; use search::ProjectSearchView; use settings::Settings; -use workspace::{ItemEvent, ItemHandle, ToolbarItemLocation, ToolbarItemView}; +use workspace::{ + item::{ItemEvent, ItemHandle}, + ToolbarItemLocation, ToolbarItemView, +}; pub enum Event { UpdateLocation, diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index ade4e102806c70252fbae4b7478019d22bcb02ee..762a5cf71144c899f63aecb06aa75323db83cea0 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -52,7 +52,7 @@ use std::{ use theme::ThemeRegistry; use unindent::Unindent as _; use util::post_inc; -use workspace::{shared_screen::SharedScreen, Item, SplitDirection, ToggleFollow, Workspace}; +use workspace::{shared_screen::SharedScreen, item::Item, SplitDirection, ToggleFollow, Workspace}; #[ctor::ctor] fn init_logger() { diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 078d83ac6112830db120fee5fd74e03bfb94eb8b..a3621dd30d4233516ecaf15067c253dac0efd087 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -29,7 +29,10 @@ use std::{ sync::Arc, }; use util::TryFutureExt; -use workspace::{ItemHandle as _, ItemNavHistory, Workspace}; +use workspace::{ + item::{Item, ItemEvent, ItemHandle}, + ItemNavHistory, Workspace, +}; actions!(diagnostics, [Deploy]); @@ -503,7 +506,7 @@ impl ProjectDiagnosticsEditor { } } -impl workspace::Item for ProjectDiagnosticsEditor { +impl Item for ProjectDiagnosticsEditor { fn tab_content( &self, _detail: Option, @@ -571,7 +574,7 @@ impl workspace::Item for ProjectDiagnosticsEditor { unreachable!() } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { Editor::to_item_events(event) } diff --git 
a/crates/diagnostics/src/items.rs b/crates/diagnostics/src/items.rs index 26636d77007bedd90773d296181d7933b8e50fce..d109a5262c4ef2958619fc41215558bb48a7ddfc 100644 --- a/crates/diagnostics/src/items.rs +++ b/crates/diagnostics/src/items.rs @@ -7,7 +7,7 @@ use gpui::{ use language::Diagnostic; use project::Project; use settings::Settings; -use workspace::StatusItemView; +use workspace::{item::ItemHandle, StatusItemView}; pub struct DiagnosticIndicator { summary: project::DiagnosticSummary, @@ -219,7 +219,7 @@ impl View for DiagnosticIndicator { impl StatusItemView for DiagnosticIndicator { fn set_active_pane_item( &mut self, - active_pane_item: Option<&dyn workspace::ItemHandle>, + active_pane_item: Option<&dyn ItemHandle>, cx: &mut ViewContext, ) { if let Some(editor) = active_pane_item.and_then(|item| item.downcast::()) { diff --git a/crates/editor/src/editor_tests.rs b/crates/editor/src/editor_tests.rs index 8ac1f9a3fc529426632c8020fe59b53cc8c8f9cc..ca66ae7dc9114c423e82f793043409e3575941bf 100644 --- a/crates/editor/src/editor_tests.rs +++ b/crates/editor/src/editor_tests.rs @@ -22,7 +22,10 @@ use util::{ assert_set_eq, test::{marked_text_ranges, marked_text_ranges_by, sample_text, TextRangeMarker}, }; -use workspace::{FollowableItem, ItemHandle, NavigationEntry, Pane}; +use workspace::{ + item::{FollowableItem, ItemHandle}, + NavigationEntry, Pane, +}; #[gpui::test] fn test_edit_events(cx: &mut MutableAppContext) { @@ -475,7 +478,7 @@ fn test_clone(cx: &mut gpui::MutableAppContext) { fn test_navigation_history(cx: &mut gpui::MutableAppContext) { cx.set_global(Settings::test(cx)); cx.set_global(DragAndDrop::::default()); - use workspace::Item; + use workspace::item::Item; let (_, pane) = cx.add_window(Default::default(), |cx| Pane::new(None, cx)); let buffer = MultiBuffer::build_simple(&sample_text(300, 5, 'a'), cx); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 
0cc8575e99156d85366cc3aaec442913b36dd6a4..4f9c7d5593b3c11e343f0fad2132508479dc8c4b 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -24,9 +24,9 @@ use std::{ use text::Selection; use util::TryFutureExt; use workspace::{ + item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem}, searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle}, - FollowableItem, Item, ItemEvent, ItemHandle, ItemNavHistory, ProjectItem, StatusItemView, - ToolbarItemLocation, + ItemNavHistory, StatusItemView, ToolbarItemLocation, }; pub const MAX_TAB_TITLE_LEN: usize = 24; @@ -490,7 +490,7 @@ impl Item for Editor { Task::ready(Ok(())) } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { let mut result = Vec::new(); match event { Event::Closed => result.push(ItemEvent::CloseItem), diff --git a/crates/search/src/buffer_search.rs b/crates/search/src/buffer_search.rs index 7d668f6b3ede85106b5e833a177374671a3f953d..5877322feb64c96d843dffbec0f328af93e4ac97 100644 --- a/crates/search/src/buffer_search.rs +++ b/crates/search/src/buffer_search.rs @@ -14,8 +14,9 @@ use serde::Deserialize; use settings::Settings; use std::{any::Any, sync::Arc}; use workspace::{ + item::ItemHandle, searchable::{Direction, SearchEvent, SearchableItemHandle, WeakSearchableItemHandle}, - ItemHandle, Pane, ToolbarItemLocation, ToolbarItemView, + Pane, ToolbarItemLocation, ToolbarItemView, }; #[derive(Clone, Deserialize, PartialEq)] diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 5e935a6ae3aebfb0a76bb560a710b76ec14d9278..edd4f40ba262df583674ab711e1c756b58718170 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -24,9 +24,9 @@ use std::{ }; use util::ResultExt as _; use workspace::{ + item::{Item, ItemEvent, ItemHandle}, searchable::{Direction, SearchableItem, SearchableItemHandle}, - Item, ItemEvent, ItemHandle, ItemNavHistory, Pane, 
ToolbarItemLocation, ToolbarItemView, - Workspace, + ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, }; actions!(project_search, [SearchInNew, ToggleFocus]); @@ -893,7 +893,7 @@ impl View for ProjectSearchBar { impl ToolbarItemView for ProjectSearchBar { fn set_active_pane_item( &mut self, - active_pane_item: Option<&dyn workspace::ItemHandle>, + active_pane_item: Option<&dyn ItemHandle>, cx: &mut ViewContext, ) -> ToolbarItemLocation { cx.notify(); diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 6ee8bc78ae18cd68574dbd96bf06e35155a9c668..5d5fda1206c26c39f514cd9cb3317a6c0a469c18 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -9,7 +9,10 @@ use gpui::{ }; use util::truncate_and_trailoff; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; -use workspace::{Item, ItemEvent, ToolbarItemLocation, Workspace}; +use workspace::{ + item::{Item, ItemEvent}, + ToolbarItemLocation, Workspace, +}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; diff --git a/crates/theme_testbench/src/theme_testbench.rs b/crates/theme_testbench/src/theme_testbench.rs index 17b6248671d0a80397a5231fccdb207f3b272acb..9c7d6bdf49771cb032aaa35dd3ce8e35243a092f 100644 --- a/crates/theme_testbench/src/theme_testbench.rs +++ b/crates/theme_testbench/src/theme_testbench.rs @@ -12,7 +12,10 @@ use project::{Project, ProjectEntryId, ProjectPath}; use settings::Settings; use smallvec::SmallVec; use theme::{ColorScheme, Layer, Style, StyleSet}; -use workspace::{Item, Workspace}; +use workspace::{ + item::{Item, ItemEvent}, + Workspace, +}; actions!(theme, [DeployThemeTestbench]); @@ -351,7 +354,7 @@ impl Item for ThemeTestbench { gpui::Task::ready(Ok(())) } - fn to_item_events(_: &Self::Event) -> Vec { + fn to_item_events(_: &Self::Event) -> Vec { 
Vec::new() } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 5f471ff018361648ff01819f14f86ee8fa5b85de..5b08b689abee44839e216a41f6fd0aef8692fda5 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -98,14 +98,14 @@ pub fn icon_for_dock_anchor(anchor: DockAnchor) -> &'static str { } impl DockPosition { - fn is_visible(&self) -> bool { + pub fn is_visible(&self) -> bool { match self { DockPosition::Shown(_) => true, DockPosition::Hidden(_) => false, } } - fn anchor(&self) -> DockAnchor { + pub fn anchor(&self) -> DockAnchor { match self { DockPosition::Shown(anchor) | DockPosition::Hidden(anchor) => *anchor, } @@ -137,9 +137,15 @@ pub struct Dock { } impl Dock { - pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { - let anchor = cx.global::().default_dock_anchor; - let pane = cx.add_view(|cx| Pane::new(Some(anchor), cx)); + pub fn new( + default_item_factory: DefaultItemFactory, + position: Option, + cx: &mut ViewContext, + ) -> Self { + let position = position + .unwrap_or_else(|| DockPosition::Hidden(cx.global::().default_dock_anchor)); + + let pane = cx.add_view(|cx| Pane::new(Some(position.anchor()), cx)); pane.update(cx, |pane, cx| { pane.set_active(false, cx); }); @@ -152,7 +158,7 @@ impl Dock { Self { pane, panel_sizes: Default::default(), - position: DockPosition::Hidden(anchor), + position, default_item_factory, } } @@ -454,7 +460,7 @@ mod tests { use settings::Settings; use super::*; - use crate::{sidebar::Sidebar, tests::TestItem, ItemHandle, Workspace}; + use crate::{item::test::TestItem, sidebar::Sidebar, ItemHandle, Workspace}; pub fn default_item_factory( _workspace: &mut Workspace, diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs new file mode 100644 index 0000000000000000000000000000000000000000..215ad47e1b3503a163e2b2a76278c50c26822ae7 --- /dev/null +++ b/crates/workspace/src/item.rs @@ -0,0 +1,876 @@ +use std::{ + any::{Any, TypeId}, 
+ borrow::Cow, + cell::RefCell, + fmt, + path::PathBuf, + rc::Rc, + sync::atomic::{AtomicBool, Ordering}, + time::Duration, +}; + +use anyhow::Result; +use client::proto; +use gpui::{ + AnyViewHandle, AppContext, ElementBox, ModelHandle, MutableAppContext, Task, View, ViewContext, + ViewHandle, WeakViewHandle, +}; +use project::{Project, ProjectEntryId, ProjectPath}; +use settings::{Autosave, Settings}; +use smallvec::SmallVec; +use theme::Theme; +use util::ResultExt; + +use crate::{ + pane, + persistence::model::{ItemId, WorkspaceId}, + searchable::SearchableItemHandle, + DelayedDebouncedEditAction, FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, + Workspace, +}; + +#[derive(Eq, PartialEq, Hash)] +pub enum ItemEvent { + CloseItem, + UpdateTab, + UpdateBreadcrumbs, + Edit, +} + +pub trait Item: View { + fn deactivated(&mut self, _: &mut ViewContext) {} + fn workspace_deactivated(&mut self, _: &mut ViewContext) {} + fn navigate(&mut self, _: Box, _: &mut ViewContext) -> bool { + false + } + fn tab_description<'a>(&'a self, _: usize, _: &'a AppContext) -> Option> { + None + } + fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) + -> ElementBox; + fn project_path(&self, cx: &AppContext) -> Option; + fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; + fn is_singleton(&self, cx: &AppContext) -> bool; + fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); + fn clone_on_split(&self, _: &mut ViewContext) -> Option + where + Self: Sized, + { + None + } + fn is_dirty(&self, _: &AppContext) -> bool { + false + } + fn has_conflict(&self, _: &AppContext) -> bool { + false + } + fn can_save(&self, cx: &AppContext) -> bool; + fn save( + &mut self, + project: ModelHandle, + cx: &mut ViewContext, + ) -> Task>; + fn save_as( + &mut self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut ViewContext, + ) -> Task>; + fn reload( + &mut self, + project: ModelHandle, + cx: &mut ViewContext, + 
) -> Task>; + fn git_diff_recalc( + &mut self, + _project: ModelHandle, + _cx: &mut ViewContext, + ) -> Task> { + Task::ready(Ok(())) + } + fn to_item_events(event: &Self::Event) -> Vec; + fn should_close_item_on_event(_: &Self::Event) -> bool { + false + } + fn should_update_tab_on_event(_: &Self::Event) -> bool { + false + } + fn is_edit_event(_: &Self::Event) -> bool { + false + } + fn act_as_type( + &self, + type_id: TypeId, + self_handle: &ViewHandle, + _: &AppContext, + ) -> Option { + if TypeId::of::() == type_id { + Some(self_handle.into()) + } else { + None + } + } + fn as_searchable(&self, _: &ViewHandle) -> Option> { + None + } + + fn breadcrumb_location(&self) -> ToolbarItemLocation { + ToolbarItemLocation::Hidden + } + fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { + None + } + fn serialized_item_kind() -> Option<&'static str>; + fn deserialize( + workspace_id: WorkspaceId, + item_id: ItemId, + cx: &mut ViewContext, + ) -> Result; +} + +pub trait ItemHandle: 'static + fmt::Debug { + fn subscribe_to_item_events( + &self, + cx: &mut MutableAppContext, + handler: Box, + ) -> gpui::Subscription; + fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option>; + fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) + -> ElementBox; + fn project_path(&self, cx: &AppContext) -> Option; + fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; + fn is_singleton(&self, cx: &AppContext) -> bool; + fn boxed_clone(&self) -> Box; + fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; + fn added_to_pane( + &self, + workspace: &mut Workspace, + pane: ViewHandle, + cx: &mut ViewContext, + ); + fn deactivated(&self, cx: &mut MutableAppContext); + fn workspace_deactivated(&self, cx: &mut MutableAppContext); + fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool; + fn id(&self) -> usize; + fn window_id(&self) -> usize; + fn to_any(&self) -> AnyViewHandle; + fn 
is_dirty(&self, cx: &AppContext) -> bool; + fn has_conflict(&self, cx: &AppContext) -> bool; + fn can_save(&self, cx: &AppContext) -> bool; + fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task>; + fn save_as( + &self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut MutableAppContext, + ) -> Task>; + fn reload(&self, project: ModelHandle, cx: &mut MutableAppContext) + -> Task>; + fn git_diff_recalc( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task>; + fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option; + fn to_followable_item_handle(&self, cx: &AppContext) -> Option>; + fn on_release( + &self, + cx: &mut MutableAppContext, + callback: Box, + ) -> gpui::Subscription; + fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; + fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; + fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; +} + +pub trait WeakItemHandle { + fn id(&self) -> usize; + fn window_id(&self) -> usize; + fn upgrade(&self, cx: &AppContext) -> Option>; +} + +impl dyn ItemHandle { + pub fn downcast(&self) -> Option> { + self.to_any().downcast() + } + + pub fn act_as(&self, cx: &AppContext) -> Option> { + self.act_as_type(TypeId::of::(), cx) + .and_then(|t| t.downcast()) + } +} + +impl ItemHandle for ViewHandle { + fn subscribe_to_item_events( + &self, + cx: &mut MutableAppContext, + handler: Box, + ) -> gpui::Subscription { + cx.subscribe(self, move |_, event, cx| { + for item_event in T::to_item_events(event) { + handler(item_event, cx) + } + }) + } + + fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option> { + self.read(cx).tab_description(detail, cx) + } + + fn tab_content( + &self, + detail: Option, + style: &theme::Tab, + cx: &AppContext, + ) -> ElementBox { + self.read(cx).tab_content(detail, style, cx) + } + + fn project_path(&self, cx: &AppContext) -> Option { + self.read(cx).project_path(cx) + } + + fn 
project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { + self.read(cx).project_entry_ids(cx) + } + + fn is_singleton(&self, cx: &AppContext) -> bool { + self.read(cx).is_singleton(cx) + } + + fn boxed_clone(&self) -> Box { + Box::new(self.clone()) + } + + fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { + self.update(cx, |item, cx| { + cx.add_option_view(|cx| item.clone_on_split(cx)) + }) + .map(|handle| Box::new(handle) as Box) + } + + fn added_to_pane( + &self, + workspace: &mut Workspace, + pane: ViewHandle, + cx: &mut ViewContext, + ) { + let history = pane.read(cx).nav_history_for_item(self); + self.update(cx, |this, cx| this.set_nav_history(history, cx)); + + if let Some(followed_item) = self.to_followable_item_handle(cx) { + if let Some(message) = followed_item.to_state_proto(cx) { + workspace.update_followers( + proto::update_followers::Variant::CreateView(proto::View { + id: followed_item.id() as u64, + variant: Some(message), + leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), + }), + cx, + ); + } + } + + if workspace + .panes_by_item + .insert(self.id(), pane.downgrade()) + .is_none() + { + let mut pending_autosave = DelayedDebouncedEditAction::new(); + let mut pending_git_update = DelayedDebouncedEditAction::new(); + let pending_update = Rc::new(RefCell::new(None)); + let pending_update_scheduled = Rc::new(AtomicBool::new(false)); + + let mut event_subscription = + Some(cx.subscribe(self, move |workspace, item, event, cx| { + let pane = if let Some(pane) = workspace + .panes_by_item + .get(&item.id()) + .and_then(|pane| pane.upgrade(cx)) + { + pane + } else { + log::error!("unexpected item event after pane was dropped"); + return; + }; + + if let Some(item) = item.to_followable_item_handle(cx) { + let leader_id = workspace.leader_for_pane(&pane); + + if leader_id.is_some() && item.should_unfollow_on_event(event, cx) { + workspace.unfollow(&pane, cx); + } + + if item.add_event_to_update_proto( + event, + 
&mut *pending_update.borrow_mut(), + cx, + ) && !pending_update_scheduled.load(Ordering::SeqCst) + { + pending_update_scheduled.store(true, Ordering::SeqCst); + cx.after_window_update({ + let pending_update = pending_update.clone(); + let pending_update_scheduled = pending_update_scheduled.clone(); + move |this, cx| { + pending_update_scheduled.store(false, Ordering::SeqCst); + this.update_followers( + proto::update_followers::Variant::UpdateView( + proto::UpdateView { + id: item.id() as u64, + variant: pending_update.borrow_mut().take(), + leader_id: leader_id.map(|id| id.0), + }, + ), + cx, + ); + } + }); + } + } + + for item_event in T::to_item_events(event).into_iter() { + match item_event { + ItemEvent::CloseItem => { + Pane::close_item(workspace, pane, item.id(), cx) + .detach_and_log_err(cx); + return; + } + + ItemEvent::UpdateTab => { + pane.update(cx, |_, cx| { + cx.emit(pane::Event::ChangeItemTitle); + cx.notify(); + }); + } + + ItemEvent::Edit => { + if let Autosave::AfterDelay { milliseconds } = + cx.global::().autosave + { + let delay = Duration::from_millis(milliseconds); + let item = item.clone(); + pending_autosave.fire_new( + delay, + workspace, + cx, + |project, mut cx| async move { + cx.update(|cx| Pane::autosave_item(&item, project, cx)) + .await + .log_err(); + }, + ); + } + + let settings = cx.global::(); + let debounce_delay = settings.git_overrides.gutter_debounce; + + let item = item.clone(); + + if let Some(delay) = debounce_delay { + const MIN_GIT_DELAY: u64 = 50; + + let delay = delay.max(MIN_GIT_DELAY); + let duration = Duration::from_millis(delay); + + pending_git_update.fire_new( + duration, + workspace, + cx, + |project, mut cx| async move { + cx.update(|cx| item.git_diff_recalc(project, cx)) + .await + .log_err(); + }, + ); + } else { + let project = workspace.project().downgrade(); + cx.spawn_weak(|_, mut cx| async move { + if let Some(project) = project.upgrade(&cx) { + cx.update(|cx| item.git_diff_recalc(project, cx)) + .await + 
.log_err(); + } + }) + .detach(); + } + } + + _ => {} + } + } + })); + + cx.observe_focus(self, move |workspace, item, focused, cx| { + if !focused && cx.global::().autosave == Autosave::OnFocusChange { + Pane::autosave_item(&item, workspace.project.clone(), cx) + .detach_and_log_err(cx); + } + }) + .detach(); + + let item_id = self.id(); + cx.observe_release(self, move |workspace, _, _| { + workspace.panes_by_item.remove(&item_id); + event_subscription.take(); + }) + .detach(); + } + } + + fn deactivated(&self, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| this.deactivated(cx)); + } + + fn workspace_deactivated(&self, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| this.workspace_deactivated(cx)); + } + + fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool { + self.update(cx, |this, cx| this.navigate(data, cx)) + } + + fn id(&self) -> usize { + self.id() + } + + fn window_id(&self) -> usize { + self.window_id() + } + + fn to_any(&self) -> AnyViewHandle { + self.into() + } + + fn is_dirty(&self, cx: &AppContext) -> bool { + self.read(cx).is_dirty(cx) + } + + fn has_conflict(&self, cx: &AppContext) -> bool { + self.read(cx).has_conflict(cx) + } + + fn can_save(&self, cx: &AppContext) -> bool { + self.read(cx).can_save(cx) + } + + fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task> { + self.update(cx, |item, cx| item.save(project, cx)) + } + + fn save_as( + &self, + project: ModelHandle, + abs_path: PathBuf, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.save_as(project, abs_path, cx)) + } + + fn reload( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.reload(project, cx)) + } + + fn git_diff_recalc( + &self, + project: ModelHandle, + cx: &mut MutableAppContext, + ) -> Task> { + self.update(cx, |item, cx| item.git_diff_recalc(project, cx)) + } + + fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option { 
+ self.read(cx).act_as_type(type_id, self, cx) + } + + fn to_followable_item_handle(&self, cx: &AppContext) -> Option> { + if cx.has_global::() { + let builders = cx.global::(); + let item = self.to_any(); + Some(builders.get(&item.view_type())?.1(item)) + } else { + None + } + } + + fn on_release( + &self, + cx: &mut MutableAppContext, + callback: Box, + ) -> gpui::Subscription { + cx.observe_release(self, move |_, cx| callback(cx)) + } + + fn to_searchable_item_handle(&self, cx: &AppContext) -> Option> { + self.read(cx).as_searchable(self) + } + + fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation { + self.read(cx).breadcrumb_location() + } + + fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { + self.read(cx).breadcrumbs(theme, cx) + } +} + +impl From> for AnyViewHandle { + fn from(val: Box) -> Self { + val.to_any() + } +} + +impl From<&Box> for AnyViewHandle { + fn from(val: &Box) -> Self { + val.to_any() + } +} + +impl Clone for Box { + fn clone(&self) -> Box { + self.boxed_clone() + } +} + +impl WeakItemHandle for WeakViewHandle { + fn id(&self) -> usize { + self.id() + } + + fn window_id(&self) -> usize { + self.window_id() + } + + fn upgrade(&self, cx: &AppContext) -> Option> { + self.upgrade(cx).map(|v| Box::new(v) as Box) + } +} + +pub trait ProjectItem: Item { + type Item: project::Item; + + fn for_project_item( + project: ModelHandle, + item: ModelHandle, + cx: &mut ViewContext, + ) -> Self; +} + +pub trait FollowableItem: Item { + fn to_state_proto(&self, cx: &AppContext) -> Option; + fn from_state_proto( + pane: ViewHandle, + project: ModelHandle, + state: &mut Option, + cx: &mut MutableAppContext, + ) -> Option>>>; + fn add_event_to_update_proto( + &self, + event: &Self::Event, + update: &mut Option, + cx: &AppContext, + ) -> bool; + fn apply_update_proto( + &mut self, + message: proto::update_view::Variant, + cx: &mut ViewContext, + ) -> Result<()>; + + fn set_leader_replica_id(&mut self, leader_replica_id: Option, 
cx: &mut ViewContext); + fn should_unfollow_on_event(event: &Self::Event, cx: &AppContext) -> bool; +} + +pub trait FollowableItemHandle: ItemHandle { + fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext); + fn to_state_proto(&self, cx: &AppContext) -> Option; + fn add_event_to_update_proto( + &self, + event: &dyn Any, + update: &mut Option, + cx: &AppContext, + ) -> bool; + fn apply_update_proto( + &self, + message: proto::update_view::Variant, + cx: &mut MutableAppContext, + ) -> Result<()>; + fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool; +} + +impl FollowableItemHandle for ViewHandle { + fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext) { + self.update(cx, |this, cx| { + this.set_leader_replica_id(leader_replica_id, cx) + }) + } + + fn to_state_proto(&self, cx: &AppContext) -> Option { + self.read(cx).to_state_proto(cx) + } + + fn add_event_to_update_proto( + &self, + event: &dyn Any, + update: &mut Option, + cx: &AppContext, + ) -> bool { + if let Some(event) = event.downcast_ref() { + self.read(cx).add_event_to_update_proto(event, update, cx) + } else { + false + } + } + + fn apply_update_proto( + &self, + message: proto::update_view::Variant, + cx: &mut MutableAppContext, + ) -> Result<()> { + self.update(cx, |this, cx| this.apply_update_proto(message, cx)) + } + + fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool { + if let Some(event) = event.downcast_ref() { + T::should_unfollow_on_event(event, cx) + } else { + false + } + } +} + +#[cfg(test)] +pub(crate) mod test { + use std::{any::Any, borrow::Cow, cell::Cell}; + + use anyhow::anyhow; + use gpui::{ + elements::Empty, AppContext, Element, ElementBox, Entity, ModelHandle, RenderContext, Task, + View, ViewContext, + }; + use project::{Project, ProjectEntryId, ProjectPath}; + use smallvec::SmallVec; + + use crate::{sidebar::SidebarItem, ItemNavHistory}; + + use super::{Item, 
ItemEvent}; + + pub struct TestItem { + pub state: String, + pub label: String, + pub save_count: usize, + pub save_as_count: usize, + pub reload_count: usize, + pub is_dirty: bool, + pub is_singleton: bool, + pub has_conflict: bool, + pub project_entry_ids: Vec, + pub project_path: Option, + pub nav_history: Option, + pub tab_descriptions: Option>, + pub tab_detail: Cell>, + } + + pub enum TestItemEvent { + Edit, + } + + impl Clone for TestItem { + fn clone(&self) -> Self { + Self { + state: self.state.clone(), + label: self.label.clone(), + save_count: self.save_count, + save_as_count: self.save_as_count, + reload_count: self.reload_count, + is_dirty: self.is_dirty, + is_singleton: self.is_singleton, + has_conflict: self.has_conflict, + project_entry_ids: self.project_entry_ids.clone(), + project_path: self.project_path.clone(), + nav_history: None, + tab_descriptions: None, + tab_detail: Default::default(), + } + } + } + + impl TestItem { + pub fn new() -> Self { + Self { + state: String::new(), + label: String::new(), + save_count: 0, + save_as_count: 0, + reload_count: 0, + is_dirty: false, + has_conflict: false, + project_entry_ids: Vec::new(), + project_path: None, + is_singleton: true, + nav_history: None, + tab_descriptions: None, + tab_detail: Default::default(), + } + } + + pub fn with_label(mut self, state: &str) -> Self { + self.label = state.to_string(); + self + } + + pub fn with_singleton(mut self, singleton: bool) -> Self { + self.is_singleton = singleton; + self + } + + pub fn with_project_entry_ids(mut self, project_entry_ids: &[u64]) -> Self { + self.project_entry_ids.extend( + project_entry_ids + .iter() + .copied() + .map(ProjectEntryId::from_proto), + ); + self + } + + pub fn set_state(&mut self, state: String, cx: &mut ViewContext) { + self.push_to_nav_history(cx); + self.state = state; + } + + fn push_to_nav_history(&mut self, cx: &mut ViewContext) { + if let Some(history) = &mut self.nav_history { + 
history.push(Some(Box::new(self.state.clone())), cx); + } + } + } + + impl Entity for TestItem { + type Event = TestItemEvent; + } + + impl View for TestItem { + fn ui_name() -> &'static str { + "TestItem" + } + + fn render(&mut self, _: &mut RenderContext) -> ElementBox { + Empty::new().boxed() + } + } + + impl Item for TestItem { + fn tab_description<'a>(&'a self, detail: usize, _: &'a AppContext) -> Option> { + self.tab_descriptions.as_ref().and_then(|descriptions| { + let description = *descriptions.get(detail).or_else(|| descriptions.last())?; + Some(description.into()) + }) + } + + fn tab_content(&self, detail: Option, _: &theme::Tab, _: &AppContext) -> ElementBox { + self.tab_detail.set(detail); + Empty::new().boxed() + } + + fn project_path(&self, _: &AppContext) -> Option { + self.project_path.clone() + } + + fn project_entry_ids(&self, _: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { + self.project_entry_ids.iter().copied().collect() + } + + fn is_singleton(&self, _: &AppContext) -> bool { + self.is_singleton + } + + fn set_nav_history(&mut self, history: ItemNavHistory, _: &mut ViewContext) { + self.nav_history = Some(history); + } + + fn navigate(&mut self, state: Box, _: &mut ViewContext) -> bool { + let state = *state.downcast::().unwrap_or_default(); + if state != self.state { + self.state = state; + true + } else { + false + } + } + + fn deactivated(&mut self, cx: &mut ViewContext) { + self.push_to_nav_history(cx); + } + + fn clone_on_split(&self, _: &mut ViewContext) -> Option + where + Self: Sized, + { + Some(self.clone()) + } + + fn is_dirty(&self, _: &AppContext) -> bool { + self.is_dirty + } + + fn has_conflict(&self, _: &AppContext) -> bool { + self.has_conflict + } + + fn can_save(&self, _: &AppContext) -> bool { + !self.project_entry_ids.is_empty() + } + + fn save( + &mut self, + _: ModelHandle, + _: &mut ViewContext, + ) -> Task> { + self.save_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn save_as( + &mut self, + _: 
ModelHandle, + _: std::path::PathBuf, + _: &mut ViewContext, + ) -> Task> { + self.save_as_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn reload( + &mut self, + _: ModelHandle, + _: &mut ViewContext, + ) -> Task> { + self.reload_count += 1; + self.is_dirty = false; + Task::ready(Ok(())) + } + + fn to_item_events(_: &Self::Event) -> Vec { + vec![ItemEvent::UpdateTab, ItemEvent::Edit] + } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + workspace_id: crate::persistence::model::WorkspaceId, + item_id: crate::persistence::model::ItemId, + cx: &mut ViewContext, + ) -> anyhow::Result { + Err(anyhow!("Cannot deserialize test item")) + } + } + + impl SidebarItem for TestItem {} +} diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 644fa9481e4da782efd99c52fad0e3362cf12b0e..5db8d6feec03bf2c2fe72e084b073618f7bf3d2a 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -3,8 +3,9 @@ mod dragged_item_receiver; use super::{ItemHandle, SplitDirection}; use crate::{ dock::{icon_for_dock_anchor, AnchorDockBottom, AnchorDockRight, ExpandDock, HideDock}, + item::WeakItemHandle, toolbar::Toolbar, - Item, NewFile, NewSearch, NewTerminal, WeakItemHandle, Workspace, + Item, NewFile, NewSearch, NewTerminal, Workspace, }; use anyhow::Result; use collections::{HashMap, HashSet, VecDeque}; @@ -1634,7 +1635,7 @@ mod tests { use std::sync::Arc; use super::*; - use crate::tests::TestItem; + use crate::item::test::TestItem; use gpui::{executor::Deterministic, TestAppContext}; use project::FakeFs; diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 8a80dc5a76fb20a737c84c14a8c811baded6bd33..164807b24fd888abc2907968e049c07fd54bcda9 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -4,6 +4,7 @@ pub mod model; use std::ops::Deref; use std::path::{Path, PathBuf}; +use std::sync::Arc; use anyhow::{bail, 
Context, Result}; use db::open_file_db; @@ -52,7 +53,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( parent_group_id INTEGER, -- NULL indicates that this is a root node position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; @@ -61,7 +64,9 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( workspace_id BLOB NOT NULL, parent_group_id INTEGER, -- NULL, this is a dock pane position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; @@ -71,8 +76,11 @@ pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; "}], @@ -96,15 +104,15 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_anchor, dock_visible) = iife!({ + let (workspace_id, dock_position) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! 
{" - SELECT workspace_id, dock_anchor, dock_visible + SELECT workspace_id, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { self.select_row_bound(indoc! {" - SELECT workspace_id, dock_anchor, dock_visible + SELECT workspace_id, dock_visible, dock_anchor FROM workspaces WHERE workspace_id = ?"})?(&workspace_id)? } @@ -122,8 +130,7 @@ impl WorkspaceDb { .get_center_pane_group(&workspace_id) .context("Getting center group") .log_err()?, - dock_anchor, - dock_visible, + dock_position, }) } @@ -150,8 +157,8 @@ impl WorkspaceDb { self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; self.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?; + "INSERT INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", + )?((&workspace_id, workspace.dock_position))?; // Save center pane group and dock pane self.save_pane_group(&workspace_id, &workspace.center_group, None)?; @@ -172,7 +179,7 @@ impl WorkspaceDb { } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec> { + pub fn recent_workspaces(&self, limit: usize) -> Vec>> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( @@ -181,7 +188,7 @@ impl WorkspaceDb { )?(limit)? 
.into_iter() .map(|id| id.paths()) - .collect::>>(), + .collect::>>>(), ) }) .log_err() @@ -339,22 +346,19 @@ mod tests { let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { - dock_anchor: DockAnchor::Bottom, - dock_visible: true, + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; let workspace_2 = SerializedWorkspace { - dock_anchor: DockAnchor::Expanded, - dock_visible: false, + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; let workspace_3 = SerializedWorkspace { - dock_anchor: DockAnchor::Right, - dock_visible: true, + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), }; @@ -414,8 +418,7 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - dock_anchor: DockAnchor::Right, - dock_visible: false, + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 824f649f9835db10da8ff10d5269c862082329cb..7afd186a36ff01189dcaaf43fb9af450d623955d 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -6,18 +6,21 @@ use std::{ use anyhow::{bail, Result}; use gpui::Axis; + use settings::DockAnchor; use sqlez::{ bindable::{Bind, Column}, statement::Statement, }; +use crate::dock::DockPosition; + #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Vec); +pub(crate) struct WorkspaceId(Arc>); impl WorkspaceId { - pub fn paths(self) -> Vec { - self.0 + pub fn paths(self) -> Arc> { + self.0.clone() } } @@ -28,7 +31,7 @@ impl, T: IntoIterator> From for WorkspaceId { .map(|p| 
p.as_ref().to_path_buf()) .collect::>(); roots.sort(); - Self(roots) + Self(Arc::new(roots)) } } @@ -49,8 +52,7 @@ impl Column for WorkspaceId { #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { - pub dock_anchor: DockAnchor, - pub dock_visible: bool, + pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, } @@ -152,12 +154,31 @@ impl SerializedItem { } } +impl Bind for DockPosition { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = statement.bind(self.is_visible(), start_index)?; + statement.bind(self.anchor(), next_index) + } +} + +impl Column for DockPosition { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (visible, next_index) = bool::column(statement, start_index)?; + let (dock_anchor, next_index) = DockAnchor::column(statement, next_index)?; + let position = if visible { + DockPosition::Shown(dock_anchor) + } else { + DockPosition::Hidden(dock_anchor) + }; + Ok((position, next_index)) + } +} + #[cfg(test)] mod tests { + use settings::DockAnchor; use sqlez::connection::Connection; - use crate::persistence::model::DockAnchor; - use super::WorkspaceId; #[test] diff --git a/crates/workspace/src/searchable.rs b/crates/workspace/src/searchable.rs index cbe7364536281856da7947a1e2162e5024347683..073e88bf6dd5777dee4915ae763f1826ae2720ba 100644 --- a/crates/workspace/src/searchable.rs +++ b/crates/workspace/src/searchable.rs @@ -6,7 +6,7 @@ use gpui::{ }; use project::search::SearchQuery; -use crate::{Item, ItemHandle, WeakItemHandle}; +use crate::{item::WeakItemHandle, Item, ItemHandle}; #[derive(Debug)] pub enum SearchEvent { diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index 8c3f293895abf98a7352664c802965d2230d505b..d6a69490a5a52e907d28326a2a76514ad7823e95 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,4 +1,8 @@ -use 
crate::{Item, ItemNavHistory}; +use crate::{ + item::ItemEvent, + persistence::model::{ItemId, WorkspaceId}, + Item, ItemNavHistory, +}; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; use client::{PeerId, User}; @@ -176,9 +180,21 @@ impl Item for SharedScreen { Task::ready(Err(anyhow!("Item::reload called on SharedScreen"))) } - fn to_item_events(event: &Self::Event) -> Vec { + fn to_item_events(event: &Self::Event) -> Vec { match event { - Event::Close => vec![crate::ItemEvent::CloseItem], + Event::Close => vec![ItemEvent::CloseItem], } } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + workspace_id: WorkspaceId, + item_id: ItemId, + cx: &mut ViewContext, + ) -> Result { + Err(anyhow!("SharedScreen can not be deserialized")) + } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 085d9e2eb2d6c007cb759f981f32e3f374e7c135..c51979f6555fe6acf4f0625372814736d5fc4e94 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -3,6 +3,7 @@ /// This may cause issues when you're trying to write tests that use workspace focus to add items at /// specific locations. 
pub mod dock; +pub mod item; pub mod pane; pub mod pane_group; mod persistence; @@ -12,7 +13,15 @@ pub mod sidebar; mod status_bar; mod toolbar; -use crate::persistence::model::SerializedWorkspace; +use std::{ + any::TypeId, + borrow::Cow, + future::Future, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; + use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; @@ -30,56 +39,25 @@ use gpui::{ MouseButton, MutableAppContext, PathPromptOptions, PromptLevel, RenderContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; +use item::{FollowableItem, FollowableItemHandle, Item, ItemHandle, ProjectItem}; use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; +use persistence::model::{ItemId, WorkspaceId}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; -use searchable::SearchableItemHandle; use serde::Deserialize; use settings::{Autosave, DockAnchor, Settings}; use shared_screen::SharedScreen; use sidebar::{Sidebar, SidebarButtons, SidebarSide, ToggleSidebarItem}; -use smallvec::SmallVec; use status_bar::StatusBar; pub use status_bar::StatusItemView; -use std::{ - any::{Any, TypeId}, - borrow::Cow, - cell::RefCell, - fmt, - future::Future, - path::{Path, PathBuf}, - rc::Rc, - sync::{ - atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - }, - time::Duration, -}; use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -type ProjectItemBuilders = HashMap< - TypeId, - fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, ->; - -type FollowableItemBuilder = fn( - ViewHandle, - ModelHandle, - &mut Option, - &mut MutableAppContext, -) -> Option>>>; -type FollowableItemBuilders = HashMap< - TypeId, - ( - FollowableItemBuilder, - fn(AnyViewHandle) -> Box, - ), ->; +use crate::persistence::model::SerializedWorkspace; 
#[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -316,6 +294,10 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { client.add_view_message_handler(Workspace::handle_update_followers); } +type ProjectItemBuilders = HashMap< + TypeId, + fn(ModelHandle, AnyModelHandle, &mut ViewContext) -> Box, +>; pub fn register_project_item(cx: &mut MutableAppContext) { cx.update_default_global(|builders: &mut ProjectItemBuilders, _| { builders.insert(TypeId::of::(), |project, model, cx| { @@ -325,6 +307,19 @@ pub fn register_project_item(cx: &mut MutableAppContext) { }); } +type FollowableItemBuilder = fn( + ViewHandle, + ModelHandle, + &mut Option, + &mut MutableAppContext, +) -> Option>>>; +type FollowableItemBuilders = HashMap< + TypeId, + ( + FollowableItemBuilder, + fn(AnyViewHandle) -> Box, + ), +>; pub fn register_followable_item(cx: &mut MutableAppContext) { cx.update_default_global(|builders: &mut FollowableItemBuilders, _| { builders.insert( @@ -342,6 +337,26 @@ pub fn register_followable_item(cx: &mut MutableAppContext) { }); } +type SerializableItemBuilders = HashMap< + &'static str, + fn(WorkspaceId, ItemId, &mut ViewContext) -> Option>, +>; +pub fn register_deserializable_item(cx: &mut MutableAppContext) { + cx.update_default_global(|deserializers: &mut SerializableItemBuilders, _| { + if let Some(serialized_item_kind) = I::serialized_item_kind() { + deserializers.insert(serialized_item_kind, |workspace_id, item_id, cx| { + if let Some(v) = + cx.add_option_view(|cx| I::deserialize(workspace_id, item_id, cx).log_err()) + { + Some(Box::new(v)) + } else { + None + } + }); + } + }); +} + pub struct AppState { pub languages: Arc, pub themes: Arc, @@ -354,189 +369,34 @@ pub struct AppState { pub default_item_factory: DefaultItemFactory, } -#[derive(Eq, PartialEq, Hash)] -pub enum ItemEvent { - CloseItem, - UpdateTab, - UpdateBreadcrumbs, - Edit, -} - -pub trait Item: View { - fn deactivated(&mut self, _: &mut ViewContext) {} - 
fn workspace_deactivated(&mut self, _: &mut ViewContext) {} - fn navigate(&mut self, _: Box, _: &mut ViewContext) -> bool { - false - } - fn tab_description<'a>(&'a self, _: usize, _: &'a AppContext) -> Option> { - None - } - fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) - -> ElementBox; - fn project_path(&self, cx: &AppContext) -> Option; - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; - fn is_singleton(&self, cx: &AppContext) -> bool; - fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); - fn clone_on_split(&self, _: &mut ViewContext) -> Option - where - Self: Sized, - { - None - } - fn is_dirty(&self, _: &AppContext) -> bool { - false - } - fn has_conflict(&self, _: &AppContext) -> bool { - false - } - fn can_save(&self, cx: &AppContext) -> bool; - fn save( - &mut self, - project: ModelHandle, - cx: &mut ViewContext, - ) -> Task>; - fn save_as( - &mut self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut ViewContext, - ) -> Task>; - fn reload( - &mut self, - project: ModelHandle, - cx: &mut ViewContext, - ) -> Task>; - fn git_diff_recalc( - &mut self, - _project: ModelHandle, - _cx: &mut ViewContext, - ) -> Task> { - Task::ready(Ok(())) - } - fn to_item_events(event: &Self::Event) -> Vec; - fn should_close_item_on_event(_: &Self::Event) -> bool { - false - } - fn should_update_tab_on_event(_: &Self::Event) -> bool { - false - } - fn is_edit_event(_: &Self::Event) -> bool { - false - } - fn act_as_type( - &self, - type_id: TypeId, - self_handle: &ViewHandle, - _: &AppContext, - ) -> Option { - if TypeId::of::() == type_id { - Some(self_handle.into()) - } else { - None - } - } - fn as_searchable(&self, _: &ViewHandle) -> Option> { - None - } - - fn breadcrumb_location(&self) -> ToolbarItemLocation { - ToolbarItemLocation::Hidden - } - fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { - None - } -} - -pub trait ProjectItem: Item { - type Item: project::Item; 
- - fn for_project_item( - project: ModelHandle, - item: ModelHandle, - cx: &mut ViewContext, - ) -> Self; -} - -pub trait FollowableItem: Item { - fn to_state_proto(&self, cx: &AppContext) -> Option; - fn from_state_proto( - pane: ViewHandle, - project: ModelHandle, - state: &mut Option, - cx: &mut MutableAppContext, - ) -> Option>>>; - fn add_event_to_update_proto( - &self, - event: &Self::Event, - update: &mut Option, - cx: &AppContext, - ) -> bool; - fn apply_update_proto( - &mut self, - message: proto::update_view::Variant, - cx: &mut ViewContext, - ) -> Result<()>; - - fn set_leader_replica_id(&mut self, leader_replica_id: Option, cx: &mut ViewContext); - fn should_unfollow_on_event(event: &Self::Event, cx: &AppContext) -> bool; -} +impl AppState { + #[cfg(any(test, feature = "test-support"))] + pub fn test(cx: &mut MutableAppContext) -> Arc { + use fs::HomeDir; -pub trait FollowableItemHandle: ItemHandle { - fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext); - fn to_state_proto(&self, cx: &AppContext) -> Option; - fn add_event_to_update_proto( - &self, - event: &dyn Any, - update: &mut Option, - cx: &AppContext, - ) -> bool; - fn apply_update_proto( - &self, - message: proto::update_view::Variant, - cx: &mut MutableAppContext, - ) -> Result<()>; - fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool; -} + cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); + let settings = Settings::test(cx); + cx.set_global(settings); -impl FollowableItemHandle for ViewHandle { - fn set_leader_replica_id(&self, leader_replica_id: Option, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| { - this.set_leader_replica_id(leader_replica_id, cx) + let fs = fs::FakeFs::new(cx.background().clone()); + let languages = Arc::new(LanguageRegistry::test()); + let http_client = client::test::FakeHttpClient::with_404_response(); + let client = Client::new(http_client.clone(), cx); + let project_store = 
cx.add_model(|_| ProjectStore::new()); + let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); + let themes = ThemeRegistry::new((), cx.font_cache().clone()); + Arc::new(Self { + client, + themes, + fs, + languages, + user_store, + project_store, + initialize_workspace: |_, _, _| {}, + build_window_options: Default::default, + default_item_factory: |_, _| unimplemented!(), }) } - - fn to_state_proto(&self, cx: &AppContext) -> Option { - self.read(cx).to_state_proto(cx) - } - - fn add_event_to_update_proto( - &self, - event: &dyn Any, - update: &mut Option, - cx: &AppContext, - ) -> bool { - if let Some(event) = event.downcast_ref() { - self.read(cx).add_event_to_update_proto(event, update, cx) - } else { - false - } - } - - fn apply_update_proto( - &self, - message: proto::update_view::Variant, - cx: &mut MutableAppContext, - ) -> Result<()> { - self.update(cx, |this, cx| this.apply_update_proto(message, cx)) - } - - fn should_unfollow_on_event(&self, event: &dyn Any, cx: &AppContext) -> bool { - if let Some(event) = event.downcast_ref() { - T::should_unfollow_on_event(event, cx) - } else { - false - } - } } struct DelayedDebouncedEditAction { @@ -580,7 +440,7 @@ impl DelayedDebouncedEditAction { futures::select_biased! 
{ _ = receiver => return, - _ = timer => {} + _ = timer => {} } if let Some(project) = project.upgrade(&cx) { @@ -590,427 +450,6 @@ impl DelayedDebouncedEditAction { } } -pub trait ItemHandle: 'static + fmt::Debug { - fn subscribe_to_item_events( - &self, - cx: &mut MutableAppContext, - handler: Box, - ) -> gpui::Subscription; - fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option>; - fn tab_content(&self, detail: Option, style: &theme::Tab, cx: &AppContext) - -> ElementBox; - fn project_path(&self, cx: &AppContext) -> Option; - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; - fn is_singleton(&self, cx: &AppContext) -> bool; - fn boxed_clone(&self) -> Box; - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; - fn added_to_pane( - &self, - workspace: &mut Workspace, - pane: ViewHandle, - cx: &mut ViewContext, - ); - fn deactivated(&self, cx: &mut MutableAppContext); - fn workspace_deactivated(&self, cx: &mut MutableAppContext); - fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool; - fn id(&self) -> usize; - fn window_id(&self) -> usize; - fn to_any(&self) -> AnyViewHandle; - fn is_dirty(&self, cx: &AppContext) -> bool; - fn has_conflict(&self, cx: &AppContext) -> bool; - fn can_save(&self, cx: &AppContext) -> bool; - fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task>; - fn save_as( - &self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut MutableAppContext, - ) -> Task>; - fn reload(&self, project: ModelHandle, cx: &mut MutableAppContext) - -> Task>; - fn git_diff_recalc( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task>; - fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option; - fn to_followable_item_handle(&self, cx: &AppContext) -> Option>; - fn on_release( - &self, - cx: &mut MutableAppContext, - callback: Box, - ) -> gpui::Subscription; - fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; - fn 
breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; - fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; -} - -pub trait WeakItemHandle { - fn id(&self) -> usize; - fn window_id(&self) -> usize; - fn upgrade(&self, cx: &AppContext) -> Option>; -} - -impl dyn ItemHandle { - pub fn downcast(&self) -> Option> { - self.to_any().downcast() - } - - pub fn act_as(&self, cx: &AppContext) -> Option> { - self.act_as_type(TypeId::of::(), cx) - .and_then(|t| t.downcast()) - } -} - -impl ItemHandle for ViewHandle { - fn subscribe_to_item_events( - &self, - cx: &mut MutableAppContext, - handler: Box, - ) -> gpui::Subscription { - cx.subscribe(self, move |_, event, cx| { - for item_event in T::to_item_events(event) { - handler(item_event, cx) - } - }) - } - - fn tab_description<'a>(&self, detail: usize, cx: &'a AppContext) -> Option> { - self.read(cx).tab_description(detail, cx) - } - - fn tab_content( - &self, - detail: Option, - style: &theme::Tab, - cx: &AppContext, - ) -> ElementBox { - self.read(cx).tab_content(detail, style, cx) - } - - fn project_path(&self, cx: &AppContext) -> Option { - self.read(cx).project_path(cx) - } - - fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { - self.read(cx).project_entry_ids(cx) - } - - fn is_singleton(&self, cx: &AppContext) -> bool { - self.read(cx).is_singleton(cx) - } - - fn boxed_clone(&self) -> Box { - Box::new(self.clone()) - } - - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { - self.update(cx, |item, cx| { - cx.add_option_view(|cx| item.clone_on_split(cx)) - }) - .map(|handle| Box::new(handle) as Box) - } - - fn added_to_pane( - &self, - workspace: &mut Workspace, - pane: ViewHandle, - cx: &mut ViewContext, - ) { - let history = pane.read(cx).nav_history_for_item(self); - self.update(cx, |this, cx| this.set_nav_history(history, cx)); - - if let Some(followed_item) = self.to_followable_item_handle(cx) { - if let Some(message) = 
followed_item.to_state_proto(cx) { - workspace.update_followers( - proto::update_followers::Variant::CreateView(proto::View { - id: followed_item.id() as u64, - variant: Some(message), - leader_id: workspace.leader_for_pane(&pane).map(|id| id.0), - }), - cx, - ); - } - } - - if workspace - .panes_by_item - .insert(self.id(), pane.downgrade()) - .is_none() - { - let mut pending_autosave = DelayedDebouncedEditAction::new(); - let mut pending_git_update = DelayedDebouncedEditAction::new(); - let pending_update = Rc::new(RefCell::new(None)); - let pending_update_scheduled = Rc::new(AtomicBool::new(false)); - - let mut event_subscription = - Some(cx.subscribe(self, move |workspace, item, event, cx| { - let pane = if let Some(pane) = workspace - .panes_by_item - .get(&item.id()) - .and_then(|pane| pane.upgrade(cx)) - { - pane - } else { - log::error!("unexpected item event after pane was dropped"); - return; - }; - - if let Some(item) = item.to_followable_item_handle(cx) { - let leader_id = workspace.leader_for_pane(&pane); - - if leader_id.is_some() && item.should_unfollow_on_event(event, cx) { - workspace.unfollow(&pane, cx); - } - - if item.add_event_to_update_proto( - event, - &mut *pending_update.borrow_mut(), - cx, - ) && !pending_update_scheduled.load(SeqCst) - { - pending_update_scheduled.store(true, SeqCst); - cx.after_window_update({ - let pending_update = pending_update.clone(); - let pending_update_scheduled = pending_update_scheduled.clone(); - move |this, cx| { - pending_update_scheduled.store(false, SeqCst); - this.update_followers( - proto::update_followers::Variant::UpdateView( - proto::UpdateView { - id: item.id() as u64, - variant: pending_update.borrow_mut().take(), - leader_id: leader_id.map(|id| id.0), - }, - ), - cx, - ); - } - }); - } - } - - for item_event in T::to_item_events(event).into_iter() { - match item_event { - ItemEvent::CloseItem => { - Pane::close_item(workspace, pane, item.id(), cx) - .detach_and_log_err(cx); - return; - } - - 
ItemEvent::UpdateTab => { - pane.update(cx, |_, cx| { - cx.emit(pane::Event::ChangeItemTitle); - cx.notify(); - }); - } - - ItemEvent::Edit => { - if let Autosave::AfterDelay { milliseconds } = - cx.global::().autosave - { - let delay = Duration::from_millis(milliseconds); - let item = item.clone(); - pending_autosave.fire_new( - delay, - workspace, - cx, - |project, mut cx| async move { - cx.update(|cx| Pane::autosave_item(&item, project, cx)) - .await - .log_err(); - }, - ); - } - - let settings = cx.global::(); - let debounce_delay = settings.git_overrides.gutter_debounce; - - let item = item.clone(); - - if let Some(delay) = debounce_delay { - const MIN_GIT_DELAY: u64 = 50; - - let delay = delay.max(MIN_GIT_DELAY); - let duration = Duration::from_millis(delay); - - pending_git_update.fire_new( - duration, - workspace, - cx, - |project, mut cx| async move { - cx.update(|cx| item.git_diff_recalc(project, cx)) - .await - .log_err(); - }, - ); - } else { - let project = workspace.project().downgrade(); - cx.spawn_weak(|_, mut cx| async move { - if let Some(project) = project.upgrade(&cx) { - cx.update(|cx| item.git_diff_recalc(project, cx)) - .await - .log_err(); - } - }) - .detach(); - } - } - - _ => {} - } - } - })); - - cx.observe_focus(self, move |workspace, item, focused, cx| { - if !focused && cx.global::().autosave == Autosave::OnFocusChange { - Pane::autosave_item(&item, workspace.project.clone(), cx) - .detach_and_log_err(cx); - } - }) - .detach(); - - let item_id = self.id(); - cx.observe_release(self, move |workspace, _, _| { - workspace.panes_by_item.remove(&item_id); - event_subscription.take(); - }) - .detach(); - } - } - - fn deactivated(&self, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| this.deactivated(cx)); - } - - fn workspace_deactivated(&self, cx: &mut MutableAppContext) { - self.update(cx, |this, cx| this.workspace_deactivated(cx)); - } - - fn navigate(&self, data: Box, cx: &mut MutableAppContext) -> bool { - self.update(cx, 
|this, cx| this.navigate(data, cx)) - } - - fn id(&self) -> usize { - self.id() - } - - fn window_id(&self) -> usize { - self.window_id() - } - - fn to_any(&self) -> AnyViewHandle { - self.into() - } - - fn is_dirty(&self, cx: &AppContext) -> bool { - self.read(cx).is_dirty(cx) - } - - fn has_conflict(&self, cx: &AppContext) -> bool { - self.read(cx).has_conflict(cx) - } - - fn can_save(&self, cx: &AppContext) -> bool { - self.read(cx).can_save(cx) - } - - fn save(&self, project: ModelHandle, cx: &mut MutableAppContext) -> Task> { - self.update(cx, |item, cx| item.save(project, cx)) - } - - fn save_as( - &self, - project: ModelHandle, - abs_path: PathBuf, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.save_as(project, abs_path, cx)) - } - - fn reload( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.reload(project, cx)) - } - - fn git_diff_recalc( - &self, - project: ModelHandle, - cx: &mut MutableAppContext, - ) -> Task> { - self.update(cx, |item, cx| item.git_diff_recalc(project, cx)) - } - - fn act_as_type(&self, type_id: TypeId, cx: &AppContext) -> Option { - self.read(cx).act_as_type(type_id, self, cx) - } - - fn to_followable_item_handle(&self, cx: &AppContext) -> Option> { - if cx.has_global::() { - let builders = cx.global::(); - let item = self.to_any(); - Some(builders.get(&item.view_type())?.1(item)) - } else { - None - } - } - - fn on_release( - &self, - cx: &mut MutableAppContext, - callback: Box, - ) -> gpui::Subscription { - cx.observe_release(self, move |_, cx| callback(cx)) - } - - fn to_searchable_item_handle(&self, cx: &AppContext) -> Option> { - self.read(cx).as_searchable(self) - } - - fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation { - self.read(cx).breadcrumb_location() - } - - fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { - self.read(cx).breadcrumbs(theme, cx) - } -} - -impl From> for AnyViewHandle { - fn 
from(val: Box) -> Self { - val.to_any() - } -} - -impl From<&Box> for AnyViewHandle { - fn from(val: &Box) -> Self { - val.to_any() - } -} - -impl Clone for Box { - fn clone(&self) -> Box { - self.boxed_clone() - } -} - -impl WeakItemHandle for WeakViewHandle { - fn id(&self) -> usize { - self.id() - } - - fn window_id(&self) -> usize { - self.window_id() - } - - fn upgrade(&self, cx: &AppContext) -> Option> { - self.upgrade(cx).map(|v| Box::new(v) as Box) - } -} - pub trait Notification: View { fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; } @@ -1036,34 +475,23 @@ impl From<&dyn NotificationHandle> for AnyViewHandle { } } -impl AppState { - #[cfg(any(test, feature = "test-support"))] - pub fn test(cx: &mut MutableAppContext) -> Arc { - use fs::HomeDir; +#[derive(Default)] +struct LeaderState { + followers: HashSet, +} - cx.set_global(HomeDir(Path::new("/tmp/").to_path_buf())); - let settings = Settings::test(cx); - cx.set_global(settings); +type FollowerStatesByLeader = HashMap, FollowerState>>; - let fs = fs::FakeFs::new(cx.background().clone()); - let languages = Arc::new(LanguageRegistry::test()); - let http_client = client::test::FakeHttpClient::with_404_response(); - let client = Client::new(http_client.clone(), cx); - let project_store = cx.add_model(|_| ProjectStore::new()); - let user_store = cx.add_model(|cx| UserStore::new(client.clone(), http_client, cx)); - let themes = ThemeRegistry::new((), cx.font_cache().clone()); - Arc::new(Self { - client, - themes, - fs, - languages, - user_store, - project_store, - initialize_workspace: |_, _, _| {}, - build_window_options: Default::default, - default_item_factory: |_, _| unimplemented!(), - }) - } +#[derive(Default)] +struct FollowerState { + active_view_id: Option, + items_by_leader_view_id: HashMap, +} + +#[derive(Debug)] +enum FollowerItem { + Loading(Vec), + Loaded(Box), } pub enum Event { @@ -1074,7 +502,6 @@ pub enum Event { pub struct Workspace { weak_self: WeakViewHandle, - 
// _db_id: WorkspaceId, client: Arc, user_store: ModelHandle, remote_entity_subscription: Option, @@ -1100,28 +527,9 @@ pub struct Workspace { _observe_current_user: Task<()>, } -#[derive(Default)] -struct LeaderState { - followers: HashSet, -} - -type FollowerStatesByLeader = HashMap, FollowerState>>; - -#[derive(Default)] -struct FollowerState { - active_view_id: Option, - items_by_leader_view_id: HashMap, -} - -#[derive(Debug)] -enum FollowerItem { - Loading(Vec), - Loaded(Box), -} - impl Workspace { pub fn new( - _serialized_workspace: Option, + serialized_workspace: Option, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -1160,6 +568,20 @@ impl Workspace { .detach(); cx.focus(¢er_pane); cx.emit(Event::PaneAdded(center_pane.clone())); + let dock = Dock::new( + dock_default_factory, + serialized_workspace + .as_ref() + .map(|ws| ws.dock_position) + .clone(), + cx, + ); + let dock_pane = dock.pane().clone(); + + if let Some(serialized_workspace) = serialized_workspace { + + // Fill them in? 
+ } let fs = project.read(cx).fs().clone(); let user_store = project.read(cx).user_store(); @@ -1186,9 +608,6 @@ impl Workspace { cx.emit_global(WorkspaceCreated(weak_handle.clone())); - let dock = Dock::new(dock_default_factory, cx); - let dock_pane = dock.pane().clone(); - let left_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Left)); let right_sidebar = cx.add_view(|_| Sidebar::new(SidebarSide::Right)); let left_sidebar_buttons = cx.add_view(|cx| SidebarButtons::new(left_sidebar.clone(), cx)); @@ -1218,7 +637,6 @@ impl Workspace { let mut this = Workspace { modal: None, weak_self: weak_handle, - // _db_id: serialized_workspace.workspace_id, center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array @@ -3086,13 +2504,13 @@ fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { #[cfg(test)] mod tests { - use std::cell::Cell; + use std::{cell::RefCell, rc::Rc}; - use crate::sidebar::SidebarItem; + use crate::item::test::{TestItem, TestItemEvent}; use super::*; use fs::FakeFs; - use gpui::{executor::Deterministic, ModelHandle, TestAppContext, ViewContext}; + use gpui::{executor::Deterministic, TestAppContext, ViewContext}; use project::{Project, ProjectEntryId}; use serde_json::json; @@ -3697,209 +3115,4 @@ mod tests { assert!(pane.can_navigate_forward()); }); } - - pub struct TestItem { - state: String, - pub label: String, - save_count: usize, - save_as_count: usize, - reload_count: usize, - is_dirty: bool, - is_singleton: bool, - has_conflict: bool, - project_entry_ids: Vec, - project_path: Option, - nav_history: Option, - tab_descriptions: Option>, - tab_detail: Cell>, - } - - pub enum TestItemEvent { - Edit, - } - - impl Clone for TestItem { - fn clone(&self) -> Self { - Self { - state: self.state.clone(), - label: self.label.clone(), - save_count: self.save_count, - save_as_count: self.save_as_count, - reload_count: self.reload_count, - is_dirty: self.is_dirty, - is_singleton: 
self.is_singleton, - has_conflict: self.has_conflict, - project_entry_ids: self.project_entry_ids.clone(), - project_path: self.project_path.clone(), - nav_history: None, - tab_descriptions: None, - tab_detail: Default::default(), - } - } - } - - impl TestItem { - pub fn new() -> Self { - Self { - state: String::new(), - label: String::new(), - save_count: 0, - save_as_count: 0, - reload_count: 0, - is_dirty: false, - has_conflict: false, - project_entry_ids: Vec::new(), - project_path: None, - is_singleton: true, - nav_history: None, - tab_descriptions: None, - tab_detail: Default::default(), - } - } - - pub fn with_label(mut self, state: &str) -> Self { - self.label = state.to_string(); - self - } - - pub fn with_singleton(mut self, singleton: bool) -> Self { - self.is_singleton = singleton; - self - } - - pub fn with_project_entry_ids(mut self, project_entry_ids: &[u64]) -> Self { - self.project_entry_ids.extend( - project_entry_ids - .iter() - .copied() - .map(ProjectEntryId::from_proto), - ); - self - } - - fn set_state(&mut self, state: String, cx: &mut ViewContext) { - self.push_to_nav_history(cx); - self.state = state; - } - - fn push_to_nav_history(&mut self, cx: &mut ViewContext) { - if let Some(history) = &mut self.nav_history { - history.push(Some(Box::new(self.state.clone())), cx); - } - } - } - - impl Entity for TestItem { - type Event = TestItemEvent; - } - - impl View for TestItem { - fn ui_name() -> &'static str { - "TestItem" - } - - fn render(&mut self, _: &mut RenderContext) -> ElementBox { - Empty::new().boxed() - } - } - - impl Item for TestItem { - fn tab_description<'a>(&'a self, detail: usize, _: &'a AppContext) -> Option> { - self.tab_descriptions.as_ref().and_then(|descriptions| { - let description = *descriptions.get(detail).or_else(|| descriptions.last())?; - Some(description.into()) - }) - } - - fn tab_content(&self, detail: Option, _: &theme::Tab, _: &AppContext) -> ElementBox { - self.tab_detail.set(detail); - Empty::new().boxed() - 
} - - fn project_path(&self, _: &AppContext) -> Option { - self.project_path.clone() - } - - fn project_entry_ids(&self, _: &AppContext) -> SmallVec<[ProjectEntryId; 3]> { - self.project_entry_ids.iter().copied().collect() - } - - fn is_singleton(&self, _: &AppContext) -> bool { - self.is_singleton - } - - fn set_nav_history(&mut self, history: ItemNavHistory, _: &mut ViewContext) { - self.nav_history = Some(history); - } - - fn navigate(&mut self, state: Box, _: &mut ViewContext) -> bool { - let state = *state.downcast::().unwrap_or_default(); - if state != self.state { - self.state = state; - true - } else { - false - } - } - - fn deactivated(&mut self, cx: &mut ViewContext) { - self.push_to_nav_history(cx); - } - - fn clone_on_split(&self, _: &mut ViewContext) -> Option - where - Self: Sized, - { - Some(self.clone()) - } - - fn is_dirty(&self, _: &AppContext) -> bool { - self.is_dirty - } - - fn has_conflict(&self, _: &AppContext) -> bool { - self.has_conflict - } - - fn can_save(&self, _: &AppContext) -> bool { - !self.project_entry_ids.is_empty() - } - - fn save( - &mut self, - _: ModelHandle, - _: &mut ViewContext, - ) -> Task> { - self.save_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn save_as( - &mut self, - _: ModelHandle, - _: std::path::PathBuf, - _: &mut ViewContext, - ) -> Task> { - self.save_as_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn reload( - &mut self, - _: ModelHandle, - _: &mut ViewContext, - ) -> Task> { - self.reload_count += 1; - self.is_dirty = false; - Task::ready(Ok(())) - } - - fn to_item_events(_: &Self::Event) -> Vec { - vec![ItemEvent::UpdateTab, ItemEvent::Edit] - } - } - - impl SidebarItem for TestItem {} } diff --git a/crates/zed/src/feedback.rs b/crates/zed/src/feedback.rs index 03b068a019af29d04abedcfeb84f5e8065060795..55597312aea4a15876ed7c39ec2a8558522e6d4b 100644 --- a/crates/zed/src/feedback.rs +++ b/crates/zed/src/feedback.rs @@ -5,7 +5,7 @@ use gpui::{ Element, Entity, 
MouseButton, RenderContext, View, }; use settings::Settings; -use workspace::StatusItemView; +use workspace::{item::ItemHandle, StatusItemView}; pub const NEW_ISSUE_URL: &str = "https://github.com/zed-industries/feedback/issues/new/choose"; @@ -43,7 +43,7 @@ impl View for FeedbackLink { impl StatusItemView for FeedbackLink { fn set_active_pane_item( &mut self, - _: Option<&dyn workspace::ItemHandle>, + _: Option<&dyn ItemHandle>, _: &mut gpui::ViewContext, ) { } diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 5f67e290b54bf229cdf0a3f57c1d10cfd9b234b9..53273b45d8ed0057f03b87f2cab9c2bff2772811 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -38,7 +38,7 @@ use fs::RealFs; use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile}; use theme::ThemeRegistry; use util::{channel::RELEASE_CHANNEL, paths, ResultExt, TryFutureExt}; -use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace}; +use workspace::{self, item::ItemHandle, AppState, NewFile, OpenPaths, Workspace}; use zed::{self, build_window_options, initialize_workspace, languages, menus}; fn main() { diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 6b6b65ab3249295163166195e78fd9ccc0ea90c8..0abcbeac485d5fcaef5c452f6847b31835b0f6ff 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -625,7 +625,8 @@ mod tests { }; use theme::ThemeRegistry; use workspace::{ - open_paths, pane, Item, ItemHandle, NewFile, Pane, SplitDirection, WorkspaceHandle, + item::{Item, ItemHandle}, + open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, }; #[gpui::test] From d20d21c6a20ed208c81b3271e62d72b87fcbc5c3 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 16 Nov 2022 16:35:56 -0800 Subject: [PATCH 49/86] Dock persistence working! 
Co-Authored-By: Mikayla Maki --- Cargo.lock | 3 + crates/collab/src/integration_tests.rs | 2 +- crates/db/src/db.rs | 41 +- crates/db/src/kvp.rs | 28 +- crates/diagnostics/src/diagnostics.rs | 16 +- crates/editor/Cargo.toml | 2 + crates/editor/src/editor.rs | 1 + crates/editor/src/items.rs | 19 +- crates/editor/src/persistence.rs | 30 ++ crates/project/src/project.rs | 2 + crates/search/src/project_search.rs | 14 + crates/sqlez/src/bindable.rs | 15 + crates/sqlez/src/connection.rs | 12 +- crates/sqlez/src/domain.rs | 57 +-- crates/sqlez/src/migrations.rs | 166 ++++---- crates/sqlez/src/statement.rs | 5 - crates/sqlez/src/thread_safe_connection.rs | 23 +- crates/sqlez/src/typed_statements.rs | 13 - crates/terminal/src/terminal.rs | 4 + .../terminal/src/terminal_container_view.rs | 23 +- crates/theme_testbench/src/theme_testbench.rs | 21 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/dock.rs | 16 +- crates/workspace/src/item.rs | 29 +- crates/workspace/src/persistence.rs | 380 +++++++++++------- crates/workspace/src/persistence/model.rs | 81 ++-- crates/workspace/src/shared_screen.rs | 18 +- crates/workspace/src/workspace.rs | 202 ++++++++-- crates/workspace/test.db | Bin 32768 -> 32768 bytes 29 files changed, 782 insertions(+), 442 deletions(-) create mode 100644 crates/editor/src/persistence.rs diff --git a/Cargo.lock b/Cargo.lock index bad036a05d4883b8f3fb7f91d6cc12c0bc2fdb17..b4df5a9ab9d1ce6dda66750bf6627806fe65e5f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1738,6 +1738,7 @@ dependencies = [ "collections", "context_menu", "ctor", + "db", "drag_and_drop", "env_logger", "futures 0.3.25", @@ -1761,6 +1762,7 @@ dependencies = [ "smallvec", "smol", "snippet", + "sqlez", "sum_tree", "text", "theme", @@ -7629,6 +7631,7 @@ dependencies = [ "context_menu", "db", "drag_and_drop", + "env_logger", "fs", "futures 0.3.25", "gpui", diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index 
762a5cf71144c899f63aecb06aa75323db83cea0..e1b242713f6cf8cad163190e92f2e45d606f0a6b 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -52,7 +52,7 @@ use std::{ use theme::ThemeRegistry; use unindent::Unindent as _; use util::post_inc; -use workspace::{shared_screen::SharedScreen, item::Item, SplitDirection, ToggleFollow, Workspace}; +use workspace::{item::Item, shared_screen::SharedScreen, SplitDirection, ToggleFollow, Workspace}; #[ctor::ctor] fn init_logger() { diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 56fc79f475b2f3bf64caf755189949804a6a41a6..9bb4286b832e6867a1711156a6eb20dd9e43dabb 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,6 @@ pub mod kvp; -use std::fs; +use std::fs::create_dir_all; use std::path::Path; #[cfg(any(test, feature = "test-support"))] @@ -8,24 +8,29 @@ use anyhow::Result; use indoc::indoc; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; -use sqlez::domain::Domain; +use sqlez::domain::{Domain, Migrator}; use sqlez::thread_safe_connection::ThreadSafeConnection; +use util::channel::RELEASE_CHANNEL_NAME; +use util::paths::DB_DIR; const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; PRAGMA synchronous=NORMAL; + PRAGMA busy_timeout=1; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; "}; /// Open or create a database at the given directory path. -pub fn open_file_db() -> ThreadSafeConnection { +pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. 
Will implement incrementing and clearing of old db files soon TM - let current_db_dir = (*util::paths::DB_DIR).join(Path::new(&format!( - "0-{}", - *util::channel::RELEASE_CHANNEL_NAME - ))); - fs::create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); + let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + + // if *RELEASE_CHANNEL == ReleaseChannel::Dev { + // remove_dir_all(¤t_db_dir).ok(); + // } + + create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) @@ -44,3 +49,23 @@ pub fn write_db_to>( let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); conn.backup_main(&destination) } + +/// Implements a basic DB wrapper for a given domain +#[macro_export] +macro_rules! connection { + ($id:ident: $t:ident<$d:ty>) => { + pub struct $t(::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + + impl ::std::ops::Deref for $t { + type Target = ::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + + fn deref(&self) -> &Self::Target { + &self.0 + } + } + + lazy_static! { + pub static ref $id: $t = $t(::db::open_file_db()); + } + }; +} diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 1dd1cf69b7a626ec68dabfc7d08508f9e581099c..dd82c17615de4a65dcfe7936937ac523835b5030 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,22 +1,9 @@ use anyhow::Result; use indoc::indoc; -use sqlez::{ - connection::Connection, domain::Domain, migrations::Migration, - thread_safe_connection::ThreadSafeConnection, -}; +use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; use std::ops::Deref; -pub(crate) const KVP_MIGRATION: Migration = Migration::new( - "kvp", - &[indoc! 
{" - CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT; - "}], -); - lazy_static::lazy_static! { pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(crate::open_file_db()); @@ -26,8 +13,17 @@ lazy_static::lazy_static! { pub struct KeyValueStore(ThreadSafeConnection); impl Domain for KeyValueStore { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - KVP_MIGRATION.run(conn) + fn name() -> &'static str { + "kvp" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! {" + CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + "}] } } diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index a3621dd30d4233516ecaf15067c253dac0efd087..639a1087247c2c138a8858199cc8217578a98fbd 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -31,7 +31,7 @@ use std::{ use util::TryFutureExt; use workspace::{ item::{Item, ItemEvent, ItemHandle}, - ItemNavHistory, Workspace, + ItemNavHistory, Pane, Workspace, }; actions!(diagnostics, [Deploy]); @@ -613,6 +613,20 @@ impl Item for ProjectDiagnosticsEditor { fn deactivated(&mut self, cx: &mut ViewContext) { self.editor.update(cx, |editor, cx| editor.deactivated(cx)); } + + fn serialized_item_kind() -> Option<&'static str> { + Some("diagnostics") + } + + fn deserialize( + project: ModelHandle, + workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + Task::ready(Ok(cx.add_view(|cx| Self::new(project, workspace, cx)))) + } } fn diagnostic_header_renderer(diagnostic: Diagnostic) -> RenderBlock { diff --git a/crates/editor/Cargo.toml b/crates/editor/Cargo.toml index f56ed36f75076311ebbd3a778c7896f783b8268d..f992ed5116d8e77ea593e0730e03d7c56e7b1a2a 100644 --- a/crates/editor/Cargo.toml +++ b/crates/editor/Cargo.toml @@ -23,6 +23,7 @@ test-support = [ drag_and_drop = { path = "../drag_and_drop" } 
text = { path = "../text" } clock = { path = "../clock" } +db = { path = "../db" } collections = { path = "../collections" } context_menu = { path = "../context_menu" } fuzzy = { path = "../fuzzy" } @@ -37,6 +38,7 @@ snippet = { path = "../snippet" } sum_tree = { path = "../sum_tree" } theme = { path = "../theme" } util = { path = "../util" } +sqlez = { path = "../sqlez" } workspace = { path = "../workspace" } aho-corasick = "0.7" anyhow = "1.0" diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 5bbeed3fb56dd754aa181f867a85956ba71d90b4..ce810bab0c508bb04c4c56c79af614f8fdbc02df 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -9,6 +9,7 @@ mod link_go_to_definition; mod mouse_context_menu; pub mod movement; mod multi_buffer; +mod persistence; pub mod selections_collection; #[cfg(test)] diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 4f9c7d5593b3c11e343f0fad2132508479dc8c4b..ae9bbd57484ec04f5b1f8e304e5e4f274c747234 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -7,7 +7,7 @@ use anyhow::{anyhow, Result}; use futures::FutureExt; use gpui::{ elements::*, geometry::vector::vec2f, AppContext, Entity, ModelHandle, MutableAppContext, - RenderContext, Subscription, Task, View, ViewContext, ViewHandle, + RenderContext, Subscription, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; use language::{Bias, Buffer, File as _, OffsetRangeExt, Point, SelectionGoal}; use project::{File, FormatTrigger, Project, ProjectEntryId, ProjectPath}; @@ -26,7 +26,7 @@ use util::TryFutureExt; use workspace::{ item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem}, searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle}, - ItemNavHistory, StatusItemView, ToolbarItemLocation, + ItemId, ItemNavHistory, Pane, StatusItemView, ToolbarItemLocation, Workspace, WorkspaceId, }; pub const MAX_TAB_TITLE_LEN: usize = 24; @@ -552,6 +552,21 @@ impl Item for Editor { 
})); Some(breadcrumbs) } + + fn serialized_item_kind() -> Option<&'static str> { + Some("Editor") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + // Look up the path with this key associated, create a self with that path + unimplemented!() + } } impl ProjectItem for Editor { diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b39f9463886b74ac4bca0079a4b692a323614eb --- /dev/null +++ b/crates/editor/src/persistence.rs @@ -0,0 +1,30 @@ +use std::path::PathBuf; + +use db::connection; +use indoc::indoc; +use lazy_static::lazy_static; +use project::WorktreeId; +use sqlez::domain::Domain; +use workspace::{ItemId, Workspace}; + +use crate::Editor; + +connection!(DB: EditorDb<(Workspace, Editor)>); + +impl Domain for Editor { + fn name() -> &'static str { + "editor" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! 
{" + + "}] + } +} + +impl EditorDb { + fn get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { + unimplemented!(); + } +} diff --git a/crates/project/src/project.rs b/crates/project/src/project.rs index 94558fee3e27239c8c8957f2f6a25bd58307f420..e0cc3cdd0b4f77c8b306648a2df0d7f28f93c830 100644 --- a/crates/project/src/project.rs +++ b/crates/project/src/project.rs @@ -804,6 +804,7 @@ impl Project { &self.collaborators } + /// Collect all worktrees, including ones that don't appear in the project panel pub fn worktrees<'a>( &'a self, cx: &'a AppContext, @@ -813,6 +814,7 @@ impl Project { .filter_map(move |worktree| worktree.upgrade(cx)) } + /// Collect all user-visible worktrees, the ones that appear in the project panel pub fn visible_worktrees<'a>( &'a self, cx: &'a AppContext, diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index edd4f40ba262df583674ab711e1c756b58718170..322d035870caf5045024c70073413e31718848f6 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -353,6 +353,20 @@ impl Item for ProjectSearchView { fn breadcrumbs(&self, theme: &theme::Theme, cx: &AppContext) -> Option> { self.results_editor.breadcrumbs(theme, cx) } + + fn serialized_item_kind() -> Option<&'static str> { + None + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unimplemented!() + } } impl ProjectSearchView { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 7a3483bcea176bb4d7577896f6e694ed6feb8721..1e4f0df33fd1e39b676999a1c30cd6c0269052e5 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -2,6 +2,7 @@ use std::{ ffi::OsStr, os::unix::prelude::OsStrExt, path::{Path, PathBuf}, + sync::Arc, }; use anyhow::Result; @@ -118,6 +119,13 @@ impl Bind for &str { } } +impl Bind for Arc { + fn bind(&self, 
statement: &Statement, start_index: i32) -> Result { + statement.bind_text(start_index, self.as_ref())?; + Ok(start_index + 1) + } +} + impl Bind for String { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_text(start_index, self)?; @@ -125,6 +133,13 @@ impl Bind for String { } } +impl Column for Arc { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let result = statement.column_text(start_index)?; + Ok((Arc::from(result), start_index + 1)) + } +} + impl Column for String { fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let result = statement.column_text(start_index)?; diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index b673167c8623e586a0bd02ecce47de3f534431e2..8ab1e345d83853de600fd4fc6f0f416ce62abcce 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -54,10 +54,6 @@ impl Connection { self.persistent } - pub(crate) fn last_insert_id(&self) -> i64 { - unsafe { sqlite3_last_insert_rowid(self.sqlite3) } - } - pub fn backup_main(&self, destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( @@ -126,7 +122,7 @@ mod test { let text = "Some test text"; connection - .insert_bound("INSERT INTO text (text) VALUES (?);") + .exec_bound("INSERT INTO text (text) VALUES (?);") .unwrap()(text) .unwrap(); @@ -155,7 +151,7 @@ mod test { let tuple2 = ("test2".to_string(), 32, vec![64, 32, 16, 8, 4, 2, 1, 0]); let mut insert = connection - .insert_bound::<(String, usize, Vec)>( + .exec_bound::<(String, usize, Vec)>( "INSERT INTO test (text, integer, blob) VALUES (?, ?, ?)", ) .unwrap(); @@ -185,7 +181,7 @@ mod test { .unwrap(); connection - .insert_bound("INSERT INTO bools(t, f) VALUES (?, ?);") + .exec_bound("INSERT INTO bools(t, f) VALUES (?, ?)") .unwrap()((true, false)) .unwrap(); @@ -210,7 +206,7 @@ mod test { .unwrap(); let blob = vec![0, 1, 2, 4, 8, 16, 32, 64]; connection1 - 
.insert_bound::>("INSERT INTO blobs (data) VALUES (?);") + .exec_bound::>("INSERT INTO blobs (data) VALUES (?);") .unwrap()(blob.clone()) .unwrap(); diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index f57e89a5c8f453d41d6f1b3cd6d2d5501fa63ba6..b7cfbaef887ce482f781dcaa8fe1676ae2cd9794 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,39 +1,50 @@ use crate::connection::Connection; pub trait Domain { - fn migrate(conn: &Connection) -> anyhow::Result<()>; + fn name() -> &'static str; + fn migrations() -> &'static [&'static str]; } -impl Domain for (D1, D2) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn) +pub trait Migrator { + fn migrate(connection: &Connection) -> anyhow::Result<()>; +} + +impl Migrator for D { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + connection.migrate(Self::name(), Self::migrations()) + } +} + +impl Migrator for (D1, D2) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection) } } -impl Domain for (D1, D2, D3) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn) +impl Migrator for (D1, D2, D3) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection) } } -impl Domain for (D1, D2, D3, D4) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn)?; - D4::migrate(conn) +impl Migrator for (D1, D2, D3, D4) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection)?; + D4::migrate(connection) } } -impl Domain for (D1, D2, D3, D4, D5) { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - D1::migrate(conn)?; - D2::migrate(conn)?; - D3::migrate(conn)?; - 
D4::migrate(conn)?; - D5::migrate(conn) +impl Migrator for (D1, D2, D3, D4, D5) { + fn migrate(connection: &Connection) -> anyhow::Result<()> { + D1::migrate(connection)?; + D2::migrate(connection)?; + D3::migrate(connection)?; + D4::migrate(connection)?; + D5::migrate(connection) } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 89eaebb4942175dc5b8561c0f0084a472f03b074..1f4b3f0f7c979dc844253dcb1655beb7bea3a561 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -9,53 +9,27 @@ use indoc::{formatdoc, indoc}; use crate::connection::Connection; -const MIGRATIONS_MIGRATION: Migration = Migration::new( - "migrations", - // The migrations migration must be infallable because it runs to completion - // with every call to migration run and is run unchecked. - &[indoc! {" - CREATE TABLE IF NOT EXISTS migrations ( - domain TEXT, - step INTEGER, - migration TEXT - ) - "}], -); - -#[derive(Debug)] -pub struct Migration { - domain: &'static str, - migrations: &'static [&'static str], -} - -impl Migration { - pub const fn new(domain: &'static str, migrations: &'static [&'static str]) -> Self { - Self { domain, migrations } - } - - fn run_unchecked(&self, connection: &Connection) -> Result<()> { - for migration in self.migrations { - connection.exec(migration)?()?; - } - - Ok(()) - } - - pub fn run(&self, connection: &Connection) -> Result<()> { +impl Connection { + pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { // Setup the migrations table unconditionally - MIGRATIONS_MIGRATION.run_unchecked(connection)?; + self.exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( + domain TEXT, + step INTEGER, + migration TEXT + )"})?()?; let completed_migrations = - connection.select_bound::<&str, (String, usize, String)>(indoc! {" + self.select_bound::<&str, (String, usize, String)>(indoc! {" SELECT domain, step, migration FROM migrations WHERE domain = ? 
ORDER BY step - "})?(self.domain)?; + "})?(domain)?; - let mut store_completed_migration = connection - .insert_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + let mut store_completed_migration = + self.exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; - for (index, migration) in self.migrations.iter().enumerate() { + for (index, migration) in migrations.iter().enumerate() { if let Some((_, _, completed_migration)) = completed_migrations.get(index) { if completed_migration != migration { return Err(anyhow!(formatdoc! {" @@ -65,15 +39,15 @@ impl Migration { {} Proposed migration: - {}", self.domain, index, completed_migration, migration})); + {}", domain, index, completed_migration, migration})); } else { // Migration already run. Continue continue; } } - connection.exec(migration)?()?; - store_completed_migration((self.domain, index, *migration))?; + self.exec(migration)?()?; + store_completed_migration((domain, index, *migration))?; } Ok(()) @@ -84,22 +58,23 @@ impl Migration { mod test { use indoc::indoc; - use crate::{connection::Connection, migrations::Migration}; + use crate::connection::Connection; #[test] fn test_migrations_are_added_to_table() { let connection = Connection::open_memory("migrations_are_added_to_table"); // Create first migration with a single step and run it - let mut migration = Migration::new( - "test", - &[indoc! {" - CREATE TABLE test1 ( - a TEXT, - b TEXT - )"}], - ); - migration.run(&connection).unwrap(); + connection + .migrate( + "test", + &[indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}], + ) + .unwrap(); // Verify it got added to the migrations table assert_eq!( @@ -107,23 +82,31 @@ mod test { .select::("SELECT (migration) FROM migrations") .unwrap()() .unwrap()[..], - migration.migrations - ); - - // Add another step to the migration and run it again - migration.migrations = &[ - indoc! {" + &[indoc! {" CREATE TABLE test1 ( a TEXT, b TEXT - )"}, - indoc! 
{" - CREATE TABLE test2 ( - c TEXT, - d TEXT - )"}, - ]; - migration.run(&connection).unwrap(); + )"}], + ); + + // Add another step to the migration and run it again + connection + .migrate( + "test", + &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}, + indoc! {" + CREATE TABLE test2 ( + c TEXT, + d TEXT + )"}, + ], + ) + .unwrap(); // Verify it is also added to the migrations table assert_eq!( @@ -131,7 +114,18 @@ mod test { .select::("SELECT (migration) FROM migrations") .unwrap()() .unwrap()[..], - migration.migrations + &[ + indoc! {" + CREATE TABLE test1 ( + a TEXT, + b TEXT + )"}, + indoc! {" + CREATE TABLE test2 ( + c TEXT, + d TEXT + )"}, + ], ); } @@ -150,7 +144,7 @@ mod test { .unwrap(); let mut store_completed_migration = connection - .insert_bound::<(&str, usize, String)>(indoc! {" + .exec_bound::<(&str, usize, String)>(indoc! {" INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)"}) .unwrap(); @@ -171,8 +165,7 @@ mod test { fn migrations_dont_rerun() { let connection = Connection::open_memory("migrations_dont_rerun"); - // Create migration which clears a table - let migration = Migration::new("test", &["DELETE FROM test_table"]); + // Create migration which clears a tabl // Manually create the table for that migration with a row connection @@ -197,7 +190,9 @@ mod test { ); // Run the migration verifying that the row got dropped - migration.run(&connection).unwrap(); + connection + .migrate("test", &["DELETE FROM test_table"]) + .unwrap(); assert_eq!( connection .select_row::("SELECT * FROM test_table") @@ -213,7 +208,9 @@ mod test { .unwrap(); // Run the same migration again and verify that the table was left unchanged - migration.run(&connection).unwrap(); + connection + .migrate("test", &["DELETE FROM test_table"]) + .unwrap(); assert_eq!( connection .select_row::("SELECT * FROM test_table") @@ -228,22 +225,22 @@ mod test { let connection = Connection::open_memory("changed_migration_fails"); // Create a migration with 
two steps and run it - Migration::new( - "test migration", - &[ - indoc! {" + connection + .migrate( + "test migration", + &[ + indoc! {" CREATE TABLE test ( col INTEGER )"}, - indoc! {" - INSERT INTO test (col) VALUES (1)"}, - ], - ) - .run(&connection) - .unwrap(); + indoc! {" + INSERT INTO test (col) VALUES (1)"}, + ], + ) + .unwrap(); // Create another migration with the same domain but different steps - let second_migration_result = Migration::new( + let second_migration_result = connection.migrate( "test migration", &[ indoc! {" @@ -253,8 +250,7 @@ mod test { indoc! {" INSERT INTO test (color) VALUES (1)"}, ], - ) - .run(&connection); + ); // Verify new migration returns error when run assert!(second_migration_result.is_err()) diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index b04f5bb82ffe196e8a0440a7cbd56698908c9a83..40118dd9237e3ff3bbfd4c01773427cf393638e7 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -256,11 +256,6 @@ impl<'a> Statement<'a> { } } - pub fn insert(&mut self) -> Result { - self.exec()?; - Ok(self.connection.last_insert_id()) - } - pub fn exec(&mut self) -> Result<()> { fn logic(this: &mut Statement) -> Result<()> { while this.step()? 
== StepResult::Row {} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index b9bb1657ea2491217d624e1982219343b37d1689..e85ba4c51a275dd464cbd6ff44c698e2a6c74352 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -3,20 +3,23 @@ use std::{marker::PhantomData, ops::Deref, sync::Arc}; use connection::Connection; use thread_local::ThreadLocal; -use crate::{connection, domain::Domain}; +use crate::{ + connection, + domain::{Domain, Migrator}, +}; -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, connection: Arc>, - _pd: PhantomData, + _pd: PhantomData, } -unsafe impl Send for ThreadSafeConnection {} -unsafe impl Sync for ThreadSafeConnection {} +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} -impl ThreadSafeConnection { +impl ThreadSafeConnection { pub fn new(uri: &str, persistent: bool) -> Self { Self { uri: Arc::from(uri), @@ -72,7 +75,11 @@ impl Clone for ThreadSafeConnection { } } -impl Deref for ThreadSafeConnection { +// TODO: +// 1. When migration or initialization fails, move the corrupted db to a holding place and create a new one +// 2. If the new db also fails, downgrade to a shared in memory db +// 3. 
In either case notify the user about what went wrong +impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { @@ -91,7 +98,7 @@ impl Deref for ThreadSafeConnection { .unwrap(); } - D::migrate(&connection).expect("Migrations failed"); + M::migrate(&connection).expect("Migrations failed"); connection }) diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index f2d66a781f09f05adb53f3bf05b1667cf14629d2..98f51b970a1e856df60f0f574419fdea0ea7d757 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -20,19 +20,6 @@ impl Connection { Ok(move |bindings| statement.with_bindings(bindings)?.exec()) } - pub fn insert<'a>(&'a self, query: &str) -> Result Result> { - let mut statement = Statement::prepare(&self, query)?; - Ok(move || statement.insert()) - } - - pub fn insert_bound<'a, B: Bind>( - &'a self, - query: &str, - ) -> Result Result> { - let mut statement = Statement::prepare(&self, query)?; - Ok(move |bindings| statement.with_bindings(bindings)?.insert()) - } - pub fn select<'a, C: Column>( &'a self, query: &str, diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 7e469e19fec03564140f5063f1ee8e243331d345..15b3b4e66ed7b7f71bf91408c091bc313378d34a 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -34,7 +34,9 @@ use mappings::mouse::{ use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; +use terminal_container_view::TerminalContainer; use util::ResultExt; +use workspace::register_deserializable_item; use std::{ cmp::min, @@ -67,6 +69,8 @@ use lazy_static::lazy_static; pub fn init(cx: &mut MutableAppContext) { terminal_view::init(cx); terminal_container_view::init(cx); + + register_deserializable_item::(cx); } ///Scrolling is unbearably sluggish by default. 
Alacritty supports a configurable diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 5d5fda1206c26c39f514cd9cb3317a6c0a469c18..49b6ae341f56d26294e09be8c2bea322cee4e7af 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -5,7 +5,7 @@ use alacritty_terminal::index::Point; use dirs::home_dir; use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, - View, ViewContext, ViewHandle, + View, ViewContext, ViewHandle, WeakViewHandle, }; use util::truncate_and_trailoff; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; @@ -13,6 +13,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; +use workspace::{register_deserializable_item, Pane}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -26,6 +27,8 @@ actions!(terminal, [DeployModal]); pub fn init(cx: &mut MutableAppContext) { cx.add_action(TerminalContainer::deploy); + + register_deserializable_item::(cx); } //Make terminal view an enum, that can give you views for the error and non-error states @@ -127,7 +130,7 @@ impl TerminalContainer { TerminalContainerContent::Error(view) } }; - cx.focus(content.handle()); + // cx.focus(content.handle()); TerminalContainer { content, @@ -375,6 +378,22 @@ impl Item for TerminalContainer { ) .boxed()]) } + + fn serialized_item_kind() -> Option<&'static str> { + Some("Terminal") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + // TODO: Pull the current working directory out of the DB. 
+ + Task::ready(Ok(cx.add_view(|cx| TerminalContainer::new(None, false, cx)))) + } } impl SearchableItem for TerminalContainer { diff --git a/crates/theme_testbench/src/theme_testbench.rs b/crates/theme_testbench/src/theme_testbench.rs index 9c7d6bdf49771cb032aaa35dd3ce8e35243a092f..cf9f03de45f351cae59a89eaca78654074daff85 100644 --- a/crates/theme_testbench/src/theme_testbench.rs +++ b/crates/theme_testbench/src/theme_testbench.rs @@ -6,7 +6,8 @@ use gpui::{ Padding, ParentElement, }, fonts::TextStyle, - Border, Element, Entity, MutableAppContext, Quad, RenderContext, View, ViewContext, + Border, Element, Entity, ModelHandle, MutableAppContext, Quad, RenderContext, Task, View, + ViewContext, ViewHandle, WeakViewHandle, }; use project::{Project, ProjectEntryId, ProjectPath}; use settings::Settings; @@ -14,13 +15,15 @@ use smallvec::SmallVec; use theme::{ColorScheme, Layer, Style, StyleSet}; use workspace::{ item::{Item, ItemEvent}, - Workspace, + register_deserializable_item, Pane, Workspace, }; actions!(theme, [DeployThemeTestbench]); pub fn init(cx: &mut MutableAppContext) { cx.add_action(ThemeTestbench::deploy); + + register_deserializable_item::(cx) } pub struct ThemeTestbench {} @@ -357,4 +360,18 @@ impl Item for ThemeTestbench { fn to_item_events(_: &Self::Event) -> Vec { Vec::new() } + + fn serialized_item_kind() -> Option<&'static str> { + Some("ThemeTestBench") + } + + fn deserialize( + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: workspace::WorkspaceId, + _item_id: workspace::ItemId, + cx: &mut ViewContext, + ) -> Task>> { + Task::ready(Ok(cx.add_view(|_| Self {}))) + } } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 553479b175dcad89af41d438bf86e614cef2d486..822a008eedb02f799c9f03c0d77a6893c930a40a 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -37,6 +37,7 @@ bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" lazy_static = "1.4" +env_logger = "0.9.1" log = { version = 
"0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" postage = { version = "0.4.1", features = ["futures-traits"] } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 5b08b689abee44839e216a41f6fd0aef8692fda5..2e4fbcad6f9419603826a83bf558885e2235e1ff 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -137,13 +137,8 @@ pub struct Dock { } impl Dock { - pub fn new( - default_item_factory: DefaultItemFactory, - position: Option, - cx: &mut ViewContext, - ) -> Self { - let position = position - .unwrap_or_else(|| DockPosition::Hidden(cx.global::().default_dock_anchor)); + pub fn new(default_item_factory: DefaultItemFactory, cx: &mut ViewContext) -> Self { + let position = DockPosition::Hidden(cx.global::().default_dock_anchor); let pane = cx.add_view(|cx| Pane::new(Some(position.anchor()), cx)); pane.update(cx, |pane, cx| { @@ -175,7 +170,7 @@ impl Dock { self.position.is_visible() && self.position.anchor() == anchor } - fn set_dock_position( + pub(crate) fn set_dock_position( workspace: &mut Workspace, new_position: DockPosition, cx: &mut ViewContext, @@ -211,6 +206,7 @@ impl Dock { cx.focus(last_active_center_pane); } cx.emit(crate::Event::DockAnchorChanged); + workspace.serialize_workspace(None, cx); cx.notify(); } @@ -347,6 +343,10 @@ impl Dock { } }) } + + pub fn position(&self) -> DockPosition { + self.position + } } pub struct ToggleDockButton { diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index 215ad47e1b3503a163e2b2a76278c50c26822ae7..d006f2fe1507bf5c0192493c3fb2c0835a3c718c 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -117,15 +117,18 @@ pub trait Item: View { fn breadcrumb_location(&self) -> ToolbarItemLocation { ToolbarItemLocation::Hidden } + fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { None } fn serialized_item_kind() -> Option<&'static str>; fn deserialize( + project: ModelHandle, + workspace: 
WeakViewHandle, workspace_id: WorkspaceId, item_id: ItemId, - cx: &mut ViewContext, - ) -> Result; + cx: &mut ViewContext, + ) -> Task>>; } pub trait ItemHandle: 'static + fmt::Debug { @@ -181,6 +184,7 @@ pub trait ItemHandle: 'static + fmt::Debug { fn to_searchable_item_handle(&self, cx: &AppContext) -> Option>; fn breadcrumb_location(&self, cx: &AppContext) -> ToolbarItemLocation; fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option>; + fn serialized_item_kind(&self) -> Option<&'static str>; } pub trait WeakItemHandle { @@ -515,6 +519,10 @@ impl ItemHandle for ViewHandle { fn breadcrumbs(&self, theme: &Theme, cx: &AppContext) -> Option> { self.read(cx).breadcrumbs(theme, cx) } + + fn serialized_item_kind(&self) -> Option<&'static str> { + T::serialized_item_kind() + } } impl From> for AnyViewHandle { @@ -645,15 +653,14 @@ impl FollowableItemHandle for ViewHandle { pub(crate) mod test { use std::{any::Any, borrow::Cow, cell::Cell}; - use anyhow::anyhow; use gpui::{ elements::Empty, AppContext, Element, ElementBox, Entity, ModelHandle, RenderContext, Task, - View, ViewContext, + View, ViewContext, ViewHandle, WeakViewHandle, }; use project::{Project, ProjectEntryId, ProjectPath}; use smallvec::SmallVec; - use crate::{sidebar::SidebarItem, ItemNavHistory}; + use crate::{sidebar::SidebarItem, ItemId, ItemNavHistory, Pane, Workspace, WorkspaceId}; use super::{Item, ItemEvent}; @@ -864,11 +871,13 @@ pub(crate) mod test { } fn deserialize( - workspace_id: crate::persistence::model::WorkspaceId, - item_id: crate::persistence::model::ItemId, - cx: &mut ViewContext, - ) -> anyhow::Result { - Err(anyhow!("Cannot deserialize test item")) + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unreachable!("Cannot deserialize test item") } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 
164807b24fd888abc2907968e049c07fd54bcda9..cc07a76596677c7b51d9cf8b1e7d5e6cb593f987 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -2,93 +2,81 @@ pub mod model; -use std::ops::Deref; use std::path::{Path, PathBuf}; use std::sync::Arc; -use anyhow::{bail, Context, Result}; -use db::open_file_db; +use anyhow::{anyhow, bail, Result, Context}; +use db::connection; use gpui::Axis; use indoc::indoc; use lazy_static::lazy_static; -use sqlez::thread_safe_connection::ThreadSafeConnection; -use sqlez::{connection::Connection, domain::Domain, migrations::Migration}; + +use sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; +use crate::dock::DockPosition; + use super::Workspace; use model::{ - GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup, + GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceId, }; -lazy_static! { - pub static ref DB: WorkspaceDb = WorkspaceDb(open_file_db()); -} - -pub struct WorkspaceDb(ThreadSafeConnection); - -impl Deref for WorkspaceDb { - type Target = ThreadSafeConnection; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new( - "workspace", - &[indoc! 
{" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_visible INTEGER, -- Boolean - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL - ) STRICT; - - CREATE TABLE pane_groups( - group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE - ) STRICT; - - CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, - pane_id INTEGER NOT NULL, - kind TEXT NOT NULL, - position INTEGER NOT NULL, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, - PRIMARY KEY(item_id, workspace_id) - ) STRICT; - "}], -); +connection!(DB: WorkspaceDb); impl Domain for Workspace { - fn migrate(conn: &Connection) -> anyhow::Result<()> { - WORKSPACES_MIGRATION.run(&conn) + fn name() -> &'static str { + "workspace" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! 
{" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + ) STRICT; + + CREATE TABLE pane_groups( + group_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL indicates that this is a root node + position INTEGER, -- NULL indicates that this is a root node + axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + parent_group_id INTEGER, -- NULL, this is a dock pane + position INTEGER, -- NULL, this is a dock pane + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + + CREATE TABLE items( + item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + workspace_id BLOB NOT NULL, + pane_id INTEGER NOT NULL, + kind TEXT NOT NULL, + position INTEGER NOT NULL, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, + PRIMARY KEY(item_id, workspace_id) + ) STRICT; + "}] } } @@ -104,7 +92,7 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_position) = iife!({ + let (workspace_id, dock_position): (WorkspaceId, DockPosition) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! 
{" SELECT workspace_id, dock_visible, dock_anchor @@ -122,6 +110,7 @@ impl WorkspaceDb { .flatten()?; Some(SerializedWorkspace { + workspace_id: workspace_id.clone(), dock_pane: self .get_dock_pane(&workspace_id) .context("Getting dock pane") @@ -136,43 +125,47 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace>( + pub fn save_workspace( &self, - worktree_roots: &[P], - old_roots: Option<&[P]>, + old_id: Option, workspace: &SerializedWorkspace, ) { - let workspace_id: WorkspaceId = worktree_roots.into(); - self.with_savepoint("update_worktrees", || { - if let Some(old_roots) = old_roots { - let old_id: WorkspaceId = old_roots.into(); - - self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?; + if let Some(old_id) = old_id { + self.exec_bound(indoc! {" + DELETE FROM pane_groups WHERE workspace_id = ?"})?(&old_id)?; + + // If collision, delete + + self.exec_bound(indoc! {" + UPDATE OR REPLACE workspaces + SET workspace_id = ?, + dock_visible = ?, + dock_anchor = ?, + timestamp = CURRENT_TIMESTAMP + WHERE workspace_id = ?"})?(( + &workspace.workspace_id, + workspace.dock_position, + &old_id, + ))?; + } else { + self.exec_bound(indoc! {" + DELETE FROM pane_groups WHERE workspace_id = ?"})?(&workspace.workspace_id)?; + self.exec_bound( + "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", + )?((&workspace.workspace_id, workspace.dock_position))?; } - - // Delete any previous workspaces with the same roots. This cascades to all - // other tables that are based on the same roots set. 
- // Insert new workspace into workspaces table if none were found - self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?; - - self.exec_bound( - "INSERT INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace_id, workspace.dock_position))?; - + // Save center pane group and dock pane - self.save_pane_group(&workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace_id, &workspace.dock_pane, None)?; + self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None)?; + self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None)?; Ok(()) }) .with_context(|| { format!( "Update workspace with roots {:?}", - worktree_roots - .iter() - .map(|p| p.as_ref()) - .collect::>() + workspace.workspace_id.paths() ) }) .log_err(); @@ -253,15 +246,19 @@ impl WorkspaceDb { bail!("Pane groups must have a SerializedPaneGroup::Group at the root") } - let (parent_id, position) = unzip_option(parent); - match pane_group { SerializedPaneGroup::Group { axis, children } => { - let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")? - ((workspace_id, parent_id, position, *axis))?; - + let (parent_id, position) = unzip_option(parent); + + let group_id = self.select_row_bound::<_, i64>(indoc!{" + INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) + VALUES (?, ?, ?, ?) + RETURNING group_id"})? + ((workspace_id, parent_id, position, *axis))? + .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; + for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((parent_id, position)))? + self.save_pane_group(workspace_id, group, Some((group_id, position)))? 
} Ok(()) } @@ -289,10 +286,13 @@ impl WorkspaceDb { parent: Option<(GroupId, usize)>, ) -> Result<()> { let (parent_id, order) = unzip_option(parent); - - let pane_id = self.insert_bound( - "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)", - )?((workspace_id, parent_id, order))?; + + let pane_id = self.select_row_bound::<_, i64>(indoc!{" + INSERT INTO panes(workspace_id, parent_group_id, position) + VALUES (?, ?, ?) + RETURNING pane_id"}, + )?((workspace_id, parent_id, order))? + .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items") @@ -300,15 +300,9 @@ impl WorkspaceDb { pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" - SELECT item_id, kind FROM items + SELECT kind, item_id FROM items WHERE pane_id = ? - ORDER BY position"})?(pane_id)? - .into_iter() - .map(|(item_id, kind)| match kind { - SerializedItemKind::Terminal => SerializedItem::Terminal { item_id }, - _ => unimplemented!(), - }) - .collect()) + ORDER BY position"})?(pane_id)?) } pub(crate) fn save_items( @@ -317,15 +311,11 @@ impl WorkspaceDb { pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut delete_old = self - .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? 
AND item_id = ?") - .context("Preparing deletion")?; - let mut insert_new = self.exec_bound( - "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)", + let mut insert = self.exec_bound( + "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - delete_old((workspace_id, pane_id, item.item_id()))?; - insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?; + insert((workspace_id, pane_id, position, item))?; } Ok(()) @@ -339,34 +329,102 @@ mod tests { use super::*; + #[test] + fn test_full_workspace_serialization() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + + let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4), + + ], + }; + + // ----------------- + // | 1,2 | 5,6 | + // | - - - | | + // | 3,4 | | + // ----------------- + let center_group = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), + ], + }), + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 7), + SerializedItem::new("Terminal", 8), + + ], + }), + ], + }, + SerializedPaneGroup::Pane(SerializedPane { + children: vec![ + SerializedItem::new("Terminal", 9), + SerializedItem::new("Terminal", 10), + + ], + }), + ], + }; + + let workspace = SerializedWorkspace { + workspace_id: (["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Bottom), + center_group, + dock_pane, + }; + + 
db.save_workspace(None, &workspace); + let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); + + assert_eq!(workspace, round_trip_workspace.unwrap()); + + // Test guaranteed duplicate IDs + db.save_workspace(None, &workspace); + db.save_workspace(None, &workspace); + + let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); + assert_eq!(workspace, round_trip_workspace.unwrap()); + + + } + #[test] fn test_workspace_assignment() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { + workspace_id: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; - let workspace_2 = SerializedWorkspace { + let mut workspace_2 = SerializedWorkspace { + workspace_id: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; - let workspace_3 = SerializedWorkspace { - dock_position: crate::dock::DockPosition::Shown(DockAnchor::Right), - center_group: Default::default(), - dock_pane: Default::default(), - }; - - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1); - db.save_workspace(&["/tmp"], None, &workspace_2); - - db::write_db_to(&db, "test.db").unwrap(); + db.save_workspace(None, &workspace_1); + db.save_workspace(None, &workspace_2); // Test that paths are treated as a set assert_eq!( @@ -383,23 +441,32 @@ mod tests { assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); // Test 'mutate' case of updating a pre-existing id - db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2); + workspace_2.workspace_id = (["/tmp", "/tmp2"]).into(); + db.save_workspace(Some((&["/tmp"]).into()), &workspace_2); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 ); 
// Test other mechanism for mutating - db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3); + let mut workspace_3 = SerializedWorkspace { + workspace_id: (&["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Right), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + + db.save_workspace(None, &workspace_3); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works + workspace_3.workspace_id = (["/tmp3", "/tmp4", "/tmp2"]).into(); db.save_workspace( - &["/tmp3", "/tmp4", "/tmp2"], - Some(&["/tmp", "/tmp2"]), + Some((&["/tmp", "/tmp2"]).into()), &workspace_3, ); assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); @@ -408,16 +475,21 @@ mod tests { .unwrap(), workspace_3 ); + + } + use crate::dock::DockPosition; use crate::persistence::model::SerializedWorkspace; use crate::persistence::model::{SerializedItem, SerializedPane, SerializedPaneGroup}; - fn default_workspace( + fn default_workspace>( + workspace_id: &[P], dock_pane: SerializedPane, center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { + workspace_id: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, @@ -426,23 +498,23 @@ mod tests { #[test] fn test_basic_dock_pane() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db("basic_dock_pane")); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 2 }, - SerializedItem::Terminal { item_id: 3 }, + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 3), ], }; - let workspace = default_workspace(dock_pane, 
&Default::default()); - - db.save_workspace(&["/tmp"], None, &workspace); + let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); + db.save_workspace(None, &workspace); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); @@ -467,30 +539,30 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 1 }, - SerializedItem::Terminal { item_id: 2 }, + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), ], }), SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 4 }, - SerializedItem::Terminal { item_id: 3 }, + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), ], }), ], }, SerializedPaneGroup::Pane(SerializedPane { children: vec![ - SerializedItem::Terminal { item_id: 5 }, - SerializedItem::Terminal { item_id: 6 }, + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), ], }), ], }; - let workspace = default_workspace(Default::default(), ¢er_pane); + let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(&["/tmp"], None, &workspace); + db.save_workspace(None, &workspace); assert_eq!(workspace.center_group, center_pane); } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 7afd186a36ff01189dcaaf43fb9af450d623955d..adc6ea7c1ac18e8b50413128f810dd95716f6398 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use anyhow::{bail, Result}; +use anyhow::Result; use gpui::Axis; @@ -16,10 +16,10 @@ use sqlez::{ use crate::dock::DockPosition; #[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct WorkspaceId(Arc>); +pub struct WorkspaceId(Arc>); impl WorkspaceId { - pub fn paths(self) -> Arc> { + pub fn paths(&self) -> Arc> { 
self.0.clone() } } @@ -52,6 +52,7 @@ impl Column for WorkspaceId { #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { + pub workspace_id: WorkspaceId, pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, @@ -90,67 +91,33 @@ pub type GroupId = i64; pub type PaneId = i64; pub type ItemId = usize; -pub(crate) enum SerializedItemKind { - Editor, - Diagnostics, - ProjectSearch, - Terminal, +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct SerializedItem { + pub kind: Arc, + pub item_id: ItemId, } -impl Bind for SerializedItemKind { - fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result { - match self { - SerializedItemKind::Editor => "Editor", - SerializedItemKind::Diagnostics => "Diagnostics", - SerializedItemKind::ProjectSearch => "ProjectSearch", - SerializedItemKind::Terminal => "Terminal", +impl SerializedItem { + pub fn new(kind: impl AsRef, item_id: ItemId) -> Self { + Self { + kind: Arc::from(kind.as_ref()), + item_id, } - .bind(statement, start_index) } } -impl Column for SerializedItemKind { - fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> { - String::column(statement, start_index).and_then(|(kind_text, next_index)| { - Ok(( - match kind_text.as_ref() { - "Editor" => SerializedItemKind::Editor, - "Diagnostics" => SerializedItemKind::Diagnostics, - "ProjectSearch" => SerializedItemKind::ProjectSearch, - "Terminal" => SerializedItemKind::Terminal, - _ => bail!("Stored serialized item kind is incorrect"), - }, - next_index, - )) - }) +impl Bind for &SerializedItem { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = statement.bind(self.kind.clone(), start_index)?; + statement.bind(self.item_id, next_index) } } -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum SerializedItem { - Editor { item_id: usize, path: Arc }, - Diagnostics { item_id: usize }, - ProjectSearch { item_id: usize, query: String }, - 
Terminal { item_id: usize }, -} - -impl SerializedItem { - pub fn item_id(&self) -> usize { - match self { - SerializedItem::Editor { item_id, .. } => *item_id, - SerializedItem::Diagnostics { item_id } => *item_id, - SerializedItem::ProjectSearch { item_id, .. } => *item_id, - SerializedItem::Terminal { item_id } => *item_id, - } - } - - pub(crate) fn kind(&self) -> SerializedItemKind { - match self { - SerializedItem::Editor { .. } => SerializedItemKind::Editor, - SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics, - SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch, - SerializedItem::Terminal { .. } => SerializedItemKind::Terminal, - } +impl Column for SerializedItem { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (kind, next_index) = Arc::::column(statement, start_index)?; + let (item_id, next_index) = ItemId::column(statement, next_index)?; + Ok((SerializedItem { kind, item_id }, next_index)) } } @@ -187,8 +154,8 @@ mod tests { db.exec(indoc::indoc! 
{" CREATE TABLE workspace_id_test( - workspace_id BLOB, - dock_anchor TEXT + workspace_id BLOB, + dock_anchor TEXT );"}) .unwrap()() .unwrap(); diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index d6a69490a5a52e907d28326a2a76514ad7823e95..28623950dfc82948710b800700019e6d49ea222a 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,7 +1,7 @@ use crate::{ item::ItemEvent, persistence::model::{ItemId, WorkspaceId}, - Item, ItemNavHistory, + Item, ItemNavHistory, Pane, Workspace, }; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; @@ -10,8 +10,10 @@ use futures::StreamExt; use gpui::{ elements::*, geometry::{rect::RectF, vector::vec2f}, - Entity, ModelHandle, MouseButton, RenderContext, Task, View, ViewContext, + Entity, ModelHandle, MouseButton, RenderContext, Task, View, ViewContext, ViewHandle, + WeakViewHandle, }; +use project::Project; use settings::Settings; use smallvec::SmallVec; use std::{ @@ -191,10 +193,12 @@ impl Item for SharedScreen { } fn deserialize( - workspace_id: WorkspaceId, - item_id: ItemId, - cx: &mut ViewContext, - ) -> Result { - Err(anyhow!("SharedScreen can not be deserialized")) + _project: ModelHandle, + _workspace: WeakViewHandle, + _workspace_id: WorkspaceId, + _item_id: ItemId, + _cx: &mut ViewContext, + ) -> Task>> { + unreachable!("Shared screen can not be deserialized") } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index c51979f6555fe6acf4f0625372814736d5fc4e94..3d170818e251213c70e6e9e5558cdab189f74671 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -44,7 +44,8 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -use persistence::model::{ItemId, WorkspaceId}; +use persistence::model::SerializedItem; +pub use persistence::model::{ItemId, WorkspaceId}; use postage::prelude::Stream; use 
project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -57,7 +58,7 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::SerializedWorkspace; +use crate::persistence::model::{SerializedPane, SerializedWorkspace}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -337,22 +338,27 @@ pub fn register_followable_item(cx: &mut MutableAppContext) { }); } -type SerializableItemBuilders = HashMap< - &'static str, - fn(WorkspaceId, ItemId, &mut ViewContext) -> Option>, +type ItemDeserializers = HashMap< + Arc, + fn( + ModelHandle, + WeakViewHandle, + WorkspaceId, + ItemId, + &mut ViewContext, + ) -> Task>>, >; pub fn register_deserializable_item(cx: &mut MutableAppContext) { - cx.update_default_global(|deserializers: &mut SerializableItemBuilders, _| { + cx.update_default_global(|deserializers: &mut ItemDeserializers, _cx| { if let Some(serialized_item_kind) = I::serialized_item_kind() { - deserializers.insert(serialized_item_kind, |workspace_id, item_id, cx| { - if let Some(v) = - cx.add_option_view(|cx| I::deserialize(workspace_id, item_id, cx).log_err()) - { - Some(Box::new(v)) - } else { - None - } - }); + deserializers.insert( + Arc::from(serialized_item_kind), + |project, workspace, workspace_id, item_id, cx| { + let task = I::deserialize(project, workspace, workspace_id, item_id, cx); + cx.foreground() + .spawn(async { Ok(Box::new(task.await?) 
as Box<_>) }) + }, + ); } }); } @@ -549,6 +555,8 @@ impl Workspace { } project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); + // TODO: Cache workspace_id on workspace and read from it here + this.serialize_workspace(None, cx); } project::Event::DisconnectedFromHost => { this.update_window_edited(cx); @@ -568,21 +576,9 @@ impl Workspace { .detach(); cx.focus(¢er_pane); cx.emit(Event::PaneAdded(center_pane.clone())); - let dock = Dock::new( - dock_default_factory, - serialized_workspace - .as_ref() - .map(|ws| ws.dock_position) - .clone(), - cx, - ); + let dock = Dock::new(dock_default_factory, cx); let dock_pane = dock.pane().clone(); - if let Some(serialized_workspace) = serialized_workspace { - - // Fill them in? - } - let fs = project.read(cx).fs().clone(); let user_store = project.read(cx).user_store(); let client = project.read(cx).client(); @@ -636,13 +632,13 @@ impl Workspace { let mut this = Workspace { modal: None, - weak_self: weak_handle, + weak_self: weak_handle.clone(), center: PaneGroup::new(center_pane.clone()), dock, // When removing an item, the last element remaining in this array // is used to find where focus should fallback to. As such, the order // of these two variables is important. 
- panes: vec![dock_pane, center_pane.clone()], + panes: vec![dock_pane.clone(), center_pane.clone()], panes_by_item: Default::default(), active_pane: center_pane.clone(), last_active_center_pane: Some(center_pane.downgrade()), @@ -655,7 +651,7 @@ impl Workspace { fs, left_sidebar, right_sidebar, - project, + project: project.clone(), leader_state: Default::default(), follower_states_by_leader: Default::default(), last_leaders_by_pane: Default::default(), @@ -663,9 +659,15 @@ impl Workspace { active_call, _observe_current_user, }; - this.project_remote_id_changed(this.project.read(cx).remote_id(), cx); + this.project_remote_id_changed(project.read(cx).remote_id(), cx); cx.defer(|this, cx| this.update_window_title(cx)); + if let Some(serialized_workspace) = serialized_workspace { + cx.defer(move |_, cx| { + Self::load_from_serialized_workspace(weak_handle, serialized_workspace, cx) + }); + } + this } @@ -1315,6 +1317,7 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); + self.serialize_workspace(None, cx); } pub fn open_path( @@ -1519,6 +1522,7 @@ impl Workspace { entry.remove(); } } + self.serialize_workspace(None, cx); } _ => {} } @@ -2250,6 +2254,140 @@ impl Workspace { _ => {} } } + + fn workspace_id(&self, cx: &AppContext) -> WorkspaceId { + self.project() + .read(cx) + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into() + } + + fn serialize_workspace(&self, old_id: Option, cx: &mut MutableAppContext) { + let dock_pane = SerializedPane { + children: self + .dock + .pane() + .read(cx) + .items() + .filter_map(|item_handle| { + Some(SerializedItem { + kind: Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) + }) + .collect::>(), + }; + + let serialized_workspace = SerializedWorkspace { + workspace_id: self.workspace_id(cx), + dock_position: 
self.dock.position(), + dock_pane, + center_group: Default::default(), + }; + + cx.background() + .spawn(async move { + persistence::DB.save_workspace(old_id, &serialized_workspace); + }) + .detach(); + } + + fn load_from_serialized_workspace( + workspace: WeakViewHandle, + serialized_workspace: SerializedWorkspace, + cx: &mut MutableAppContext, + ) { + // fn process_splits( + // pane_group: SerializedPaneGroup, + // parent: Option, + // workspace: ViewHandle, + // cx: &mut AsyncAppContext, + // ) { + // match pane_group { + // SerializedPaneGroup::Group { axis, children } => { + // process_splits(pane_group, parent) + // } + // SerializedPaneGroup::Pane(pane) => { + // process_pane(pane) + // }, + // } + // } + + async fn deserialize_pane( + project: ModelHandle, + pane: SerializedPane, + pane_handle: ViewHandle, + workspace_id: WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) { + for item in pane.children { + let project = project.clone(); + let workspace_id = workspace_id.clone(); + let item_handle = pane_handle + .update(cx, |_, cx| { + if let Some(deserializer) = cx.global::().get(&item.kind) + { + deserializer( + project, + workspace.downgrade(), + workspace_id, + item.item_id, + cx, + ) + } else { + Task::ready(Err(anyhow!( + "Deserializer does not exist for item kind: {}", + item.kind + ))) + } + }) + .await + .log_err(); + + if let Some(item_handle) = item_handle { + workspace.update(cx, |workspace, cx| { + Pane::add_item( + workspace, + &pane_handle, + item_handle, + false, + false, + None, + cx, + ); + }) + } + } + } + + cx.spawn(|mut cx| async move { + if let Some(workspace) = workspace.upgrade(&cx) { + let (project, dock_pane_handle) = workspace.read_with(&cx, |workspace, _| { + (workspace.project().clone(), workspace.dock_pane().clone()) + }); + deserialize_pane( + project, + serialized_workspace.dock_pane, + dock_pane_handle, + serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; + + // Traverse the splits 
tree and add to things + // process_splits(serialized_workspace.center_group, None, workspace, &mut cx); + + workspace.update(&mut cx, |workspace, cx| { + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) + }); + } + }) + .detach(); + } } impl Entity for Workspace { diff --git a/crates/workspace/test.db b/crates/workspace/test.db index 7491ccde3ad6a8c23785f69b92b679e622b1e62c..9c94aa3162f548eeb541b3b23e9ccab9d09271e2 100644 GIT binary patch delta 1224 zcmb`F&r6g+7{_PcpEJYmJiV^6nQp$IDCL^FYh~HNME92OMw#6-5v#?l+=A-VR-y$S zv^f=9&{+uNrL)jgihdj-`~##e77<+=-}QZGp@M>j_dUGd&-4AxJkPx4vQ{o@OHED; zAw=XGuGn(ArzwUGjPHrV9J0v_f<-YZ;kyLZ8|lgT7r=<+|2F1(+) zJFDQiL-`Z=k-R%NaC&fHC|^s?oE#buTdBRG(&Ca3h}q`d;(be}c*4>&Z3eOPz5Y8^ zkN1T&zofb~WGD3^n`<=eWNRy4xaE(>ZsX3d#{IK>8-;!`%!}eXcf~627Y*>mq$`N7 z_|C3p)LA@*%@*>g;l`EH^tG!KlT%`r=WB}l{8-cZf8Ft>_}cvA?|byOQuqJIq_Y-= zH3VzEbM6I@er$TQu}2fj-3hTi9v7S4P=CnH<@yfF=LnO(1%?d-8?yW@m~Fa8quPhZ zEvtgy6;ymDvjntTvyewgDk`u>r%Kn0lVZJajEy1q0MB6_#(c;1fV3MX@(>DZxvk4@ zs=`T!CIzens&(LPg@l%PN{h?mF|H$c4HYQDVbJ*-{*+JiUare+HsdVEEbQSF)+1NF zF)~$}DPEqqq~~ZAYQKS6mIRO*5v+@x8QY>VRUIV}p{!N4vB;Lt!ws`$z=&$9h8a~~ zO{Nz4^s3KRnGwrqlF<~kB32jLGB`lQj8I#Mw(phk65^lvV_xDf;UlcV1DJrL;K;#^ za=XKjb5m0zED+SB2n_@^9a~oP3+lYVt-t)ybm#A6U6$8MtIO z3n~b633ewivdVKzH!ROD$}TQQOirC#$mfcr*lP1EsWiB0n>}R$;T)i~QJbyh1kjyo z%E83n$-uv#zY}OhEkB0|6RU|VhcV-1$2?&r*NTF~yp+@wBLgENT|+}%LlXrfLn}iw zAhKs<^^@c@p6rmPIQh9gzZgP>3mB42{D&C$5AomLENHNepH+g{gb{2YE0YSdA16r9 zBnLLOt9xUzAyupI0*Zw7)f1B?JEr{tf(7_$z@* zGx<3rSy(|zAu6SqfgDB-Fo&5*jcIa1ygC~rD}x^=2gDi{CI$hZD^BrGoM6BPavFjQ E01p0r!vFvP From 75d3d46b1b7582c59637c642a0f889978e474399 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 17 Nov 2022 12:43:16 -0800 Subject: [PATCH 50/86] wip serialize editor --- crates/editor/src/editor.rs | 1 + crates/editor/src/items.rs | 34 +++++++++++++++++++++++++++------ crates/terminal/src/terminal.rs | 2 -- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/crates/editor/src/editor.rs 
b/crates/editor/src/editor.rs index ce810bab0c508bb04c4c56c79af614f8fdbc02df..d1ccc5e8f3020a420fd46f383d6add7961954c64 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -373,6 +373,7 @@ pub fn init(cx: &mut MutableAppContext) { workspace::register_project_item::(cx); workspace::register_followable_item::(cx); + workspace::register_deserializable_item::(cx); } trait InvalidationRegion { diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index ae9bbd57484ec04f5b1f8e304e5e4f274c747234..005012294868881a52aed66cbc2a9ee388160b4a 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,9 +1,9 @@ use crate::{ display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, - MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, + movement::surrounding_word, Anchor, Autoscroll, Editor, EditorMode, Event, ExcerptId, + MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, }; -use anyhow::{anyhow, Result}; +use anyhow::{anyhow, Context, Result}; use futures::FutureExt; use gpui::{ elements::*, geometry::vector::vec2f, AppContext, Entity, ModelHandle, MutableAppContext, @@ -558,14 +558,36 @@ impl Item for Editor { } fn deserialize( - _project: ModelHandle, + project: ModelHandle, _workspace: WeakViewHandle, _workspace_id: WorkspaceId, _item_id: ItemId, - _cx: &mut ViewContext, + cx: &mut ViewContext, ) -> Task>> { // Look up the path with this key associated, create a self with that path - unimplemented!() + let path = Path::new("."); + if let Some(project_item) = project.update(cx, |project, cx| { + let (worktree, path) = project.find_local_worktree(path, cx)?; + let project_path = ProjectPath { + worktree_id: worktree.read(cx).id(), + path: path.into(), + }; + + Some(project.open_path(project_path, cx)) + }) { + cx.spawn(|pane, mut cx| async move { + let (_, project_item) = 
project_item.await?; + let buffer = project_item + .downcast::() + .context("Project item at stored path was not a buffer")?; + + Ok(cx.update(|cx| { + cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) + })) + }) + } else { + Task::ready(Err(anyhow!("Could not load file from stored path"))) + } } } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 15b3b4e66ed7b7f71bf91408c091bc313378d34a..66a64903d354ea111622428aa197029457ac0fc4 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -69,8 +69,6 @@ use lazy_static::lazy_static; pub fn init(cx: &mut MutableAppContext) { terminal_view::init(cx); terminal_container_view::init(cx); - - register_deserializable_item::(cx); } ///Scrolling is unbearably sluggish by default. Alacritty supports a configurable From 6530658c3ec202fcc958349f3e5fb4cf4fd1f95a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 14:20:52 -0800 Subject: [PATCH 51/86] Added center group deserialization --- Cargo.lock | 16 +- crates/db/src/db.rs | 20 ++- crates/db/src/kvp.rs | 2 +- crates/editor/src/items.rs | 3 +- crates/sqlez/src/connection.rs | 23 +-- crates/sqlez/src/migrations.rs | 8 +- crates/sqlez/src/savepoint.rs | 2 +- crates/sqlez/src/statement.rs | 6 +- crates/sqlez/src/thread_safe_connection.rs | 15 +- crates/workspace/Cargo.toml | 1 + crates/workspace/src/pane_group.rs | 14 +- crates/workspace/src/persistence.rs | 44 ++++-- crates/workspace/src/persistence/model.rs | 88 ++++++++++- crates/workspace/src/workspace.rs | 171 ++++++++++----------- 14 files changed, 264 insertions(+), 149 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b4df5a9ab9d1ce6dda66750bf6627806fe65e5f0..d53e91aa71afc034cecacb32c57f0ed12b2c730b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -327,6 +327,17 @@ dependencies = [ "syn", ] +[[package]] +name = "async-recursion" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.3" @@ -943,7 +954,7 @@ name = "client" version = "0.1.0" dependencies = [ "anyhow", - "async-recursion", + "async-recursion 0.3.2", "async-tungstenite", "collections", "db", @@ -7624,6 +7635,7 @@ name = "workspace" version = "0.1.0" dependencies = [ "anyhow", + "async-recursion 1.0.0", "bincode", "call", "client", @@ -7697,7 +7709,7 @@ dependencies = [ "anyhow", "assets", "async-compression", - "async-recursion", + "async-recursion 0.3.2", "async-trait", "auto_update", "backtrace", diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 9bb4286b832e6867a1711156a6eb20dd9e43dabb..39891718fb3a796c386b8f823a4b73f3b6169fd4 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,6 +1,6 @@ pub mod kvp; -use std::fs::create_dir_all; +use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; #[cfg(any(test, feature = "test-support"))] @@ -10,7 +10,7 @@ use indoc::indoc; use sqlez::connection::Connection; use sqlez::domain::{Domain, Migrator}; use sqlez::thread_safe_connection::ThreadSafeConnection; -use util::channel::RELEASE_CHANNEL_NAME; +use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; const INITIALIZE_QUERY: &'static str = indoc! {" @@ -26,18 +26,18 @@ pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. 
Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); - // if *RELEASE_CHANNEL == ReleaseChannel::Dev { - // remove_dir_all(¤t_db_dir).ok(); - // } + if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() { + remove_dir_all(¤t_db_dir).ok(); + } create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) + ThreadSafeConnection::new(Some(db_path.to_string_lossy().as_ref()), true) .with_initialize_query(INITIALIZE_QUERY) } -pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { +pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnection { ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } @@ -65,7 +65,11 @@ macro_rules! connection { } lazy_static! { - pub static ref $id: $t = $t(::db::open_file_db()); + pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { + ::db::open_memory_db(None) + } else { + ::db::open_file_db() + }); } }; } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index dd82c17615de4a65dcfe7936937ac523835b5030..3cdcd99016f7ef91903070bc2b02f0d0983fd492 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -61,7 +61,7 @@ mod tests { #[test] fn test_kvp() -> Result<()> { - let db = KeyValueStore(crate::open_memory_db("test_kvp")); + let db = KeyValueStore(crate::open_memory_db(Some("test_kvp"))); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 005012294868881a52aed66cbc2a9ee388160b4a..60ac8d5278b029a1f14db9a9f47f6365eab96609 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -554,7 +554,8 @@ impl Item for Editor { } fn serialized_item_kind() -> Option<&'static str> { - Some("Editor") + // TODO: 
Some("Editor") + None } fn deserialize( diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 8ab1e345d83853de600fd4fc6f0f416ce62abcce..1eaeb090e136315a1ac32ef42419f8d84a163db9 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -42,11 +42,16 @@ impl Connection { /// Attempts to open the database at uri. If it fails, a shared memory db will be opened /// instead. pub fn open_file(uri: &str) -> Self { - Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(uri)) + Self::open(uri, true).unwrap_or_else(|_| Self::open_memory(Some(uri))) } - pub fn open_memory(uri: &str) -> Self { - let in_memory_path = format!("file:{}?mode=memory&cache=shared", uri); + pub fn open_memory(uri: Option<&str>) -> Self { + let in_memory_path = if let Some(uri) = uri { + format!("file:{}?mode=memory&cache=shared", uri) + } else { + ":memory:".to_string() + }; + Self::open(&in_memory_path, false).expect("Could not create fallback in memory db") } @@ -110,7 +115,7 @@ mod test { #[test] fn string_round_trips() -> Result<()> { - let connection = Connection::open_memory("string_round_trips"); + let connection = Connection::open_memory(Some("string_round_trips")); connection .exec(indoc! {" CREATE TABLE text ( @@ -136,7 +141,7 @@ mod test { #[test] fn tuple_round_trips() { - let connection = Connection::open_memory("tuple_round_trips"); + let connection = Connection::open_memory(Some("tuple_round_trips")); connection .exec(indoc! {" CREATE TABLE test ( @@ -170,7 +175,7 @@ mod test { #[test] fn bool_round_trips() { - let connection = Connection::open_memory("bool_round_trips"); + let connection = Connection::open_memory(Some("bool_round_trips")); connection .exec(indoc! {" CREATE TABLE bools ( @@ -196,7 +201,7 @@ mod test { #[test] fn backup_works() { - let connection1 = Connection::open_memory("backup_works"); + let connection1 = Connection::open_memory(Some("backup_works")); connection1 .exec(indoc! 
{" CREATE TABLE blobs ( @@ -211,7 +216,7 @@ mod test { .unwrap(); // Backup connection1 to connection2 - let connection2 = Connection::open_memory("backup_works_other"); + let connection2 = Connection::open_memory(Some("backup_works_other")); connection1.backup_main(&connection2).unwrap(); // Delete the added blob and verify its deleted on the other side @@ -224,7 +229,7 @@ mod test { #[test] fn multi_step_statement_works() { - let connection = Connection::open_memory("multi_step_statement_works"); + let connection = Connection::open_memory(Some("multi_step_statement_works")); connection .exec(indoc! {" diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 1f4b3f0f7c979dc844253dcb1655beb7bea3a561..23af04bbf4e30be0af9c05ccfa1f8bcc0e56cf4b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -62,7 +62,7 @@ mod test { #[test] fn test_migrations_are_added_to_table() { - let connection = Connection::open_memory("migrations_are_added_to_table"); + let connection = Connection::open_memory(Some("migrations_are_added_to_table")); // Create first migration with a single step and run it connection @@ -131,7 +131,7 @@ mod test { #[test] fn test_migration_setup_works() { - let connection = Connection::open_memory("migration_setup_works"); + let connection = Connection::open_memory(Some("migration_setup_works")); connection .exec(indoc! 
{" @@ -163,7 +163,7 @@ mod test { #[test] fn migrations_dont_rerun() { - let connection = Connection::open_memory("migrations_dont_rerun"); + let connection = Connection::open_memory(Some("migrations_dont_rerun")); // Create migration which clears a tabl @@ -222,7 +222,7 @@ mod test { #[test] fn changed_migration_fails() { - let connection = Connection::open_memory("changed_migration_fails"); + let connection = Connection::open_memory(Some("changed_migration_fails")); // Create a migration with two steps and run it connection diff --git a/crates/sqlez/src/savepoint.rs b/crates/sqlez/src/savepoint.rs index 9751aac51d90966c8c6aaa5386d2bc9f3da9573e..09c2e941482f829cd6c5c83cd14aad0f091d6562 100644 --- a/crates/sqlez/src/savepoint.rs +++ b/crates/sqlez/src/savepoint.rs @@ -59,7 +59,7 @@ mod tests { #[test] fn test_nested_savepoints() -> Result<()> { - let connection = Connection::open_memory("nested_savepoints"); + let connection = Connection::open_memory(Some("nested_savepoints")); connection .exec(indoc! {" diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 40118dd9237e3ff3bbfd4c01773427cf393638e7..f0afc0e020d61f0c32516469a6d97f4c40aaa343 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -352,7 +352,7 @@ mod test { #[test] fn blob_round_trips() { - let connection1 = Connection::open_memory("blob_round_trips"); + let connection1 = Connection::open_memory(Some("blob_round_trips")); connection1 .exec(indoc! 
{" CREATE TABLE blobs ( @@ -369,7 +369,7 @@ mod test { assert_eq!(write.step().unwrap(), StepResult::Done); // Read the blob from the - let connection2 = Connection::open_memory("blob_round_trips"); + let connection2 = Connection::open_memory(Some("blob_round_trips")); let mut read = Statement::prepare(&connection2, "SELECT * FROM blobs").unwrap(); assert_eq!(read.step().unwrap(), StepResult::Row); assert_eq!(read.column_blob(0).unwrap(), blob); @@ -383,7 +383,7 @@ mod test { #[test] pub fn maybe_returns_options() { - let connection = Connection::open_memory("maybe_returns_options"); + let connection = Connection::open_memory(Some("maybe_returns_options")); connection .exec(indoc! {" CREATE TABLE texts ( diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index e85ba4c51a275dd464cbd6ff44c698e2a6c74352..f415c32960cbd81ecbba2f3abc481d211f16f454 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -9,7 +9,7 @@ use crate::{ }; pub struct ThreadSafeConnection { - uri: Arc, + uri: Option>, persistent: bool, initialize_query: Option<&'static str>, connection: Arc>, @@ -20,9 +20,13 @@ unsafe impl Send for ThreadSafeConnection {} unsafe impl Sync for ThreadSafeConnection {} impl ThreadSafeConnection { - pub fn new(uri: &str, persistent: bool) -> Self { + pub fn new(uri: Option<&str>, persistent: bool) -> Self { + if persistent == true && uri == None { + // This panic is securing the unwrap in open_file(), don't remove it! + panic!("Cannot create a persistent connection without a URI") + } Self { - uri: Arc::from(uri), + uri: uri.map(|str| Arc::from(str)), persistent, initialize_query: None, connection: Default::default(), @@ -41,13 +45,14 @@ impl ThreadSafeConnection { /// called from the deref function. 
/// If opening fails, the connection falls back to a shared memory connection fn open_file(&self) -> Connection { - Connection::open_file(self.uri.as_ref()) + // This unwrap is secured by a panic in the constructor. Be careful if you remove it! + Connection::open_file(self.uri.as_ref().unwrap()) } /// Opens a shared memory connection using the file path as the identifier. This unwraps /// as we expect it always to succeed fn open_shared_memory(&self) -> Connection { - Connection::open_memory(self.uri.as_ref()) + Connection::open_memory(self.uri.as_ref().map(|str| str.deref())) } // Open a new connection for the given domain, leaving this diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 822a008eedb02f799c9f03c0d77a6893c930a40a..0ce3bc220b89503af9d99bc8517d96ab1684a039 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -33,6 +33,7 @@ settings = { path = "../settings" } sqlez = { path = "../sqlez" } theme = { path = "../theme" } util = { path = "../util" } +async-recursion = "1.0.0" bincode = "1.2.1" anyhow = "1.0.38" futures = "0.3" diff --git a/crates/workspace/src/pane_group.rs b/crates/workspace/src/pane_group.rs index 6442429b0d923b0dd6fd44072fce152826500718..b8e73d6f6f3c3f7dec6cb023be5dbf14f4facf1c 100644 --- a/crates/workspace/src/pane_group.rs +++ b/crates/workspace/src/pane_group.rs @@ -13,10 +13,14 @@ use theme::Theme; #[derive(Clone, Debug, Eq, PartialEq)] pub struct PaneGroup { - root: Member, + pub(crate) root: Member, } impl PaneGroup { + pub(crate) fn with_root(root: Member) -> Self { + Self { root } + } + pub fn new(pane: ViewHandle) -> Self { Self { root: Member::Pane(pane), @@ -85,7 +89,7 @@ impl PaneGroup { } #[derive(Clone, Debug, Eq, PartialEq)] -enum Member { +pub(crate) enum Member { Axis(PaneAxis), Pane(ViewHandle), } @@ -276,9 +280,9 @@ impl Member { } #[derive(Clone, Debug, Eq, PartialEq)] -struct PaneAxis { - axis: Axis, - members: Vec, +pub(crate) struct PaneAxis { + pub axis: Axis, + 
pub members: Vec, } impl PaneAxis { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index cc07a76596677c7b51d9cf8b1e7d5e6cb593f987..f7517ec8bf79c4d4f8a8b6f5df59da22741e1e14 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -55,8 +55,8 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL, this is a dock pane - position INTEGER, -- NULL, this is a dock pane + parent_group_id INTEGER, -- NULL means that this is a dock pane + position INTEGER, -- NULL means that this is a dock pane FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE, @@ -164,7 +164,7 @@ impl WorkspaceDb { }) .with_context(|| { format!( - "Update workspace with roots {:?}", + "Update workspace with roots {:?} failed.", workspace.workspace_id.paths() ) }) @@ -196,6 +196,17 @@ impl WorkspaceDb { .into_iter() .next() .context("No center pane group") + .map(|pane_group| { + // Rewrite the special case of the root being a leaf node + if let SerializedPaneGroup::Group { axis: Axis::Horizontal, ref children } = pane_group { + if children.len() == 1 { + if let Some(SerializedPaneGroup::Pane(pane)) = children.get(0) { + return SerializedPaneGroup::Pane(pane.clone()) + } + } + } + pane_group + }) } fn get_pane_group_children<'a>( @@ -242,9 +253,12 @@ impl WorkspaceDb { pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { - if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) { - bail!("Pane groups must have a SerializedPaneGroup::Group at the root") - } + // Rewrite the root node to fit with the database + let pane_group = if parent.is_none() && matches!(pane_group, SerializedPaneGroup::Pane { .. 
}) { + SerializedPaneGroup::Group { axis: Axis::Horizontal, children: vec![pane_group.clone()] } + } else { + pane_group.clone() + }; match pane_group { SerializedPaneGroup::Group { axis, children } => { @@ -254,7 +268,7 @@ impl WorkspaceDb { INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) RETURNING group_id"})? - ((workspace_id, parent_id, position, *axis))? + ((workspace_id, parent_id, position, axis))? .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { @@ -262,7 +276,9 @@ impl WorkspaceDb { } Ok(()) } - SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent), + SerializedPaneGroup::Pane(pane) => { + self.save_pane(workspace_id, &pane, parent) + }, } } @@ -324,7 +340,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::open_memory_db; + use db::{open_memory_db, write_db_to}; use settings::DockAnchor; use super::*; @@ -333,7 +349,7 @@ mod tests { fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -407,7 +423,7 @@ mod tests { fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { workspace_id: (["/tmp", "/tmp2"]).into(), @@ -500,7 +516,7 @@ mod tests { fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -514,7 +530,7 @@ mod tests { let workspace = 
default_workspace(&["/tmp"], dock_pane, &Default::default()); db.save_workspace(None, &workspace); - + write_db_to(&db, "dest.db").unwrap(); let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); @@ -524,7 +540,7 @@ mod tests { fn test_simple_split() { // env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("simple_split")); + let db = WorkspaceDb(open_memory_db(Some("simple_split"))); // ----------------- // | 1,2 | 5,6 | diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index adc6ea7c1ac18e8b50413128f810dd95716f6398..b4b5db5f11462874d0e7b19fd28820d24b144fb7 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -5,15 +5,20 @@ use std::{ use anyhow::Result; -use gpui::Axis; +use async_recursion::async_recursion; +use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle}; +use project::Project; use settings::DockAnchor; use sqlez::{ bindable::{Bind, Column}, statement::Statement, }; +use util::ResultExt; -use crate::dock::DockPosition; +use crate::{ + dock::DockPosition, item::ItemHandle, ItemDeserializers, Member, Pane, PaneAxis, Workspace, +}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct WorkspaceId(Arc>); @@ -69,9 +74,42 @@ pub enum SerializedPaneGroup { impl Default for SerializedPaneGroup { fn default() -> Self { - Self::Group { - axis: Axis::Horizontal, - children: vec![Self::Pane(Default::default())], + Self::Pane(SerializedPane { + children: Vec::new(), + }) + } +} + +impl SerializedPaneGroup { + #[async_recursion(?Send)] + pub(crate) async fn deserialize( + &self, + project: &ModelHandle, + workspace_id: &WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) -> Member { + match self { + SerializedPaneGroup::Group { axis, children } => { + let mut members = Vec::new(); + for child in children { + let new_member = child + .deserialize(project, 
workspace_id, workspace, cx) + .await; + members.push(new_member); + } + Member::Axis(PaneAxis { + axis: *axis, + members, + }) + } + SerializedPaneGroup::Pane(serialized_pane) => { + let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); + serialized_pane + .deserialize_to(project, &pane, workspace_id, workspace, cx) + .await; + Member::Pane(pane) + } } } } @@ -85,6 +123,44 @@ impl SerializedPane { pub fn new(children: Vec) -> Self { SerializedPane { children } } + + pub async fn deserialize_to( + &self, + project: &ModelHandle, + pane_handle: &ViewHandle, + workspace_id: &WorkspaceId, + workspace: &ViewHandle, + cx: &mut AsyncAppContext, + ) { + for item in self.children.iter() { + let project = project.clone(); + let workspace_id = workspace_id.clone(); + let item_handle = pane_handle + .update(cx, |_, cx| { + if let Some(deserializer) = cx.global::().get(&item.kind) { + deserializer( + project, + workspace.downgrade(), + workspace_id, + item.item_id, + cx, + ) + } else { + Task::ready(Err(anyhow::anyhow!( + "Deserializer does not exist for item kind: {}", + item.kind + ))) + } + }) + .await + .log_err(); + if let Some(item_handle) = item_handle { + workspace.update(cx, |workspace, cx| { + Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); + }) + } + } + } } pub type GroupId = i64; @@ -150,7 +226,7 @@ mod tests { #[test] fn test_workspace_round_trips() { - let db = Connection::open_memory("workspace_id_round_trips"); + let db = Connection::open_memory(Some("workspace_id_round_trips")); db.exec(indoc::indoc! 
{" CREATE TABLE workspace_id_test( diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 3d170818e251213c70e6e9e5558cdab189f74671..072bd80e1dfde190bede15e7adf7baec00d3f903 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -58,7 +58,7 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::{SerializedPane, SerializedWorkspace}; +use crate::persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -2264,27 +2264,62 @@ impl Workspace { .into() } - fn serialize_workspace(&self, old_id: Option, cx: &mut MutableAppContext) { - let dock_pane = SerializedPane { - children: self - .dock - .pane() - .read(cx) - .items() - .filter_map(|item_handle| { - Some(SerializedItem { - kind: Arc::from(item_handle.serialized_item_kind()?), - item_id: item_handle.id(), + fn remove_panes(&mut self, member: Member, cx: &mut ViewContext) { + match member { + Member::Axis(PaneAxis { members, .. 
}) => { + for child in members.iter() { + self.remove_panes(child.clone(), cx) + } + } + Member::Pane(pane) => self.remove_pane(pane.clone(), cx), + } + } + + fn serialize_workspace(&self, old_id: Option, cx: &AppContext) { + fn serialize_pane_handle( + pane_handle: &ViewHandle, + cx: &AppContext, + ) -> SerializedPane { + SerializedPane { + children: pane_handle + .read(cx) + .items() + .filter_map(|item_handle| { + Some(SerializedItem { + kind: Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) }) - }) - .collect::>(), - }; + .collect::>(), + } + } + + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + + fn build_serialized_pane_group( + pane_group: &Member, + cx: &AppContext, + ) -> SerializedPaneGroup { + match pane_group { + Member::Axis(PaneAxis { axis, members }) => SerializedPaneGroup::Group { + axis: *axis, + children: members + .iter() + .map(|member| build_serialized_pane_group(member, cx)) + .collect::>(), + }, + Member::Pane(pane_handle) => { + SerializedPaneGroup::Pane(serialize_pane_handle(&pane_handle, cx)) + } + } + } + let center_group = build_serialized_pane_group(&self.center.root, cx); let serialized_workspace = SerializedWorkspace { workspace_id: self.workspace_id(cx), dock_position: self.dock.position(), dock_pane, - center_group: Default::default(), + center_group, }; cx.background() @@ -2299,87 +2334,43 @@ impl Workspace { serialized_workspace: SerializedWorkspace, cx: &mut MutableAppContext, ) { - // fn process_splits( - // pane_group: SerializedPaneGroup, - // parent: Option, - // workspace: ViewHandle, - // cx: &mut AsyncAppContext, - // ) { - // match pane_group { - // SerializedPaneGroup::Group { axis, children } => { - // process_splits(pane_group, parent) - // } - // SerializedPaneGroup::Pane(pane) => { - // process_pane(pane) - // }, - // } - // } - - async fn deserialize_pane( - project: ModelHandle, - pane: SerializedPane, - pane_handle: ViewHandle, - workspace_id: WorkspaceId, - 
workspace: &ViewHandle, - cx: &mut AsyncAppContext, - ) { - for item in pane.children { - let project = project.clone(); - let workspace_id = workspace_id.clone(); - let item_handle = pane_handle - .update(cx, |_, cx| { - if let Some(deserializer) = cx.global::().get(&item.kind) - { - deserializer( - project, - workspace.downgrade(), - workspace_id, - item.item_id, - cx, - ) - } else { - Task::ready(Err(anyhow!( - "Deserializer does not exist for item kind: {}", - item.kind - ))) - } - }) - .await - .log_err(); - - if let Some(item_handle) = item_handle { - workspace.update(cx, |workspace, cx| { - Pane::add_item( - workspace, - &pane_handle, - item_handle, - false, - false, - None, - cx, - ); - }) - } - } - } - cx.spawn(|mut cx| async move { if let Some(workspace) = workspace.upgrade(&cx) { let (project, dock_pane_handle) = workspace.read_with(&cx, |workspace, _| { (workspace.project().clone(), workspace.dock_pane().clone()) }); - deserialize_pane( - project, - serialized_workspace.dock_pane, - dock_pane_handle, - serialized_workspace.workspace_id, - &workspace, - &mut cx, - ) - .await; + + serialized_workspace + .dock_pane + .deserialize_to( + &project, + &dock_pane_handle, + &serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; // Traverse the splits tree and add to things - // process_splits(serialized_workspace.center_group, None, workspace, &mut cx); + + let root = serialized_workspace + .center_group + .deserialize( + &project, + &serialized_workspace.workspace_id, + &workspace, + &mut cx, + ) + .await; + + // Remove old panes from workspace panes list + workspace.update(&mut cx, |workspace, cx| { + workspace.remove_panes(workspace.center.root.clone(), cx); + + // Swap workspace center group + workspace.center = PaneGroup::with_root(root); + cx.notify(); + }); workspace.update(&mut cx, |workspace, cx| { Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) From a0cb6542ba87b201bd0108673098f62ff3fc0dee Mon Sep 17 
00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 16:56:17 -0800 Subject: [PATCH 52/86] Polishing workspace data structures Co-authored-by: kay@zed.dev --- crates/sqlez/src/migrations.rs | 2 +- crates/sqlez/src/statement.rs | 8 +- crates/sqlez/src/thread_safe_connection.rs | 47 ++++ crates/workspace/dest.db | Bin 0 -> 36864 bytes crates/workspace/src/persistence.rs | 288 ++++++++++++++------- crates/workspace/src/persistence/model.rs | 29 ++- crates/workspace/src/workspace.rs | 39 +-- 7 files changed, 287 insertions(+), 126 deletions(-) create mode 100644 crates/workspace/dest.db diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 23af04bbf4e30be0af9c05ccfa1f8bcc0e56cf4b..d77d54095bc7b434410eb8058361dd20aaef56a2 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -58,7 +58,7 @@ impl Connection { mod test { use indoc::indoc; - use crate::connection::Connection; + use crate::{connection::Connection, thread_safe_connection::ThreadSafeConnection}; #[test] fn test_migrations_are_added_to_table() { diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f0afc0e020d61f0c32516469a6d97f4c40aaa343..164929010b2698401724e7c6493b0212948d709c 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -59,11 +59,11 @@ impl<'a> Statement<'a> { ); remaining_sql = CStr::from_ptr(remaining_sql_ptr); statement.raw_statements.push(raw_statement); - } - connection - .last_error() - .with_context(|| format!("Prepare call failed for query:\n{}", query.as_ref()))?; + connection.last_error().with_context(|| { + format!("Prepare call failed for query:\n{}", query.as_ref()) + })?; + } } Ok(statement) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index f415c32960cbd81ecbba2f3abc481d211f16f454..4ed180540746ce96290380c632591eccfeae5d8d 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ 
b/crates/sqlez/src/thread_safe_connection.rs @@ -109,3 +109,50 @@ impl Deref for ThreadSafeConnection { }) } } + +#[cfg(test)] +mod test { + use std::ops::Deref; + + use crate::domain::Domain; + + use super::ThreadSafeConnection; + + #[test] + #[should_panic] + fn wild_zed_lost_failure() { + enum TestWorkspace {} + impl Domain for TestWorkspace { + fn name() -> &'static str { + "workspace" + } + + fn migrations() -> &'static [&'static str] { + &[" + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), + FOREIGN KEY(active_pane) REFERENCES panes(pane_id) + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + active INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "] + } + } + + let _ = ThreadSafeConnection::::new(None, false) + .with_initialize_query("PRAGMA FOREIGN_KEYS=true") + .deref(); + } +} diff --git a/crates/workspace/dest.db b/crates/workspace/dest.db new file mode 100644 index 0000000000000000000000000000000000000000..95cbdffc92f30106519872ee986be92fdb20ebfa GIT binary patch literal 36864 zcmeI4(N5z=6o&2Hg@jayUQ~+8O$V)3Arf@8+KWnyR%#LqjT#b~n5uhGS#IJI3*yPz z4zRszVYN4vdfThMLLa2h(5L7Nbmlmocw$(Jw5zt!Zv`07*yB0h`OkRHM56aq%a1+w zJr752%$^z_8m4J{&X{2s>*8lc{G^9Ge~_Mtzh%7M@)R;p+^^1B6j9`=gc2$v51T6 zq&eU*n*{#LiAP;}}7dnC1<_`!hL);D4p=}?lpxUj_M zL4Ksvyw~Yk&HXm}(t5F(9YB?Jtv#!2wHsECsfI|uhI)`RI&HRVwJZUuQR_8oyH>X4 z=x|peoVIM`9y_&5d!nq1Hq(}DZ~AkR-VVFXgIf0m88LOt)@;xMXSGyiJ-gd%*wx~4 z`FYti1TF6(dU+}?cic(L>D(a)J5T7(%H`^Eq5SBP`TAQDMc)fzFLY>R*>XXXNVX+W z%G3zOKQq{5^kX*^T|37ipNun-A|**lv61HnQcPksmP*HEAs6bAeG{DVoR{x(b5;W> 
z@~r#GWaSKYn>Y0CG~04MPOOeLQ{$`GNGWyQPaiE&kX{wYA2cfZwXAs{8ErykE@E=% zm9TFJIkzACXP$DMiYyuAbtUXMW_17iX#;ntrkYLKnF~f5<~}4qcG=q*CW`%tz#qu75m?sbpOU z^$xs2;uyMJ%2|5=O^$Q2IQQ6q2bGu&T@kphIO(6zG|66hF*!N*M_v@Wqj92=U2Cs) z)UsLQs4L!fwqrLBte#yvIGlTrWMCi(Bj$K=0}e;m5RIBz0Ux=3kT56nqu3iOid3WSp{q*ORqeBq7+qbw;CJd|LG0#os!J|#h3m# zKmY_l00ck)1V8`;KmY_l00ck)1QwV;N!o_|e}O#=(?9?OKmY_l00ck)1V8`;KmY_l zz%cOrKU4q$AOHd&00JNY0w4eaAOHd&00N6o0Qvvo`xq9200@8p2!H?xfB*=900@8p z2!H_ce`o*%KmY_l00ck)1V8`;KmY_l00b7F0P_FE_c1I40T2KI5C8!X009sH0T2KI i5C8%E|37E|1V8`;KmY_l00ck)1V8`;KmY_5pTOV8W&uk8 literal 0 HcmV?d00001 diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index f7517ec8bf79c4d4f8a8b6f5df59da22741e1e14..3f04e5046151e7cf9c29624a94dd56268957f34a 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -26,6 +26,7 @@ use model::{ connection!(DB: WorkspaceDb); + impl Domain for Workspace { fn name() -> &'static str { "workspace" @@ -37,7 +38,9 @@ impl Domain for Workspace { workspace_id BLOB PRIMARY KEY, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; CREATE TABLE pane_groups( @@ -55,14 +58,21 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id BLOB NOT NULL, - parent_group_id INTEGER, -- NULL means that this is a dock pane - position INTEGER, -- NULL means that this is a dock pane + active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ON UPDATE CASCADE, - FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ON UPDATE CASCADE ) STRICT; + CREATE TABLE center_panes( + pane_id INTEGER PRIMARY KEY, + parent_group_id 
INTEGER, -- NULL means that this is a root pane + position INTEGER, -- NULL means that this is a root pane + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + ON DELETE CASCADE, + FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE + ) STRICT; + CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique workspace_id BLOB NOT NULL, @@ -131,12 +141,13 @@ impl WorkspaceDb { workspace: &SerializedWorkspace, ) { self.with_savepoint("update_worktrees", || { + self.exec_bound(indoc! {" + UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; + DELETE FROM pane_groups WHERE workspace_id = ?1; + DELETE FROM panes WHERE workspace_id = ?1;"})? + (old_id.as_ref().unwrap_or(&workspace.workspace_id)).context("Clearing old panes")?; + if let Some(old_id) = old_id { - self.exec_bound(indoc! {" - DELETE FROM pane_groups WHERE workspace_id = ?"})?(&old_id)?; - - // If collision, delete - self.exec_bound(indoc! {" UPDATE OR REPLACE workspaces SET workspace_id = ?, @@ -147,18 +158,26 @@ impl WorkspaceDb { &workspace.workspace_id, workspace.dock_position, &old_id, - ))?; + )).context("Updating workspace with new worktree roots")?; } else { - self.exec_bound(indoc! 
{" - DELETE FROM pane_groups WHERE workspace_id = ?"})?(&workspace.workspace_id)?; self.exec_bound( "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace.workspace_id, workspace.dock_position))?; + )?((&workspace.workspace_id, workspace.dock_position)).context("Uodating workspace")?; } // Save center pane group and dock pane - self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None)?; - self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None)?; + self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None).context("save pane group in save workspace")?; + + let dock_id = self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None, true).context("save pane in save workspace")?; + + // Complete workspace initialization + self.exec_bound(indoc! {" + UPDATE workspaces + SET dock_pane = ? + WHERE workspace_id = ?"})?(( + dock_id, + &workspace.workspace_id, + )).context("Finishing initialization with dock pane")?; Ok(()) }) @@ -196,38 +215,42 @@ impl WorkspaceDb { .into_iter() .next() .context("No center pane group") - .map(|pane_group| { - // Rewrite the special case of the root being a leaf node - if let SerializedPaneGroup::Group { axis: Axis::Horizontal, ref children } = pane_group { - if children.len() == 1 { - if let Some(SerializedPaneGroup::Pane(pane)) = children.get(0) { - return SerializedPaneGroup::Pane(pane.clone()) - } - } - } - pane_group - }) } - fn get_pane_group_children<'a>( + fn get_pane_group_children( &self, workspace_id: &WorkspaceId, group_id: Option, ) -> Result> { - self.select_bound::<(Option, &WorkspaceId), (Option, Option, Option)>(indoc! 
{" - SELECT group_id, axis, pane_id - FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id - FROM pane_groups - UNION - SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id - FROM panes - -- Remove the dock panes from the union - WHERE parent_group_id IS NOT NULL and position IS NOT NULL) + type GroupKey<'a> = (Option, &'a WorkspaceId); + type GroupOrPane = (Option, Option, Option, Option); + self.select_bound::(indoc! {" + SELECT group_id, axis, pane_id, active + FROM (SELECT + group_id, + axis, + NULL as pane_id, + NULL as active, + position, + parent_group_id, + workspace_id + FROM pane_groups + UNION + SELECT + NULL, + NULL, + center_panes.pane_id, + panes.active as active, + position, + parent_group_id, + panes.workspace_id as workspace_id + FROM center_panes + JOIN panes ON center_panes.pane_id = panes.pane_id) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position "})?((group_id, workspace_id))? .into_iter() - .map(|(group_id, axis, pane_id)| { + .map(|(group_id, axis, pane_id, active)| { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, @@ -236,10 +259,8 @@ impl WorkspaceDb { Some(group_id), )?, }) - } else if let Some(pane_id) = pane_id { - Ok(SerializedPaneGroup::Pane(SerializedPane { - children: self.get_items( pane_id)?, - })) + } else if let Some((pane_id, active)) = pane_id.zip(active) { + Ok(SerializedPaneGroup::Pane(SerializedPane::new(self.get_items( pane_id)?, active))) } else { bail!("Pane Group Child was neither a pane group or a pane"); } @@ -253,22 +274,15 @@ impl WorkspaceDb { pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { - // Rewrite the root node to fit with the database - let pane_group = if parent.is_none() && matches!(pane_group, SerializedPaneGroup::Pane { .. 
}) { - SerializedPaneGroup::Group { axis: Axis::Horizontal, children: vec![pane_group.clone()] } - } else { - pane_group.clone() - }; - match pane_group { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); let group_id = self.select_row_bound::<_, i64>(indoc!{" - INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) - VALUES (?, ?, ?, ?) - RETURNING group_id"})? - ((workspace_id, parent_id, position, axis))? + INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) + VALUES (?, ?, ?, ?) + RETURNING group_id"})? + ((workspace_id, parent_id, position, *axis))? .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { @@ -277,21 +291,24 @@ impl WorkspaceDb { Ok(()) } SerializedPaneGroup::Pane(pane) => { - self.save_pane(workspace_id, &pane, parent) + self.save_pane(workspace_id, &pane, parent, false)?; + Ok(()) }, } } pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { - let pane_id = self.select_row_bound(indoc! {" - SELECT pane_id FROM panes - WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?( + let (pane_id, active) = self.select_row_bound(indoc! {" + SELECT pane_id, active + FROM panes + WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?( workspace_id, )? .context("No dock pane for workspace")?; Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, + active )) } @@ -299,20 +316,32 @@ impl WorkspaceDb { &self, workspace_id: &WorkspaceId, pane: &SerializedPane, - parent: Option<(GroupId, usize)>, - ) -> Result<()> { - let (parent_id, order) = unzip_option(parent); - + parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane + dock: bool, + ) -> Result { let pane_id = self.select_row_bound::<_, i64>(indoc!{" - INSERT INTO panes(workspace_id, parent_group_id, position) - VALUES (?, ?, ?) 
+ INSERT INTO panes(workspace_id, active) + VALUES (?, ?) RETURNING pane_id"}, - )?((workspace_id, parent_id, order))? + )?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; + + if !dock { + let (parent_id, order) = unzip_option(parent); + self.exec_bound(indoc! {" + INSERT INTO center_panes(pane_id, parent_group_id, position) + VALUES (?, ?, ?)"})?(( + pane_id, parent_id, order + ))?; + } self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items") + .context("Saving items")?; + + Ok(pane_id) } + + pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" @@ -352,6 +381,7 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { + children: vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 2), @@ -359,6 +389,7 @@ mod tests { SerializedItem::new("Terminal", 4), ], + active: false }; // ----------------- @@ -372,28 +403,30 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + false) + ), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 7), SerializedItem::new("Terminal", 8), - ], - }), + false, + )), ], }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 9), SerializedItem::new("Terminal", 10), ], - }), + false, + )), ], }; @@ -518,14 +551,14 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); - let dock_pane = crate::persistence::model::SerializedPane { - children: vec![ + let dock_pane 
= crate::persistence::model::SerializedPane::new( + vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 4), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), - ], - }; + ], false + ); let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); @@ -538,7 +571,7 @@ mod tests { #[test] fn test_simple_split() { - // env_logger::try_init().ok(); + env_logger::try_init().ok(); let db = WorkspaceDb(open_memory_db(Some("simple_split"))); @@ -553,33 +586,96 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - }), - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], - }), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), ], }, - SerializedPaneGroup::Pane(SerializedPane { - children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - }), + false)), ], }; let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); db.save_workspace(None, &workspace); + + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); + + assert_eq!(workspace.center_group, new_workspace.center_group); + } + + #[test] + fn test_cleanup_panes() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); + + let center_pane = SerializedPaneGroup::Group { + axis: gpui::Axis::Horizontal, + children: vec![ + SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + 
SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), + ], + }, + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 5), + SerializedItem::new("Terminal", 6), + ], + false)), + ], + }; + + let id = &["/tmp"]; + + let mut workspace = default_workspace(id, Default::default(), ¢er_pane); + + db.save_workspace(None, &workspace); + + workspace.center_group = SerializedPaneGroup::Group { + axis: gpui::Axis::Vertical, + children: vec![ + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false)), + SerializedPaneGroup::Pane(SerializedPane::new(vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], true)), + ], + }; + + db.save_workspace(None, &workspace); + + let new_workspace = db.workspace_for_roots(id).unwrap(); + + assert_eq!(workspace.center_group, new_workspace.center_group); - assert_eq!(workspace.center_group, center_pane); } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index b4b5db5f11462874d0e7b19fd28820d24b144fb7..9eca121c213a0e9b081449356fcec460ce65d966 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -76,6 +76,7 @@ impl Default for SerializedPaneGroup { fn default() -> Self { Self::Pane(SerializedPane { children: Vec::new(), + active: false, }) } } @@ -88,27 +89,35 @@ impl SerializedPaneGroup { workspace_id: &WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, - ) -> Member { + ) -> (Member, Option>) { match self { SerializedPaneGroup::Group { axis, children } => { + let mut current_active_pane = None; let mut members = Vec::new(); for child in 
children { - let new_member = child + let (new_member, active_pane) = child .deserialize(project, workspace_id, workspace, cx) .await; members.push(new_member); + + current_active_pane = current_active_pane.or(active_pane); } - Member::Axis(PaneAxis { - axis: *axis, - members, - }) + ( + Member::Axis(PaneAxis { + axis: *axis, + members, + }), + current_active_pane, + ) } SerializedPaneGroup::Pane(serialized_pane) => { let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); + let active = serialized_pane.active; serialized_pane .deserialize_to(project, &pane, workspace_id, workspace, cx) .await; - Member::Pane(pane) + + (Member::Pane(pane.clone()), active.then(|| pane)) } } } @@ -116,12 +125,13 @@ impl SerializedPaneGroup { #[derive(Debug, PartialEq, Eq, Default, Clone)] pub struct SerializedPane { + pub(crate) active: bool, pub(crate) children: Vec, } impl SerializedPane { - pub fn new(children: Vec) -> Self { - SerializedPane { children } + pub fn new(children: Vec, active: bool) -> Self { + SerializedPane { children, active } } pub async fn deserialize_to( @@ -154,6 +164,7 @@ impl SerializedPane { }) .await .log_err(); + if let Some(item_handle) = item_handle { workspace.update(cx, |workspace, cx| { Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 072bd80e1dfde190bede15e7adf7baec00d3f903..fbe21be81cd8af42526b69e5b5a59ca9e85d16a2 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2280,18 +2280,22 @@ impl Workspace { pane_handle: &ViewHandle, cx: &AppContext, ) -> SerializedPane { - SerializedPane { - children: pane_handle - .read(cx) - .items() - .filter_map(|item_handle| { - Some(SerializedItem { - kind: Arc::from(item_handle.serialized_item_kind()?), - item_id: item_handle.id(), + let (items, active) = { + let pane = pane_handle.read(cx); + ( + pane.items() + .filter_map(|item_handle| { + 
Some(SerializedItem { + kind: Arc::from(item_handle.serialized_item_kind()?), + item_id: item_handle.id(), + }) }) - }) - .collect::>(), - } + .collect::>(), + pane.is_active(), + ) + }; + + SerializedPane::new(items, active) } let dock_pane = serialize_pane_handle(self.dock.pane(), cx); @@ -2353,7 +2357,7 @@ impl Workspace { // Traverse the splits tree and add to things - let root = serialized_workspace + let (root, active_pane) = serialized_workspace .center_group .deserialize( &project, @@ -2369,11 +2373,14 @@ impl Workspace { // Swap workspace center group workspace.center = PaneGroup::with_root(root); - cx.notify(); - }); - workspace.update(&mut cx, |workspace, cx| { - Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx) + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); + + if let Some(active_pane) = active_pane { + cx.focus(active_pane); + } + + cx.notify(); }); } }) From 992b94eef3e48242a6df24517b7eb8f1efeb6351 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:06:33 -0800 Subject: [PATCH 53/86] Rebased to main --- crates/collab/src/integration_tests.rs | 4 ++-- crates/collab/src/main.rs | 2 +- crates/editor/src/items.rs | 4 ++-- crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/migrations.rs | 2 +- crates/terminal/src/terminal.rs | 2 -- crates/workspace/src/persistence/model.rs | 4 +--- crates/workspace/src/workspace.rs | 4 ++-- 8 files changed, 10 insertions(+), 14 deletions(-) diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs index e1b242713f6cf8cad163190e92f2e45d606f0a6b..386ccfbbff6b7838362fb8dda2253a088943fc5d 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -1,6 +1,6 @@ use crate::{ - db::{NewUserParams, ProjectId, TestDb, UserId}, - rpc::{Executor, Server, Store}, + db::{NewUserParams, ProjectId, SqliteTestDb as TestDb, UserId}, + rpc::{Executor, Server}, AppState, }; diff --git 
a/crates/collab/src/main.rs b/crates/collab/src/main.rs index d26ea1a0fa8edbb92113d7cc2f812b4bc28c0d19..dc98a2ee6855c072f5adc9ed95dbad38626eca48 100644 --- a/crates/collab/src/main.rs +++ b/crates/collab/src/main.rs @@ -9,11 +9,11 @@ mod db_tests; #[cfg(test)] mod integration_tests; -use crate::db::{Db, PostgresDb}; use crate::rpc::ResultExt as _; use anyhow::anyhow; use axum::{routing::get, Router}; use collab::{Error, Result}; +use db::DefaultDb as Db; use serde::Deserialize; use std::{ env::args, diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 60ac8d5278b029a1f14db9a9f47f6365eab96609..5d900cd942c6bd0d62a1bafa9b4c51fc40482152 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,7 +1,7 @@ use crate::{ display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, EditorMode, Event, ExcerptId, - MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, + movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, + MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, }; use anyhow::{anyhow, Context, Result}; use futures::FutureExt; diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 4b39f9463886b74ac4bca0079a4b692a323614eb..acac2eff4c0726682ecf482c99f111106006b5b7 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -24,7 +24,7 @@ impl Domain for Editor { } impl EditorDb { - fn get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { + fn _get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { unimplemented!(); } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index d77d54095bc7b434410eb8058361dd20aaef56a2..23af04bbf4e30be0af9c05ccfa1f8bcc0e56cf4b 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -58,7 +58,7 @@ impl Connection { 
mod test { use indoc::indoc; - use crate::{connection::Connection, thread_safe_connection::ThreadSafeConnection}; + use crate::connection::Connection; #[test] fn test_migrations_are_added_to_table() { diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 66a64903d354ea111622428aa197029457ac0fc4..7e469e19fec03564140f5063f1ee8e243331d345 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -34,9 +34,7 @@ use mappings::mouse::{ use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; -use terminal_container_view::TerminalContainer; use util::ResultExt; -use workspace::register_deserializable_item; use std::{ cmp::min, diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 9eca121c213a0e9b081449356fcec460ce65d966..0d4aade867307f676a3b8bfd36cc4b16294ecb9e 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -16,9 +16,7 @@ use sqlez::{ }; use util::ResultExt; -use crate::{ - dock::DockPosition, item::ItemHandle, ItemDeserializers, Member, Pane, PaneAxis, Workspace, -}; +use crate::{dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct WorkspaceId(Arc>); diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index fbe21be81cd8af42526b69e5b5a59ca9e85d16a2..66fdd19c70b79f50829d7480863189dad852eb55 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -677,7 +677,7 @@ impl Workspace { cx: &mut MutableAppContext, ) -> Task<( ViewHandle, - Vec, Arc>>>, + Vec, anyhow::Error>>>, )> { let project_handle = Project::local( app_state.client.clone(), @@ -740,7 +740,7 @@ impl Workspace { Some( workspace .update(&mut cx, |workspace, cx| { - workspace.open_path(project_path, true, cx) + workspace.open_path(project_path, None, true, cx) }) .await, ) 
From 7ceb5e815e0050a14f922bc36e33d17a8622474f Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:18:23 -0800 Subject: [PATCH 54/86] workspace level integration of serialization complete! Time for item level integration.... Co-Authored-By: kay@zed.dev --- crates/workspace/src/persistence.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 3f04e5046151e7cf9c29624a94dd56268957f34a..772e98f84b445102d85b0e4260470d80fd6ef1ef 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -211,13 +211,13 @@ impl WorkspaceDb { &self, workspace_id: &WorkspaceId, ) -> Result { - self.get_pane_group_children(workspace_id, None)? + self.get_pane_group(workspace_id, None)? .into_iter() .next() .context("No center pane group") } - fn get_pane_group_children( + fn get_pane_group( &self, workspace_id: &WorkspaceId, group_id: Option, @@ -254,7 +254,7 @@ impl WorkspaceDb { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group_children( + children: self.get_pane_group( workspace_id, Some(group_id), )?, @@ -265,6 +265,14 @@ impl WorkspaceDb { bail!("Pane Group Child was neither a pane group or a pane"); } }) + // Filter out panes and pane groups which don't have any children or items + .filter(|pane_group| { + match pane_group { + Ok(SerializedPaneGroup::Group { children, .. 
}) => !children.is_empty(), + Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), + _ => true, + } + }) .collect::>() } From 9077b058a2d4286908b833442746e59f62dcf8cd Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 18 Nov 2022 17:26:01 -0800 Subject: [PATCH 55/86] removed test file --- crates/workspace/dest.db | Bin 36864 -> 0 bytes crates/workspace/src/persistence.rs | 4 ++-- 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 crates/workspace/dest.db diff --git a/crates/workspace/dest.db b/crates/workspace/dest.db deleted file mode 100644 index 95cbdffc92f30106519872ee986be92fdb20ebfa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36864 zcmeI4(N5z=6o&2Hg@jayUQ~+8O$V)3Arf@8+KWnyR%#LqjT#b~n5uhGS#IJI3*yPz z4zRszVYN4vdfThMLLa2h(5L7Nbmlmocw$(Jw5zt!Zv`07*yB0h`OkRHM56aq%a1+w zJr752%$^z_8m4J{&X{2s>*8lc{G^9Ge~_Mtzh%7M@)R;p+^^1B6j9`=gc2$v51T6 zq&eU*n*{#LiAP;}}7dnC1<_`!hL);D4p=}?lpxUj_M zL4Ksvyw~Yk&HXm}(t5F(9YB?Jtv#!2wHsECsfI|uhI)`RI&HRVwJZUuQR_8oyH>X4 z=x|peoVIM`9y_&5d!nq1Hq(}DZ~AkR-VVFXgIf0m88LOt)@;xMXSGyiJ-gd%*wx~4 z`FYti1TF6(dU+}?cic(L>D(a)J5T7(%H`^Eq5SBP`TAQDMc)fzFLY>R*>XXXNVX+W z%G3zOKQq{5^kX*^T|37ipNun-A|**lv61HnQcPksmP*HEAs6bAeG{DVoR{x(b5;W> z@~r#GWaSKYn>Y0CG~04MPOOeLQ{$`GNGWyQPaiE&kX{wYA2cfZwXAs{8ErykE@E=% zm9TFJIkzACXP$DMiYyuAbtUXMW_17iX#;ntrkYLKnF~f5<~}4qcG=q*CW`%tz#qu75m?sbpOU z^$xs2;uyMJ%2|5=O^$Q2IQQ6q2bGu&T@kphIO(6zG|66hF*!N*M_v@Wqj92=U2Cs) z)UsLQs4L!fwqrLBte#yvIGlTrWMCi(Bj$K=0}e;m5RIBz0Ux=3kT56nqu3iOid3WSp{q*ORqeBq7+qbw;CJd|LG0#os!J|#h3m# zKmY_l00ck)1V8`;KmY_l00ck)1QwV;N!o_|e}O#=(?9?OKmY_l00ck)1V8`;KmY_l zz%cOrKU4q$AOHd&00JNY0w4eaAOHd&00N6o0Qvvo`xq9200@8p2!H?xfB*=900@8p z2!H_ce`o*%KmY_l00ck)1V8`;KmY_l00b7F0P_FE_c1I40T2KI5C8!X009sH0T2KI i5C8%E|37E|1V8`;KmY_l00ck)1V8`;KmY_5pTOV8W&uk8 diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 772e98f84b445102d85b0e4260470d80fd6ef1ef..b8beaa0e6d7ff379148b5ecdd1a61f6e13961b53 100644 --- 
a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -377,7 +377,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db, write_db_to}; + use db::{open_memory_db}; use settings::DockAnchor; use super::*; @@ -571,7 +571,7 @@ mod tests { let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); db.save_workspace(None, &workspace); - write_db_to(&db, "dest.db").unwrap(); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); From cb1d2cd1f2984f3c6b2e8de36ee4321785c13c11 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Fri, 18 Nov 2022 20:59:59 -0800 Subject: [PATCH 56/86] WIP serializing and deserializing editors --- crates/editor/src/editor.rs | 27 ++++++++++++++- crates/editor/src/items.rs | 25 +++++++------- crates/editor/src/persistence.rs | 31 ++++++++++++++---- crates/sqlez/src/thread_safe_connection.rs | 38 +++++++++++----------- 4 files changed, 83 insertions(+), 38 deletions(-) diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index d1ccc5e8f3020a420fd46f383d6add7961954c64..81cf7a921137f924d3dc96dc2c59e7bdb45d67ea 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -83,7 +83,7 @@ use theme::{DiagnosticStyle, Theme}; use util::{post_inc, ResultExt, TryFutureExt}; use workspace::{ItemNavHistory, Workspace}; -use crate::git::diff_hunk_to_display; +use crate::{git::diff_hunk_to_display, persistence::DB}; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); @@ -1137,6 +1137,31 @@ impl Editor { cx: &mut ViewContext, ) -> Self { let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx)); + if let Some(project) = project.as_ref() { + if let Some(file) = buffer + .read(cx) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()) + .and_then(|file| file.as_local()) + { + let item_id = 
cx.weak_handle().id(); + let workspace_id = project + .read(cx) + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into(); + let path = file.abs_path(cx); + dbg!(&path); + + cx.background() + .spawn(async move { + DB.save_path(item_id, workspace_id, path).log_err(); + }) + .detach(); + } + } + Self::new(EditorMode::Full, buffer, project, None, cx) } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 5d900cd942c6bd0d62a1bafa9b4c51fc40482152..f7dcd57f42426d6239ec5a7f75fb5dbfff23ea62 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -1,8 +1,3 @@ -use crate::{ - display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, - movement::surrounding_word, Anchor, Autoscroll, Editor, Event, ExcerptId, MultiBuffer, - MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, -}; use anyhow::{anyhow, Context, Result}; use futures::FutureExt; use gpui::{ @@ -29,6 +24,12 @@ use workspace::{ ItemId, ItemNavHistory, Pane, StatusItemView, ToolbarItemLocation, Workspace, WorkspaceId, }; +use crate::{ + display_map::ToDisplayPoint, link_go_to_definition::hide_link_definition, + movement::surrounding_word, persistence::DB, Anchor, Autoscroll, Editor, Event, ExcerptId, + MultiBuffer, MultiBufferSnapshot, NavigationData, ToPoint as _, FORMAT_TIMEOUT, +}; + pub const MAX_TAB_TITLE_LEN: usize = 24; impl FollowableItem for Editor { @@ -554,21 +555,21 @@ impl Item for Editor { } fn serialized_item_kind() -> Option<&'static str> { - // TODO: Some("Editor") - None + Some("Editor") } fn deserialize( project: ModelHandle, _workspace: WeakViewHandle, - _workspace_id: WorkspaceId, - _item_id: ItemId, + workspace_id: WorkspaceId, + item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { - // Look up the path with this key associated, create a self with that path - let path = Path::new("."); if let Some(project_item) = project.update(cx, |project, cx| { - let (worktree, path) = 
project.find_local_worktree(path, cx)?; + // Look up the path with this key associated, create a self with that path + let path = DB.get_path(item_id, workspace_id).ok()?; + dbg!(&path); + let (worktree, path) = project.find_local_worktree(&path, cx)?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), path: path.into(), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index acac2eff4c0726682ecf482c99f111106006b5b7..2c190d86086e799e3221cadaffc4de5f6a7c0856 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,11 +1,11 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; +use anyhow::{Context, Result}; use db::connection; use indoc::indoc; use lazy_static::lazy_static; -use project::WorktreeId; use sqlez::domain::Domain; -use workspace::{ItemId, Workspace}; +use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Editor; @@ -18,13 +18,32 @@ impl Domain for Editor { fn migrations() -> &'static [&'static str] { &[indoc! {" - + CREATE TABLE editors( + item_id INTEGER NOT NULL, + workspace_id BLOB NOT NULL, + path BLOB NOT NULL, + PRIMARY KEY(item_id, workspace_id) + ) STRICT; "}] } } impl EditorDb { - fn _get_path(_item_id: ItemId, _workspace_id: WorktreeId) -> PathBuf { - unimplemented!(); + pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result { + self.select_row_bound(indoc! {" + SELECT path FROM editors + WHERE item_id = ? AND workspace_id = ?"})?((item_id, &workspace_id))? + .context("Path not found for serialized editor") + } + + pub fn save_path( + &self, + item_id: ItemId, + workspace_id: WorkspaceId, + path: PathBuf, + ) -> Result<()> { + self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! 
{" + INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)"})?((item_id, &workspace_id, &path)) } } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 4ed180540746ce96290380c632591eccfeae5d8d..5a5095ad7771063128239e76394c5a71560e8c93 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -129,25 +129,25 @@ mod test { fn migrations() -> &'static [&'static str] { &[" - CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - dock_visible INTEGER, -- Boolean - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet - timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, - FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), - FOREIGN KEY(active_pane) REFERENCES panes(pane_id) - ) STRICT; - - CREATE TABLE panes( - pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, - active INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE - ) STRICT; - "] + CREATE TABLE workspaces( + workspace_id BLOB PRIMARY KEY, + dock_visible INTEGER, -- Boolean + dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, + FOREIGN KEY(dock_pane) REFERENCES panes(pane_id), + FOREIGN KEY(active_pane) REFERENCES panes(pane_id) + ) STRICT; + + CREATE TABLE panes( + pane_id INTEGER PRIMARY KEY, + workspace_id BLOB NOT NULL, + active INTEGER NOT NULL, -- Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "] } } From a8ed95e1dcce910ec3b4bb8298885d2b67a7ea8a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 19 Nov 2022 15:14:13 -0800 Subject: [PATCH 57/86] Implementing persistence for the terminal working 
directory, found an issue with my current data model. :( --- Cargo.lock | 2 +- crates/db/src/db.rs | 12 ++-- crates/editor/src/persistence.rs | 7 ++- crates/terminal/Cargo.toml | 1 + crates/terminal/src/persistence.rs | 61 +++++++++++++++++++ crates/terminal/src/terminal.rs | 21 ++++++- .../terminal/src/terminal_container_view.rs | 18 ++++-- crates/workspace/Cargo.toml | 1 - crates/workspace/src/persistence.rs | 3 +- crates/workspace/src/persistence/model.rs | 8 +-- 10 files changed, 113 insertions(+), 21 deletions(-) create mode 100644 crates/terminal/src/persistence.rs diff --git a/Cargo.lock b/Cargo.lock index d53e91aa71afc034cecacb32c57f0ed12b2c730b..e887dfee66b7d4e0ed3a72bea56bef96bfef6a84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5889,6 +5889,7 @@ dependencies = [ "anyhow", "client", "context_menu", + "db", "dirs 4.0.0", "editor", "futures 0.3.25", @@ -7659,7 +7660,6 @@ dependencies = [ "serde_json", "settings", "smallvec", - "sqlez", "theme", "util", ] diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 39891718fb3a796c386b8f823a4b73f3b6169fd4..6e4e6e0619e19f1e8a6b2825126f5c84ac05657c 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,11 +1,15 @@ pub mod kvp; +// Re-export indoc and sqlez so clients only need to include us +pub use indoc::indoc; +pub use lazy_static; +pub use sqlez; + use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; -use indoc::indoc; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; use sqlez::domain::{Domain, Migrator}; @@ -54,17 +58,17 @@ pub fn write_db_to>( #[macro_export] macro_rules! 
connection { ($id:ident: $t:ident<$d:ty>) => { - pub struct $t(::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); impl ::std::ops::Deref for $t { - type Target = ::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; fn deref(&self) -> &Self::Target { &self.0 } } - lazy_static! { + ::db::lazy_static::lazy_static! { pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { ::db::open_memory_db(None) } else { diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 2c190d86086e799e3221cadaffc4de5f6a7c0856..5870bc71e53044d9e6938c8fc79d5bef138c0bcb 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -3,7 +3,6 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; use db::connection; use indoc::indoc; -use lazy_static::lazy_static; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -22,7 +21,11 @@ impl Domain for Editor { item_id INTEGER NOT NULL, workspace_id BLOB NOT NULL, path BLOB NOT NULL, - PRIMARY KEY(item_id, workspace_id) + PRIMARY KEY(item_id, workspace_id), + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; "}] } diff --git a/crates/terminal/Cargo.toml b/crates/terminal/Cargo.toml index 785cf3365b9e62afbd076147f1f23d947b9757d9..5593ee92d4dc4fc4135c8f30a6dbaeee6753eb6d 100644 --- a/crates/terminal/Cargo.toml +++ b/crates/terminal/Cargo.toml @@ -17,6 +17,7 @@ settings = { path = "../settings" } theme = { path = "../theme" } util = { path = "../util" } workspace = { path = "../workspace" } +db = { path = "../db" } alacritty_terminal = { git = "https://github.com/zed-industries/alacritty", rev = "a51dbe25d67e84d6ed4261e640d3954fbdd9be45" } procinfo = { git = "https://github.com/zed-industries/wezterm", rev = 
"5cd757e5f2eb039ed0c6bb6512223e69d5efc64d", default-features = false } smallvec = { version = "1.6", features = ["union"] } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs new file mode 100644 index 0000000000000000000000000000000000000000..c7808b0dbfa3fd8e84a60b61e41b565cbcce90ac --- /dev/null +++ b/crates/terminal/src/persistence.rs @@ -0,0 +1,61 @@ +use std::path::{Path, PathBuf}; + +use db::{connection, indoc, sqlez::domain::Domain}; +use util::{iife, ResultExt}; +use workspace::{ItemId, Workspace, WorkspaceId}; + +use crate::Terminal; + +connection!(TERMINAL_CONNECTION: TerminalDb<(Workspace, Terminal)>); + +impl Domain for Terminal { + fn name() -> &'static str { + "terminal" + } + + fn migrations() -> &'static [&'static str] { + &[indoc! {" + CREATE TABLE terminals ( + item_id INTEGER, + workspace_id BLOB, + working_directory BLOB, + PRIMARY KEY(item_id, workspace_id), + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + "}] + } +} + +impl TerminalDb { + pub fn save_working_directory( + &self, + item_id: ItemId, + workspace_id: &WorkspaceId, + working_directory: &Path, + ) { + iife!({ + self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! {" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?, ?, ?) + "})?((item_id, workspace_id, working_directory)) + }) + .log_err(); + } + + pub fn get_working_directory( + &self, + item_id: ItemId, + workspace_id: &WorkspaceId, + ) -> Option { + iife!({ + self.select_row_bound::<(ItemId, &WorkspaceId), PathBuf>(indoc! {" + SELECT working_directory + FROM terminals + WHERE item_id = ? 
workspace_id = ?"})?((item_id, workspace_id)) + }) + .log_err() + .flatten() + } +} diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 7e469e19fec03564140f5063f1ee8e243331d345..088729ff02d6e556471ec1d9abc31002899cfabc 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -1,4 +1,5 @@ pub mod mappings; +mod persistence; pub mod terminal_container_view; pub mod terminal_element; pub mod terminal_view; @@ -32,9 +33,11 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; +use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; +use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -281,6 +284,8 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, + item_id: ItemId, + workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -385,6 +390,8 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, + workspace_id, + item_id, }; Ok(TerminalBuilder { @@ -528,6 +535,8 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, + item_id: ItemId, + workspace_id: WorkspaceId, } impl Terminal { @@ -567,7 +576,17 @@ impl Terminal { cx.emit(Event::Wakeup); if self.update_process_info() { - cx.emit(Event::TitleChanged) + cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = self.foreground_process_info { + cx.background().spawn(async move { + TERMINAL_CONNECTION.save_working_directory( + self.item_id, + &self.workspace_id, + &foreground_info.cwd, + ); + }); + } } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 
49b6ae341f56d26294e09be8c2bea322cee4e7af..2789f8167687da98a3d92247747e367d0f561fd0 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -1,3 +1,4 @@ +use crate::persistence::TERMINAL_CONNECTION; use crate::terminal_view::TerminalView; use crate::{Event, Terminal, TerminalBuilder, TerminalError}; @@ -13,7 +14,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, Pane}; +use workspace::{register_deserializable_item, ItemId, Pane, WorkspaceId}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -89,6 +90,8 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, + item_id: ItemId, + workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -115,6 +118,8 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), + item_id, + workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); @@ -386,13 +391,14 @@ impl Item for TerminalContainer { fn deserialize( _project: ModelHandle, _workspace: WeakViewHandle, - _workspace_id: workspace::WorkspaceId, - _item_id: workspace::ItemId, + workspace_id: workspace::WorkspaceId, + item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - // TODO: Pull the current working directory out of the DB. 
- - Task::ready(Ok(cx.add_view(|cx| TerminalContainer::new(None, false, cx)))) + let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id); + Task::ready(Ok(cx.add_view(|cx| { + TerminalContainer::new(working_directory, false, cx) + }))) } } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 0ce3bc220b89503af9d99bc8517d96ab1684a039..b67ccdeeb73b22b680055287abdb835c5178d959 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -30,7 +30,6 @@ language = { path = "../language" } menu = { path = "../menu" } project = { path = "../project" } settings = { path = "../settings" } -sqlez = { path = "../sqlez" } theme = { path = "../theme" } util = { path = "../util" } async-recursion = "1.0.0" diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index b8beaa0e6d7ff379148b5ecdd1a61f6e13961b53..372c4cafce617690536a709db00db2a9dfcddfe6 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -9,10 +9,9 @@ use anyhow::{anyhow, bail, Result, Context}; use db::connection; use gpui::Axis; use indoc::indoc; -use lazy_static::lazy_static; -use sqlez::domain::Domain; +use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; use crate::dock::DockPosition; diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 0d4aade867307f676a3b8bfd36cc4b16294ecb9e..5f046d76ee088f928b7630aed2c6aab887254057 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -8,12 +8,12 @@ use anyhow::Result; use async_recursion::async_recursion; use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle}; -use project::Project; -use settings::DockAnchor; -use sqlez::{ +use db::sqlez::{ bindable::{Bind, Column}, statement::Statement, }; +use project::Project; +use settings::DockAnchor; use util::ResultExt; use crate::{dock::DockPosition, 
ItemDeserializers, Member, Pane, PaneAxis, Workspace}; @@ -228,8 +228,8 @@ impl Column for DockPosition #[cfg(test)] mod tests { + use db::sqlez::connection::Connection; use settings::DockAnchor; - use sqlez::connection::Connection; use super::WorkspaceId; From e659823e6c309561c276d0ba451cb6ef331484c7 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sun, 20 Nov 2022 19:19:42 -0800 Subject: [PATCH 58/86] WIP terminal implementation. Need some way of getting the currently valid workspace ID --- crates/db/Cargo.toml | 1 - crates/sqlez/src/lib.rs | 2 + crates/sqlez/src/typed_statements.rs | 54 +++++++++++++++++++ crates/terminal/src/persistence.rs | 46 ++++++---------- crates/terminal/src/terminal.rs | 26 ++++----- .../terminal/src/terminal_container_view.rs | 10 ++-- 6 files changed, 84 insertions(+), 55 deletions(-) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 27a11bea7bcfad2110770cd0708c63909fcc7d8c..70721c310c75a0d81b7b20086d80b4b521b61005 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -23,7 +23,6 @@ log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } - [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } tempdir = { version = "0.3.7" } diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index ecebbd264301040aa5c40a9e9daa4d52184081cc..c5d2658666933ee710470269e316bc3ac943fdd7 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -1,3 +1,5 @@ +pub use anyhow; + pub mod bindable; pub mod connection; pub mod domain; diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index 98f51b970a1e856df60f0f574419fdea0ea7d757..ce289437c2bbb768633b239cb95da8dae10815af 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -52,3 +52,57 @@ impl Connection { Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) } }
+#[macro_export] +macro_rules! exec_method { + ($id:ident(): $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result<()> { + iife!({ + self.exec($sql)?() + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + iife!({ + self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + }) + } + }; +} + +#[macro_export] +macro_rules! select_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result> { + iife!({ + self.select::<$return_type>($sql)?(()) + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + iife!({ + self.exec_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + }) + } + }; +} + +#[macro_export] +macro_rules! select_row_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::anyhow::Result> { + iife!({ + self.select_row::<$return_type>($sql)?(()) + }) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + iife!({ + self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + }) + } + }; +} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index c7808b0dbfa3fd8e84a60b61e41b565cbcce90ac..9c721055304207698af1a6edee40f7c07d47ce51 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,7 +1,10 @@ use std::path::{Path, PathBuf}; -use db::{connection, indoc, sqlez::domain::Domain}; -use util::{iife, ResultExt}; +use db::{ + connection, indoc, + sqlez::{domain::Domain, exec_method, select_row_method}, +}; +use util::iife; use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Terminal; @@ -29,33 +32,16 @@ impl Domain for Terminal { } impl TerminalDb { - 
pub fn save_working_directory( - &self, - item_id: ItemId, - workspace_id: &WorkspaceId, - working_directory: &Path, - ) { - iife!({ - self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! {" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?, ?, ?) - "})?((item_id, workspace_id, working_directory)) - }) - .log_err(); - } + exec_method!( + save_working_directory(item_id: ItemId, workspace_id: &WorkspaceId, working_directory: &Path): + "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?, ?, ?)" + ); - pub fn get_working_directory( - &self, - item_id: ItemId, - workspace_id: &WorkspaceId, - ) -> Option { - iife!({ - self.select_row_bound::<(ItemId, &WorkspaceId), PathBuf>(indoc! {" - SELECT working_directory - FROM terminals - WHERE item_id = ? workspace_id = ?"})?((item_id, workspace_id)) - }) - .log_err() - .flatten() - } + select_row_method!( + get_working_directory(item_id: ItemId, workspace_id: &WorkspaceId) -> PathBuf: + "SELECT working_directory + FROM terminals + WHERE item_id = ? workspace_id = ?" 
+ ); } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 088729ff02d6e556471ec1d9abc31002899cfabc..1c564afc63b67607c74df333973b9e547bb81f06 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -33,11 +33,9 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; -use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; -use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -284,8 +282,6 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, - item_id: ItemId, - workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -390,8 +386,6 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, - workspace_id, - item_id, }; Ok(TerminalBuilder { @@ -535,8 +529,6 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, - item_id: ItemId, - workspace_id: WorkspaceId, } impl Terminal { @@ -578,15 +570,15 @@ impl Terminal { if self.update_process_info() { cx.emit(Event::TitleChanged); - if let Some(foreground_info) = self.foreground_process_info { - cx.background().spawn(async move { - TERMINAL_CONNECTION.save_working_directory( - self.item_id, - &self.workspace_id, - &foreground_info.cwd, - ); - }); - } + // if let Some(foreground_info) = self.foreground_process_info { + // cx.background().spawn(async move { + // TERMINAL_CONNECTION.save_working_directory( + // self.item_id, + // &self.workspace_id, + // &foreground_info.cwd, + // ); + // }); + // } } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 
2789f8167687da98a3d92247747e367d0f561fd0..88d4862bdca9d779323d2ecc756d432f83a84ba2 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -8,13 +8,13 @@ use gpui::{ actions, elements::*, AnyViewHandle, AppContext, Entity, ModelHandle, MutableAppContext, Task, View, ViewContext, ViewHandle, WeakViewHandle, }; -use util::truncate_and_trailoff; +use util::{truncate_and_trailoff, ResultExt}; use workspace::searchable::{SearchEvent, SearchOptions, SearchableItem, SearchableItemHandle}; use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, ItemId, Pane, WorkspaceId}; +use workspace::{register_deserializable_item, Pane}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -90,8 +90,6 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, - item_id: ItemId, - workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -118,8 +116,6 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), - item_id, - workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); @@ -397,7 +393,7 @@ impl Item for TerminalContainer { ) -> Task>> { let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id); Task::ready(Ok(cx.add_view(|cx| { - TerminalContainer::new(working_directory, false, cx) + TerminalContainer::new(working_directory.log_err().flatten(), false, cx) }))) } } From a47f2ca445673b3f8896be4b554cb77f4fc39892 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sun, 20 Nov 2022 22:41:10 -0800 Subject: [PATCH 59/86] Added UUID based, stable workspace ID for caching on item startup. Completed first sketch of terminal persistence. Still need to debug it though.... 
--- Cargo.lock | 2 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 141 ++++++- crates/diagnostics/src/diagnostics.rs | 6 +- crates/editor/src/editor.rs | 50 +-- crates/editor/src/items.rs | 5 +- crates/editor/src/persistence.rs | 18 +- crates/search/src/project_search.rs | 4 +- crates/sqlez/src/bindable.rs | 7 + crates/sqlez/src/typed_statements.rs | 54 --- crates/terminal/src/persistence.rs | 20 +- crates/terminal/src/terminal.rs | 35 +- .../terminal/src/terminal_container_view.rs | 40 +- crates/workspace/src/dock.rs | 2 +- crates/workspace/src/item.rs | 31 +- crates/workspace/src/persistence.rs | 345 +++++++++--------- crates/workspace/src/persistence/model.rs | 49 ++- crates/workspace/src/shared_screen.rs | 10 +- crates/workspace/src/workspace.rs | 43 ++- crates/zed/src/main.rs | 4 +- 20 files changed, 502 insertions(+), 365 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e887dfee66b7d4e0ed3a72bea56bef96bfef6a84..f4998f235a7778d51d956ec803dcc55ea8b33c71 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,6 +1572,7 @@ dependencies = [ "sqlez", "tempdir", "util", + "uuid 1.2.2", ] [[package]] @@ -6834,6 +6835,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom 0.2.8", + "rand 0.8.5", ] [[package]] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 70721c310c75a0d81b7b20086d80b4b521b61005..7e58b2e9bfcee5420287bd0f321f007db5f81e39 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,6 +22,7 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } +uuid = { version = "1.2.2", features = ["v4", "fast-rng"] } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 
6e4e6e0619e19f1e8a6b2825126f5c84ac05657c..aa09dc812dfd007427b879a021cb918b931842eb 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,21 +1,26 @@ pub mod kvp; -// Re-export indoc and sqlez so clients only need to include us +// Re-export +pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; - -use std::fs::{create_dir_all, remove_dir_all}; -use std::path::Path; +use sqlez::bindable::{Bind, Column}; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; #[cfg(any(test, feature = "test-support"))] use sqlez::connection::Connection; -use sqlez::domain::{Domain, Migrator}; +#[cfg(any(test, feature = "test-support"))] +use sqlez::domain::Domain; + +use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; +use std::fs::{create_dir_all, remove_dir_all}; +use std::path::Path; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; +use uuid::Uuid as RealUuid; const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; @@ -25,6 +30,47 @@ const INITIALIZE_QUERY: &'static str = indoc! 
{" PRAGMA case_sensitive_like=TRUE; "}; +#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct Uuid(RealUuid); + +impl std::ops::Deref for Uuid { + type Target = RealUuid; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl Bind for Uuid { + fn bind( + &self, + statement: &sqlez::statement::Statement, + start_index: i32, + ) -> anyhow::Result { + statement.bind(self.as_bytes(), start_index) + } +} + +impl Column for Uuid { + fn column( + statement: &mut sqlez::statement::Statement, + start_index: i32, + ) -> anyhow::Result<(Self, i32)> { + let blob = statement.column_blob(start_index)?; + Ok((Uuid::from_bytes(blob)?, start_index + 1)) + } +} + +impl Uuid { + pub fn new() -> Self { + Uuid(RealUuid::new_v4()) + } + + fn from_bytes(bytes: &[u8]) -> anyhow::Result { + Ok(Uuid(RealUuid::from_bytes(bytes.try_into()?))) + } +} + /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -77,3 +123,88 @@ macro_rules! connection { } }; } + +#[macro_export] +macro_rules! exec_method { + ($id:ident(): $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec($sql)?() + .context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; +} + +#[macro_export] +macro_rules! 
select_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; +} + +#[macro_export] +macro_rules! select_row_method { + ($id:ident() -> $return_type:ty: $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_row::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + + } + }; +} diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index 639a1087247c2c138a8858199cc8217578a98fbd..ef8b81ac660867b6781e2c01885fe35c5cfe179a 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs @@ -584,7 +584,11 @@ impl Item for ProjectDiagnosticsEditor { }); } - fn clone_on_split(&self, 
cx: &mut ViewContext) -> Option + fn clone_on_split( + &self, + _workspace_id: workspace::WorkspaceId, + cx: &mut ViewContext, + ) -> Option where Self: Sized, { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 81cf7a921137f924d3dc96dc2c59e7bdb45d67ea..d66fc3e28c2439fdf22c7afc01df4c70061ae4ad 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -83,7 +83,7 @@ use theme::{DiagnosticStyle, Theme}; use util::{post_inc, ResultExt, TryFutureExt}; use workspace::{ItemNavHistory, Workspace}; -use crate::{git::diff_hunk_to_display, persistence::DB}; +use crate::git::diff_hunk_to_display; const CURSOR_BLINK_INTERVAL: Duration = Duration::from_millis(500); const SCROLLBAR_SHOW_INTERVAL: Duration = Duration::from_secs(1); @@ -1137,30 +1137,30 @@ impl Editor { cx: &mut ViewContext, ) -> Self { let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx)); - if let Some(project) = project.as_ref() { - if let Some(file) = buffer - .read(cx) - .as_singleton() - .and_then(|buffer| buffer.read(cx).file()) - .and_then(|file| file.as_local()) - { - let item_id = cx.weak_handle().id(); - let workspace_id = project - .read(cx) - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect::>() - .into(); - let path = file.abs_path(cx); - dbg!(&path); - - cx.background() - .spawn(async move { - DB.save_path(item_id, workspace_id, path).log_err(); - }) - .detach(); - } - } + // if let Some(project) = project.as_ref() { + // if let Some(file) = buffer + // .read(cx) + // .as_singleton() + // .and_then(|buffer| buffer.read(cx).file()) + // .and_then(|file| file.as_local()) + // { + // // let item_id = cx.weak_handle().id(); + // // let workspace_id = project + // // .read(cx) + // // .visible_worktrees(cx) + // // .map(|worktree| worktree.read(cx).abs_path()) + // // .collect::>() + // // .into(); + // let path = file.abs_path(cx); + // dbg!(&path); + + // // cx.background() + // // .spawn(async move { + // // 
DB.save_path(item_id, workspace_id, path).log_err(); + // // }) + // // .detach(); + // } + // } Self::new(EditorMode::Full, buffer, project, None, cx) } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index f7dcd57f42426d6239ec5a7f75fb5dbfff23ea62..1e695d2364df47c31cabad8a3731422e5439b995 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -368,7 +368,7 @@ impl Item for Editor { self.buffer.read(cx).is_singleton() } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option + fn clone_on_split(&self, _workspace_id: WorkspaceId, cx: &mut ViewContext) -> Option where Self: Sized, { @@ -561,14 +561,13 @@ impl Item for Editor { fn deserialize( project: ModelHandle, _workspace: WeakViewHandle, - workspace_id: WorkspaceId, + workspace_id: workspace::WorkspaceId, item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { if let Some(project_item) = project.update(cx, |project, cx| { // Look up the path with this key associated, create a self with that path let path = DB.get_path(item_id, workspace_id).ok()?; - dbg!(&path); let (worktree, path) = project.find_local_worktree(&path, cx)?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 5870bc71e53044d9e6938c8fc79d5bef138c0bcb..4a44a32447c7087c3c7950d3630e626af1d4972d 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; -use db::connection; +use db::{connection, exec_method}; use indoc::indoc; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -35,18 +35,12 @@ impl EditorDb { pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result { self.select_row_bound(indoc! {" SELECT path FROM editors - WHERE item_id = ? AND workspace_id = ?"})?((item_id, &workspace_id))? + WHERE item_id = ? 
AND workspace_id = ?"})?((item_id, workspace_id))? .context("Path not found for serialized editor") } - pub fn save_path( - &self, - item_id: ItemId, - workspace_id: WorkspaceId, - path: PathBuf, - ) -> Result<()> { - self.exec_bound::<(ItemId, &WorkspaceId, &Path)>(indoc! {" - INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?)"})?((item_id, &workspace_id, &path)) - } + exec_method!(save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path): + "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)" + ); } diff --git a/crates/search/src/project_search.rs b/crates/search/src/project_search.rs index 322d035870caf5045024c70073413e31718848f6..6fa7d07d6f96739ea5ed9ba6b185e16ebf113a69 100644 --- a/crates/search/src/project_search.rs +++ b/crates/search/src/project_search.rs @@ -26,7 +26,7 @@ use util::ResultExt as _; use workspace::{ item::{Item, ItemEvent, ItemHandle}, searchable::{Direction, SearchableItem, SearchableItemHandle}, - ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, + ItemNavHistory, Pane, ToolbarItemLocation, ToolbarItemView, Workspace, WorkspaceId, }; actions!(project_search, [SearchInNew, ToggleFocus]); @@ -315,7 +315,7 @@ impl Item for ProjectSearchView { .update(cx, |editor, cx| editor.reload(project, cx)) } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option + fn clone_on_split(&self, _workspace_id: WorkspaceId, cx: &mut ViewContext) -> Option where Self: Sized, { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 1e4f0df33fd1e39b676999a1c30cd6c0269052e5..18c4acedad6bc73460ae1d36a6e901536c49f49b 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -36,6 +36,13 @@ impl Bind for &[u8] { } } +impl Bind for &[u8; C] { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + statement.bind_blob(start_index, self.as_slice())?; + Ok(start_index + 1) + } +} + impl Bind for Vec { fn bind(&self, 
statement: &Statement, start_index: i32) -> Result { statement.bind_blob(start_index, self)?; diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index ce289437c2bbb768633b239cb95da8dae10815af..98f51b970a1e856df60f0f574419fdea0ea7d757 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -52,57 +52,3 @@ impl Connection { Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) } } - -#[macro_export] -macro_rules! exec_method { - ($id:ident(): $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result<()> { - iife!({ - self.exec($sql)?() - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - iife!({ - self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) - }) - } - }; -} - -#[macro_export] -macro_rules! select_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result> { - iife!({ - self.select::<$return_type>($sql)?(()) - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - iife!({ - self.exec_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) - }) - } - }; -} - -#[macro_export] -macro_rules! 
select_row_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { - pub fn $id(&self) -> $crate::anyhow::Result> { - iife!({ - self.select_row::<$return_type>($sql)?(()) - }) - } - }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - iife!({ - self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) - }) - } - }; -} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 9c721055304207698af1a6edee40f7c07d47ce51..8928164676ddbd3c7eafdc298bb955f2d58e77e0 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,10 +1,7 @@ use std::path::{Path, PathBuf}; -use db::{ - connection, indoc, - sqlez::{domain::Domain, exec_method, select_row_method}, -}; -use util::iife; +use db::{connection, exec_method, indoc, select_row_method, sqlez::domain::Domain}; + use workspace::{ItemId, Workspace, WorkspaceId}; use crate::Terminal; @@ -19,13 +16,12 @@ impl Domain for Terminal { fn migrations() -> &'static [&'static str] { &[indoc! 
{" CREATE TABLE terminals ( - item_id INTEGER, workspace_id BLOB, + item_id INTEGER, working_directory BLOB, - PRIMARY KEY(item_id, workspace_id), + PRIMARY KEY(workspace_id, item_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ON UPDATE CASCADE ) STRICT; "}] } @@ -33,15 +29,15 @@ impl Domain for Terminal { impl TerminalDb { exec_method!( - save_working_directory(item_id: ItemId, workspace_id: &WorkspaceId, working_directory: &Path): + save_working_directory(model_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?, ?, ?)" + VALUES (?1, ?2, ?3)" ); select_row_method!( - get_working_directory(item_id: ItemId, workspace_id: &WorkspaceId) -> PathBuf: + get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> PathBuf: "SELECT working_directory FROM terminals - WHERE item_id = ? workspace_id = ?" + WHERE item_id = ? AND workspace_id = ?" ); } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index 1c564afc63b67607c74df333973b9e547bb81f06..fdf16b78256ac6a5ce0ea27c86ed5ed610300a11 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -33,9 +33,11 @@ use mappings::mouse::{ alt_scroll, grid_point, mouse_button_report, mouse_moved_report, mouse_side, scroll_report, }; +use persistence::TERMINAL_CONNECTION; use procinfo::LocalProcessInfo; use settings::{AlternateScroll, Settings, Shell, TerminalBlink}; use util::ResultExt; +use workspace::{ItemId, WorkspaceId}; use std::{ cmp::min, @@ -282,6 +284,8 @@ impl TerminalBuilder { blink_settings: Option, alternate_scroll: &AlternateScroll, window_id: usize, + item_id: ItemId, + workspace_id: WorkspaceId, ) -> Result { let pty_config = { let alac_shell = shell.clone().and_then(|shell| match shell { @@ -386,6 +390,8 @@ impl TerminalBuilder { last_mouse_position: None, next_link_id: 0, selection_phase: SelectionPhase::Ended, 
+ workspace_id, + item_id, }; Ok(TerminalBuilder { @@ -529,6 +535,8 @@ pub struct Terminal { scroll_px: f32, next_link_id: usize, selection_phase: SelectionPhase, + workspace_id: WorkspaceId, + item_id: ItemId, } impl Terminal { @@ -566,20 +574,6 @@ impl Terminal { } AlacTermEvent::Wakeup => { cx.emit(Event::Wakeup); - - if self.update_process_info() { - cx.emit(Event::TitleChanged); - - // if let Some(foreground_info) = self.foreground_process_info { - // cx.background().spawn(async move { - // TERMINAL_CONNECTION.save_working_directory( - // self.item_id, - // &self.workspace_id, - // &foreground_info.cwd, - // ); - // }); - // } - } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { self.events @@ -888,6 +882,19 @@ impl Terminal { if self.update_process_info() { cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = &self.foreground_process_info { + let cwd = foreground_info.cwd.clone(); + let item_id = self.item_id; + let workspace_id = self.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + .save_working_directory(item_id, workspace_id, cwd.as_path()) + .log_err(); + }) + .detach(); + } } //Note that the ordering of events matters for event processing diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index 88d4862bdca9d779323d2ecc756d432f83a84ba2..fdda38864230a67dc16a5eeb62858d90a2006b8b 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -1,6 +1,6 @@ use crate::persistence::TERMINAL_CONNECTION; use crate::terminal_view::TerminalView; -use crate::{Event, Terminal, TerminalBuilder, TerminalError}; +use crate::{Event, TerminalBuilder, TerminalError}; use alacritty_terminal::index::Point; use dirs::home_dir; @@ -14,7 +14,7 @@ use workspace::{ item::{Item, ItemEvent}, ToolbarItemLocation, Workspace, }; -use workspace::{register_deserializable_item, Pane}; +use workspace::{register_deserializable_item, Pane, 
WorkspaceId}; use project::{LocalWorktree, Project, ProjectPath}; use settings::{AlternateScroll, Settings, WorkingDirectory}; @@ -82,7 +82,9 @@ impl TerminalContainer { .unwrap_or(WorkingDirectory::CurrentProjectDirectory); let working_directory = get_working_directory(workspace, cx, strategy); - let view = cx.add_view(|cx| TerminalContainer::new(working_directory, false, cx)); + let view = cx.add_view(|cx| { + TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + }); workspace.add_item(Box::new(view), cx); } @@ -90,6 +92,7 @@ impl TerminalContainer { pub fn new( working_directory: Option, modal: bool, + workspace_id: WorkspaceId, cx: &mut ViewContext, ) -> Self { let settings = cx.global::(); @@ -116,10 +119,13 @@ impl TerminalContainer { settings.terminal_overrides.blinking.clone(), scroll, cx.window_id(), + cx.view_id(), + workspace_id, ) { Ok(terminal) => { let terminal = cx.add_model(|cx| terminal.subscribe(cx)); let view = cx.add_view(|cx| TerminalView::from_terminal(terminal, modal, cx)); + cx.subscribe(&view, |_this, _content, event, cx| cx.emit(*event)) .detach(); TerminalContainerContent::Connected(view) @@ -139,18 +145,6 @@ impl TerminalContainer { } } - pub fn from_terminal( - terminal: ModelHandle, - modal: bool, - cx: &mut ViewContext, - ) -> Self { - let connected_view = cx.add_view(|cx| TerminalView::from_terminal(terminal, modal, cx)); - TerminalContainer { - content: TerminalContainerContent::Connected(connected_view), - associated_directory: None, - } - } - fn connected(&self) -> Option> { match &self.content { TerminalContainerContent::Connected(vh) => Some(vh.clone()), @@ -278,13 +272,18 @@ impl Item for TerminalContainer { .boxed() } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option { + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut ViewContext, + ) -> Option { //From what I can tell, there's no way to tell the current working //Directory of the terminal from outside the shell. 
There might be //solutions to this, but they are non-trivial and require more IPC Some(TerminalContainer::new( self.associated_directory.clone(), false, + workspace_id, cx, )) } @@ -391,9 +390,14 @@ impl Item for TerminalContainer { item_id: workspace::ItemId, cx: &mut ViewContext, ) -> Task>> { - let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, &workspace_id); + let working_directory = TERMINAL_CONNECTION.get_working_directory(item_id, workspace_id); Task::ready(Ok(cx.add_view(|cx| { - TerminalContainer::new(working_directory.log_err().flatten(), false, cx) + TerminalContainer::new( + working_directory.log_err().flatten(), + false, + workspace_id, + cx, + ) }))) } } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 2e4fbcad6f9419603826a83bf558885e2235e1ff..fb28571172832ae411849112d704058e30ed02c0 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -206,7 +206,7 @@ impl Dock { cx.focus(last_active_center_pane); } cx.emit(crate::Event::DockAnchorChanged); - workspace.serialize_workspace(None, cx); + workspace.serialize_workspace(cx); cx.notify(); } diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs index d006f2fe1507bf5c0192493c3fb2c0835a3c718c..b990ba20a2413f46e80689f1e493486e3645713b 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -22,11 +22,8 @@ use theme::Theme; use util::ResultExt; use crate::{ - pane, - persistence::model::{ItemId, WorkspaceId}, - searchable::SearchableItemHandle, - DelayedDebouncedEditAction, FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, - Workspace, + pane, persistence::model::ItemId, searchable::SearchableItemHandle, DelayedDebouncedEditAction, + FollowableItemBuilders, ItemNavHistory, Pane, ToolbarItemLocation, Workspace, WorkspaceId, }; #[derive(Eq, PartialEq, Hash)] @@ -52,7 +49,7 @@ pub trait Item: View { fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 
3]>; fn is_singleton(&self, cx: &AppContext) -> bool; fn set_nav_history(&mut self, _: ItemNavHistory, _: &mut ViewContext); - fn clone_on_split(&self, _: &mut ViewContext) -> Option + fn clone_on_split(&self, _workspace_id: WorkspaceId, _: &mut ViewContext) -> Option where Self: Sized, { @@ -121,7 +118,9 @@ pub trait Item: View { fn breadcrumbs(&self, _theme: &Theme, _cx: &AppContext) -> Option> { None } + fn serialized_item_kind() -> Option<&'static str>; + fn deserialize( project: ModelHandle, workspace: WeakViewHandle, @@ -144,7 +143,11 @@ pub trait ItemHandle: 'static + fmt::Debug { fn project_entry_ids(&self, cx: &AppContext) -> SmallVec<[ProjectEntryId; 3]>; fn is_singleton(&self, cx: &AppContext) -> bool; fn boxed_clone(&self) -> Box; - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option>; + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut MutableAppContext, + ) -> Option>; fn added_to_pane( &self, workspace: &mut Workspace, @@ -246,9 +249,13 @@ impl ItemHandle for ViewHandle { Box::new(self.clone()) } - fn clone_on_split(&self, cx: &mut MutableAppContext) -> Option> { + fn clone_on_split( + &self, + workspace_id: WorkspaceId, + cx: &mut MutableAppContext, + ) -> Option> { self.update(cx, |item, cx| { - cx.add_option_view(|cx| item.clone_on_split(cx)) + cx.add_option_view(|cx| item.clone_on_split(workspace_id, cx)) }) .map(|handle| Box::new(handle) as Box) } @@ -812,7 +819,11 @@ pub(crate) mod test { self.push_to_nav_history(cx); } - fn clone_on_split(&self, _: &mut ViewContext) -> Option + fn clone_on_split( + &self, + _workspace_id: WorkspaceId, + _: &mut ViewContext, + ) -> Option where Self: Sized, { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 372c4cafce617690536a709db00db2a9dfcddfe6..bd59afd497070330875a897e2c3a258730d0e08e 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -2,39 +2,38 @@ pub mod model; -use std::path::{Path, 
PathBuf}; -use std::sync::Arc; +use std::path::Path; -use anyhow::{anyhow, bail, Result, Context}; +use anyhow::{anyhow, bail, Context, Result}; use db::connection; use gpui::Axis; use indoc::indoc; - use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; use crate::dock::DockPosition; +use crate::WorkspaceId; use super::Workspace; use model::{ - GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, - SerializedWorkspace, WorkspaceId, + GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, + WorkspaceLocation, }; connection!(DB: WorkspaceDb); - impl Domain for Workspace { fn name() -> &'static str { "workspace" } - + fn migrations() -> &'static [&'static str] { &[indoc! {" CREATE TABLE workspaces( workspace_id BLOB PRIMARY KEY, + workspace_location BLOB NOT NULL UNIQUE, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -97,21 +96,25 @@ impl WorkspaceDb { &self, worktree_roots: &[P], ) -> Option { - let workspace_id: WorkspaceId = worktree_roots.into(); + let workspace_location: WorkspaceLocation = worktree_roots.into(); // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, dock_position): (WorkspaceId, DockPosition) = iife!({ + let (workspace_id, workspace_location, dock_position): ( + WorkspaceId, + WorkspaceLocation, + DockPosition, + ) = iife!({ if worktree_roots.len() == 0 { self.select_row(indoc! {" - SELECT workspace_id, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1"})?()? } else { self.select_row_bound(indoc! {" - SELECT workspace_id, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces - WHERE workspace_id = ?"})?(&workspace_id)? 
+ WHERE workspace_location = ?"})?(&workspace_location)? } .context("No workspaces found") }) @@ -119,13 +122,14 @@ impl WorkspaceDb { .flatten()?; Some(SerializedWorkspace { - workspace_id: workspace_id.clone(), + id: workspace_id, + location: workspace_location.clone(), dock_pane: self - .get_dock_pane(&workspace_id) + .get_dock_pane(workspace_id) .context("Getting dock pane") .log_err()?, center_group: self - .get_center_pane_group(&workspace_id) + .get_center_pane_group(workspace_id) .context("Getting center group") .log_err()?, dock_position, @@ -134,72 +138,61 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace( - &self, - old_id: Option, - workspace: &SerializedWorkspace, - ) { + pub fn save_workspace(&self, workspace: &SerializedWorkspace) { self.with_savepoint("update_worktrees", || { + // Clear out panes and pane_groups self.exec_bound(indoc! {" UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})? - (old_id.as_ref().unwrap_or(&workspace.workspace_id)).context("Clearing old panes")?; - - if let Some(old_id) = old_id { - self.exec_bound(indoc! {" - UPDATE OR REPLACE workspaces - SET workspace_id = ?, - dock_visible = ?, - dock_anchor = ?, - timestamp = CURRENT_TIMESTAMP - WHERE workspace_id = ?"})?(( - &workspace.workspace_id, - workspace.dock_position, - &old_id, - )).context("Updating workspace with new worktree roots")?; - } else { - self.exec_bound( - "INSERT OR REPLACE INTO workspaces(workspace_id, dock_visible, dock_anchor) VALUES (?, ?, ?)", - )?((&workspace.workspace_id, workspace.dock_position)).context("Uodating workspace")?; - } - + DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + .context("Clearing old panes")?; + + // Update or insert + self.exec_bound(indoc! 
{ + "INSERT OR REPLACE INTO + workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) + VALUES + (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP)" + })?((workspace.id, &workspace.location, workspace.dock_position)) + .context("Updating workspace")?; + // Save center pane group and dock pane - self.save_pane_group(&workspace.workspace_id, &workspace.center_group, None).context("save pane group in save workspace")?; - - let dock_id = self.save_pane(&workspace.workspace_id, &workspace.dock_pane, None, true).context("save pane in save workspace")?; - + self.save_pane_group(workspace.id, &workspace.center_group, None) + .context("save pane group in save workspace")?; + + let dock_id = self + .save_pane(workspace.id, &workspace.dock_pane, None, true) + .context("save pane in save workspace")?; + // Complete workspace initialization self.exec_bound(indoc! {" UPDATE workspaces SET dock_pane = ? - WHERE workspace_id = ?"})?(( - dock_id, - &workspace.workspace_id, - )).context("Finishing initialization with dock pane")?; + WHERE workspace_id = ?"})?((dock_id, workspace.id)) + .context("Finishing initialization with dock pane")?; Ok(()) }) .with_context(|| { format!( - "Update workspace with roots {:?} failed.", - workspace.workspace_id.paths() + "Update workspace with roots {:?} and id {:?} failed.", + workspace.location.paths(), + workspace.id ) }) .log_err(); } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec>> { + pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { iife!({ // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html Ok::<_, anyhow::Error>( - self.select_bound::( - "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?", + self.select_bound::( + "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?", )?(limit)? 
.into_iter() - .map(|id| id.paths()) - .collect::>>>(), + .collect::>(), ) }) .log_err() @@ -208,7 +201,7 @@ impl WorkspaceDb { pub(crate) fn get_center_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, ) -> Result { self.get_pane_group(workspace_id, None)? .into_iter() @@ -218,10 +211,10 @@ impl WorkspaceDb { fn get_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, group_id: Option, ) -> Result> { - type GroupKey<'a> = (Option, &'a WorkspaceId); + type GroupKey = (Option, WorkspaceId); type GroupOrPane = (Option, Option, Option, Option); self.select_bound::(indoc! {" SELECT group_id, axis, pane_id, active @@ -253,31 +246,29 @@ impl WorkspaceDb { if let Some((group_id, axis)) = group_id.zip(axis) { Ok(SerializedPaneGroup::Group { axis, - children: self.get_pane_group( - workspace_id, - Some(group_id), - )?, + children: self.get_pane_group(workspace_id, Some(group_id))?, }) } else if let Some((pane_id, active)) = pane_id.zip(active) { - Ok(SerializedPaneGroup::Pane(SerializedPane::new(self.get_items( pane_id)?, active))) + Ok(SerializedPaneGroup::Pane(SerializedPane::new( + self.get_items(pane_id)?, + active, + ))) } else { bail!("Pane Group Child was neither a pane group or a pane"); } }) // Filter out panes and pane groups which don't have any children or items - .filter(|pane_group| { - match pane_group { - Ok(SerializedPaneGroup::Group { children, .. }) => !children.is_empty(), - Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), - _ => true, - } + .filter(|pane_group| match pane_group { + Ok(SerializedPaneGroup::Group { children, .. 
}) => !children.is_empty(), + Ok(SerializedPaneGroup::Pane(pane)) => !pane.children.is_empty(), + _ => true, }) .collect::>() } pub(crate) fn save_pane_group( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, ) -> Result<()> { @@ -285,26 +276,31 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = self.select_row_bound::<_, i64>(indoc!{" + let group_id = self.select_row_bound::<_, i64>(indoc! {" INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) - RETURNING group_id"})? - ((workspace_id, parent_id, position, *axis))? - .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; - + RETURNING group_id"})?(( + workspace_id, + parent_id, + position, + *axis, + ))? + .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; + for (position, group) in children.iter().enumerate() { self.save_pane_group(workspace_id, group, Some((group_id, position)))? } + Ok(()) } SerializedPaneGroup::Pane(pane) => { self.save_pane(workspace_id, &pane, parent, false)?; Ok(()) - }, + } } } - pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result { + pub(crate) fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { let (pane_id, active) = self.select_row_bound(indoc! {" SELECT pane_id, active FROM panes @@ -315,40 +311,35 @@ impl WorkspaceDb { Ok(SerializedPane::new( self.get_items(pane_id).context("Reading items")?, - active + active, )) } pub(crate) fn save_pane( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = self.select_row_bound::<_, i64>(indoc!{" + let pane_id = self.select_row_bound::<_, i64>(indoc! 
{" INSERT INTO panes(workspace_id, active) VALUES (?, ?) - RETURNING pane_id"}, - )?((workspace_id, pane.active))? + RETURNING pane_id"})?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; - + if !dock { let (parent_id, order) = unzip_option(parent); self.exec_bound(indoc! {" INSERT INTO center_panes(pane_id, parent_group_id, position) - VALUES (?, ?, ?)"})?(( - pane_id, parent_id, order - ))?; + VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; } self.save_items(workspace_id, pane_id, &pane.children) .context("Saving items")?; - + Ok(pane_id) } - - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" @@ -359,7 +350,7 @@ impl WorkspaceDb { pub(crate) fn save_items( &self, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { @@ -376,7 +367,8 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db}; + + use db::{open_memory_db, Uuid}; use settings::DockAnchor; use super::*; @@ -388,15 +380,13 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); let dock_pane = crate::persistence::model::SerializedPane { - children: vec![ SerializedItem::new("Terminal", 1), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), SerializedItem::new("Terminal", 4), - ], - active: false + active: false, }; // ----------------- @@ -415,8 +405,8 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false) - ), + false, + )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ SerializedItem::new("Terminal", 7), @@ -430,7 +420,6 @@ mod tests { vec![ SerializedItem::new("Terminal", 9), SerializedItem::new("Terminal", 10), - ], false, )), @@ -438,25 +427,24 @@ mod tests { }; let workspace = SerializedWorkspace { - workspace_id: (["/tmp", "/tmp2"]).into(), - dock_position: DockPosition::Shown(DockAnchor::Bottom), + id: 
Uuid::new(), + location: (["/tmp", "/tmp2"]).into(), + dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, }; - - db.save_workspace(None, &workspace); + + db.save_workspace(&workspace); let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); - + assert_eq!(workspace, round_trip_workspace.unwrap()); // Test guaranteed duplicate IDs - db.save_workspace(None, &workspace); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + db.save_workspace(&workspace); + let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace, round_trip_workspace.unwrap()); - - } #[test] @@ -466,21 +454,23 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { - workspace_id: (["/tmp", "/tmp2"]).into(), + id: WorkspaceId::new(), + location: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), }; let mut workspace_2 = SerializedWorkspace { - workspace_id: (["/tmp"]).into(), + id: WorkspaceId::new(), + location: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), }; - db.save_workspace(None, &workspace_1); - db.save_workspace(None, &workspace_2); + db.save_workspace(&workspace_1); + db.save_workspace(&workspace_2); // Test that paths are treated as a set assert_eq!( @@ -497,8 +487,9 @@ mod tests { assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None); // Test 'mutate' case of updating a pre-existing id - workspace_2.workspace_id = (["/tmp", "/tmp2"]).into(); - db.save_workspace(Some((&["/tmp"]).into()), &workspace_2); + workspace_2.location = (["/tmp", "/tmp2"]).into(); + + db.save_workspace(&workspace_2); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 @@ -506,33 +497,28 @@ 
mod tests { // Test other mechanism for mutating let mut workspace_3 = SerializedWorkspace { - workspace_id: (&["/tmp", "/tmp2"]).into(), + id: WorkspaceId::new(), + location: (&["/tmp", "/tmp2"]).into(), dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), }; - - db.save_workspace(None, &workspace_3); + db.save_workspace(&workspace_3); assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 ); // Make sure that updating paths differently also works - workspace_3.workspace_id = (["/tmp3", "/tmp4", "/tmp2"]).into(); - db.save_workspace( - Some((&["/tmp", "/tmp2"]).into()), - &workspace_3, - ); + workspace_3.location = (["/tmp3", "/tmp4", "/tmp2"]).into(); + db.save_workspace(&workspace_3); assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); assert_eq!( db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) .unwrap(), workspace_3 ); - - } use crate::dock::DockPosition; @@ -545,7 +531,8 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - workspace_id: workspace_id.into(), + id: WorkspaceId::new(), + location: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, @@ -564,12 +551,13 @@ mod tests { SerializedItem::new("Terminal", 4), SerializedItem::new("Terminal", 2), SerializedItem::new("Terminal", 3), - ], false + ], + false, ); let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); - db.save_workspace(None, &workspace); + db.save_workspace(&workspace); let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); @@ -593,16 +581,20 @@ mod tests { SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - 
SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], }, SerializedPaneGroup::Pane(SerializedPane::new( @@ -610,41 +602,46 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false)), + false, + )), ], }; let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); } - + #[test] fn test_cleanup_panes() { env_logger::try_init().ok(); - + let db = WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); - + let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, children: vec![ SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], }, SerializedPaneGroup::Pane(SerializedPane::new( @@ -652,37 +649,41 @@ mod tests { SerializedItem::new("Terminal", 5), SerializedItem::new("Terminal", 6), ], - false)), + false, + )), ], }; let id = &["/tmp"]; - + let mut 
workspace = default_workspace(id, Default::default(), ¢er_pane); - db.save_workspace(None, &workspace); - + db.save_workspace(&workspace); + workspace.center_group = SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, children: vec![ - SerializedPaneGroup::Pane(SerializedPane::new( - vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - ], - false)), - SerializedPaneGroup::Pane(SerializedPane::new(vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), - ], true)), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 1), + SerializedItem::new("Terminal", 2), + ], + false, + )), + SerializedPaneGroup::Pane(SerializedPane::new( + vec![ + SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 3), + ], + true, + )), ], }; - - db.save_workspace(None, &workspace); - + + db.save_workspace(&workspace); + let new_workspace = db.workspace_for_roots(id).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); - } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 5f046d76ee088f928b7630aed2c6aab887254057..ff8be51406f0d68eacc18d88ba061241ae75a0af 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -16,18 +16,20 @@ use project::Project; use settings::DockAnchor; use util::ResultExt; -use crate::{dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace}; +use crate::{ + dock::DockPosition, ItemDeserializers, Member, Pane, PaneAxis, Workspace, WorkspaceId, +}; #[derive(Debug, Clone, PartialEq, Eq)] -pub struct WorkspaceId(Arc>); +pub struct WorkspaceLocation(Arc>); -impl WorkspaceId { +impl WorkspaceLocation { pub fn paths(&self) -> Arc> { self.0.clone() } } -impl, T: IntoIterator> From for WorkspaceId { +impl, T: IntoIterator> From for WorkspaceLocation { fn from(iterator: T) -> Self { let mut roots = iterator .into_iter() @@ 
-38,7 +40,7 @@ impl, T: IntoIterator> From for WorkspaceId { } } -impl Bind for &WorkspaceId { +impl Bind for &WorkspaceLocation { fn bind(&self, statement: &Statement, start_index: i32) -> Result { bincode::serialize(&self.0) .expect("Bincode serialization of paths should not fail") @@ -46,16 +48,20 @@ impl Bind for &WorkspaceId { } } -impl Column for WorkspaceId { +impl Column for WorkspaceLocation { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let blob = statement.column_blob(start_index)?; - Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1)) + Ok(( + WorkspaceLocation(bincode::deserialize(blob)?), + start_index + 1, + )) } } #[derive(Debug, PartialEq, Eq)] pub struct SerializedWorkspace { - pub workspace_id: WorkspaceId, + pub id: WorkspaceId, + pub location: WorkspaceLocation, pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, @@ -70,10 +76,11 @@ pub enum SerializedPaneGroup { Pane(SerializedPane), } +#[cfg(test)] impl Default for SerializedPaneGroup { fn default() -> Self { Self::Pane(SerializedPane { - children: Vec::new(), + children: vec![SerializedItem::default()], active: false, }) } @@ -84,7 +91,7 @@ impl SerializedPaneGroup { pub(crate) async fn deserialize( &self, project: &ModelHandle, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, ) -> (Member, Option>) { @@ -136,13 +143,12 @@ impl SerializedPane { &self, project: &ModelHandle, pane_handle: &ViewHandle, - workspace_id: &WorkspaceId, + workspace_id: WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, ) { for item in self.children.iter() { let project = project.clone(); - let workspace_id = workspace_id.clone(); let item_handle = pane_handle .update(cx, |_, cx| { if let Some(deserializer) = cx.global::().get(&item.kind) { @@ -191,6 +197,16 @@ impl SerializedItem { } } +#[cfg(test)] +impl Default for SerializedItem { + fn default() 
-> Self { + SerializedItem { + kind: Arc::from("Terminal"), + item_id: 100000, + } + } +} + impl Bind for &SerializedItem { fn bind(&self, statement: &Statement, start_index: i32) -> Result { let next_index = statement.bind(self.kind.clone(), start_index)?; @@ -231,7 +247,7 @@ mod tests { use db::sqlez::connection::Connection; use settings::DockAnchor; - use super::WorkspaceId; + use super::WorkspaceLocation; #[test] fn test_workspace_round_trips() { @@ -245,7 +261,7 @@ mod tests { .unwrap()() .unwrap(); - let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]); + let workspace_id: WorkspaceLocation = WorkspaceLocation::from(&["\test2", "\test1"]); db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)") .unwrap()((&workspace_id, DockAnchor::Bottom)) @@ -255,7 +271,10 @@ mod tests { db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1") .unwrap()() .unwrap(), - Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom)) + Some(( + WorkspaceLocation::from(&["\test1", "\test2"]), + DockAnchor::Bottom + )) ); } } diff --git a/crates/workspace/src/shared_screen.rs b/crates/workspace/src/shared_screen.rs index 28623950dfc82948710b800700019e6d49ea222a..7dee642423c805e9581520b523c5182d424c8ccb 100644 --- a/crates/workspace/src/shared_screen.rs +++ b/crates/workspace/src/shared_screen.rs @@ -1,7 +1,5 @@ use crate::{ - item::ItemEvent, - persistence::model::{ItemId, WorkspaceId}, - Item, ItemNavHistory, Pane, Workspace, + item::ItemEvent, persistence::model::ItemId, Item, ItemNavHistory, Pane, Workspace, WorkspaceId, }; use anyhow::{anyhow, Result}; use call::participant::{Frame, RemoteVideoTrack}; @@ -148,7 +146,11 @@ impl Item for SharedScreen { self.nav_history = Some(history); } - fn clone_on_split(&self, cx: &mut ViewContext) -> Option { + fn clone_on_split( + &self, + _workspace_id: WorkspaceId, + cx: &mut ViewContext, + ) -> Option { let track = self.track.upgrade()?; 
Some(Self::new(&track, self.peer_id, self.user.clone(), cx)) } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 66fdd19c70b79f50829d7480863189dad852eb55..4b02522496150091c26806d1d2c11882f539fe72 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -26,6 +26,7 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; +use db::Uuid; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -45,7 +46,7 @@ use log::{error, warn}; pub use pane::*; pub use pane_group::*; use persistence::model::SerializedItem; -pub use persistence::model::{ItemId, WorkspaceId}; +pub use persistence::model::{ItemId, WorkspaceLocation}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -128,6 +129,8 @@ pub struct OpenProjectEntryInPane { project_entry: ProjectEntryId, } +pub type WorkspaceId = Uuid; + impl_internal_actions!( workspace, [ @@ -530,6 +533,7 @@ pub struct Workspace { last_leaders_by_pane: HashMap, PeerId>, window_edited: bool, active_call: Option<(ModelHandle, Vec)>, + database_id: WorkspaceId, _observe_current_user: Task<()>, } @@ -556,7 +560,7 @@ impl Workspace { project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); // TODO: Cache workspace_id on workspace and read from it here - this.serialize_workspace(None, cx); + this.serialize_workspace(cx); } project::Event::DisconnectedFromHost => { this.update_window_edited(cx); @@ -630,6 +634,12 @@ impl Workspace { active_call = Some((call, subscriptions)); } + let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { + id + } else { + WorkspaceId::new() + }; + let mut this = Workspace { modal: None, weak_self: weak_handle.clone(), 
@@ -657,6 +667,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, + database_id: id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); @@ -1317,7 +1328,7 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); - self.serialize_workspace(None, cx); + self.serialize_workspace(cx); } pub fn open_path( @@ -1522,7 +1533,7 @@ impl Workspace { entry.remove(); } } - self.serialize_workspace(None, cx); + self.serialize_workspace(cx); } _ => {} } @@ -1544,7 +1555,7 @@ impl Workspace { pane.read(cx).active_item().map(|item| { let new_pane = self.add_pane(cx); - if let Some(clone) = item.clone_on_split(cx.as_mut()) { + if let Some(clone) = item.clone_on_split(self.database_id(), cx.as_mut()) { Pane::add_item(self, &new_pane, clone, true, true, None, cx); } self.center.split(&pane, &new_pane, direction).unwrap(); @@ -2255,7 +2266,11 @@ impl Workspace { } } - fn workspace_id(&self, cx: &AppContext) -> WorkspaceId { + pub fn database_id(&self) -> WorkspaceId { + self.database_id + } + + fn location(&self, cx: &AppContext) -> WorkspaceLocation { self.project() .read(cx) .visible_worktrees(cx) @@ -2275,7 +2290,7 @@ impl Workspace { } } - fn serialize_workspace(&self, old_id: Option, cx: &AppContext) { + fn serialize_workspace(&self, cx: &AppContext) { fn serialize_pane_handle( pane_handle: &ViewHandle, cx: &AppContext, @@ -2320,7 +2335,8 @@ impl Workspace { let center_group = build_serialized_pane_group(&self.center.root, cx); let serialized_workspace = SerializedWorkspace { - workspace_id: self.workspace_id(cx), + id: self.database_id, + location: self.location(cx), dock_position: self.dock.position(), dock_pane, center_group, @@ -2328,7 +2344,7 @@ impl Workspace { cx.background() .spawn(async move { - persistence::DB.save_workspace(old_id, 
&serialized_workspace); + persistence::DB.save_workspace(&serialized_workspace); }) .detach(); } @@ -2349,7 +2365,7 @@ impl Workspace { .deserialize_to( &project, &dock_pane_handle, - &serialized_workspace.workspace_id, + serialized_workspace.id, &workspace, &mut cx, ) @@ -2359,12 +2375,7 @@ impl Workspace { let (root, active_pane) = serialized_workspace .center_group - .deserialize( - &project, - &serialized_workspace.workspace_id, - &workspace, - &mut cx, - ) + .deserialize(&project, serialized_workspace.id, &workspace, &mut cx) .await; // Remove old panes from workspace panes list diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 53273b45d8ed0057f03b87f2cab9c2bff2772811..c95b7c4071e3458654dfdaca5b99b06bd5de7e0b 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -597,6 +597,8 @@ pub fn default_item_factory( let working_directory = get_working_directory(workspace, cx, strategy); - let terminal_handle = cx.add_view(|cx| TerminalContainer::new(working_directory, false, cx)); + let terminal_handle = cx.add_view(|cx| { + TerminalContainer::new(working_directory, false, workspace.database_id(), cx) + }); Box::new(terminal_handle) } From e1eff3f4cd28b335610cc6dacc8c7b73d6f1a34c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sun, 20 Nov 2022 23:44:30 -0800 Subject: [PATCH 60/86] WIP: Some bugs switching to database provided IDs, terminal titles don't reload when restored from serialized, workspace tests are no longer passing but should be easy to fix when it isn't 11:44 --- Cargo.lock | 2 - crates/db/Cargo.toml | 1 - crates/db/src/db.rs | 45 +------------------ crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/thread_safe_connection.rs | 4 +- crates/terminal/src/persistence.rs | 4 +- crates/workspace/src/persistence.rs | 50 ++++++++++++---------- crates/workspace/src/persistence/model.rs | 2 +- crates/workspace/src/workspace.rs | 7 ++- 9 files changed, 37 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
f4998f235a7778d51d956ec803dcc55ea8b33c71..e887dfee66b7d4e0ed3a72bea56bef96bfef6a84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1572,7 +1572,6 @@ dependencies = [ "sqlez", "tempdir", "util", - "uuid 1.2.2", ] [[package]] @@ -6835,7 +6834,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "422ee0de9031b5b948b97a8fc04e3aa35230001a722ddd27943e0be31564ce4c" dependencies = [ "getrandom 0.2.8", - "rand 0.8.5", ] [[package]] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 7e58b2e9bfcee5420287bd0f321f007db5f81e39..70721c310c75a0d81b7b20086d80b4b521b61005 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -22,7 +22,6 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } -uuid = { version = "1.2.2", features = ["v4", "fast-rng"] } [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index aa09dc812dfd007427b879a021cb918b931842eb..7ec4a1222350d24a8df2d3dc2c55eae5ba5f65a1 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,7 +5,6 @@ pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; -use sqlez::bindable::{Bind, Column}; #[cfg(any(test, feature = "test-support"))] use anyhow::Result; @@ -20,7 +19,6 @@ use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; -use uuid::Uuid as RealUuid; const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA journal_mode=WAL; @@ -30,47 +28,6 @@ const INITIALIZE_QUERY: &'static str = indoc! 
{" PRAGMA case_sensitive_like=TRUE; "}; -#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct Uuid(RealUuid); - -impl std::ops::Deref for Uuid { - type Target = RealUuid; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl Bind for Uuid { - fn bind( - &self, - statement: &sqlez::statement::Statement, - start_index: i32, - ) -> anyhow::Result { - statement.bind(self.as_bytes(), start_index) - } -} - -impl Column for Uuid { - fn column( - statement: &mut sqlez::statement::Statement, - start_index: i32, - ) -> anyhow::Result<(Self, i32)> { - let blob = statement.column_blob(start_index)?; - Ok((Uuid::from_bytes(blob)?, start_index + 1)) - } -} - -impl Uuid { - pub fn new() -> Self { - Uuid(RealUuid::new_v4()) - } - - fn from_bytes(bytes: &[u8]) -> anyhow::Result { - Ok(Uuid(RealUuid::from_bytes(bytes.try_into()?))) - } -} - /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM @@ -186,7 +143,7 @@ macro_rules! select_row_method { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?(()) + self.select_row::<$return_type>($sql)?() .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 4a44a32447c7087c3c7950d3630e626af1d4972d..234403738438a2d6465e305ff9f3f79a1be93aa5 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -19,7 +19,7 @@ impl Domain for Editor { &[indoc! 
{" CREATE TABLE editors( item_id INTEGER NOT NULL, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, path BLOB NOT NULL, PRIMARY KEY(item_id, workspace_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 5a5095ad7771063128239e76394c5a71560e8c93..7c5bf6388cc0d699768ae77b455338e38614961c 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -130,7 +130,7 @@ mod test { fn migrations() -> &'static [&'static str] { &[" CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, + workspace_id INTEGER PRIMARY KEY, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -141,7 +141,7 @@ mod test { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 8928164676ddbd3c7eafdc298bb955f2d58e77e0..d624724e5cde884332e46c2dd21248cc7647f9dc 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -16,7 +16,7 @@ impl Domain for Terminal { fn migrations() -> &'static [&'static str] { &[indoc! 
{" CREATE TABLE terminals ( - workspace_id BLOB, + workspace_id INTEGER, item_id INTEGER, working_directory BLOB, PRIMARY KEY(workspace_id, item_id), @@ -29,7 +29,7 @@ impl Domain for Terminal { impl TerminalDb { exec_method!( - save_working_directory(model_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): + save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) VALUES (?1, ?2, ?3)" ); diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index bd59afd497070330875a897e2c3a258730d0e08e..f6357448176e274f37fccf5d26779fe4be0af880 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::connection; +use db::{connection, select_row_method}; use gpui::Axis; use indoc::indoc; @@ -32,8 +32,8 @@ impl Domain for Workspace { fn migrations() -> &'static [&'static str] { &[indoc! 
{" CREATE TABLE workspaces( - workspace_id BLOB PRIMARY KEY, - workspace_location BLOB NOT NULL UNIQUE, + workspace_id INTEGER PRIMARY KEY, + workspace_location BLOB UNIQUE, dock_visible INTEGER, -- Boolean dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet @@ -43,7 +43,7 @@ impl Domain for Workspace { CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, parent_group_id INTEGER, -- NULL indicates that this is a root node position INTEGER, -- NULL indicates that this is a root node axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' @@ -55,7 +55,7 @@ impl Domain for Workspace { CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, -- Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE @@ -73,7 +73,7 @@ impl Domain for Workspace { CREATE TABLE items( item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique - workspace_id BLOB NOT NULL, + workspace_id INTEGER NOT NULL, pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, @@ -149,10 +149,12 @@ impl WorkspaceDb { // Update or insert self.exec_bound(indoc! 
{ - "INSERT OR REPLACE INTO + "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) VALUES - (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP)" + (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + ON CONFLICT DO UPDATE SET + workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" })?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; @@ -183,6 +185,11 @@ impl WorkspaceDb { .log_err(); } + select_row_method!( + next_id() -> WorkspaceId: + "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" + ); + /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { iife!({ @@ -199,10 +206,7 @@ impl WorkspaceDb { .unwrap_or_default() } - pub(crate) fn get_center_pane_group( - &self, - workspace_id: WorkspaceId, - ) -> Result { + fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result { self.get_pane_group(workspace_id, None)? .into_iter() .next() @@ -266,7 +270,7 @@ impl WorkspaceDb { .collect::>() } - pub(crate) fn save_pane_group( + fn save_pane_group( &self, workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, @@ -300,7 +304,7 @@ impl WorkspaceDb { } } - pub(crate) fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { + fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { let (pane_id, active) = self.select_row_bound(indoc! {" SELECT pane_id, active FROM panes @@ -315,7 +319,7 @@ impl WorkspaceDb { )) } - pub(crate) fn save_pane( + fn save_pane( &self, workspace_id: WorkspaceId, pane: &SerializedPane, @@ -341,14 +345,14 @@ impl WorkspaceDb { Ok(pane_id) } - pub(crate) fn get_items(&self, pane_id: PaneId) -> Result> { + fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(indoc! {" SELECT kind, item_id FROM items WHERE pane_id = ? ORDER BY position"})?(pane_id)?) 
} - pub(crate) fn save_items( + fn save_items( &self, workspace_id: WorkspaceId, pane_id: PaneId, @@ -368,7 +372,7 @@ impl WorkspaceDb { #[cfg(test)] mod tests { - use db::{open_memory_db, Uuid}; + use db::open_memory_db; use settings::DockAnchor; use super::*; @@ -427,7 +431,7 @@ mod tests { }; let workspace = SerializedWorkspace { - id: Uuid::new(), + id: 5, location: (["/tmp", "/tmp2"]).into(), dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, @@ -454,7 +458,7 @@ mod tests { let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); let workspace_1 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 1, location: (["/tmp", "/tmp2"]).into(), dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), @@ -462,7 +466,7 @@ mod tests { }; let mut workspace_2 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 2, location: (["/tmp"]).into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), @@ -497,7 +501,7 @@ mod tests { // Test other mechanism for mutating let mut workspace_3 = SerializedWorkspace { - id: WorkspaceId::new(), + id: 3, location: (&["/tmp", "/tmp2"]).into(), dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), @@ -531,7 +535,7 @@ mod tests { center_group: &SerializedPaneGroup, ) -> SerializedWorkspace { SerializedWorkspace { - id: WorkspaceId::new(), + id: 4, location: workspace_id.into(), dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index ff8be51406f0d68eacc18d88ba061241ae75a0af..111a6904c65f4f555e18be496ad1873325305916 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -255,7 +255,7 @@ mod tests { db.exec(indoc::indoc! 
{" CREATE TABLE workspace_id_test( - workspace_id BLOB, + workspace_id INTEGER, dock_anchor TEXT );"}) .unwrap()() diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 4b02522496150091c26806d1d2c11882f539fe72..0a4a6c87407c8133d14a031c2c5c4ee9f2537ec9 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -26,7 +26,6 @@ use anyhow::{anyhow, Context, Result}; use call::ActiveCall; use client::{proto, Client, PeerId, TypedEnvelope, UserStore}; use collections::{hash_map, HashMap, HashSet}; -use db::Uuid; use dock::{DefaultItemFactory, Dock, ToggleDockButton}; use drag_and_drop::DragAndDrop; use fs::{self, Fs}; @@ -45,8 +44,8 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -use persistence::model::SerializedItem; pub use persistence::model::{ItemId, WorkspaceLocation}; +use persistence::{model::SerializedItem, DB}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; @@ -129,7 +128,7 @@ pub struct OpenProjectEntryInPane { project_entry: ProjectEntryId, } -pub type WorkspaceId = Uuid; +pub type WorkspaceId = i64; impl_internal_actions!( workspace, @@ -637,7 +636,7 @@ impl Workspace { let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { id } else { - WorkspaceId::new() + DB.next_id().log_err().flatten().unwrap_or(0) }; let mut this = Workspace { From cf4c103660e396375b5a4aa090e9d4103b4afb09 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 09:30:41 -0800 Subject: [PATCH 61/86] Fixed workspace tests --- crates/workspace/src/persistence.rs | 81 ++++++++++++++++++++++++++++- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index f6357448176e274f37fccf5d26779fe4be0af880..88a894a92239de74247dbc2262ce7c8466d8383a 100644 --- 
a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -147,14 +147,19 @@ impl WorkspaceDb { DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) .context("Clearing old panes")?; + self.exec_bound(indoc! {" + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? + "})?((&workspace.location, workspace.id)) + .context("clearing out old locations")?; + // Update or insert self.exec_bound(indoc! { "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) - ON CONFLICT DO UPDATE SET - workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" + ON CONFLICT DO UPDATE SET + workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" })?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; @@ -372,11 +377,83 @@ impl WorkspaceDb { #[cfg(test)] mod tests { + use std::sync::Arc; + use db::open_memory_db; use settings::DockAnchor; use super::*; + #[test] + fn test_workspace_id_stability() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + + db.migrate( + "test_table", + &["CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"], + ) + .unwrap(); + + let mut workspace_1 = SerializedWorkspace { + id: 1, + location: (["/tmp", "/tmp2"]).into(), + dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + let mut workspace_2 = SerializedWorkspace { + id: 2, + location: (["/tmp"]).into(), + dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), + center_group: Default::default(), + dock_pane: Default::default(), + }; + + db.save_workspace(&workspace_1); + + 
db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", 1)) + .unwrap(); + + db.save_workspace(&workspace_2); + + db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-2", 2)) + .unwrap(); + + workspace_1.location = (["/tmp", "/tmp3"]).into(); + db.save_workspace(&workspace_1); + db.save_workspace(&workspace_1); + + workspace_2.dock_pane.children.push(SerializedItem { + kind: Arc::from("Test"), + item_id: 10, + }); + db.save_workspace(&workspace_2); + + let test_text_1 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(2) + .unwrap() + .unwrap(); + assert_eq!(test_text_1, "test-text-2"); + + let test_text_2 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(1) + .unwrap() + .unwrap(); + assert_eq!(test_text_2, "test-text-1"); + } + #[test] fn test_full_workspace_serialization() { env_logger::try_init().ok(); From 76c42af62aa64775d81b480dec45ae2d915ee02b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 09:47:29 -0800 Subject: [PATCH 62/86] Finished terminal working directory restoration --- crates/terminal/src/terminal.rs | 34 ++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index fdf16b78256ac6a5ce0ea27c86ed5ed610300a11..b5192b68765a7debb6303e8da9d6e3fc858309f7 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -574,6 +574,23 @@ impl Terminal { } AlacTermEvent::Wakeup => { cx.emit(Event::Wakeup); + + if self.update_process_info() { + cx.emit(Event::TitleChanged); + + if let Some(foreground_info) = &self.foreground_process_info { + let cwd = foreground_info.cwd.clone(); + let item_id = self.item_id; + let workspace_id = self.workspace_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + 
.save_working_directory(item_id, workspace_id, cwd.as_path()) + .log_err(); + }) + .detach(); + } + } } AlacTermEvent::ColorRequest(idx, fun_ptr) => { self.events @@ -880,23 +897,6 @@ impl Terminal { return; }; - if self.update_process_info() { - cx.emit(Event::TitleChanged); - - if let Some(foreground_info) = &self.foreground_process_info { - let cwd = foreground_info.cwd.clone(); - let item_id = self.item_id; - let workspace_id = self.workspace_id; - cx.background() - .spawn(async move { - TERMINAL_CONNECTION - .save_working_directory(item_id, workspace_id, cwd.as_path()) - .log_err(); - }) - .detach(); - } - } - //Note that the ordering of events matters for event processing while let Some(e) = self.events.pop_front() { self.process_terminal_event(&e, &mut terminal, cx) From 37174f45f0f8e403e031ee683c1e1f8e6b8c1e87 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 10:38:16 -0800 Subject: [PATCH 63/86] Touched up sql macro --- crates/db/src/db.rs | 61 ++++++++++++----------------- crates/editor/src/persistence.rs | 11 +++--- crates/terminal/src/persistence.rs | 14 +++---- crates/workspace/src/persistence.rs | 47 ++++++++++++++++++---- 4 files changed, 78 insertions(+), 55 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 7ec4a1222350d24a8df2d3dc2c55eae5ba5f65a1..20b2ac142aee75e5f07a06b3f355320e7510c5da 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -82,36 +82,31 @@ macro_rules! connection { } #[macro_export] -macro_rules! exec_method { - ($id:ident(): $sql:literal) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.exec($sql)?() - .context(::std::format!( - "Error in {}, exec failed to execute or parse for: {}", - ::std::stringify!($id), - ::std::stringify!($sql), - )) - } +macro_rules! 
sql_method { + ($id:ident() -> Result<()>: $sql:literal) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec($sql)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } }; - ($id:ident($($arg:ident: $arg_type:ty),+): $sql:literal) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - ::std::stringify!($sql), - )) - } + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:literal) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } }; -} - -#[macro_export] -macro_rules! select_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { + ($id:ident() -> Result>: $sql:literal) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -123,7 +118,7 @@ macro_rules! select_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:literal) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -135,11 +130,7 @@ macro_rules! select_method { )) } }; -} - -#[macro_export] -macro_rules! 
select_row_method { - ($id:ident() -> $return_type:ty: $sql:literal) => { + ($id:ident() -> Result>: $sql:literal) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -151,7 +142,7 @@ macro_rules! select_row_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> $return_type:ty: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:literal) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 234403738438a2d6465e305ff9f3f79a1be93aa5..b2186e2432c6a7b77fd67ce2b18cedf336be6919 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::{Context, Result}; -use db::{connection, exec_method}; +use db::{connection, sql_method}; use indoc::indoc; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -39,8 +39,9 @@ impl EditorDb { .context("Path not found for serialized editor") } - exec_method!(save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path): - "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?)" - ); + sql_method! 
{ + save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>: + "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)" + } } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index d624724e5cde884332e46c2dd21248cc7647f9dc..384dcc18e0fb3be659fb16d2b381548017cb89b0 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,6 +1,6 @@ use std::path::{Path, PathBuf}; -use db::{connection, exec_method, indoc, select_row_method, sqlez::domain::Domain}; +use db::{connection, indoc, sql_method, sqlez::domain::Domain}; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -28,16 +28,16 @@ impl Domain for Terminal { } impl TerminalDb { - exec_method!( - save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path): + sql_method! { + save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path) -> Result<()>: "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) VALUES (?1, ?2, ?3)" - ); + } - select_row_method!( - get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> PathBuf: + sql_method! { + get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: "SELECT working_directory FROM terminals WHERE item_id = ? AND workspace_id = ?" - ); + } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 88a894a92239de74247dbc2262ce7c8466d8383a..a4073d27d39a04b27406f7e25350be2481e3eeae 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, select_row_method}; +use db::{connection, sql_method}; use gpui::Axis; use indoc::indoc; @@ -190,10 +190,10 @@ impl WorkspaceDb { .log_err(); } - select_row_method!( - next_id() -> WorkspaceId: + sql_method! 
{ + next_id() -> Result>: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" - ); + } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { @@ -384,6 +384,37 @@ mod tests { use super::*; + #[test] + fn test_next_id_stability() { + env_logger::try_init().ok(); + + let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + + db.migrate( + "test_table", + &["CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"], + ) + .unwrap(); + + let id = db.next_id().unwrap(); + + db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", id)) + .unwrap(); + + let test_text_1 = db + .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .unwrap()(1) + .unwrap() + .unwrap(); + assert_eq!(test_text_1, "test-text-1"); + } + #[test] fn test_workspace_id_stability() { env_logger::try_init().ok(); @@ -439,19 +470,19 @@ mod tests { }); db.save_workspace(&workspace_2); - let test_text_1 = db + let test_text_2 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") .unwrap()(2) .unwrap() .unwrap(); - assert_eq!(test_text_1, "test-text-2"); + assert_eq!(test_text_2, "test-text-2"); - let test_text_2 = db + let test_text_1 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") .unwrap()(1) .unwrap() .unwrap(); - assert_eq!(test_text_2, "test-text-1"); + assert_eq!(test_text_1, "test-text-1"); } #[test] From 2dc1130902e0936adff67e53d11737a102304071 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 10:52:19 -0800 Subject: [PATCH 64/86] Added extra sql methods --- crates/db/src/db.rs | 46 +++++++++++++++++++++++++++----- crates/editor/src/items.rs | 1 + 
crates/editor/src/persistence.rs | 16 +++++------ 3 files changed, 49 insertions(+), 14 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 20b2ac142aee75e5f07a06b3f355320e7510c5da..bde69fead7440652bf81b8e66fca4275c0b35f5e 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -83,7 +83,7 @@ macro_rules! connection { #[macro_export] macro_rules! sql_method { - ($id:ident() -> Result<()>: $sql:literal) => { + ($id:ident() -> Result<()>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { use $crate::anyhow::Context; @@ -94,7 +94,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { use $crate::anyhow::Context; @@ -106,7 +106,7 @@ macro_rules! sql_method { )) } }; - ($id:ident() -> Result>: $sql:literal) => { + ($id:ident() -> Result>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -118,7 +118,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -130,7 +130,7 @@ macro_rules! sql_method { )) } }; - ($id:ident() -> Result>: $sql:literal) => { + ($id:ident() -> Result>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -142,7 +142,7 @@ macro_rules! sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:literal) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { use $crate::anyhow::Context; @@ -155,4 +155,38 @@ macro_rules! 
sql_method { } }; + ($id:ident() -> Result<$return_type:ty>>: $sql:expr) => { + pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.select_row::<$return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>>: $sql:expr) => { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + } + }; } diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index 1e695d2364df47c31cabad8a3731422e5439b995..aea0d8b4372cf82a39f41ff7ec9903e6849d27ec 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -568,6 +568,7 @@ impl Item for Editor { if let Some(project_item) = project.update(cx, |project, cx| { // Look up the path with this key associated, create a self with that path let path = DB.get_path(item_id, workspace_id).ok()?; + let (worktree, path) = project.find_local_worktree(&path, cx)?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index b2186e2432c6a7b77fd67ce2b18cedf336be6919..5747558700f818d1d9f9bc8c8d49fe47d494414c 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,6 +1,5 @@ use std::path::{Path, PathBuf}; -use anyhow::{Context, Result}; use db::{connection, sql_method}; use indoc::indoc; use sqlez::domain::Domain; @@ -32,16 +31,17 @@ impl Domain for Editor { } impl EditorDb { - pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result { - self.select_row_bound(indoc! {" - SELECT path FROM editors - WHERE item_id = ? AND workspace_id = ?"})?((item_id, workspace_id))? - .context("Path not found for serialized editor") + sql_method! { + get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: + indoc! {" + SELECT path FROM editors + WHERE item_id = ? AND workspace_id = ?"} } sql_method! { save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>: - "INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?)" + indoc! 
{" + INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?)"} } } From 3e0f9d27a7a9aa9156dda51e80cf944d09205bfb Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 13:42:26 -0800 Subject: [PATCH 65/86] Made dev tools not break everything about the db Also improved multi statements to allow out of order parameter binding in statements Ensured that all statements are run for maybe_row and single, and that of all statements only 1 of them returns only 1 row Made bind and column calls add useful context to errors Co-authored-by: kay@zed.dev --- crates/db/src/db.rs | 33 ++-- crates/editor/src/persistence.rs | 2 +- crates/sqlez/src/bindable.rs | 49 ++++-- crates/sqlez/src/connection.rs | 6 + crates/sqlez/src/statement.rs | 185 +++++++++++++++------- crates/sqlez/src/typed_statements.rs | 10 +- crates/terminal/src/persistence.rs | 18 ++- crates/workspace/src/persistence.rs | 10 +- crates/workspace/src/persistence/model.rs | 4 +- crates/workspace/src/workspace.rs | 12 +- dest-term.db | Bin 0 -> 45056 bytes dest-workspace.db | Bin 0 -> 36864 bytes dest.db | Bin 0 -> 45056 bytes 13 files changed, 219 insertions(+), 110 deletions(-) create mode 100644 dest-term.db create mode 100644 dest-workspace.db create mode 100644 dest.db diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index bde69fead7440652bf81b8e66fca4275c0b35f5e..b3370db753b0307aad2e425c69856ea798bd8330 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -6,17 +6,11 @@ pub use indoc::indoc; pub use lazy_static; pub use sqlez; -#[cfg(any(test, feature = "test-support"))] -use anyhow::Result; -#[cfg(any(test, feature = "test-support"))] -use sqlez::connection::Connection; -#[cfg(any(test, feature = "test-support"))] -use sqlez::domain::Domain; - use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; +use std::sync::atomic::{AtomicBool, Ordering}; use 
util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; @@ -28,13 +22,21 @@ const INITIALIZE_QUERY: &'static str = indoc! {" PRAGMA case_sensitive_like=TRUE; "}; +lazy_static::lazy_static! { + static ref DB_WIPED: AtomicBool = AtomicBool::new(false); +} + /// Open or create a database at the given directory path. pub fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); - if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() { + if *RELEASE_CHANNEL == ReleaseChannel::Dev + && std::env::var("WIPE_DB").is_ok() + && !DB_WIPED.load(Ordering::Acquire) + { remove_dir_all(¤t_db_dir).ok(); + DB_WIPED.store(true, Ordering::Relaxed); } create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); @@ -48,15 +50,6 @@ pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnectio ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } -#[cfg(any(test, feature = "test-support"))] -pub fn write_db_to>( - conn: &ThreadSafeConnection, - dest: P, -) -> Result<()> { - let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref()); - conn.backup_main(&destination) -} - /// Implements a basic DB wrapper for a given domain #[macro_export] macro_rules! connection { @@ -155,11 +148,11 @@ macro_rules! sql_method { } }; - ($id:ident() -> Result<$return_type:ty>>: $sql:expr) => { + ($id:ident() -> Result<$return_type:ty>: $sql:expr) => { pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?(($($arg),+)) + self.select_row::<$return_type>($sql)?() .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), @@ -172,7 +165,7 @@ macro_rules! 
sql_method { )) } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>>: $sql:expr) => { + ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> { use $crate::anyhow::Context; diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 5747558700f818d1d9f9bc8c8d49fe47d494414c..a77eec7fd132b7155b2df9a0b05bc6468a4ef70f 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -32,7 +32,7 @@ impl Domain for Editor { impl EditorDb { sql_method! { - get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: + get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result: indoc! {" SELECT path FROM editors WHERE item_id = ? AND workspace_id = ?"} diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 18c4acedad6bc73460ae1d36a6e901536c49f49b..51f67dd03f053ec4747f14ae1eb6cdef7e8ee573 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use anyhow::Result; +use anyhow::{Context, Result}; use crate::statement::{SqlType, Statement}; @@ -19,61 +19,82 @@ pub trait Column: Sized { impl Bind for bool { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind(self.then_some(1).unwrap_or(0), start_index) + statement + .bind(self.then_some(1).unwrap_or(0), start_index) + .with_context(|| format!("Failed to bind bool at index {start_index}")) } } impl Column for bool { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - i32::column(statement, start_index).map(|(i, next_index)| (i != 0, next_index)) + i32::column(statement, start_index) + .map(|(i, next_index)| (i != 0, next_index)) + .with_context(|| format!("Failed to read bool at index {start_index}")) } } impl Bind for &[u8] { fn bind(&self, statement: &Statement, start_index: i32) -> 
Result { - statement.bind_blob(start_index, self)?; + statement + .bind_blob(start_index, self) + .with_context(|| format!("Failed to bind &[u8] at index {start_index}"))?; Ok(start_index + 1) } } impl Bind for &[u8; C] { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_blob(start_index, self.as_slice())?; + statement + .bind_blob(start_index, self.as_slice()) + .with_context(|| format!("Failed to bind &[u8; C] at index {start_index}"))?; Ok(start_index + 1) } } impl Bind for Vec { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_blob(start_index, self)?; + statement + .bind_blob(start_index, self) + .with_context(|| format!("Failed to bind Vec at index {start_index}"))?; Ok(start_index + 1) } } impl Column for Vec { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let result = statement.column_blob(start_index)?; + let result = statement + .column_blob(start_index) + .with_context(|| format!("Failed to read Vec at index {start_index}"))?; + Ok((Vec::from(result), start_index + 1)) } } impl Bind for f64 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_double(start_index, *self)?; + statement + .bind_double(start_index, *self) + .with_context(|| format!("Failed to bind f64 at index {start_index}"))?; Ok(start_index + 1) } } impl Column for f64 { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let result = statement.column_double(start_index)?; + let result = statement + .column_double(start_index) + .with_context(|| format!("Failed to parse f64 at index {start_index}"))?; + Ok((result, start_index + 1)) } } impl Bind for i32 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_int(start_index, *self)?; + statement + .bind_int(start_index, *self) + .with_context(|| format!("Failed to bind i32 at index {start_index}"))?; + Ok(start_index + 1) } } @@ -87,7 
+108,9 @@ impl Column for i32 { impl Bind for i64 { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_int64(start_index, *self)?; + statement + .bind_int64(start_index, *self) + .with_context(|| format!("Failed to bind i64 at index {start_index}"))?; Ok(start_index + 1) } } @@ -101,7 +124,9 @@ impl Column for i64 { impl Bind for usize { fn bind(&self, statement: &Statement, start_index: i32) -> Result { - (*self as i64).bind(statement, start_index) + (*self as i64) + .bind(statement, start_index) + .with_context(|| format!("Failed to bind usize at index {start_index}")) } } diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 1eaeb090e136315a1ac32ef42419f8d84a163db9..5a71cefb52bdf33100bea53a0ccaa74303a957c3 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -1,6 +1,7 @@ use std::{ ffi::{CStr, CString}, marker::PhantomData, + path::Path, }; use anyhow::{anyhow, Result}; @@ -73,6 +74,11 @@ impl Connection { } } + pub fn backup_main_to(&self, destination: impl AsRef) -> Result<()> { + let destination = Self::open_file(destination.as_ref().to_string_lossy().as_ref()); + self.backup_main(&destination) + } + pub(crate) fn last_error(&self) -> Result<()> { unsafe { let code = sqlite3_errcode(self.sqlite3); diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 164929010b2698401724e7c6493b0212948d709c..0a7305c6edc02ee3d8a05f4a5dbb262c557c17d5 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -19,8 +19,6 @@ pub struct Statement<'a> { pub enum StepResult { Row, Done, - Misuse, - Other(i32), } #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -40,12 +38,14 @@ impl<'a> Statement<'a> { connection, phantom: PhantomData, }; - unsafe { - let sql = CString::new(query.as_ref())?; + let sql = CString::new(query.as_ref()).context("Error creating cstr")?; let mut remaining_sql = sql.as_c_str(); while { - let remaining_sql_str = 
remaining_sql.to_str()?.trim(); + let remaining_sql_str = remaining_sql + .to_str() + .context("Parsing remaining sql")? + .trim(); remaining_sql_str != ";" && !remaining_sql_str.is_empty() } { let mut raw_statement = 0 as *mut sqlite3_stmt; @@ -92,116 +92,136 @@ impl<'a> Statement<'a> { } } - pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { - let index = index as c_int; - let blob_pointer = blob.as_ptr() as *const _; - let len = blob.len() as c_int; + fn bind_index_with(&self, index: i32, bind: impl Fn(&*mut sqlite3_stmt) -> ()) -> Result<()> { + let mut any_succeed = false; unsafe { for raw_statement in self.raw_statements.iter() { - sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); + if index <= sqlite3_bind_parameter_count(*raw_statement) { + bind(raw_statement); + self.connection + .last_error() + .with_context(|| format!("Failed to bind value at index {index}"))?; + any_succeed = true; + } else { + continue; + } } } - self.connection.last_error() + if any_succeed { + Ok(()) + } else { + Err(anyhow!("Failed to bind parameters")) + } + } + + pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> { + let index = index as c_int; + let blob_pointer = blob.as_ptr() as *const _; + let len = blob.len() as c_int; + + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_blob(*raw_statement, index, blob_pointer, len, SQLITE_TRANSIENT()); + }) } pub fn column_blob<'b>(&'b mut self, index: i32) -> Result<&'b [u8]> { let index = index as c_int; let pointer = unsafe { sqlite3_column_blob(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read blob at index {index}"))?; if pointer.is_null() { return Ok(&[]); } let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read length of blob 
at index {index}"))?; + unsafe { Ok(slice::from_raw_parts(pointer as *const u8, len)) } } pub fn bind_double(&self, index: i32, double: f64) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_double(*raw_statement, index, double); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_double(*raw_statement, index, double); + }) } pub fn column_double(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_double(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read double at index {index}"))?; Ok(result) } pub fn bind_int(&self, index: i32, int: i32) -> Result<()> { let index = index as c_int; - - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_int(*raw_statement, index, int); - } - }; - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_int(*raw_statement, index, int); + }) } pub fn column_int(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_int(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read int at index {index}"))?; Ok(result) } pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_int64(*raw_statement, index, int); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_int64(*raw_statement, index, int); + }) } pub fn column_int64(&self, index: i32) -> Result { let index = index as c_int; let result = unsafe { sqlite3_column_int64(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + 
.last_error() + .with_context(|| format!("Failed to read i64 at index {index}"))?; Ok(result) } pub fn bind_null(&self, index: i32) -> Result<()> { let index = index as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_null(*raw_statement, index); - } - } - self.connection.last_error() + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_null(*raw_statement, index); + }) } pub fn bind_text(&self, index: i32, text: &str) -> Result<()> { let index = index as c_int; let text_pointer = text.as_ptr() as *const _; let len = text.len() as c_int; - unsafe { - for raw_statement in self.raw_statements.iter() { - sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); - } - } - self.connection.last_error() + + self.bind_index_with(index, |raw_statement| unsafe { + sqlite3_bind_text(*raw_statement, index, text_pointer, len, SQLITE_TRANSIENT()); + }) } pub fn column_text<'b>(&'b mut self, index: i32) -> Result<&'b str> { let index = index as c_int; let pointer = unsafe { sqlite3_column_text(self.current_statement(), index) }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read text from column {index}"))?; if pointer.is_null() { return Ok(""); } let len = unsafe { sqlite3_column_bytes(self.current_statement(), index) as usize }; - self.connection.last_error()?; + self.connection + .last_error() + .with_context(|| format!("Failed to read text length at {index}"))?; let slice = unsafe { slice::from_raw_parts(pointer as *const u8, len) }; Ok(str::from_utf8(slice)?) 
@@ -247,11 +267,11 @@ impl<'a> Statement<'a> { self.step() } } - SQLITE_MISUSE => Ok(StepResult::Misuse), - other => self - .connection - .last_error() - .map(|_| StepResult::Other(other)), + SQLITE_MISUSE => Err(anyhow!("Statement step returned SQLITE_MISUSE")), + _other_error => { + self.connection.last_error()?; + unreachable!("Step returned error code and last error failed to catch it"); + } } } } @@ -293,11 +313,17 @@ impl<'a> Statement<'a> { callback: impl FnOnce(&mut Statement) -> Result, ) -> Result { if this.step()? != StepResult::Row { + return Err(anyhow!("single called with query that returns no rows.")); + } + let result = callback(this)?; + + if this.step()? != StepResult::Done { return Err(anyhow!( - "Single(Map) called with query that returns no rows." + "single called with a query that returns more than one row." )); } - callback(this) + + Ok(result) } let result = logic(self, callback); self.reset(); @@ -316,10 +342,21 @@ impl<'a> Statement<'a> { this: &mut Statement, callback: impl FnOnce(&mut Statement) -> Result, ) -> Result> { - if this.step()? != StepResult::Row { + if this.step().context("Failed on step call")? != StepResult::Row { return Ok(None); } - callback(this).map(|r| Some(r)) + + let result = callback(this) + .map(|r| Some(r)) + .context("Failed to parse row result")?; + + if this.step().context("Second step call")? != StepResult::Done { + return Err(anyhow!( + "maybe called with a query that returns more than one row." + )); + } + + Ok(result) } let result = logic(self, callback); self.reset(); @@ -350,6 +387,38 @@ mod test { statement::{Statement, StepResult}, }; + #[test] + fn binding_multiple_statements_with_parameter_gaps() { + let connection = + Connection::open_memory(Some("binding_multiple_statements_with_parameter_gaps")); + + connection + .exec(indoc! {" + CREATE TABLE test ( + col INTEGER + )"}) + .unwrap()() + .unwrap(); + + let statement = Statement::prepare( + &connection, + indoc! 
{" + INSERT INTO test(col) VALUES (?3); + SELECT * FROM test WHERE col = ?1"}, + ) + .unwrap(); + + statement + .bind_int(1, 1) + .expect("Could not bind parameter to first index"); + statement + .bind_int(2, 2) + .expect("Could not bind parameter to second index"); + statement + .bind_int(3, 3) + .expect("Could not bind parameter to third index"); + } + #[test] fn blob_round_trips() { let connection1 = Connection::open_memory(Some("blob_round_trips")); diff --git a/crates/sqlez/src/typed_statements.rs b/crates/sqlez/src/typed_statements.rs index 98f51b970a1e856df60f0f574419fdea0ea7d757..c7d8b20aa556d93fd08024a32667c7337d9b1013 100644 --- a/crates/sqlez/src/typed_statements.rs +++ b/crates/sqlez/src/typed_statements.rs @@ -1,4 +1,4 @@ -use anyhow::Result; +use anyhow::{Context, Result}; use crate::{ bindable::{Bind, Column}, @@ -49,6 +49,12 @@ impl Connection { query: &str, ) -> Result Result>> { let mut statement = Statement::prepare(&self, query)?; - Ok(move |bindings| statement.with_bindings(bindings)?.maybe_row::()) + Ok(move |bindings| { + statement + .with_bindings(bindings) + .context("Bindings failed")? + .maybe_row::() + .context("Maybe row failed") + }) } } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 384dcc18e0fb3be659fb16d2b381548017cb89b0..07bca0c66fa2f2fbb1e7ec6ca6c11e3e44084e70 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -29,15 +29,21 @@ impl Domain for Terminal { impl TerminalDb { sql_method! { - save_working_directory(item_id: ItemId, workspace_id: WorkspaceId, working_directory: &Path) -> Result<()>: - "INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3)" + save_working_directory(item_id: ItemId, + workspace_id: WorkspaceId, + working_directory: &Path) -> Result<()>: + indoc!{" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?1, ?2, ?3) + "} } sql_method! 
{ get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: - "SELECT working_directory - FROM terminals - WHERE item_id = ? AND workspace_id = ?" + indoc!{" + SELECT working_directory + FROM terminals + WHERE item_id = ? AND workspace_id = ? + "} } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index a4073d27d39a04b27406f7e25350be2481e3eeae..477e5a496021c10992629d8ee26c77ba50ba94eb 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -152,7 +152,7 @@ impl WorkspaceDb { "})?((&workspace.location, workspace.id)) .context("clearing out old locations")?; - // Update or insert + // Upsert self.exec_bound(indoc! { "INSERT INTO workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) @@ -190,8 +190,8 @@ impl WorkspaceDb { .log_err(); } - sql_method! { - next_id() -> Result>: + sql_method!{ + next_id() -> Result: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" } @@ -402,6 +402,10 @@ mod tests { .unwrap(); let id = db.next_id().unwrap(); + // Assert the empty row got inserted + assert_eq!(Some(id), db.select_row_bound:: + ("SELECT workspace_id FROM workspaces WHERE workspace_id = ?").unwrap() + (id).unwrap()); db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") .unwrap()(("test-text-1", id)) diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 111a6904c65f4f555e18be496ad1873325305916..2f0bc050d21ee025d8cb06f57c08c3cc31ef2f87 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -3,7 +3,7 @@ use std::{ sync::Arc, }; -use anyhow::Result; +use anyhow::{Context, Result}; use async_recursion::async_recursion; use gpui::{AsyncAppContext, Axis, ModelHandle, Task, ViewHandle}; @@ -52,7 +52,7 @@ impl Column for WorkspaceLocation { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let 
blob = statement.column_blob(start_index)?; Ok(( - WorkspaceLocation(bincode::deserialize(blob)?), + WorkspaceLocation(bincode::deserialize(blob).context("Bincode failed")?), start_index + 1, )) } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 0a4a6c87407c8133d14a031c2c5c4ee9f2537ec9..155c95e4e823d95caa8099bd94b480364e3144bd 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -633,11 +633,11 @@ impl Workspace { active_call = Some((call, subscriptions)); } - let id = if let Some(id) = serialized_workspace.as_ref().map(|ws| ws.id) { - id - } else { - DB.next_id().log_err().flatten().unwrap_or(0) - }; + let database_id = serialized_workspace + .as_ref() + .map(|ws| ws.id) + .or_else(|| DB.next_id().log_err()) + .unwrap_or(0); let mut this = Workspace { modal: None, @@ -666,7 +666,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, - database_id: id, + database_id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); diff --git a/dest-term.db b/dest-term.db new file mode 100644 index 0000000000000000000000000000000000000000..d6115b0670d7ce5e17610aa1d678d320a1615e5a GIT binary patch literal 45056 zcmeI4+iv4T9LAk&b68hZxj~&q-i&G5PN|v6PwmF_N2CZ zST1(8+b(;91aH6%55PO{7`zNK<8#ucu;OSwt4Ss^_GJFQ@0;<=Sn9!}t{qt9E9yB$ zK-Q%bdr`ztKZrYwT45;^nYL+N4C_i z$^vV*4%)3PRc?|4t>0;DYjRalmr9cDn%0GXGGf238>ayk-}MB5zLq5CFTbwU6h$e$ zS{D#`dwua%&%qN}Avd1P&>_7&jr0cH?kaikfM`dyPuO2@WQo`tJ~^|kb7GsT#HTDf zEOu!?PF?%t)VfiMv|e$_*l@E#O_VZaOroiuvzElI39q6o7@NAWz7xfNl^$FrelVp5 z`&-vNxqgk&iA1XKbLujn8iI2ON3)Px( z|9XBhPk65pp61xlN6Hb-CE4G!CQoSK{ECGrIN*r6T(2 z)q>DNv`V~Ha|(p54XI4@m?`wxktz`=-+zo@V4qpjg}ke8QaWPVh~LUaovk((a#Aoc zu+3naE6R7J#MD>s&efEA_e!sxC1fJ797dF^Oi_yAi}uF~B$2)jtXr+j1}S@J1lIL- zcBy%roquNq#swQ?^P?dbOm$D44+v>|W_bZ?$EZOblEx?0v%jHkz`p)dN?tc8{&^ke za;02TmX}N4z9a0BnlMe>$(bX~zj>ZOX82=7na2E{!{-NN(Ca)Js9}gE9Ukjvwr{i9 zO6)W-R%N@AILUB_N7NGP1icR>VekU 
z-Jfw+O`4M%3BplIbkFSVY5h)B*;&5n_LScI-n>sNkom za!%$nKbjAk)G=%~WG=#I6UlKxQRF1oh_mG80{h)^?M){Rp52nN2*85ka(s)YiXBm6i+O9{tNpglL)X`N3z>i6T3H4>(zL_C>|tcZPsmw z)uPw}j^6SkkJw_xZhGbc%&eyyF~@(~Zu*AI*Z=IHKMoK80T2KI5C8!X009sH0T2KI z5CDM!6OegqSpOGTVT1+(AOHd&00JNY0w4eaAOHd&00NSP=l^g42!H?xfB*=900@8p z2!H?xfB*;-p8(eX#n%`Sf&d7B00@8p2!H?xfB*=900@8p*8lJT2!H?xfB*=900@8p z2!H?xfB*;-p8(eX#n%`Sf&d7B00@8p2!H?xfB*=900@8p{{J6500JNY0w4eaAOHd& z00JNY0w4ea#V3I0|HaoB5rO~+fB*=900@8p2!H?xfB*=90G|KD10VnbAOHd&00JNY Q0w4eaAOHd&P<#Tv0Rl18NB{r; literal 0 HcmV?d00001 diff --git a/dest-workspace.db b/dest-workspace.db new file mode 100644 index 0000000000000000000000000000000000000000..90682f86421cbe4f9180ef12caecb0620d5218ee GIT binary patch literal 36864 zcmeI4-EQMV6vyp$^Uj*jXw*IBu!+ z!zTM!eUgv%p}<<|f!b1=HMPyc0w-RFTJV4zH`%`0Pz9@6rCqD+tI?9q$-YN%TC%-x z+o@f#J9>FeJ}8N{7Qc_*+DWT^RB1gSBc|GH&pIuL7BU6a)>`$NR!Aq6_Z3+Zw2VvV z>6xhB)h8~e^Db%JEz;M<)n+oKJbWlW`;7deXIQRbcd2L5bSma0(GuTTrbdMPBc1ho zKh|y0bobQelW|0SawgBK*vQZ=&!5BuEASqj*<3h~*EYc!&#C;UE{^&@fIKQcnH-A2 z?ouP(T+FrwjeV<~d|-TGi7N%teWxU7oL+B|9@H!PNX8WKd+Sq~8!-#$L&uJApBq1V z&o$4DP`z+)c|*O*`I*p$?iIB<+HB0^uwZT=-NH0h6dy{VQ(wPzFQq(qAV2>mBr^r; z!I&l!3zP!*)c-(%(9*vG>rPh|!_4gIuCd&mE#AM&onNv6{Zd5Pcr^3`Q?1Epm$BR@ zhV6=eoaNXfmivg?=GWYE#p%DLDD|2X49WAB+$Z5&rwr&)j8QGFBpyf^Hgn~o_Sw1KBltcngLs-#=J0k=s*s^CkP zt8&-nBhsy9o(^0}sh7gf`$pf_k1qEdKXwC(q;uYLVX!{8axNR_VuV3KNZmG0WEbf$wtW?}Bmv-MPWkiXb{aq3- z93TJ!AOHd&00JNY0w4eaAOHd&00JvWV0~TM@c#ZUe)Nw61V8`;KmY_l00ck)1V8`; zKmY_lV1)@_{lCKE!ZZ*70T2KI5C8!X009sH0T2KI5SS9c`X8nB%05JggUkpjuj@#S$G! 
zIdm5-+NA5vfb9)-vj^Ba>@oH-BdN2UI9su!`U#0ckrK`S@B2uR6p;OgT|2PI*VJ>2 zfUHSxN#(NiDIt<1mDukb`;8BNemg#4fBE@MM@jlne|LW2Qj#lArG@Y3e_q&~dnq5) zf2;ml|E2P@`r}O%pd18100fFj;Kjn7nzFc9x^M#H@yH4+&#_%&S9Tk$U;#^Mv=&MO`?&6zTO;ME6 zt2F_Ux7QaR^&C8r6>{Ur3?0(j)kts9?XHmf_lb6B`-J@jhn9%F;geI_IwQ8ZLVU`y z!(x{PpM~Wcj>`Z;s;Y| zu(!!A7%4SvgD9??19+al9G6_L`?UEYZB|kfF{PS`KbrNb^09KAKZM5ms`zu~V!m2a z?%gZB{7U#@Xt~TYx@cK8UCX&7TN0Wj3L@d3=_z6B$A-tM)(<>-GS0Y4n2EcJIF{jZ zKd}i`>>h)7lo^QEeWl8O>gKEmV&vKGC_+xeAUCOzuPztcl*VDy{&F0Ab4J&Gp;Saa zy_^?%h*pV@YEFT$wIP*>9y5hLJ5nVA<@=8@4D3^Dx{!DE4N6B$8}VJ)sI%4PLQV=M z2DTYYb4B^Cl$iSR{o6I=?%mR>X9<}IEQb*#D^rwW_@e!>0!gH=1M5aBvq8!p8i94S zon30)WamFvfpN}8+5Bk81ykJ<=OaQIUszti+A(U72c+>C_3Uq{8?e)VOUbJS#lNoO zTr8Js%FP>)NKUh&|qEMU?;;PF{Y9yRv^3(*pB4~hBFS$w5e{j23?J`2YohG(R8h|qwZ_1 zoxK@P)ucJOksutUMEA@-p4M(vmF=bL9#5$*`f8fyYw9|m-+Cn>^YaHnX2(A8gbIF| zD(7TQ^P~BoNgcy>L*^oUHjx}B6h%&QkGM*1&a>|=*WQ-u@30RXAOHd&00JQJ1`~Mx ztX%n^esLUcG2|Y)ai(9~J(zX*hl5gOvHsI1$s{F*Aj$PHz5HjVm+NwVLr}~=#mX;| z0;|Bnt^%*pK2kkehQu@dSxdWgqj+M;^MBYUnM8oiI+ESCn%KRWTCc|YMe!gpYqM@$ ztQN%OY!KmY_l00ck)1V8`; zKmY_l00cmw_yn;2FTTcz5ClK~1V8`;KmY_l00ck)1V8`;@c;kd0T2KI5C8!X009sH z0T2KI5C8!XC_Vwa|1ZAAh!6xo00ck)1V8`;KmY_l00ck)1n~YJ9smIl009sH0T2KI Q5C8!X009sHf#MVR9n-hdKL7v# literal 0 HcmV?d00001 From b01243109e0b3378bec24756f593379bdbed4ab6 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 21 Nov 2022 13:43:43 -0800 Subject: [PATCH 66/86] Removed database test files --- .gitignore | 2 +- dest-term.db | Bin 45056 -> 0 bytes dest-workspace.db | Bin 36864 -> 0 bytes dest.db | Bin 45056 -> 0 bytes 4 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 dest-term.db delete mode 100644 dest-workspace.db delete mode 100644 dest.db diff --git a/.gitignore b/.gitignore index e2d90adbb153879e0438f72ce9cfcc526917a794..356f4d97cde31206e575c7aa18772570e21bda7c 100644 --- a/.gitignore +++ b/.gitignore @@ -18,4 +18,4 @@ DerivedData/ .swiftpm/config/registries.json .swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata .netrc 
-crates/db/test-db.db +**/*.db diff --git a/dest-term.db b/dest-term.db deleted file mode 100644 index d6115b0670d7ce5e17610aa1d678d320a1615e5a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 45056 zcmeI4+iv4T9LAk&b68hZxj~&q-i&G5PN|v6PwmF_N2CZ zST1(8+b(;91aH6%55PO{7`zNK<8#ucu;OSwt4Ss^_GJFQ@0;<=Sn9!}t{qt9E9yB$ zK-Q%bdr`ztKZrYwT45;^nYL+N4C_i z$^vV*4%)3PRc?|4t>0;DYjRalmr9cDn%0GXGGf238>ayk-}MB5zLq5CFTbwU6h$e$ zS{D#`dwua%&%qN}Avd1P&>_7&jr0cH?kaikfM`dyPuO2@WQo`tJ~^|kb7GsT#HTDf zEOu!?PF?%t)VfiMv|e$_*l@E#O_VZaOroiuvzElI39q6o7@NAWz7xfNl^$FrelVp5 z`&-vNxqgk&iA1XKbLujn8iI2ON3)Px( z|9XBhPk65pp61xlN6Hb-CE4G!CQoSK{ECGrIN*r6T(2 z)q>DNv`V~Ha|(p54XI4@m?`wxktz`=-+zo@V4qpjg}ke8QaWPVh~LUaovk((a#Aoc zu+3naE6R7J#MD>s&efEA_e!sxC1fJ797dF^Oi_yAi}uF~B$2)jtXr+j1}S@J1lIL- zcBy%roquNq#swQ?^P?dbOm$D44+v>|W_bZ?$EZOblEx?0v%jHkz`p)dN?tc8{&^ke za;02TmX}N4z9a0BnlMe>$(bX~zj>ZOX82=7na2E{!{-NN(Ca)Js9}gE9Ukjvwr{i9 zO6)W-R%N@AILUB_N7NGP1icR>VekU z-Jfw+O`4M%3BplIbkFSVY5h)B*;&5n_LScI-n>sNkom za!%$nKbjAk)G=%~WG=#I6UlKxQRF1oh_mG80{h)^?M){Rp52nN2*85ka(s)YiXBm6i+O9{tNpglL)X`N3z>i6T3H4>(zL_C>|tcZPsmw z)uPw}j^6SkkJw_xZhGbc%&eyyF~@(~Zu*AI*Z=IHKMoK80T2KI5C8!X009sH0T2KI z5CDM!6OegqSpOGTVT1+(AOHd&00JNY0w4eaAOHd&00NSP=l^g42!H?xfB*=900@8p z2!H?xfB*;-p8(eX#n%`Sf&d7B00@8p2!H?xfB*=900@8p*8lJT2!H?xfB*=900@8p z2!H?xfB*;-p8(eX#n%`Sf&d7B00@8p2!H?xfB*=900@8p{{J6500JNY0w4eaAOHd& z00JNY0w4ea#V3I0|HaoB5rO~+fB*=900@8p2!H?xfB*=90G|KD10VnbAOHd&00JNY Q0w4eaAOHd&P<#Tv0Rl18NB{r; diff --git a/dest-workspace.db b/dest-workspace.db deleted file mode 100644 index 90682f86421cbe4f9180ef12caecb0620d5218ee..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36864 zcmeI4-EQMV6vyp$^Uj*jXw*IBu!+ z!zTM!eUgv%p}<<|f!b1=HMPyc0w-RFTJV4zH`%`0Pz9@6rCqD+tI?9q$-YN%TC%-x z+o@f#J9>FeJ}8N{7Qc_*+DWT^RB1gSBc|GH&pIuL7BU6a)>`$NR!Aq6_Z3+Zw2VvV z>6xhB)h8~e^Db%JEz;M<)n+oKJbWlW`;7deXIQRbcd2L5bSma0(GuTTrbdMPBc1ho zKh|y0bobQelW|0SawgBK*vQZ=&!5BuEASqj*<3h~*EYc!&#C;UE{^&@fIKQcnH-A2 
z?ouP(T+FrwjeV<~d|-TGi7N%teWxU7oL+B|9@H!PNX8WKd+Sq~8!-#$L&uJApBq1V z&o$4DP`z+)c|*O*`I*p$?iIB<+HB0^uwZT=-NH0h6dy{VQ(wPzFQq(qAV2>mBr^r; z!I&l!3zP!*)c-(%(9*vG>rPh|!_4gIuCd&mE#AM&onNv6{Zd5Pcr^3`Q?1Epm$BR@ zhV6=eoaNXfmivg?=GWYE#p%DLDD|2X49WAB+$Z5&rwr&)j8QGFBpyf^Hgn~o_Sw1KBltcngLs-#=J0k=s*s^CkP zt8&-nBhsy9o(^0}sh7gf`$pf_k1qEdKXwC(q;uYLVX!{8axNR_VuV3KNZmG0WEbf$wtW?}Bmv-MPWkiXb{aq3- z93TJ!AOHd&00JNY0w4eaAOHd&00JvWV0~TM@c#ZUe)Nw61V8`;KmY_l00ck)1V8`; zKmY_lV1)@_{lCKE!ZZ*70T2KI5C8!X009sH0T2KI5SS9c`X8nB%05JggUkpjuj@#S$G! zIdm5-+NA5vfb9)-vj^Ba>@oH-BdN2UI9su!`U#0ckrK`S@B2uR6p;OgT|2PI*VJ>2 zfUHSxN#(NiDIt<1mDukb`;8BNemg#4fBE@MM@jlne|LW2Qj#lArG@Y3e_q&~dnq5) zf2;ml|E2P@`r}O%pd18100fFj;Kjn7nzFc9x^M#H@yH4+&#_%&S9Tk$U;#^Mv=&MO`?&6zTO;ME6 zt2F_Ux7QaR^&C8r6>{Ur3?0(j)kts9?XHmf_lb6B`-J@jhn9%F;geI_IwQ8ZLVU`y z!(x{PpM~Wcj>`Z;s;Y| zu(!!A7%4SvgD9??19+al9G6_L`?UEYZB|kfF{PS`KbrNb^09KAKZM5ms`zu~V!m2a z?%gZB{7U#@Xt~TYx@cK8UCX&7TN0Wj3L@d3=_z6B$A-tM)(<>-GS0Y4n2EcJIF{jZ zKd}i`>>h)7lo^QEeWl8O>gKEmV&vKGC_+xeAUCOzuPztcl*VDy{&F0Ab4J&Gp;Saa zy_^?%h*pV@YEFT$wIP*>9y5hLJ5nVA<@=8@4D3^Dx{!DE4N6B$8}VJ)sI%4PLQV=M z2DTYYb4B^Cl$iSR{o6I=?%mR>X9<}IEQb*#D^rwW_@e!>0!gH=1M5aBvq8!p8i94S zon30)WamFvfpN}8+5Bk81ykJ<=OaQIUszti+A(U72c+>C_3Uq{8?e)VOUbJS#lNoO zTr8Js%FP>)NKUh&|qEMU?;;PF{Y9yRv^3(*pB4~hBFS$w5e{j23?J`2YohG(R8h|qwZ_1 zoxK@P)ucJOksutUMEA@-p4M(vmF=bL9#5$*`f8fyYw9|m-+Cn>^YaHnX2(A8gbIF| zD(7TQ^P~BoNgcy>L*^oUHjx}B6h%&QkGM*1&a>|=*WQ-u@30RXAOHd&00JQJ1`~Mx ztX%n^esLUcG2|Y)ai(9~J(zX*hl5gOvHsI1$s{F*Aj$PHz5HjVm+NwVLr}~=#mX;| z0;|Bnt^%*pK2kkehQu@dSxdWgqj+M;^MBYUnM8oiI+ESCn%KRWTCc|YMe!gpYqM@$ ztQN%OY!KmY_l00ck)1V8`; zKmY_l00cmw_yn;2FTTcz5ClK~1V8`;KmY_l00ck)1V8`;@c;kd0T2KI5C8!X009sH z0T2KI5C8!XC_Vwa|1ZAAh!6xo00ck)1V8`;KmY_l00ck)1n~YJ9smIl009sH0T2KI Q5C8!X009sHf#MVR9n-hdKL7v# From 1cc3e4820a1e32c32b3e6f41d8b55b9800b047bd Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 23 Nov 2022 01:53:58 -0800 Subject: [PATCH 67/86] working serialized writes with panics on failure. 
Everything seems to be working --- Cargo.lock | 3 + crates/collab/src/integration_tests.rs | 4 + crates/collab_ui/src/collab_ui.rs | 1 + crates/command_palette/src/command_palette.rs | 2 +- crates/db/src/db.rs | 143 +++++++- crates/db/src/kvp.rs | 2 +- crates/db/test.db | Bin 40960 -> 0 bytes crates/diagnostics/src/diagnostics.rs | 1 + crates/editor/src/editor.rs | 29 +- crates/editor/src/items.rs | 39 ++- crates/editor/src/persistence.rs | 4 +- .../src/test/editor_lsp_test_context.rs | 1 + crates/file_finder/src/file_finder.rs | 12 +- crates/project_panel/src/project_panel.rs | 2 + crates/sqlez/Cargo.toml | 5 +- crates/sqlez/src/bindable.rs | 12 + crates/sqlez/src/connection.rs | 12 +- crates/sqlez/src/lib.rs | 5 +- crates/sqlez/src/migrations.rs | 58 ++-- crates/sqlez/src/statement.rs | 11 +- crates/sqlez/src/thread_safe_connection.rs | 133 +++++--- crates/sqlez/src/util.rs | 28 ++ crates/terminal/src/persistence.rs | 40 ++- crates/terminal/src/terminal.rs | 21 +- .../terminal/src/terminal_container_view.rs | 8 + .../src/tests/terminal_test_context.rs | 1 + crates/vim/src/test/vim_test_context.rs | 1 + crates/workspace/src/dock.rs | 2 +- crates/workspace/src/item.rs | 11 +- crates/workspace/src/pane.rs | 8 +- crates/workspace/src/persistence.rs | 307 ++++++++++-------- crates/workspace/src/persistence/model.rs | 2 +- crates/workspace/src/workspace.rs | 71 ++-- crates/zed/src/zed.rs | 14 +- 34 files changed, 675 insertions(+), 318 deletions(-) delete mode 100644 crates/db/test.db create mode 100644 crates/sqlez/src/util.rs diff --git a/Cargo.lock b/Cargo.lock index e887dfee66b7d4e0ed3a72bea56bef96bfef6a84..150149c529b8396fe40a90e13491e28032440914 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5590,8 +5590,11 @@ name = "sqlez" version = "0.1.0" dependencies = [ "anyhow", + "futures 0.3.25", "indoc", + "lazy_static", "libsqlite3-sys", + "parking_lot 0.11.2", "thread_local", ] diff --git a/crates/collab/src/integration_tests.rs b/crates/collab/src/integration_tests.rs 
index 386ccfbbff6b7838362fb8dda2253a088943fc5d..989f0ac586b0bf4cf41ed30e76a9e738b18d7c91 100644 --- a/crates/collab/src/integration_tests.rs +++ b/crates/collab/src/integration_tests.rs @@ -909,6 +909,7 @@ async fn test_host_disconnect( let (_, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -3711,6 +3712,7 @@ async fn test_collaborating_with_code_actions( let (_window_b, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -3938,6 +3940,7 @@ async fn test_collaborating_with_renames(cx_a: &mut TestAppContext, cx_b: &mut T let (_window_b, workspace_b) = cx_b.add_window(|cx| { Workspace::new( Default::default(), + 0, project_b.clone(), |_, _| unimplemented!(), cx, @@ -6075,6 +6078,7 @@ impl TestClient { cx.add_view(&root_view, |cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/collab_ui/src/collab_ui.rs b/crates/collab_ui/src/collab_ui.rs index 3a20a2fc6930c9362fc1972889c32134240a3b55..964cec0f82a7ac0a74567d70261fe1b18f80a14a 100644 --- a/crates/collab_ui/src/collab_ui.rs +++ b/crates/collab_ui/src/collab_ui.rs @@ -53,6 +53,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { let mut workspace = Workspace::new( Default::default(), + 0, project, app_state.default_item_factory, cx, diff --git a/crates/command_palette/src/command_palette.rs b/crates/command_palette/src/command_palette.rs index 5af23b45d720ecfad4ed9faa3dd777d1238f2022..3742e36c7222932039ec96ba2d244cde53273f96 100644 --- a/crates/command_palette/src/command_palette.rs +++ b/crates/command_palette/src/command_palette.rs @@ -351,7 +351,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), [], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, 
|_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let editor = cx.add_view(&workspace, |cx| { let mut editor = Editor::single_line(None, cx); diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index b3370db753b0307aad2e425c69856ea798bd8330..b42b264b562941490420335d603f020448e79322 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -42,11 +42,11 @@ pub fn open_file_db() -> ThreadSafeConnection { create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(Some(db_path.to_string_lossy().as_ref()), true) + ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) .with_initialize_query(INITIALIZE_QUERY) } -pub fn open_memory_db(db_name: Option<&str>) -> ThreadSafeConnection { +pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) } @@ -66,7 +66,7 @@ macro_rules! connection { ::db::lazy_static::lazy_static! { pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - ::db::open_memory_db(None) + ::db::open_memory_db(stringify!($id)) } else { ::db::open_file_db() }); @@ -77,7 +77,7 @@ macro_rules! connection { #[macro_export] macro_rules! sql_method { ($id:ident() -> Result<()>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result<()> { + pub fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec($sql)?().context(::std::format!( @@ -87,8 +87,21 @@ macro_rules! 
sql_method { )) } }; + (async $id:ident() -> Result<()>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.exec($sql)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<()> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) @@ -99,8 +112,22 @@ macro_rules! sql_method { )) } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + connection.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select::<$return_type>($sql)?(()) @@ -111,8 +138,22 @@ macro_rules! 
sql_method { )) } }; + (async $id:ident() -> Result>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select::<$return_type>($sql)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) @@ -123,8 +164,22 @@ macro_rules! sql_method { )) } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row::<$return_type>($sql)?() @@ -135,8 +190,22 @@ macro_rules! 
sql_method { )) } }; + (async $id:ident() -> Result>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row::<$return_type>($sql)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) @@ -148,8 +217,22 @@ macro_rules! sql_method { } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident() -> Result<$return_type:ty>: $sql:expr) => { - pub fn $id(&self) -> $crate::sqlez::anyhow::Result<$return_type> { + pub fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.select_row::<$return_type>($sql)?() @@ -165,8 +248,27 @@ macro_rules! sql_method { )) } }; + (async $id:ident() -> Result<$return_type:ty>: $sql:expr) => { + pub async fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row::<$return_type>($sql)?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::sqlez::anyhow::Result<$return_type> { + pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) @@ -182,4 +284,23 @@ macro_rules! sql_method { )) } }; + (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { + pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.write(|connection| { + connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + ::std::stringify!($sql), + )) + }).await + } + }; } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 3cdcd99016f7ef91903070bc2b02f0d0983fd492..dd82c17615de4a65dcfe7936937ac523835b5030 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -61,7 +61,7 @@ mod tests { #[test] fn test_kvp() -> Result<()> { - let db = KeyValueStore(crate::open_memory_db(Some("test_kvp"))); + let db = KeyValueStore(crate::open_memory_db("test_kvp")); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/db/test.db b/crates/db/test.db deleted file mode 100644 index cedefe5f832586d90e62e9a50c9e8c7506cf81e7..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40960 zcmeI*(Qn&C90zbaX%fd%Yb1oiCRJRuX=$XEHf`ad5rZ^!OGKJ(iHn#(icDi#D~+AS zPS^2*sz4DEyzw9KFbN@q5bsEP=N*Yh{tX^DpJT_aQ>7ZIRI0DFCg(f*eE$5-PVX|g z^+y%m*2rgOx2@V_m3xKbdG0+zIF6g4KO+6{7H56RTcPiKAbnAChI_MdXIlD_OA8M; z>D!sVrTX+wGe6(@P0R`p(*Inv8}cCl0SG_<0uZ>q0{w!J%49P9*`BR_dZf|cB-$2x zNtbGJQIUyK+^EQeWzCC(E>5zcH%Ymw$lG#_RCg6p-K$g<$l@YV4t0yrfA*oqHq2V& zSl6Bsy}3XvlV-cchG~KrsWRGpJ&&HE*zD|uKe?-Xl~S;Gd{3&Vy`qMVo`b){A=DYk7Q<+D zO?m+vW^+umE?a?mLT?M!z8^KxCUmiEkTVua~fJ?d%UPUkmrDW1%fW-n@_cZ5dXOF3g|!TF5u z@+`cAhPL&CuIhXOS;JH;)YxG@aogK8+p2E3abwb!t#$k`upECMww0KhrpKL1|Hw%{ z%=~)m6PmyR0SG_<0uX=z1R!ut1o}%;iPwa)$IgH}^lKm+p<{N%n*tg&a&~5(f>5Vb=90uX=z1Rwwb2tWV=5P$##An@`6 zugBN;QL_fId+};|?8Jck{r_`L`a^pD@@>GL5P$##AOHafKmY;|fB*y_009V0K;T|1 z$*pl5{nH2T68xfn_%|)`e3Glut;UY}hr6)n|4Yv}=}+kw>3f>M0s#m>00Izz00bZa z0SG_<0uX?}?6g5Er z0uX=z1Rwwb2tWV=5P$##uB!l^|9@R0gLXjx0uX=z1Rwwb2tWV=5P$##CMWPOz-*A4 diff --git a/crates/diagnostics/src/diagnostics.rs b/crates/diagnostics/src/diagnostics.rs index ef8b81ac660867b6781e2c01885fe35c5cfe179a..f1c612a58d4102d1c108e6e48b757b04aa017fed 100644 --- a/crates/diagnostics/src/diagnostics.rs +++ b/crates/diagnostics/src/diagnostics.rs 
@@ -805,6 +805,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index d66fc3e28c2439fdf22c7afc01df4c70061ae4ad..63db71edaea29ac96c306a57e65e27cff2703af9 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -81,7 +81,7 @@ use std::{ pub use sum_tree::Bias; use theme::{DiagnosticStyle, Theme}; use util::{post_inc, ResultExt, TryFutureExt}; -use workspace::{ItemNavHistory, Workspace}; +use workspace::{ItemNavHistory, Workspace, WorkspaceId}; use crate::git::diff_hunk_to_display; @@ -584,6 +584,7 @@ pub struct Editor { pending_rename: Option, searchable: bool, cursor_shape: CursorShape, + workspace_id: Option, keymap_context_layers: BTreeMap, input_enabled: bool, leader_replica_id: Option, @@ -1137,31 +1138,6 @@ impl Editor { cx: &mut ViewContext, ) -> Self { let buffer = cx.add_model(|cx| MultiBuffer::singleton(buffer, cx)); - // if let Some(project) = project.as_ref() { - // if let Some(file) = buffer - // .read(cx) - // .as_singleton() - // .and_then(|buffer| buffer.read(cx).file()) - // .and_then(|file| file.as_local()) - // { - // // let item_id = cx.weak_handle().id(); - // // let workspace_id = project - // // .read(cx) - // // .visible_worktrees(cx) - // // .map(|worktree| worktree.read(cx).abs_path()) - // // .collect::>() - // // .into(); - // let path = file.abs_path(cx); - // dbg!(&path); - - // // cx.background() - // // .spawn(async move { - // // DB.save_path(item_id, workspace_id, path).log_err(); - // // }) - // // .detach(); - // } - // } - Self::new(EditorMode::Full, buffer, project, None, cx) } @@ -1262,6 +1238,7 @@ impl Editor { searchable: true, override_text_style: None, cursor_shape: Default::default(), + workspace_id: None, keymap_context_layers: Default::default(), input_enabled: true, leader_replica_id: None, diff --git a/crates/editor/src/items.rs 
b/crates/editor/src/items.rs index aea0d8b4372cf82a39f41ff7ec9903e6849d27ec..e724156faea5d48b18270c7ac42bd730c6a3a660 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -17,7 +17,7 @@ use std::{ path::{Path, PathBuf}, }; use text::Selection; -use util::TryFutureExt; +use util::{ResultExt, TryFutureExt}; use workspace::{ item::{FollowableItem, Item, ItemEvent, ItemHandle, ProjectItem}, searchable::{Direction, SearchEvent, SearchableItem, SearchableItemHandle}, @@ -554,6 +554,43 @@ impl Item for Editor { Some(breadcrumbs) } + fn added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { + let workspace_id = workspace.database_id(); + let item_id = cx.view_id(); + + fn serialize( + buffer: ModelHandle, + workspace_id: WorkspaceId, + item_id: ItemId, + cx: &mut MutableAppContext, + ) { + if let Some(file) = buffer.read(cx).file().and_then(|file| file.as_local()) { + let path = file.abs_path(cx); + + cx.background() + .spawn(async move { + DB.save_path(item_id, workspace_id, path.clone()) + .await + .log_err() + }) + .detach(); + } + } + + if let Some(buffer) = self.buffer().read(cx).as_singleton() { + serialize(buffer.clone(), workspace_id, item_id, cx); + + cx.subscribe(&buffer, |this, buffer, event, cx| { + if let Some(workspace_id) = this.workspace_id { + if let language::Event::FileHandleChanged = event { + serialize(buffer, workspace_id, cx.view_id(), cx); + } + } + }) + .detach(); + } + } + fn serialized_item_kind() -> Option<&'static str> { Some("Editor") } diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index a77eec7fd132b7155b2df9a0b05bc6468a4ef70f..b2f76294aa355236fc32bf8a5ae7ee3526ae88bd 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,4 +1,4 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use db::{connection, sql_method}; use indoc::indoc; @@ -39,7 +39,7 @@ impl EditorDb { } sql_method! 
{ - save_path(item_id: ItemId, workspace_id: WorkspaceId, path: &Path) -> Result<()>: + async save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()>: indoc! {" INSERT OR REPLACE INTO editors(item_id, workspace_id, path) VALUES (?, ?, ?)"} diff --git a/crates/editor/src/test/editor_lsp_test_context.rs b/crates/editor/src/test/editor_lsp_test_context.rs index 9cf305ad37540230e5648f68e090be8a03941c14..b65b09cf17c03c828031aed78bfebab6d4dc8ac0 100644 --- a/crates/editor/src/test/editor_lsp_test_context.rs +++ b/crates/editor/src/test/editor_lsp_test_context.rs @@ -66,6 +66,7 @@ impl<'a> EditorLspTestContext<'a> { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/file_finder/src/file_finder.rs b/crates/file_finder/src/file_finder.rs index b0016002fa9f57d6df60a36a95669ed77728c58b..5122a46c2c161b474627bf7bfabeb72b4625f884 100644 --- a/crates/file_finder/src/file_finder.rs +++ b/crates/file_finder/src/file_finder.rs @@ -317,7 +317,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); cx.dispatch_action(window_id, Toggle); @@ -373,7 +373,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/dir".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -449,7 +449,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + 
Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -475,7 +475,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -529,7 +529,7 @@ mod tests { ) .await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); @@ -569,7 +569,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let (_, finder) = cx.add_window(|cx| FileFinder::new(workspace.read(cx).project().clone(), cx)); diff --git a/crates/project_panel/src/project_panel.rs b/crates/project_panel/src/project_panel.rs index dae1f70aae02344d4437e3c98ec72d3f61858758..e88f3004eb6a6a2a183241f39a1d5dca18936d14 100644 --- a/crates/project_panel/src/project_panel.rs +++ b/crates/project_panel/src/project_panel.rs @@ -1396,6 +1396,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, @@ -1495,6 +1496,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 
cbb4504a04ef9871106333e9b365c7f236495445..cab1af7d6c5e38277a248f3870e29e1fbcb2258f 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -9,4 +9,7 @@ edition = "2021" anyhow = { version = "1.0.38", features = ["backtrace"] } indoc = "1.0.7" libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } -thread_local = "1.1.4" \ No newline at end of file +thread_local = "1.1.4" +lazy_static = "1.4" +parking_lot = "0.11.1" +futures = "0.3" \ No newline at end of file diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index 51f67dd03f053ec4747f14ae1eb6cdef7e8ee573..ffef7814f9327d758e0f8aa4bb36b8644f7b5bc8 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -322,6 +322,18 @@ impl Bind for &Path { } } +impl Bind for Arc { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + self.as_ref().bind(statement, start_index) + } +} + +impl Bind for PathBuf { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + (self.as_ref() as &Path).bind(statement, start_index) + } +} + impl Column for PathBuf { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let blob = statement.column_blob(start_index)?; diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 5a71cefb52bdf33100bea53a0ccaa74303a957c3..4beddb4fed37c60f220c6084fd3d7aa0af4708b6 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -10,16 +10,18 @@ use libsqlite3_sys::*; pub struct Connection { pub(crate) sqlite3: *mut sqlite3, persistent: bool, - phantom: PhantomData, + pub(crate) write: bool, + _sqlite: PhantomData, } unsafe impl Send for Connection {} impl Connection { - fn open(uri: &str, persistent: bool) -> Result { + pub(crate) fn open(uri: &str, persistent: bool) -> Result { let mut connection = Self { sqlite3: 0 as *mut _, persistent, - phantom: PhantomData, + write: true, + _sqlite: PhantomData, }; let flags = SQLITE_OPEN_CREATE 
| SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; @@ -60,6 +62,10 @@ impl Connection { self.persistent } + pub fn can_write(&self) -> bool { + self.write + } + pub fn backup_main(&self, destination: &Connection) -> Result<()> { unsafe { let backup = sqlite3_backup_init( diff --git a/crates/sqlez/src/lib.rs b/crates/sqlez/src/lib.rs index c5d2658666933ee710470269e316bc3ac943fdd7..a22cfff2b30c68f9b6d5cd1e1e7f57e1d5fdd52f 100644 --- a/crates/sqlez/src/lib.rs +++ b/crates/sqlez/src/lib.rs @@ -1,5 +1,3 @@ -pub use anyhow; - pub mod bindable; pub mod connection; pub mod domain; @@ -8,3 +6,6 @@ pub mod savepoint; pub mod statement; pub mod thread_safe_connection; pub mod typed_statements; +mod util; + +pub use anyhow; diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 23af04bbf4e30be0af9c05ccfa1f8bcc0e56cf4b..6c0aafaf20d4ce0dcab87c8ec193b6a8ecf4d3c9 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -11,46 +11,48 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { - // Setup the migrations table unconditionally - self.exec(indoc! {" - CREATE TABLE IF NOT EXISTS migrations ( + self.with_savepoint("migrating", || { + // Setup the migrations table unconditionally + self.exec(indoc! {" + CREATE TABLE IF NOT EXISTS migrations ( domain TEXT, step INTEGER, migration TEXT - )"})?()?; + )"})?()?; - let completed_migrations = - self.select_bound::<&str, (String, usize, String)>(indoc! {" + let completed_migrations = + self.select_bound::<&str, (String, usize, String)>(indoc! {" SELECT domain, step, migration FROM migrations WHERE domain = ? 
ORDER BY step - "})?(domain)?; + "})?(domain)?; - let mut store_completed_migration = - self.exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; + let mut store_completed_migration = self + .exec_bound("INSERT INTO migrations (domain, step, migration) VALUES (?, ?, ?)")?; - for (index, migration) in migrations.iter().enumerate() { - if let Some((_, _, completed_migration)) = completed_migrations.get(index) { - if completed_migration != migration { - return Err(anyhow!(formatdoc! {" - Migration changed for {} at step {} - - Stored migration: - {} - - Proposed migration: - {}", domain, index, completed_migration, migration})); - } else { - // Migration already run. Continue - continue; + for (index, migration) in migrations.iter().enumerate() { + if let Some((_, _, completed_migration)) = completed_migrations.get(index) { + if completed_migration != migration { + return Err(anyhow!(formatdoc! {" + Migration changed for {} at step {} + + Stored migration: + {} + + Proposed migration: + {}", domain, index, completed_migration, migration})); + } else { + // Migration already run. 
Continue + continue; + } } - } - self.exec(migration)?()?; - store_completed_migration((domain, index, *migration))?; - } + self.exec(migration)?()?; + store_completed_migration((domain, index, *migration))?; + } - Ok(()) + Ok(()) + }) } } diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 0a7305c6edc02ee3d8a05f4a5dbb262c557c17d5..86035f5d0acd322a0edb31c534fa14b0fbc9257d 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -2,7 +2,7 @@ use std::ffi::{c_int, CStr, CString}; use std::marker::PhantomData; use std::{ptr, slice, str}; -use anyhow::{anyhow, Context, Result}; +use anyhow::{anyhow, bail, Context, Result}; use libsqlite3_sys::*; use crate::bindable::{Bind, Column}; @@ -57,12 +57,21 @@ impl<'a> Statement<'a> { &mut raw_statement, &mut remaining_sql_ptr, ); + remaining_sql = CStr::from_ptr(remaining_sql_ptr); statement.raw_statements.push(raw_statement); connection.last_error().with_context(|| { format!("Prepare call failed for query:\n{}", query.as_ref()) })?; + + if !connection.can_write() && sqlite3_stmt_readonly(raw_statement) == 0 { + let sql = CStr::from_ptr(sqlite3_sql(raw_statement)); + + bail!( + "Write statement prepared with connection that is not write capable. SQL:\n{} ", + sql.to_str()?) 
+ } } } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 7c5bf6388cc0d699768ae77b455338e38614961c..5402c6b5e196c5c3b055be0155bec6e65e763c68 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,36 +1,41 @@ -use std::{marker::PhantomData, ops::Deref, sync::Arc}; - -use connection::Connection; +use futures::{Future, FutureExt}; +use lazy_static::lazy_static; +use parking_lot::RwLock; +use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; use crate::{ - connection, + connection::Connection, domain::{Domain, Migrator}, + util::UnboundedSyncSender, }; +type QueuedWrite = Box; + +lazy_static! { + static ref QUEUES: RwLock, UnboundedSyncSender>> = + Default::default(); +} + pub struct ThreadSafeConnection { - uri: Option>, + uri: Arc, persistent: bool, initialize_query: Option<&'static str>, - connection: Arc>, - _pd: PhantomData, + connections: Arc>, + _migrator: PhantomData, } unsafe impl Send for ThreadSafeConnection {} unsafe impl Sync for ThreadSafeConnection {} impl ThreadSafeConnection { - pub fn new(uri: Option<&str>, persistent: bool) -> Self { - if persistent == true && uri == None { - // This panic is securing the unwrap in open_file(), don't remove it! - panic!("Cannot create a persistent connection without a URI") - } + pub fn new(uri: &str, persistent: bool) -> Self { Self { - uri: uri.map(|str| Arc::from(str)), + uri: Arc::from(uri), persistent, initialize_query: None, - connection: Default::default(), - _pd: PhantomData, + connections: Default::default(), + _migrator: PhantomData, } } @@ -46,13 +51,13 @@ impl ThreadSafeConnection { /// If opening fails, the connection falls back to a shared memory connection fn open_file(&self) -> Connection { // This unwrap is secured by a panic in the constructor. Be careful if you remove it! 
- Connection::open_file(self.uri.as_ref().unwrap()) + Connection::open_file(self.uri.as_ref()) } /// Opens a shared memory connection using the file path as the identifier. This unwraps /// as we expect it always to succeed fn open_shared_memory(&self) -> Connection { - Connection::open_memory(self.uri.as_ref().map(|str| str.deref())) + Connection::open_memory(Some(self.uri.as_ref())) } // Open a new connection for the given domain, leaving this @@ -62,10 +67,74 @@ impl ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query, - connection: Default::default(), - _pd: PhantomData, + connections: Default::default(), + _migrator: PhantomData, } } + + pub fn write( + &self, + callback: impl 'static + Send + FnOnce(&Connection) -> T, + ) -> impl Future { + // Startup write thread for this database if one hasn't already + // been started and insert a channel to queue work for it + if !QUEUES.read().contains_key(&self.uri) { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + let mut write_connection = self.create_connection(); + // Enable writes for this connection + write_connection.write = true; + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + let mut queues = QUEUES.write(); + queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender)); + } + + // Grab the queue for this database + let queues = QUEUES.read(); + let write_channel = queues.get(&self.uri).unwrap(); + + // Create a one shot channel for the result of the queued write + // so we can await on the result + let (sender, reciever) = futures::channel::oneshot::channel(); + write_channel + .send(Box::new(move |connection| { + sender.send(callback(connection)).ok(); + })) + .expect("Could not send write action to background thread"); + + reciever.map(|response| response.expect("Background thread unexpectedly closed")) + } + + pub(crate) fn create_connection(&self) -> 
Connection { + let mut connection = if self.persistent { + self.open_file() + } else { + self.open_shared_memory() + }; + + // Enable writes for the migrations and initialization queries + connection.write = true; + + if let Some(initialize_query) = self.initialize_query { + connection.exec(initialize_query).expect(&format!( + "Initialize query failed to execute: {}", + initialize_query + ))() + .unwrap(); + } + + M::migrate(&connection).expect("Migrations failed"); + + // Disable db writes for normal thread local connection + connection.write = false; + connection + } } impl Clone for ThreadSafeConnection { @@ -74,8 +143,8 @@ impl Clone for ThreadSafeConnection { uri: self.uri.clone(), persistent: self.persistent, initialize_query: self.initialize_query.clone(), - connection: self.connection.clone(), - _pd: PhantomData, + connections: self.connections.clone(), + _migrator: PhantomData, } } } @@ -88,25 +157,7 @@ impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { - self.connection.get_or(|| { - let connection = if self.persistent { - self.open_file() - } else { - self.open_shared_memory() - }; - - if let Some(initialize_query) = self.initialize_query { - connection.exec(initialize_query).expect(&format!( - "Initialize query failed to execute: {}", - initialize_query - ))() - .unwrap(); - } - - M::migrate(&connection).expect("Migrations failed"); - - connection - }) + self.connections.get_or(|| self.create_connection()) } } @@ -151,7 +202,7 @@ mod test { } } - let _ = ThreadSafeConnection::::new(None, false) + let _ = ThreadSafeConnection::::new("wild_zed_lost_failure", false) .with_initialize_query("PRAGMA FOREIGN_KEYS=true") .deref(); } diff --git a/crates/sqlez/src/util.rs b/crates/sqlez/src/util.rs new file mode 100644 index 0000000000000000000000000000000000000000..b5366cffc43c2c1dcf2ec870ef612cb3ee772a5b --- /dev/null +++ b/crates/sqlez/src/util.rs @@ -0,0 +1,28 @@ +use std::ops::Deref; +use 
std::sync::mpsc::Sender; + +use parking_lot::Mutex; +use thread_local::ThreadLocal; + +pub struct UnboundedSyncSender { + clonable_sender: Mutex>, + local_senders: ThreadLocal>, +} + +impl UnboundedSyncSender { + pub fn new(sender: Sender) -> Self { + Self { + clonable_sender: Mutex::new(sender), + local_senders: ThreadLocal::new(), + } + } +} + +impl Deref for UnboundedSyncSender { + type Target = Sender; + + fn deref(&self) -> &Self::Target { + self.local_senders + .get_or(|| self.clonable_sender.lock().clone()) + } +} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 07bca0c66fa2f2fbb1e7ec6ca6c11e3e44084e70..1e9b846f389c95e376c1941243c3834b4b54ef50 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,4 +1,4 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use db::{connection, indoc, sql_method, sqlez::domain::Domain}; @@ -17,7 +17,7 @@ impl Domain for Terminal { &[indoc! {" CREATE TABLE terminals ( workspace_id INTEGER, - item_id INTEGER, + item_id INTEGER UNIQUE, working_directory BLOB, PRIMARY KEY(workspace_id, item_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) @@ -29,21 +29,35 @@ impl Domain for Terminal { impl TerminalDb { sql_method! { - save_working_directory(item_id: ItemId, - workspace_id: WorkspaceId, - working_directory: &Path) -> Result<()>: - indoc!{" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3) + async update_workspace_id( + new_id: WorkspaceId, + old_id: WorkspaceId, + item_id: ItemId + ) -> Result<()>: + indoc! {" + UPDATE terminals + SET workspace_id = ? + WHERE workspace_id = ? AND item_id = ? "} } + sql_method! { + async save_working_directory( + item_id: ItemId, + workspace_id: WorkspaceId, + working_directory: PathBuf) -> Result<()>: + indoc!{" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?1, ?2, ?3) + "} + } + sql_method! 
{ get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: - indoc!{" - SELECT working_directory - FROM terminals - WHERE item_id = ? AND workspace_id = ? - "} + indoc!{" + SELECT working_directory + FROM terminals + WHERE item_id = ? AND workspace_id = ? + "} } } diff --git a/crates/terminal/src/terminal.rs b/crates/terminal/src/terminal.rs index b5192b68765a7debb6303e8da9d6e3fc858309f7..0cbb6d36b11bffd07c04f5fae65504b0dac29136 100644 --- a/crates/terminal/src/terminal.rs +++ b/crates/terminal/src/terminal.rs @@ -57,7 +57,8 @@ use gpui::{ geometry::vector::{vec2f, Vector2F}, keymap::Keystroke, scene::{MouseDown, MouseDrag, MouseScrollWheel, MouseUp}, - ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, MutableAppContext, Task, + AppContext, ClipboardItem, Entity, ModelContext, MouseButton, MouseMovedEvent, + MutableAppContext, Task, }; use crate::mappings::{ @@ -585,7 +586,8 @@ impl Terminal { cx.background() .spawn(async move { TERMINAL_CONNECTION - .save_working_directory(item_id, workspace_id, cwd.as_path()) + .save_working_directory(item_id, workspace_id, cwd) + .await .log_err(); }) .detach(); @@ -1192,6 +1194,21 @@ impl Terminal { } } + pub fn set_workspace_id(&mut self, id: WorkspaceId, cx: &AppContext) { + let old_workspace_id = self.workspace_id; + let item_id = self.item_id; + cx.background() + .spawn(async move { + TERMINAL_CONNECTION + .update_workspace_id(id, old_workspace_id, item_id) + .await + .log_err() + }) + .detach(); + + self.workspace_id = id; + } + pub fn find_matches( &mut self, query: project::search::SearchQuery, diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index fdda38864230a67dc16a5eeb62858d90a2006b8b..a6c28d4baf944af937075a36b6aa1d32e9d38ef7 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -400,6 +400,14 @@ impl Item for TerminalContainer { ) }))) } + + fn 
added_to_workspace(&mut self, workspace: &mut Workspace, cx: &mut ViewContext) { + if let Some(connected) = self.connected() { + let id = workspace.database_id(); + let terminal_handle = connected.read(cx).terminal().clone(); + terminal_handle.update(cx, |terminal, cx| terminal.set_workspace_id(id, cx)) + } + } } impl SearchableItem for TerminalContainer { diff --git a/crates/terminal/src/tests/terminal_test_context.rs b/crates/terminal/src/tests/terminal_test_context.rs index 352ce4a0d2707e2e9a89f04339e2fa8fcdd55690..67ebb558052f40a7f0d273034c3f7802fe8586ba 100644 --- a/crates/terminal/src/tests/terminal_test_context.rs +++ b/crates/terminal/src/tests/terminal_test_context.rs @@ -31,6 +31,7 @@ impl<'a> TerminalTestContext<'a> { let (_, workspace) = self.cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/vim/src/test/vim_test_context.rs b/crates/vim/src/test/vim_test_context.rs index 68c08f2f7afa55654aa161176b7e8fba7a4f217f..e0d972896f26509195b75e0a7b95847358256eec 100644 --- a/crates/vim/src/test/vim_test_context.rs +++ b/crates/vim/src/test/vim_test_context.rs @@ -44,6 +44,7 @@ impl<'a> VimTestContext<'a> { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index fb28571172832ae411849112d704058e30ed02c0..0879166bbe733faf5b9ee0e86695cf3bfe391e39 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -575,7 +575,7 @@ mod tests { cx.update(|cx| init(cx)); let project = Project::test(fs, [], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, default_item_factory, cx) + Workspace::new(Default::default(), 0, project, default_item_factory, cx) }); workspace.update(cx, |workspace, cx| { diff --git a/crates/workspace/src/item.rs b/crates/workspace/src/item.rs 
index b990ba20a2413f46e80689f1e493486e3645713b..e44e7ca09d0de00470ed8a211928c5f556391c2d 100644 --- a/crates/workspace/src/item.rs +++ b/crates/workspace/src/item.rs @@ -119,6 +119,8 @@ pub trait Item: View { None } + fn added_to_workspace(&mut self, _workspace: &mut Workspace, _cx: &mut ViewContext) {} + fn serialized_item_kind() -> Option<&'static str>; fn deserialize( @@ -267,7 +269,10 @@ impl ItemHandle for ViewHandle { cx: &mut ViewContext, ) { let history = pane.read(cx).nav_history_for_item(self); - self.update(cx, |this, cx| this.set_nav_history(history, cx)); + self.update(cx, |this, cx| { + this.set_nav_history(history, cx); + this.added_to_workspace(workspace, cx); + }); if let Some(followed_item) = self.to_followable_item_handle(cx) { if let Some(message) = followed_item.to_state_proto(cx) { @@ -426,6 +431,10 @@ impl ItemHandle for ViewHandle { }) .detach(); } + + cx.defer(|workspace, cx| { + workspace.serialize_workspace(cx); + }); } fn deactivated(&self, cx: &mut MutableAppContext) { diff --git a/crates/workspace/src/pane.rs b/crates/workspace/src/pane.rs index 5db8d6feec03bf2c2fe72e084b073618f7bf3d2a..428865ec3b1024b550a0698352d2221b257de554 100644 --- a/crates/workspace/src/pane.rs +++ b/crates/workspace/src/pane.rs @@ -1647,7 +1647,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1737,7 +1737,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1815,7 
+1815,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); @@ -1926,7 +1926,7 @@ mod tests { let project = Project::test(fs, None, cx).await; let (_, workspace) = - cx.add_window(|cx| Workspace::new(None, project, |_, _| unimplemented!(), cx)); + cx.add_window(|cx| Workspace::new(None, 0, project, |_, _| unimplemented!(), cx)); let pane = workspace.read_with(cx, |workspace, _| workspace.active_pane().clone()); add_labled_item(&workspace, &pane, "A", cx); diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 477e5a496021c10992629d8ee26c77ba50ba94eb..66b36221193d13fd5f05b18b94286affc4c87871 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, sql_method}; +use db::{connection, sql_method, sqlez::connection::Connection}; use gpui::Axis; use indoc::indoc; @@ -138,60 +138,71 @@ impl WorkspaceDb { /// Saves a workspace using the worktree roots. Will garbage collect any workspaces /// that used this workspace previously - pub fn save_workspace(&self, workspace: &SerializedWorkspace) { - self.with_savepoint("update_worktrees", || { - // Clear out panes and pane_groups - self.exec_bound(indoc! {" - UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; - DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) - .context("Clearing old panes")?; - - self.exec_bound(indoc! {" - DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? 
- "})?((&workspace.location, workspace.id)) - .context("clearing out old locations")?; - - // Upsert - self.exec_bound(indoc! { - "INSERT INTO - workspaces(workspace_id, workspace_location, dock_visible, dock_anchor, timestamp) - VALUES - (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) - ON CONFLICT DO UPDATE SET - workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP" - })?((workspace.id, &workspace.location, workspace.dock_position)) - .context("Updating workspace")?; - - // Save center pane group and dock pane - self.save_pane_group(workspace.id, &workspace.center_group, None) - .context("save pane group in save workspace")?; - - let dock_id = self - .save_pane(workspace.id, &workspace.dock_pane, None, true) - .context("save pane in save workspace")?; - - // Complete workspace initialization - self.exec_bound(indoc! {" - UPDATE workspaces - SET dock_pane = ? - WHERE workspace_id = ?"})?((dock_id, workspace.id)) - .context("Finishing initialization with dock pane")?; - - Ok(()) - }) - .with_context(|| { - format!( - "Update workspace with roots {:?} and id {:?} failed.", - workspace.location.paths(), - workspace.id - ) + pub async fn save_workspace(&self, workspace: SerializedWorkspace) { + self.write(move |conn| { + conn.with_savepoint("update_worktrees", || { + // Clear out panes and pane_groups + conn.exec_bound(indoc! {" + UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; + DELETE FROM pane_groups WHERE workspace_id = ?1; + DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + .context("Clearing old panes")?; + + conn.exec_bound(indoc! {" + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?( + ( + &workspace.location, + workspace.id.clone(), + ) + ) + .context("clearing out old locations")?; + + // Upsert + conn.exec_bound(indoc! 
{" + INSERT INTO workspaces( + workspace_id, + workspace_location, + dock_visible, + dock_anchor, + timestamp + ) + VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + ON CONFLICT DO + UPDATE SET + workspace_location = ?2, + dock_visible = ?3, + dock_anchor = ?4, + timestamp = CURRENT_TIMESTAMP + "})?(( + workspace.id, + &workspace.location, + workspace.dock_position, + )) + .context("Updating workspace")?; + + // Save center pane group and dock pane + Self::save_pane_group(conn, workspace.id, &workspace.center_group, None) + .context("save pane group in save workspace")?; + + let dock_id = Self::save_pane(conn, workspace.id, &workspace.dock_pane, None, true) + .context("save pane in save workspace")?; + + // Complete workspace initialization + conn.exec_bound(indoc! {" + UPDATE workspaces + SET dock_pane = ? + WHERE workspace_id = ?"})?((dock_id, workspace.id)) + .context("Finishing initialization with dock pane")?; + + Ok(()) + }) + .log_err(); }) - .log_err(); + .await; } - sql_method!{ - next_id() -> Result: + sql_method! { + async next_id() -> Result: "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" } @@ -276,7 +287,7 @@ impl WorkspaceDb { } fn save_pane_group( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane_group: &SerializedPaneGroup, parent: Option<(GroupId, usize)>, @@ -285,7 +296,7 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = self.select_row_bound::<_, i64>(indoc! {" + let group_id = conn.select_row_bound::<_, i64>(indoc! {" INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) RETURNING group_id"})?(( @@ -297,13 +308,13 @@ impl WorkspaceDb { .ok_or_else(|| anyhow!("Couldn't retrieve group_id from inserted pane_group"))?; for (position, group) in children.iter().enumerate() { - self.save_pane_group(workspace_id, group, Some((group_id, position)))? 
+ Self::save_pane_group(conn, workspace_id, group, Some((group_id, position)))? } Ok(()) } SerializedPaneGroup::Pane(pane) => { - self.save_pane(workspace_id, &pane, parent, false)?; + Self::save_pane(conn, workspace_id, &pane, parent, false)?; Ok(()) } } @@ -325,13 +336,13 @@ impl WorkspaceDb { } fn save_pane( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane: &SerializedPane, parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = self.select_row_bound::<_, i64>(indoc! {" + let pane_id = conn.select_row_bound::<_, i64>(indoc! {" INSERT INTO panes(workspace_id, active) VALUES (?, ?) RETURNING pane_id"})?((workspace_id, pane.active))? @@ -339,13 +350,12 @@ impl WorkspaceDb { if !dock { let (parent_id, order) = unzip_option(parent); - self.exec_bound(indoc! {" + conn.exec_bound(indoc! {" INSERT INTO center_panes(pane_id, parent_group_id, position) VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; } - self.save_items(workspace_id, pane_id, &pane.children) - .context("Saving items")?; + Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?; Ok(pane_id) } @@ -358,12 +368,12 @@ impl WorkspaceDb { } fn save_items( - &self, + conn: &Connection, workspace_id: WorkspaceId, pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut insert = self.exec_bound( + let mut insert = conn.exec_bound( "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", ).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { @@ -384,32 +394,44 @@ mod tests { use super::*; - #[test] - fn test_next_id_stability() { + #[gpui::test] + async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + let db = WorkspaceDb(open_memory_db("test_next_id_stability")); + + db.write(|conn| { + conn.migrate( + "test_table", + &[indoc! 
{" + CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"}], + ) + .unwrap(); + }) + .await; - db.migrate( - "test_table", - &["CREATE TABLE test_table( - text TEXT, - workspace_id INTEGER, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ) STRICT;"], - ) - .unwrap(); - - let id = db.next_id().unwrap(); + let id = db.next_id().await.unwrap(); // Assert the empty row got inserted - assert_eq!(Some(id), db.select_row_bound:: - ("SELECT workspace_id FROM workspaces WHERE workspace_id = ?").unwrap() - (id).unwrap()); - - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-1", id)) - .unwrap(); + assert_eq!( + Some(id), + db.select_row_bound::( + "SELECT workspace_id FROM workspaces WHERE workspace_id = ?" + ) + .unwrap()(id) + .unwrap() + ); + + db.write(move |conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", id)) + .unwrap() + }) + .await; let test_text_1 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") @@ -418,22 +440,27 @@ mod tests { .unwrap(); assert_eq!(test_text_1, "test-text-1"); } - - #[test] - fn test_workspace_id_stability() { - env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_workspace_id_stability"))); + #[gpui::test] + async fn test_workspace_id_stability() { + env_logger::try_init().ok(); - db.migrate( - "test_table", - &["CREATE TABLE test_table( - text TEXT, - workspace_id INTEGER, - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ) STRICT;"], - ) + let db = WorkspaceDb(open_memory_db("test_workspace_id_stability")); + + db.write(|conn| { + conn.migrate( + "test_table", + &[indoc! 
{" + CREATE TABLE test_table( + text TEXT, + workspace_id INTEGER, + FOREIGN KEY(workspace_id) + REFERENCES workspaces(workspace_id) + ON DELETE CASCADE + ) STRICT;"}], + ) + }) + .await .unwrap(); let mut workspace_1 = SerializedWorkspace { @@ -452,27 +479,33 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_1); + db.save_workspace(workspace_1.clone()).await; - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-1", 1)) - .unwrap(); + db.write(|conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-1", 1)) + .unwrap(); + }) + .await; - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2.clone()).await; - db.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") - .unwrap()(("test-text-2", 2)) - .unwrap(); + db.write(|conn| { + conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + .unwrap()(("test-text-2", 2)) + .unwrap(); + }) + .await; workspace_1.location = (["/tmp", "/tmp3"]).into(); - db.save_workspace(&workspace_1); - db.save_workspace(&workspace_1); + db.save_workspace(workspace_1.clone()).await; + db.save_workspace(workspace_1).await; workspace_2.dock_pane.children.push(SerializedItem { kind: Arc::from("Test"), item_id: 10, }); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2).await; let test_text_2 = db .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") @@ -489,11 +522,11 @@ mod tests { assert_eq!(test_text_1, "test-text-1"); } - #[test] - fn test_full_workspace_serialization() { + #[gpui::test] + async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_full_workspace_serialization"))); + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -550,24 +583,24 @@ 
mod tests { dock_pane, }; - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let round_trip_workspace = db.workspace_for_roots(&["/tmp2", "/tmp"]); assert_eq!(workspace, round_trip_workspace.unwrap()); // Test guaranteed duplicate IDs - db.save_workspace(&workspace); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; + db.save_workspace(workspace.clone()).await; let round_trip_workspace = db.workspace_for_roots(&["/tmp", "/tmp2"]); assert_eq!(workspace, round_trip_workspace.unwrap()); } - #[test] - fn test_workspace_assignment() { + #[gpui::test] + async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_basic_functionality"))); + let db = WorkspaceDb(open_memory_db("test_basic_functionality")); let workspace_1 = SerializedWorkspace { id: 1, @@ -585,8 +618,8 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_1); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_1.clone()).await; + db.save_workspace(workspace_2.clone()).await; // Test that paths are treated as a set assert_eq!( @@ -605,7 +638,7 @@ mod tests { // Test 'mutate' case of updating a pre-existing id workspace_2.location = (["/tmp", "/tmp2"]).into(); - db.save_workspace(&workspace_2); + db.save_workspace(workspace_2.clone()).await; assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_2 @@ -620,7 +653,7 @@ mod tests { dock_pane: Default::default(), }; - db.save_workspace(&workspace_3); + db.save_workspace(workspace_3.clone()).await; assert_eq!( db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(), workspace_3 @@ -628,7 +661,7 @@ mod tests { // Make sure that updating paths differently also works workspace_3.location = (["/tmp3", "/tmp4", "/tmp2"]).into(); - db.save_workspace(&workspace_3); + db.save_workspace(workspace_3.clone()).await; assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None); assert_eq!( 
db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"]) @@ -655,11 +688,11 @@ mod tests { } } - #[test] - fn test_basic_dock_pane() { + #[gpui::test] + async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("basic_dock_pane"))); + let db = WorkspaceDb(open_memory_db("basic_dock_pane")); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -673,18 +706,18 @@ mod tests { let workspace = default_workspace(&["/tmp"], dock_pane, &Default::default()); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.dock_pane, new_workspace.dock_pane); } - #[test] - fn test_simple_split() { + #[gpui::test] + async fn test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("simple_split"))); + let db = WorkspaceDb(open_memory_db("simple_split")); // ----------------- // | 1,2 | 5,6 | @@ -725,18 +758,18 @@ mod tests { let workspace = default_workspace(&["/tmp"], Default::default(), ¢er_pane); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap(); assert_eq!(workspace.center_group, new_workspace.center_group); } - #[test] - fn test_cleanup_panes() { + #[gpui::test] + async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db(Some("test_cleanup_panes"))); + let db = WorkspaceDb(open_memory_db("test_cleanup_panes")); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, @@ -774,7 +807,7 @@ mod tests { let mut workspace = default_workspace(id, Default::default(), ¢er_pane); - db.save_workspace(&workspace); + db.save_workspace(workspace.clone()).await; workspace.center_group = SerializedPaneGroup::Group { axis: gpui::Axis::Vertical, @@ -796,7 +829,7 @@ mod tests { ], }; - db.save_workspace(&workspace); + 
db.save_workspace(workspace.clone()).await; let new_workspace = db.workspace_for_roots(id).unwrap(); diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index 2f0bc050d21ee025d8cb06f57c08c3cc31ef2f87..dc6d8ba8ee5b70bdd62adb3013207c0d45aacea7 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -58,7 +58,7 @@ impl Column for WorkspaceLocation { } } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct SerializedWorkspace { pub id: WorkspaceId, pub location: WorkspaceLocation, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 155c95e4e823d95caa8099bd94b480364e3144bd..9755c2c6caa3811ed4dc9db8c97a961d450a2cda 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -539,6 +539,7 @@ pub struct Workspace { impl Workspace { pub fn new( serialized_workspace: Option, + workspace_id: WorkspaceId, project: ModelHandle, dock_default_factory: DefaultItemFactory, cx: &mut ViewContext, @@ -558,7 +559,6 @@ impl Workspace { } project::Event::WorktreeRemoved(_) | project::Event::WorktreeAdded => { this.update_window_title(cx); - // TODO: Cache workspace_id on workspace and read from it here this.serialize_workspace(cx); } project::Event::DisconnectedFromHost => { @@ -633,12 +633,6 @@ impl Workspace { active_call = Some((call, subscriptions)); } - let database_id = serialized_workspace - .as_ref() - .map(|ws| ws.id) - .or_else(|| DB.next_id().log_err()) - .unwrap_or(0); - let mut this = Workspace { modal: None, weak_self: weak_handle.clone(), @@ -666,7 +660,7 @@ impl Workspace { last_leaders_by_pane: Default::default(), window_edited: false, active_call, - database_id, + database_id: workspace_id, _observe_current_user, }; this.project_remote_id_changed(project.read(cx).remote_id(), cx); @@ -699,10 +693,17 @@ impl Workspace { ); cx.spawn(|mut cx| async move { + let serialized_workspace 
= persistence::DB.workspace_for_roots(&abs_paths.as_slice()); + + let paths_to_open = serialized_workspace + .as_ref() + .map(|workspace| workspace.location.paths()) + .unwrap_or(Arc::new(abs_paths)); + // Get project paths for all of the abs_paths let mut worktree_roots: HashSet> = Default::default(); let mut project_paths = Vec::new(); - for path in abs_paths.iter() { + for path in paths_to_open.iter() { if let Some((worktree, project_entry)) = cx .update(|cx| { Workspace::project_path_for_path(project_handle.clone(), &path, true, cx) @@ -717,14 +718,17 @@ impl Workspace { } } - // Use the resolved worktree roots to get the serialized_db from the database - let serialized_workspace = persistence::DB - .workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]); + let workspace_id = if let Some(serialized_workspace) = serialized_workspace.as_ref() { + serialized_workspace.id + } else { + DB.next_id().await.unwrap_or(0) + }; // Use the serialized workspace to construct the new window let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| { let mut workspace = Workspace::new( serialized_workspace, + workspace_id, project_handle, app_state.default_item_factory, cx, @@ -735,8 +739,8 @@ impl Workspace { // Call open path for each of the project paths // (this will bring them to the front if they were in the serialized workspace) - debug_assert!(abs_paths.len() == project_paths.len()); - let tasks = abs_paths + debug_assert!(paths_to_open.len() == project_paths.len()); + let tasks = paths_to_open .iter() .cloned() .zip(project_paths.into_iter()) @@ -1327,7 +1331,6 @@ impl Workspace { pub fn add_item(&mut self, item: Box, cx: &mut ViewContext) { let active_pane = self.active_pane().clone(); Pane::add_item(self, &active_pane, item, true, true, None, cx); - self.serialize_workspace(cx); } pub fn open_path( @@ -1532,10 +1535,11 @@ impl Workspace { entry.remove(); } } - self.serialize_workspace(cx); } _ => {} } + + self.serialize_workspace(cx); 
} else if self.dock.visible_pane().is_none() { error!("pane {} not found", pane_id); } @@ -2342,9 +2346,7 @@ impl Workspace { }; cx.background() - .spawn(async move { - persistence::DB.save_workspace(&serialized_workspace); - }) + .spawn(persistence::DB.save_workspace(serialized_workspace)) .detach(); } @@ -2642,9 +2644,13 @@ pub fn open_paths( fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { - let (workspace, _) = task.await; + let (workspace, opened_paths) = task.await; - workspace.update(&mut cx, |_, cx| cx.dispatch_action(NewFile)) + workspace.update(&mut cx, |_, cx| { + if opened_paths.is_empty() { + cx.dispatch_action(NewFile); + } + }) }) } @@ -2677,6 +2683,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2748,6 +2755,7 @@ mod tests { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2851,6 +2859,7 @@ mod tests { let (window_id, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), default_item_factory, cx, @@ -2895,8 +2904,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, None, cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item1 = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -2991,8 +3001,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, 
workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); // Create several workspace items with single project entries, and two // workspace items with multiple project entries. @@ -3093,8 +3104,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (window_id, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (window_id, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); @@ -3211,8 +3223,9 @@ mod tests { let fs = FakeFs::new(cx.background()); let project = Project::test(fs, [], cx).await; - let (_, workspace) = cx - .add_window(|cx| Workspace::new(Default::default(), project, default_item_factory, cx)); + let (_, workspace) = cx.add_window(|cx| { + Workspace::new(Default::default(), 0, project, default_item_factory, cx) + }); let item = cx.add_view(&workspace, |_| { let mut item = TestItem::new(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 0abcbeac485d5fcaef5c452f6847b31835b0f6ff..3693a5e580398dc380a03a79c7b70d27f4c34b64 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -809,7 +809,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); @@ -930,7 +930,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/dir1".as_ref()], cx).await; let (_, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| 
unimplemented!(), cx) }); // Open a file within an existing worktree. @@ -1091,7 +1091,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); // Open a file within an existing worktree. @@ -1135,7 +1135,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let worktree = cx.read(|cx| workspace.read(cx).worktrees(cx).next().unwrap()); @@ -1226,7 +1226,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), [], cx).await; project.update(cx, |project, _| project.languages().add(rust_lang())); let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); // Create a new untitled buffer @@ -1281,7 +1281,7 @@ mod tests { let project = Project::test(app_state.fs.clone(), ["/root".as_ref()], cx).await; let (window_id, workspace) = cx.add_window(|cx| { - Workspace::new(Default::default(), project, |_, _| unimplemented!(), cx) + Workspace::new(Default::default(), 0, project, |_, _| unimplemented!(), cx) }); let entries = cx.read(|cx| workspace.file_project_paths(cx)); @@ -1359,6 +1359,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), |_, _| unimplemented!(), cx, @@ -1630,6 +1631,7 @@ mod tests { let (_, workspace) = cx.add_window(|cx| { Workspace::new( Default::default(), + 0, project.clone(), 
|_, _| unimplemented!(), cx, From 359b8aaf47573473cce3334d7af48d01eac972df Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Thu, 24 Nov 2022 00:02:07 -0800 Subject: [PATCH 68/86] rename sql_method to query and adjust the syntax to more closely match function definitions --- crates/auto_update/src/auto_update.rs | 11 +- crates/client/src/telemetry.rs | 4 +- crates/db/src/db.rs | 152 +++++++++++++++++--------- crates/db/src/kvp.rs | 76 +++++++------ crates/editor/src/persistence.rs | 24 ++-- crates/terminal/src/persistence.rs | 42 +++---- crates/workspace/src/persistence.rs | 7 +- 7 files changed, 192 insertions(+), 124 deletions(-) diff --git a/crates/auto_update/src/auto_update.rs b/crates/auto_update/src/auto_update.rs index 2a8d2fcf05bb650f31581baa384d5f6e9860941a..d3fcc36c2fd43eac6c7c32d5c8cf38fc509b99eb 100644 --- a/crates/auto_update/src/auto_update.rs +++ b/crates/auto_update/src/auto_update.rs @@ -297,9 +297,16 @@ impl AutoUpdater { ) -> Task> { cx.background().spawn(async move { if should_show { - KEY_VALUE_STORE.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?; + KEY_VALUE_STORE + .write_kvp( + SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string(), + "".to_string(), + ) + .await?; } else { - KEY_VALUE_STORE.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?; + KEY_VALUE_STORE + .delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY.to_string()) + .await?; } Ok(()) }) diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index 0ce1a07f1b2fd231315c18a999670391405c1fad..a81f33c604100afc6adceb13aa28a234cc1c1d0e 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -157,7 +157,9 @@ impl Telemetry { device_id } else { let device_id = Uuid::new_v4().to_string(); - KEY_VALUE_STORE.write_kvp("device_id", &device_id)?; + KEY_VALUE_STORE + .write_kvp("device_id".to_string(), device_id.clone()) + .await?; device_id }; diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 
b42b264b562941490420335d603f020448e79322..1da51ef867bd9d995840383c7b10556b90742eec 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -75,45 +75,59 @@ macro_rules! connection { } #[macro_export] -macro_rules! sql_method { - ($id:ident() -> Result<()>: $sql:expr) => { - pub fn $id(&self) -> $crate::anyhow::Result<()> { +macro_rules! query { + ($vis:vis fn $id:ident() -> Result<()> { $sql:expr }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec($sql)?().context(::std::format!( "Error in {}, exec failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident() -> Result<()>: $sql:expr) => { - pub async fn $id(&self) -> $crate::anyhow::Result<()> { + ($vis:vis async fn $id:ident() -> Result<()> { $sql:expr }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.write(|connection| { connection.exec($sql)?().context(::std::format!( "Error in {}, exec failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()>: $sql:expr) => { - pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $sql:expr }) => { + $vis async fn $id(&self, $arg: $arg_type) -> 
$crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + connection.exec_bound::<$arg_type>($sql)?($arg) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + $sql, + )) + }).await + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.write(move |connection| { @@ -121,24 +135,24 @@ macro_rules! sql_method { .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::anyhow::Result> { + ($vis:vis fn $id:ident() -> Result> { $sql:expr }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select::<$return_type>($sql)?(()) .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident() -> Result>: $sql:expr) => { + ($vis:vis async fn $id:ident() -> Result> { $sql:expr }) => { pub async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; @@ -147,25 +161,25 @@ macro_rules! 
sql_method { .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { @@ -173,25 +187,25 @@ macro_rules! 
sql_method { .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident() -> Result>: $sql:expr) => { - pub fn $id(&self) -> $crate::anyhow::Result> { + ($vis:vis fn $id:ident() -> Result> { $sql:expr }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row::<$return_type>($sql)?() .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident() -> Result>: $sql:expr) => { - pub async fn $id(&self) -> $crate::anyhow::Result> { + ($vis:vis async fn $id:ident() -> Result> { $sql:expr }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { @@ -199,57 +213,70 @@ macro_rules! sql_method { .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $sql:expr }) => { + $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.select_row_bound::<$arg_type, $return_type>($sql)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + $sql, + )) + + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", 
::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result>: $sql:expr) => { - pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { - connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident() -> Result<$return_type:ty>: $sql:expr) => { - pub fn $id(&self) -> $crate::anyhow::Result<$return_type> { + ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?() + self.select_row::<$return_type>(indoc! { $sql })?() .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, ))? .context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident() -> Result<$return_type:ty>: $sql:expr) => { - pub async fn $id(&self) -> $crate::anyhow::Result<$return_type> { + ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.write(|connection| { @@ -257,35 +284,52 @@ macro_rules! 
sql_method { .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, ))? .context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } }; - ($id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { - pub fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $sql:expr }) => { + pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.select_row_bound::<$arg_type, $return_type>($sql)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + $sql, + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + $sql, + )) + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, ))? 
.context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) } }; - (async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty>: $sql:expr) => { - pub async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.write(|connection| { @@ -293,12 +337,12 @@ macro_rules! sql_method { .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, ))? .context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - ::std::stringify!($sql), + $sql, )) }).await } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index dd82c17615de4a65dcfe7936937ac523835b5030..1763ed964cfc03ceb61a65804f08cdfad155f2d4 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,17 +1,27 @@ -use anyhow::Result; use indoc::indoc; use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; -use std::ops::Deref; -lazy_static::lazy_static! { - pub static ref KEY_VALUE_STORE: KeyValueStore = - KeyValueStore(crate::open_file_db()); -} +use crate::{open_file_db, open_memory_db, query}; -#[derive(Clone)] pub struct KeyValueStore(ThreadSafeConnection); +impl std::ops::Deref for KeyValueStore { + type Target = ThreadSafeConnection; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +lazy_static::lazy_static! 
{ + pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) { + open_memory_db(stringify!($id)) + } else { + open_file_db() + }); +} + impl Domain for KeyValueStore { fn name() -> &'static str { "kvp" @@ -27,56 +37,52 @@ impl Domain for KeyValueStore { } } -impl Deref for KeyValueStore { - type Target = ThreadSafeConnection; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - impl KeyValueStore { - pub fn read_kvp(&self, key: &str) -> Result> { - self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key) + query! { + pub fn read_kvp(key: &str) -> Result> { + "SELECT value FROM kv_store WHERE key = (?)" + } } - pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> { - self.exec_bound("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?(( - key, value, - ))?; - - Ok(()) + query! { + pub async fn write_kvp(key: String, value: String) -> Result<()> { + "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))" + } } - pub fn delete_kvp(&self, key: &str) -> Result<()> { - self.exec_bound("DELETE FROM kv_store WHERE key = (?)")?(key) + query! 
{ + pub async fn delete_kvp(key: String) -> Result<()> { + "DELETE FROM kv_store WHERE key = (?)" + } } } #[cfg(test)] mod tests { - use anyhow::Result; - use crate::kvp::KeyValueStore; - #[test] - fn test_kvp() -> Result<()> { + #[gpui::test] + async fn test_kvp() { let db = KeyValueStore(crate::open_memory_db("test_kvp")); assert_eq!(db.read_kvp("key-1").unwrap(), None); - db.write_kvp("key-1", "one").unwrap(); + db.write_kvp("key-1".to_string(), "one".to_string()) + .await + .unwrap(); assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string())); - db.write_kvp("key-1", "one-2").unwrap(); + db.write_kvp("key-1".to_string(), "one-2".to_string()) + .await + .unwrap(); assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string())); - db.write_kvp("key-2", "two").unwrap(); + db.write_kvp("key-2".to_string(), "two".to_string()) + .await + .unwrap(); assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string())); - db.delete_kvp("key-1").unwrap(); + db.delete_kvp("key-1".to_string()).await.unwrap(); assert_eq!(db.read_kvp("key-1").unwrap(), None); - - Ok(()) } } diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index b2f76294aa355236fc32bf8a5ae7ee3526ae88bd..548be88c8036208d5eaa4792ad70ba4c1de8a1a4 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use db::{connection, sql_method}; +use db::{connection, query}; use indoc::indoc; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -31,17 +31,21 @@ impl Domain for Editor { } impl EditorDb { - sql_method! { - get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result: - indoc! {" - SELECT path FROM editors - WHERE item_id = ? AND workspace_id = ?"} + query! { + pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result { + indoc!{" + SELECT path FROM editors + WHERE item_id = ? AND workspace_id = ? + "} + } } - sql_method! 
{ - async save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()>: - indoc! {" + query! { + pub async fn save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()> { + indoc!{" INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?)"} + VALUES (?, ?, ?) + "} + } } } diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 1e9b846f389c95e376c1941243c3834b4b54ef50..5fb7758bec2f4a960ebed74d1bb31dc7d14763ae 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use db::{connection, indoc, sql_method, sqlez::domain::Domain}; +use db::{connection, indoc, query, sqlez::domain::Domain}; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -28,36 +28,40 @@ impl Domain for Terminal { } impl TerminalDb { - sql_method! { - async update_workspace_id( + query! { + pub async fn update_workspace_id( new_id: WorkspaceId, old_id: WorkspaceId, item_id: ItemId - ) -> Result<()>: - indoc! {" - UPDATE terminals - SET workspace_id = ? - WHERE workspace_id = ? AND item_id = ? - "} + ) -> Result<()> { + indoc!{" + UPDATE terminals + SET workspace_id = ? + WHERE workspace_id = ? AND item_id = ? + "} + } } - sql_method! { - async save_working_directory( + query! { + pub async fn save_working_directory( item_id: ItemId, workspace_id: WorkspaceId, - working_directory: PathBuf) -> Result<()>: - indoc!{" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3) - "} + working_directory: PathBuf + ) -> Result<()> { + indoc!{" + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?1, ?2, ?3) + "} + } } - sql_method! { - get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result>: + query! 
{ + pub fn get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { indoc!{" SELECT working_directory - FROM terminals + FROM terminals WHERE item_id = ? AND workspace_id = ? "} + } } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 66b36221193d13fd5f05b18b94286affc4c87871..17b0aad13f77611bab5956d7b9a7f716d4f852cf 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, sql_method, sqlez::connection::Connection}; +use db::{connection, query, sqlez::connection::Connection}; use gpui::Axis; use indoc::indoc; @@ -201,9 +201,10 @@ impl WorkspaceDb { .await; } - sql_method! { - async next_id() -> Result: + query! { + pub async fn next_id() -> Result { "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" + } } /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots From 260164a711a0e36dbf3962a7d2e94c5542fa6df6 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Mon, 28 Nov 2022 12:26:13 -0800 Subject: [PATCH 69/86] Added basic syntax checker to sqlez --- crates/sqlez/src/statement.rs | 73 +++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index 86035f5d0acd322a0edb31c534fa14b0fbc9257d..f3970827f852515a62ce9589050da90bc52129b7 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -489,3 +489,76 @@ mod test { ); } } + +mod syntax_check { + use std::{ + ffi::{CStr, CString}, + ptr, + }; + + use libsqlite3_sys::{ + sqlite3_close, sqlite3_errmsg, sqlite3_error_offset, sqlite3_extended_errcode, + sqlite3_extended_result_codes, sqlite3_finalize, sqlite3_open_v2, sqlite3_prepare_v2, + sqlite3_stmt, SQLITE_OPEN_CREATE, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_READWRITE, + }; + + fn 
syntax_errors(sql: &str) -> Option<(String, i32)> { + let mut sqlite3 = 0 as *mut _; + let mut raw_statement = 0 as *mut sqlite3_stmt; + + let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; + unsafe { + let memory_str = CString::new(":memory:").unwrap(); + sqlite3_open_v2(memory_str.as_ptr(), &mut sqlite3, flags, 0 as *const _); + + let sql = CString::new(sql).unwrap(); + + // Turn on extended error codes + sqlite3_extended_result_codes(sqlite3, 1); + + sqlite3_prepare_v2( + sqlite3, + sql.as_c_str().as_ptr(), + -1, + &mut raw_statement, + &mut ptr::null(), + ); + + let res = sqlite3_extended_errcode(sqlite3); + let offset = sqlite3_error_offset(sqlite3); + + if res == 1 && offset != -1 { + let message = sqlite3_errmsg(sqlite3); + let err_msg = + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(); + + sqlite3_finalize(*&mut raw_statement); + sqlite3_close(sqlite3); + + return Some((err_msg, offset)); + } else { + sqlite3_finalize(*&mut raw_statement); + sqlite3_close(sqlite3); + + None + } + } + } + + #[cfg(test)] + mod test { + use super::syntax_errors; + + #[test] + fn test_check_syntax() { + assert!(syntax_errors("SELECT FROM").is_some()); + + assert!(syntax_errors("SELECT col FROM table_t;").is_none()); + + assert!(syntax_errors("CREATE TABLE t(col TEXT,) STRICT;").is_some()); + + assert!(syntax_errors("CREATE TABLE t(col TEXT) STRICT;").is_none()); + } + } +} From dd9d20be25094aeff536f6149eea03566618e6eb Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Mon, 28 Nov 2022 17:42:18 -0800 Subject: [PATCH 70/86] Added sql! 
proc macro which checks syntax errors on sql code and displays them with reasonable underline locations Co-Authored-By: Mikayla Maki --- Cargo.lock | 12 ++ Cargo.toml | 2 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 163 +++++++++++++-------- crates/db/src/kvp.rs | 21 ++- crates/editor/src/persistence.rs | 21 +-- crates/gpui_macros/Cargo.toml | 1 + crates/sqlez/src/connection.rs | 63 +++++++- crates/sqlez/src/domain.rs | 6 + crates/sqlez/src/statement.rs | 73 --------- crates/sqlez/src/thread_safe_connection.rs | 2 +- crates/sqlez_macros/Cargo.toml | 16 ++ crates/sqlez_macros/src/sqlez_macros.rs | 78 ++++++++++ crates/terminal/src/persistence.rs | 28 ++-- crates/workspace/src/persistence.rs | 66 ++++----- 15 files changed, 342 insertions(+), 211 deletions(-) create mode 100644 crates/sqlez_macros/Cargo.toml create mode 100644 crates/sqlez_macros/src/sqlez_macros.rs diff --git a/Cargo.lock b/Cargo.lock index 150149c529b8396fe40a90e13491e28032440914..9e3181575f4182ca844bcd5daa11981979921240 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1570,6 +1570,7 @@ dependencies = [ "parking_lot 0.11.2", "serde", "sqlez", + "sqlez_macros", "tempdir", "util", ] @@ -5598,6 +5599,17 @@ dependencies = [ "thread_local", ] +[[package]] +name = "sqlez_macros" +version = "0.1.0" +dependencies = [ + "lazy_static", + "proc-macro2", + "quote", + "sqlez", + "syn", +] + [[package]] name = "sqlformat" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index a97f272e47ebd329b674544d0a27d639c94339de..c4f54d6a90de8d65be9ed32c5979a40270707aac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,6 +45,8 @@ members = [ "crates/search", "crates/settings", "crates/snippet", + "crates/sqlez", + "crates/sqlez_macros", "crates/sum_tree", "crates/terminal", "crates/text", diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 70721c310c75a0d81b7b20086d80b4b521b61005..2d88d4ece5a08c18817368daa9f3ac580ba52c3a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -14,6 +14,7 @@ 
test-support = [] collections = { path = "../collections" } gpui = { path = "../gpui" } sqlez = { path = "../sqlez" } +sqlez_macros = { path = "../sqlez_macros" } util = { path = "../util" } anyhow = "1.0.57" indoc = "1.0.4" diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 1da51ef867bd9d995840383c7b10556b90742eec..adf6f5c035f7d904b83a1787e7ea6e07e8c24401 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -5,6 +5,7 @@ pub use anyhow; pub use indoc::indoc; pub use lazy_static; pub use sqlez; +pub use sqlez_macros; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; @@ -76,273 +77,315 @@ macro_rules! connection { #[macro_export] macro_rules! query { - ($vis:vis fn $id:ident() -> Result<()> { $sql:expr }) => { + ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { $vis fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - self.exec($sql)?().context(::std::format!( + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec(sql_stmt)?().context(::std::format!( "Error in {}, exec failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt, )) } }; - ($vis:vis async fn $id:ident() -> Result<()> { $sql:expr }) => { + ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { $vis async fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; + self.write(|connection| { - connection.exec($sql)?().context(::std::format!( + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec(sql_stmt)?().context(::std::format!( "Error in {}, exec failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - 
self.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $sql:expr }) => { + ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => { $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; + self.write(move |connection| { - connection.exec_bound::<$arg_type>($sql)?($arg) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<$arg_type>(sql_stmt)?($arg) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $sql:expr }) => { + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; self.write(move |connection| { - connection.exec_bound::<($($arg_type),+)>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident() -> Result> { $sql:expr }) => { + ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { $vis fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; - self.select::<$return_type>($sql)?(()) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select::<$return_type>(sql_stmt)?(()) .context(::std::format!( "Error in {}, select_row 
failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident() -> Result> { $sql:expr }) => { + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { pub async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { - connection.select::<$return_type>($sql)?(()) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select::<$return_type>(sql_stmt)?(()) .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; - self.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { - connection.select_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, exec_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident() -> Result> { $sql:expr }) => { + 
($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { $vis fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; - self.select_row::<$return_type>($sql)?() + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row::<$return_type>(sql_stmt)?() .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident() -> Result> { $sql:expr }) => { + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { $vis async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; self.write(|connection| { - connection.select_row::<$return_type>($sql)?() + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() .context(::std::format!( "Error in {}, select_row failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $sql:expr }) => { + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $($sql:tt)+ }) => { $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { use $crate::anyhow::Context; - self.select_row_bound::<$arg_type, $return_type>($sql)?($arg) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; - self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + 
self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $sql:expr }) => { + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { use $crate::anyhow::Context; + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => { + ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + self.select_row::<$return_type>(indoc! { $sql })?() .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt ))? 
.context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $sql:expr }) => { + ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; self.write(|connection| { - connection.select_row::<$return_type>($sql)?() + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt ))? .context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $sql:expr }) => { + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => { pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; - self.select_row_bound::<$arg_type, $return_type>($sql)?($arg) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt ))? 
.context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => { + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; - self.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt ))? .context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) } }; - ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $sql:expr }) => { + ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { use $crate::anyhow::Context; + self.write(|connection| { - connection.select_row_bound::<($($arg_type),+), $return_type>($sql)?(($($arg),+)) + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) .context(::std::format!( "Error in {}, select_row_bound failed to execute or parse for: {}", ::std::stringify!($id), - $sql, + sql_stmt ))? 
.context(::std::format!( "Error in {}, select_row_bound expected single row result but found none for: {}", ::std::stringify!($id), - $sql, + sql_stmt )) }).await } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 1763ed964cfc03ceb61a65804f08cdfad155f2d4..b3f2a716cbc3698996d8b7eaed6689c3736f8c51 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,6 +1,5 @@ -use indoc::indoc; - use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; +use sqlez_macros::sql; use crate::{open_file_db, open_memory_db, query}; @@ -28,31 +27,31 @@ impl Domain for KeyValueStore { } fn migrations() -> &'static [&'static str] { - &[indoc! {" - CREATE TABLE kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT; - "}] + &[sql!( + CREATE TABLE kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + )] } } impl KeyValueStore { query! { pub fn read_kvp(key: &str) -> Result> { - "SELECT value FROM kv_store WHERE key = (?)" + SELECT value FROM kv_store WHERE key = (?) } } query! { pub async fn write_kvp(key: String, value: String) -> Result<()> { - "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))" + INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?)) } } query! { pub async fn delete_kvp(key: String) -> Result<()> { - "DELETE FROM kv_store WHERE key = (?)" + DELETE FROM kv_store WHERE key = (?) 
} } } diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 548be88c8036208d5eaa4792ad70ba4c1de8a1a4..22b0f158c12bf9ff7408a141479553ea485724fc 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,12 +1,11 @@ use std::path::PathBuf; +use crate::Editor; +use db::sqlez_macros::sql; use db::{connection, query}; -use indoc::indoc; use sqlez::domain::Domain; use workspace::{ItemId, Workspace, WorkspaceId}; -use crate::Editor; - connection!(DB: EditorDb<(Workspace, Editor)>); impl Domain for Editor { @@ -15,7 +14,7 @@ impl Domain for Editor { } fn migrations() -> &'static [&'static str] { - &[indoc! {" + &[sql! ( CREATE TABLE editors( item_id INTEGER NOT NULL, workspace_id INTEGER NOT NULL, @@ -26,26 +25,22 @@ impl Domain for Editor { ON UPDATE CASCADE ) STRICT; - "}] + )] } } impl EditorDb { query! { pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result { - indoc!{" - SELECT path FROM editors - WHERE item_id = ? AND workspace_id = ? - "} + SELECT path FROM editors + WHERE item_id = ? AND workspace_id = ? } } query! { pub async fn save_path(item_id: ItemId, workspace_id: WorkspaceId, path: PathBuf) -> Result<()> { - indoc!{" - INSERT OR REPLACE INTO editors(item_id, workspace_id, path) - VALUES (?, ?, ?) - "} + INSERT OR REPLACE INTO editors(item_id, workspace_id, path) + VALUES (?, ?, ?) 
} } } diff --git a/crates/gpui_macros/Cargo.toml b/crates/gpui_macros/Cargo.toml index d8fc0521cc430e268e1902ee9f871c6799ed6728..e35e0b1d2b56819adb819ae37e30841a9b7a99dd 100644 --- a/crates/gpui_macros/Cargo.toml +++ b/crates/gpui_macros/Cargo.toml @@ -12,3 +12,4 @@ doctest = false syn = "1.0" quote = "1.0" proc-macro2 = "1.0" + diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 4beddb4fed37c60f220c6084fd3d7aa0af4708b6..6d859be23fee39d535d7267fe7ef91b6a0d6efff 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -2,6 +2,7 @@ use std::{ ffi::{CStr, CString}, marker::PhantomData, path::Path, + ptr, }; use anyhow::{anyhow, Result}; @@ -85,6 +86,45 @@ impl Connection { self.backup_main(&destination) } + pub fn sql_has_syntax_error(&self, sql: &str) -> Option<(String, usize)> { + let sql = CString::new(sql).unwrap(); + let mut remaining_sql = sql.as_c_str(); + let sql_start = remaining_sql.as_ptr(); + + unsafe { + while { + let remaining_sql_str = remaining_sql.to_str().unwrap().trim(); + remaining_sql_str != ";" && !remaining_sql_str.is_empty() + } { + let mut raw_statement = 0 as *mut sqlite3_stmt; + let mut remaining_sql_ptr = ptr::null(); + sqlite3_prepare_v2( + self.sqlite3, + remaining_sql.as_ptr(), + -1, + &mut raw_statement, + &mut remaining_sql_ptr, + ); + + let res = sqlite3_errcode(self.sqlite3); + let offset = sqlite3_error_offset(self.sqlite3); + + if res == 1 && offset >= 0 { + let message = sqlite3_errmsg(self.sqlite3); + let err_msg = + String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) + .into_owned(); + let sub_statement_correction = + remaining_sql.as_ptr() as usize - sql_start as usize; + + return Some((err_msg, offset as usize + sub_statement_correction)); + } + remaining_sql = CStr::from_ptr(remaining_sql_ptr); + } + } + None + } + pub(crate) fn last_error(&self) -> Result<()> { unsafe { let code = sqlite3_errcode(self.sqlite3); @@ -259,10 +299,31 @@ mod test { 
assert_eq!( connection - .select_row::("SELECt * FROM test") + .select_row::("SELECT * FROM test") .unwrap()() .unwrap(), Some(2) ); } + + #[test] + fn test_sql_has_syntax_errors() { + let connection = Connection::open_memory(Some("test_sql_has_syntax_errors")); + let first_stmt = + "CREATE TABLE kv_store(key TEXT PRIMARY KEY, value TEXT NOT NULL) STRICT ;"; + let second_stmt = "SELECT FROM"; + + let second_offset = connection.sql_has_syntax_error(second_stmt).unwrap().1; + + let res = connection + .sql_has_syntax_error(&format!("{}\n{}", first_stmt, second_stmt)) + .map(|(_, offset)| offset); + + assert_eq!( + res, + Some(first_stmt.len() + second_offset + 1) // TODO: This value is wrong! + ); + + panic!("{:?}", res) + } } diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index b7cfbaef887ce482f781dcaa8fe1676ae2cd9794..3a477b2bc9ad121bd8b7a024b867ecc0a3ee45eb 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -9,6 +9,12 @@ pub trait Migrator { fn migrate(connection: &Connection) -> anyhow::Result<()>; } +impl Migrator for () { + fn migrate(_connection: &Connection) -> anyhow::Result<()> { + Ok(()) // Do nothing + } +} + impl Migrator for D { fn migrate(connection: &Connection) -> anyhow::Result<()> { connection.migrate(Self::name(), Self::migrations()) diff --git a/crates/sqlez/src/statement.rs b/crates/sqlez/src/statement.rs index f3970827f852515a62ce9589050da90bc52129b7..86035f5d0acd322a0edb31c534fa14b0fbc9257d 100644 --- a/crates/sqlez/src/statement.rs +++ b/crates/sqlez/src/statement.rs @@ -489,76 +489,3 @@ mod test { ); } } - -mod syntax_check { - use std::{ - ffi::{CStr, CString}, - ptr, - }; - - use libsqlite3_sys::{ - sqlite3_close, sqlite3_errmsg, sqlite3_error_offset, sqlite3_extended_errcode, - sqlite3_extended_result_codes, sqlite3_finalize, sqlite3_open_v2, sqlite3_prepare_v2, - sqlite3_stmt, SQLITE_OPEN_CREATE, SQLITE_OPEN_NOMUTEX, SQLITE_OPEN_READWRITE, - }; - - fn syntax_errors(sql: &str) -> 
Option<(String, i32)> { - let mut sqlite3 = 0 as *mut _; - let mut raw_statement = 0 as *mut sqlite3_stmt; - - let flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_NOMUTEX | SQLITE_OPEN_READWRITE; - unsafe { - let memory_str = CString::new(":memory:").unwrap(); - sqlite3_open_v2(memory_str.as_ptr(), &mut sqlite3, flags, 0 as *const _); - - let sql = CString::new(sql).unwrap(); - - // Turn on extended error codes - sqlite3_extended_result_codes(sqlite3, 1); - - sqlite3_prepare_v2( - sqlite3, - sql.as_c_str().as_ptr(), - -1, - &mut raw_statement, - &mut ptr::null(), - ); - - let res = sqlite3_extended_errcode(sqlite3); - let offset = sqlite3_error_offset(sqlite3); - - if res == 1 && offset != -1 { - let message = sqlite3_errmsg(sqlite3); - let err_msg = - String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) - .into_owned(); - - sqlite3_finalize(*&mut raw_statement); - sqlite3_close(sqlite3); - - return Some((err_msg, offset)); - } else { - sqlite3_finalize(*&mut raw_statement); - sqlite3_close(sqlite3); - - None - } - } - } - - #[cfg(test)] - mod test { - use super::syntax_errors; - - #[test] - fn test_check_syntax() { - assert!(syntax_errors("SELECT FROM").is_some()); - - assert!(syntax_errors("SELECT col FROM table_t;").is_none()); - - assert!(syntax_errors("CREATE TABLE t(col TEXT,) STRICT;").is_some()); - - assert!(syntax_errors("CREATE TABLE t(col TEXT) STRICT;").is_none()); - } - } -} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 5402c6b5e196c5c3b055be0155bec6e65e763c68..88199ff0c8262dcc480826075eaea436d55c7b8b 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -17,7 +17,7 @@ lazy_static! 
{ Default::default(); } -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, initialize_query: Option<&'static str>, diff --git a/crates/sqlez_macros/Cargo.toml b/crates/sqlez_macros/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..413a3d30f50f3994aeb41e917c969283873f829a --- /dev/null +++ b/crates/sqlez_macros/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "sqlez_macros" +version = "0.1.0" +edition = "2021" + +[lib] +path = "src/sqlez_macros.rs" +proc-macro = true +doctest = false + +[dependencies] +syn = "1.0" +quote = "1.0" +proc-macro2 = "1.0" +lazy_static = "1.4" +sqlez = { path = "../sqlez" } \ No newline at end of file diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs new file mode 100644 index 0000000000000000000000000000000000000000..25249b89b612a59d470e8f990608db926187bff9 --- /dev/null +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -0,0 +1,78 @@ +use proc_macro::{Delimiter, Span, TokenStream, TokenTree}; +use sqlez::thread_safe_connection::ThreadSafeConnection; +use syn::Error; + +lazy_static::lazy_static! 
{ + static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false); +} + +#[proc_macro] +pub fn sql(tokens: TokenStream) -> TokenStream { + let mut sql_tokens = vec![]; + flatten_stream(tokens.clone(), &mut sql_tokens); + + // Lookup of spans by offset at the end of the token + let mut spans: Vec<(usize, Span)> = Vec::new(); + let mut sql = String::new(); + for (token_text, span) in sql_tokens { + sql.push_str(&token_text); + spans.push((sql.len(), span)); + } + + let error = SQLITE.sql_has_syntax_error(sql.trim()); + + if let Some((error, error_offset)) = error { + let error_span = spans + .into_iter() + .skip_while(|(offset, _)| offset <= &error_offset) + .map(|(_, span)| span) + .next() + .unwrap_or(Span::call_site()); + + let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql); + TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) + } else { + format!("r#\"{}\"#", &sql).parse().unwrap() + } +} + +/// This method exists to normalize the representation of groups +/// to always include spaces between tokens. This is why we don't use the usual .to_string(). +/// This allows our token search in token_at_offset to resolve +/// ambiguity of '(tokens)' vs. 
'( token )', due to sqlite requiring byte offsets +fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) { + for token_tree in tokens.into_iter() { + match token_tree { + TokenTree::Group(group) => { + // push open delimiter + result.push((open_delimiter(group.delimiter()), group.span())); + // recurse + flatten_stream(group.stream(), result); + // push close delimiter + result.push((close_delimiter(group.delimiter()), group.span())); + } + TokenTree::Ident(ident) => { + result.push((format!("{} ", ident.to_string()), ident.span())); + } + leaf_tree => result.push((leaf_tree.to_string(), leaf_tree.span())), + } + } +} + +fn open_delimiter(delimiter: Delimiter) -> String { + match delimiter { + Delimiter::Parenthesis => "(".to_string(), + Delimiter::Brace => "[".to_string(), + Delimiter::Bracket => "{".to_string(), + Delimiter::None => "".to_string(), + } +} + +fn close_delimiter(delimiter: Delimiter) -> String { + match delimiter { + Delimiter::Parenthesis => ")".to_string(), + Delimiter::Brace => "]".to_string(), + Delimiter::Bracket => "}".to_string(), + Delimiter::None => "".to_string(), + } +} diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 5fb7758bec2f4a960ebed74d1bb31dc7d14763ae..f9cfb6fc010d1cc0b229ae3bfb8f727a83ec73c0 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use db::{connection, indoc, query, sqlez::domain::Domain}; +use db::{connection, query, sqlez::domain::Domain, sqlez_macros::sql}; use workspace::{ItemId, Workspace, WorkspaceId}; @@ -14,7 +14,7 @@ impl Domain for Terminal { } fn migrations() -> &'static [&'static str] { - &[indoc! 
{" + &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, item_id INTEGER UNIQUE, @@ -23,7 +23,7 @@ impl Domain for Terminal { FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; - "}] + )] } } @@ -34,11 +34,9 @@ impl TerminalDb { old_id: WorkspaceId, item_id: ItemId ) -> Result<()> { - indoc!{" - UPDATE terminals - SET workspace_id = ? - WHERE workspace_id = ? AND item_id = ? - "} + UPDATE terminals + SET workspace_id = ? + WHERE workspace_id = ? AND item_id = ? } } @@ -48,20 +46,16 @@ impl TerminalDb { workspace_id: WorkspaceId, working_directory: PathBuf ) -> Result<()> { - indoc!{" - INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) - VALUES (?1, ?2, ?3) - "} + INSERT OR REPLACE INTO terminals(item_id, workspace_id, working_directory) + VALUES (?, ?, ?) } } query! { pub fn get_working_directory(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { - indoc!{" - SELECT working_directory - FROM terminals - WHERE item_id = ? AND workspace_id = ? - "} + SELECT working_directory + FROM terminals + WHERE item_id = ? AND workspace_id = ? } } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 17b0aad13f77611bab5956d7b9a7f716d4f852cf..0d35c19d5dd96f9c096835f92771e96bdd99d3bc 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,7 +5,7 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, query, sqlez::connection::Connection}; +use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; use indoc::indoc; @@ -30,49 +30,49 @@ impl Domain for Workspace { } fn migrations() -> &'static [&'static str] { - &[indoc! 
{" + &[sql!( CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, workspace_location BLOB UNIQUE, - dock_visible INTEGER, -- Boolean - dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded' - dock_pane INTEGER, -- NULL indicates that we don't have a dock pane yet + dock_visible INTEGER, // Boolean + dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' + dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; - + CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - parent_group_id INTEGER, -- NULL indicates that this is a root node - position INTEGER, -- NULL indicates that this is a root node - axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal' - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE + parent_group_id INTEGER, // NULL indicates that this is a root node + position INTEGER, // NULL indicates that this is a root node + axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal' + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, - active INTEGER NOT NULL, -- Boolean - FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE + active INTEGER NOT NULL, // Boolean + FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) + ON DELETE CASCADE ON UPDATE CASCADE ) STRICT; - + CREATE TABLE center_panes( pane_id INTEGER PRIMARY KEY, - parent_group_id INTEGER, -- NULL means that this is a root pane - position INTEGER, -- NULL means that this is a root pane - FOREIGN KEY(pane_id) REFERENCES panes(pane_id) + parent_group_id INTEGER, // NULL means that this is a root pane + position INTEGER, // NULL means that 
this is a root pane + FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE items( - item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique + item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique workspace_id INTEGER NOT NULL, pane_id INTEGER NOT NULL, kind TEXT NOT NULL, @@ -84,7 +84,7 @@ impl Domain for Workspace { ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; - "}] + )] } } @@ -158,26 +158,22 @@ impl WorkspaceDb { .context("clearing out old locations")?; // Upsert - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( INSERT INTO workspaces( - workspace_id, - workspace_location, - dock_visible, - dock_anchor, + workspace_id, + workspace_location, + dock_visible, + dock_anchor, timestamp - ) + ) VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) ON CONFLICT DO - UPDATE SET + UPDATE SET workspace_location = ?2, dock_visible = ?3, dock_anchor = ?4, timestamp = CURRENT_TIMESTAMP - "})?(( - workspace.id, - &workspace.location, - workspace.dock_position, - )) + ))?((workspace.id, &workspace.location, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -203,7 +199,7 @@ impl WorkspaceDb { query! 
{ pub async fn next_id() -> Result { - "INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id" + INSERT INTO workspaces DEFAULT VALUES RETURNING workspace_id } } From 9cd6894dc56ab414912009d487a81ce6e89e7fbc Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Tue, 29 Nov 2022 12:16:49 -0800 Subject: [PATCH 71/86] Added multi-threading problem test --- crates/sqlez/src/connection.rs | 11 ++---- crates/sqlez/src/thread_safe_connection.rs | 43 +++++++++++++++++++++- test.rs | 0 3 files changed, 45 insertions(+), 9 deletions(-) delete mode 100644 test.rs diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 6d859be23fee39d535d7267fe7ef91b6a0d6efff..0456266594a898d34fbd761d045963a94ff8635e 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -108,9 +108,11 @@ impl Connection { let res = sqlite3_errcode(self.sqlite3); let offset = sqlite3_error_offset(self.sqlite3); + let message = sqlite3_errmsg(self.sqlite3); + + sqlite3_finalize(raw_statement); if res == 1 && offset >= 0 { - let message = sqlite3_errmsg(self.sqlite3); let err_msg = String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes()) .into_owned(); @@ -319,11 +321,6 @@ mod test { .sql_has_syntax_error(&format!("{}\n{}", first_stmt, second_stmt)) .map(|(_, offset)| offset); - assert_eq!( - res, - Some(first_stmt.len() + second_offset + 1) // TODO: This value is wrong! 
- ); - - panic!("{:?}", res) + assert_eq!(res, Some(first_stmt.len() + second_offset + 1)); } } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 88199ff0c8262dcc480826075eaea436d55c7b8b..6c35d1e945630307c941ee400aa02c47ca626ca6 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -126,7 +126,7 @@ impl ThreadSafeConnection { "Initialize query failed to execute: {}", initialize_query ))() - .unwrap(); + .unwrap() } M::migrate(&connection).expect("Migrations failed"); @@ -163,12 +163,51 @@ impl Deref for ThreadSafeConnection { #[cfg(test)] mod test { - use std::ops::Deref; + use std::{fs, ops::Deref, thread}; use crate::domain::Domain; use super::ThreadSafeConnection; + #[test] + fn many_initialize_and_migrate_queries_at_once() { + let mut handles = vec![]; + + enum TestDomain {} + impl Domain for TestDomain { + fn name() -> &'static str { + "test" + } + fn migrations() -> &'static [&'static str] { + &["CREATE TABLE test(col1 TEXT, col2 TEXT) STRICT;"] + } + } + + for _ in 0..100 { + handles.push(thread::spawn(|| { + let _ = ThreadSafeConnection::::new("annoying-test.db", false) + .with_initialize_query( + " + PRAGMA journal_mode=WAL; + PRAGMA synchronous=NORMAL; + PRAGMA busy_timeout=1; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + ", + ) + .deref(); + })); + } + + for handle in handles { + let _ = handle.join(); + } + + // fs::remove_file("annoying-test.db").unwrap(); + // fs::remove_file("annoying-test.db-shm").unwrap(); + // fs::remove_file("annoying-test.db-wal").unwrap(); + } + #[test] #[should_panic] fn wild_zed_lost_failure() { diff --git a/test.rs b/test.rs deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 From a29ccb4ff83cd764182caebd092c81cdbc729499 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 10:54:01 -0800 Subject: [PATCH 72/86] make thread 
safe connection more thread safe Co-Authored-By: Mikayla Maki --- Cargo.lock | 2 + crates/db/Cargo.toml | 1 + crates/db/src/db.rs | 32 ++- crates/db/src/kvp.rs | 6 +- crates/sqlez/Cargo.toml | 1 + crates/sqlez/src/migrations.rs | 6 +- crates/sqlez/src/thread_safe_connection.rs | 230 +++++++++++++-------- crates/sqlez/src/util.rs | 4 + crates/sqlez_macros/src/sqlez_macros.rs | 2 +- crates/workspace/src/persistence.rs | 14 +- crates/workspace/src/workspace.rs | 17 +- crates/zed/src/zed.rs | 5 +- 12 files changed, 196 insertions(+), 124 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e3181575f4182ca844bcd5daa11981979921240..fd1bb4ea0aee7468b180a1311cd9610b6fd0d25f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1569,6 +1569,7 @@ dependencies = [ "log", "parking_lot 0.11.2", "serde", + "smol", "sqlez", "sqlez_macros", "tempdir", @@ -5596,6 +5597,7 @@ dependencies = [ "lazy_static", "libsqlite3-sys", "parking_lot 0.11.2", + "smol", "thread_local", ] diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 2d88d4ece5a08c18817368daa9f3ac580ba52c3a..69c90e02f911cec9252b582c009efc67270e0f42 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -23,6 +23,7 @@ lazy_static = "1.4.0" log = { version = "0.4.16", features = ["kv_unstable_serde"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } +smol = "1.2" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index adf6f5c035f7d904b83a1787e7ea6e07e8c24401..701aa5765675a271cd58228129515ec4a048b673 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,31 +4,36 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; +pub use smol; pub use sqlez; pub use sqlez_macros; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use std::sync::atomic::{AtomicBool, 
Ordering}; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; -const INITIALIZE_QUERY: &'static str = indoc! {" - PRAGMA journal_mode=WAL; +const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; PRAGMA busy_timeout=1; PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; -"}; +); + +const DB_INITIALIZE_QUERY: &'static str = sql!( + PRAGMA journal_mode=WAL; +); lazy_static::lazy_static! { static ref DB_WIPED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. -pub fn open_file_db() -> ThreadSafeConnection { +pub async fn open_file_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); @@ -43,12 +48,19 @@ pub fn open_file_db() -> ThreadSafeConnection { create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); let db_path = current_db_dir.join(Path::new("db.sqlite")); - ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true) - .with_initialize_query(INITIALIZE_QUERY) + ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await } -pub fn open_memory_db(db_name: &str) -> ThreadSafeConnection { - ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY) +pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection { + ThreadSafeConnection::::builder(db_name, false) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await } /// Implements a basic DB wrapper for a given domain @@ -67,9 +79,9 @@ macro_rules! connection { ::db::lazy_static::lazy_static! 
{ pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - ::db::open_memory_db(stringify!($id)) + $crate::smol::block_on(::db::open_memory_db(stringify!($id))) } else { - ::db::open_file_db() + $crate::smol::block_on(::db::open_file_db()) }); } }; diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index b3f2a716cbc3698996d8b7eaed6689c3736f8c51..da796fa469bba761769bd7225f90c2c5843b9665 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -15,9 +15,9 @@ impl std::ops::Deref for KeyValueStore { lazy_static::lazy_static! { pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) { - open_memory_db(stringify!($id)) + smol::block_on(open_memory_db("KEY_VALUE_STORE")) } else { - open_file_db() + smol::block_on(open_file_db()) }); } @@ -62,7 +62,7 @@ mod tests { #[gpui::test] async fn test_kvp() { - let db = KeyValueStore(crate::open_memory_db("test_kvp")); + let db = KeyValueStore(crate::open_memory_db("test_kvp").await); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index cab1af7d6c5e38277a248f3870e29e1fbcb2258f..8a7f1ba415649d26c776e50a61408eda7d9540c9 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" anyhow = { version = "1.0.38", features = ["backtrace"] } indoc = "1.0.7" libsqlite3-sys = { version = "0.25.2", features = ["bundled"] } +smol = "1.2" thread_local = "1.1.4" lazy_static = "1.4" parking_lot = "0.11.1" diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 6c0aafaf20d4ce0dcab87c8ec193b6a8ecf4d3c9..41c505f85b11ea79ffcfbbcffeed29224c9fab63 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -15,9 +15,9 @@ impl Connection { // Setup the migrations table unconditionally self.exec(indoc! 
{" CREATE TABLE IF NOT EXISTS migrations ( - domain TEXT, - step INTEGER, - migration TEXT + domain TEXT, + step INTEGER, + migration TEXT )"})?()?; let completed_migrations = diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 6c35d1e945630307c941ee400aa02c47ca626ca6..880a58d194fe78e110f92dc6e79c2a10d4369f76 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,4 +1,4 @@ -use futures::{Future, FutureExt}; +use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; use parking_lot::RwLock; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; @@ -10,17 +10,25 @@ use crate::{ util::UnboundedSyncSender, }; -type QueuedWrite = Box; +const MIGRATION_RETRIES: usize = 10; +type QueuedWrite = Box; lazy_static! { + /// List of queues of tasks by database uri. This lets us serialize writes to the database + /// and have a single worker thread per db file. This means many thread safe connections + /// (possibly with different migrations) could all be communicating with the same background + /// thread. static ref QUEUES: RwLock, UnboundedSyncSender>> = Default::default(); } +/// Thread safe connection to a given database file or in memory db. This can be cloned, shared, static, +/// whatever. It derefs to a synchronous connection by thread that is read only. 
A write capable connection +/// may be accessed by passing a callback to the `write` function which will queue the callback pub struct ThreadSafeConnection { uri: Arc, persistent: bool, - initialize_query: Option<&'static str>, + connection_initialize_query: Option<&'static str>, connections: Arc>, _migrator: PhantomData, } @@ -28,87 +36,125 @@ pub struct ThreadSafeConnection { unsafe impl Send for ThreadSafeConnection {} unsafe impl Sync for ThreadSafeConnection {} -impl ThreadSafeConnection { - pub fn new(uri: &str, persistent: bool) -> Self { - Self { - uri: Arc::from(uri), - persistent, - initialize_query: None, - connections: Default::default(), - _migrator: PhantomData, - } - } +pub struct ThreadSafeConnectionBuilder { + db_initialize_query: Option<&'static str>, + connection: ThreadSafeConnection, +} +impl ThreadSafeConnectionBuilder { /// Sets the query to run every time a connection is opened. This must - /// be infallible (EG only use pragma statements) - pub fn with_initialize_query(mut self, initialize_query: &'static str) -> Self { - self.initialize_query = Some(initialize_query); + /// be infallible (EG only use pragma statements) and not cause writes. + /// to the db or it will panic. + pub fn with_connection_initialize_query(mut self, initialize_query: &'static str) -> Self { + self.connection.connection_initialize_query = Some(initialize_query); + self + } + + /// Queues an initialization query for the database file. 
This must be infallible + /// but may cause changes to the database file such as with `PRAGMA journal_mode` + pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { + self.db_initialize_query = Some(initialize_query); self } + pub async fn build(self) -> ThreadSafeConnection { + let db_initialize_query = self.db_initialize_query; + + self.connection + .write(move |connection| { + if let Some(db_initialize_query) = db_initialize_query { + connection.exec(db_initialize_query).expect(&format!( + "Db initialize query failed to execute: {}", + db_initialize_query + ))() + .unwrap(); + } + + let mut failure_result = None; + for _ in 0..MIGRATION_RETRIES { + failure_result = Some(M::migrate(connection)); + if failure_result.as_ref().unwrap().is_ok() { + break; + } + } + + failure_result.unwrap().expect("Migration failed"); + }) + .await; + + self.connection + } +} + +impl ThreadSafeConnection { + pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { + ThreadSafeConnectionBuilder:: { + db_initialize_query: None, + connection: Self { + uri: Arc::from(uri), + persistent, + connection_initialize_query: None, + connections: Default::default(), + _migrator: PhantomData, + }, + } + } + /// Opens a new db connection with the initialized file path. This is internal and only /// called from the deref function. - /// If opening fails, the connection falls back to a shared memory connection fn open_file(&self) -> Connection { - // This unwrap is secured by a panic in the constructor. Be careful if you remove it! Connection::open_file(self.uri.as_ref()) } - /// Opens a shared memory connection using the file path as the identifier. This unwraps - /// as we expect it always to succeed + /// Opens a shared memory connection using the file path as the identifier. This is internal + /// and only called from the deref function. 
fn open_shared_memory(&self) -> Connection { Connection::open_memory(Some(self.uri.as_ref())) } - // Open a new connection for the given domain, leaving this - // connection intact. - pub fn for_domain(&self) -> ThreadSafeConnection { - ThreadSafeConnection { - uri: self.uri.clone(), - persistent: self.persistent, - initialize_query: self.initialize_query, - connections: Default::default(), - _migrator: PhantomData, - } - } - - pub fn write( - &self, - callback: impl 'static + Send + FnOnce(&Connection) -> T, - ) -> impl Future { + fn queue_write_task(&self, callback: QueuedWrite) { // Startup write thread for this database if one hasn't already // been started and insert a channel to queue work for it if !QUEUES.read().contains_key(&self.uri) { - use std::sync::mpsc::channel; - - let (sender, reciever) = channel::(); - let mut write_connection = self.create_connection(); - // Enable writes for this connection - write_connection.write = true; - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - let mut queues = QUEUES.write(); - queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender)); + if !queues.contains_key(&self.uri) { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + let mut write_connection = self.create_connection(); + // Enable writes for this connection + write_connection.write = true; + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender)); + } } // Grab the queue for this database let queues = QUEUES.read(); let write_channel = queues.get(&self.uri).unwrap(); + write_channel + .send(callback) + .expect("Could not send write action to backgorund thread"); + } + + pub fn write( + &self, + callback: impl 'static + Send + FnOnce(&Connection) -> T, + ) -> impl Future { // Create a one shot channel for the result of the queued write // so we can await on 
the result - let (sender, reciever) = futures::channel::oneshot::channel(); - write_channel - .send(Box::new(move |connection| { - sender.send(callback(connection)).ok(); - })) - .expect("Could not send write action to background thread"); + let (sender, reciever) = oneshot::channel(); + self.queue_write_task(Box::new(move |connection| { + sender.send(callback(connection)).ok(); + })); - reciever.map(|response| response.expect("Background thread unexpectedly closed")) + reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) } pub(crate) fn create_connection(&self) -> Connection { @@ -118,10 +164,11 @@ impl ThreadSafeConnection { self.open_shared_memory() }; - // Enable writes for the migrations and initialization queries - connection.write = true; + // Disallow writes on the connection. The only writes allowed for thread safe connections + // are from the background thread that can serialize them. + connection.write = false; - if let Some(initialize_query) = self.initialize_query { + if let Some(initialize_query) = self.connection_initialize_query { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query @@ -129,20 +176,34 @@ impl ThreadSafeConnection { .unwrap() } - M::migrate(&connection).expect("Migrations failed"); - - // Disable db writes for normal thread local connection - connection.write = false; connection } } +impl ThreadSafeConnection<()> { + /// Special constructor for ThreadSafeConnection which disallows db initialization and migrations. + /// This allows construction to be infallible and not write to the db. 
+ pub fn new( + uri: &str, + persistent: bool, + connection_initialize_query: Option<&'static str>, + ) -> Self { + Self { + uri: Arc::from(uri), + persistent, + connection_initialize_query, + connections: Default::default(), + _migrator: PhantomData, + } + } +} + impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), persistent: self.persistent, - initialize_query: self.initialize_query.clone(), + connection_initialize_query: self.connection_initialize_query.clone(), connections: self.connections.clone(), _migrator: PhantomData, } @@ -163,11 +224,11 @@ impl Deref for ThreadSafeConnection { #[cfg(test)] mod test { - use std::{fs, ops::Deref, thread}; + use indoc::indoc; + use lazy_static::__Deref; + use std::thread; - use crate::domain::Domain; - - use super::ThreadSafeConnection; + use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; #[test] fn many_initialize_and_migrate_queries_at_once() { @@ -185,27 +246,22 @@ mod test { for _ in 0..100 { handles.push(thread::spawn(|| { - let _ = ThreadSafeConnection::::new("annoying-test.db", false) - .with_initialize_query( - " - PRAGMA journal_mode=WAL; - PRAGMA synchronous=NORMAL; - PRAGMA busy_timeout=1; - PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; - ", - ) - .deref(); + let builder = + ThreadSafeConnection::::builder("annoying-test.db", false) + .with_db_initialization_query("PRAGMA journal_mode=WAL") + .with_connection_initialize_query(indoc! 
{" + PRAGMA synchronous=NORMAL; + PRAGMA busy_timeout=1; + PRAGMA foreign_keys=TRUE; + PRAGMA case_sensitive_like=TRUE; + "}); + let _ = smol::block_on(builder.build()).deref(); })); } for handle in handles { let _ = handle.join(); } - - // fs::remove_file("annoying-test.db").unwrap(); - // fs::remove_file("annoying-test.db-shm").unwrap(); - // fs::remove_file("annoying-test.db-wal").unwrap(); } #[test] @@ -241,8 +297,10 @@ mod test { } } - let _ = ThreadSafeConnection::::new("wild_zed_lost_failure", false) - .with_initialize_query("PRAGMA FOREIGN_KEYS=true") - .deref(); + let builder = + ThreadSafeConnection::::builder("wild_zed_lost_failure", false) + .with_connection_initialize_query("PRAGMA FOREIGN_KEYS=true"); + + smol::block_on(builder.build()); } } diff --git a/crates/sqlez/src/util.rs b/crates/sqlez/src/util.rs index b5366cffc43c2c1dcf2ec870ef612cb3ee772a5b..ce0353b15e09f3dbfb42d43d8ad795dda260e97a 100644 --- a/crates/sqlez/src/util.rs +++ b/crates/sqlez/src/util.rs @@ -4,6 +4,10 @@ use std::sync::mpsc::Sender; use parking_lot::Mutex; use thread_local::ThreadLocal; +/// Unbounded standard library sender which is stored per thread to get around +/// the lack of sync on the standard library version while still being unbounded +/// Note: this locks on the cloneable sender, but its done once per thread, so it +/// shouldn't result in too much contention pub struct UnboundedSyncSender { clonable_sender: Mutex>, local_senders: ThreadLocal>, diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index 25249b89b612a59d470e8f990608db926187bff9..532503a3e61a1eeaddfac03012e069875dd2c16d 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -3,7 +3,7 @@ use sqlez::thread_safe_connection::ThreadSafeConnection; use syn::Error; lazy_static::lazy_static! 
{ - static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false); + static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None); } #[proc_macro] diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 0d35c19d5dd96f9c096835f92771e96bdd99d3bc..c8b31cd2543837cf611ff49617b18a6eac5bfd19 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -395,7 +395,7 @@ mod tests { async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_next_id_stability")); + let db = WorkspaceDb(open_memory_db("test_next_id_stability").await); db.write(|conn| { conn.migrate( @@ -442,7 +442,7 @@ mod tests { async fn test_workspace_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_workspace_id_stability")); + let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await); db.write(|conn| { conn.migrate( @@ -523,7 +523,7 @@ mod tests { async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization")); + let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -597,7 +597,7 @@ mod tests { async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality")); + let db = WorkspaceDb(open_memory_db("test_basic_functionality").await); let workspace_1 = SerializedWorkspace { id: 1, @@ -689,7 +689,7 @@ mod tests { async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane")); + let db = WorkspaceDb(open_memory_db("basic_dock_pane").await); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -714,7 +714,7 @@ mod tests { async fn 
test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("simple_split")); + let db = WorkspaceDb(open_memory_db("simple_split").await); // ----------------- // | 1,2 | 5,6 | @@ -766,7 +766,7 @@ mod tests { async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_cleanup_panes")); + let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 9755c2c6caa3811ed4dc9db8c97a961d450a2cda..584f6392d197f94dc820c5e5c1604559ce1be42b 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -162,11 +162,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewFile, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - let task = open_new(&app_state, cx); - cx.spawn(|_| async { - task.await; - }) - .detach(); + open_new(&app_state, cx).detach(); } } }); @@ -174,11 +170,7 @@ pub fn init(app_state: Arc, cx: &mut MutableAppContext) { let app_state = Arc::downgrade(&app_state); move |_: &NewWindow, cx: &mut MutableAppContext| { if let Some(app_state) = app_state.upgrade() { - let task = open_new(&app_state, cx); - cx.spawn(|_| async { - task.await; - }) - .detach(); + open_new(&app_state, cx).detach(); } } }); @@ -2641,13 +2633,16 @@ pub fn open_paths( }) } -fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { +pub fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { + eprintln!("Open new task spawned"); let (workspace, opened_paths) = task.await; + eprintln!("workspace and path items created"); workspace.update(&mut cx, |_, cx| { if opened_paths.is_empty() { + eprintln!("new file 
redispatched"); cx.dispatch_action(NewFile); } }) diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 3693a5e580398dc380a03a79c7b70d27f4c34b64..0a25cfb66f5ac142068f1c5562791a2eecbcd44b 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -626,7 +626,7 @@ mod tests { use theme::ThemeRegistry; use workspace::{ item::{Item, ItemHandle}, - open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, + open_new, open_paths, pane, NewFile, Pane, SplitDirection, WorkspaceHandle, }; #[gpui::test] @@ -762,8 +762,7 @@ mod tests { #[gpui::test] async fn test_new_empty_workspace(cx: &mut TestAppContext) { let app_state = init(cx); - cx.dispatch_global_action(workspace::NewFile); - cx.foreground().run_until_parked(); + cx.update(|cx| open_new(&app_state, cx)).await; let window_id = *cx.window_ids().first().unwrap(); let workspace = cx.root_view::(window_id).unwrap(); From 1b225fa37c72a754b9bc9eaeb84ea5b07862b67c Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 12:34:42 -0800 Subject: [PATCH 73/86] fix test failures --- crates/db/src/db.rs | 9 +++ crates/sqlez/src/thread_safe_connection.rs | 89 +++++++++++++--------- 2 files changed, 64 insertions(+), 34 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 701aa5765675a271cd58228129515ec4a048b673..1ac1d1604bbb8fc7d94c49db26a2dc0a62fa1acf 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,6 +4,7 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; +use parking_lot::Mutex; pub use smol; pub use sqlez; pub use sqlez_macros; @@ -59,6 +60,14 @@ pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection< ThreadSafeConnection::::builder(db_name, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + // Serialize queued writes via a mutex and run them synchronously + .with_write_queue_constructor(Box::new(|connection| { + let connection = 
Mutex::new(connection); + Box::new(move |queued_write| { + let connection = connection.lock(); + queued_write(&connection) + }) + })) .build() .await } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 880a58d194fe78e110f92dc6e79c2a10d4369f76..b17c87d63f6fc3fcb2f3153bb7abed96d8f7935c 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -13,12 +13,14 @@ use crate::{ const MIGRATION_RETRIES: usize = 10; type QueuedWrite = Box; +type WriteQueueConstructor = + Box Box>; lazy_static! { /// List of queues of tasks by database uri. This lets us serialize writes to the database /// and have a single worker thread per db file. This means many thread safe connections /// (possibly with different migrations) could all be communicating with the same background /// thread. - static ref QUEUES: RwLock, UnboundedSyncSender>> = + static ref QUEUES: RwLock, Box>> = Default::default(); } @@ -38,6 +40,7 @@ unsafe impl Sync for ThreadSafeConnection {} pub struct ThreadSafeConnectionBuilder { db_initialize_query: Option<&'static str>, + write_queue_constructor: Option, connection: ThreadSafeConnection, } @@ -50,6 +53,18 @@ impl ThreadSafeConnectionBuilder { self } + /// Specifies how the thread safe connection should serialize writes. If provided + /// the connection will call the write_queue_constructor for each database file in + /// this process. The constructor is responsible for setting up a background thread or + /// async task which handles queued writes with the provided connection. + pub fn with_write_queue_constructor( + mut self, + write_queue_constructor: WriteQueueConstructor, + ) -> Self { + self.write_queue_constructor = Some(write_queue_constructor); + self + } + /// Queues an initialization query for the database file. 
This must be infallible /// but may cause changes to the database file such as with `PRAGMA journal_mode` pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { @@ -58,6 +73,38 @@ impl ThreadSafeConnectionBuilder { } pub async fn build(self) -> ThreadSafeConnection { + if !QUEUES.read().contains_key(&self.connection.uri) { + let mut queues = QUEUES.write(); + if !queues.contains_key(&self.connection.uri) { + let mut write_connection = self.connection.create_connection(); + // Enable writes for this connection + write_connection.write = true; + if let Some(mut write_queue_constructor) = self.write_queue_constructor { + let write_channel = write_queue_constructor(write_connection); + queues.insert(self.connection.uri.clone(), write_channel); + } else { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + let sender = UnboundedSyncSender::new(sender); + queues.insert( + self.connection.uri.clone(), + Box::new(move |queued_write| { + sender + .send(queued_write) + .expect("Could not send write action to backgorund thread"); + }), + ); + } + } + } + let db_initialize_query = self.db_initialize_query; self.connection @@ -90,6 +137,7 @@ impl ThreadSafeConnection { pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { ThreadSafeConnectionBuilder:: { db_initialize_query: None, + write_queue_constructor: None, connection: Self { uri: Arc::from(uri), persistent, @@ -112,48 +160,21 @@ impl ThreadSafeConnection { Connection::open_memory(Some(self.uri.as_ref())) } - fn queue_write_task(&self, callback: QueuedWrite) { - // Startup write thread for this database if one hasn't already - // been started and insert a channel to queue work for it - if !QUEUES.read().contains_key(&self.uri) { - let mut queues = QUEUES.write(); - if !queues.contains_key(&self.uri) { - use std::sync::mpsc::channel; - - 
let (sender, reciever) = channel::(); - let mut write_connection = self.create_connection(); - // Enable writes for this connection - write_connection.write = true; - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - - queues.insert(self.uri.clone(), UnboundedSyncSender::new(sender)); - } - } - - // Grab the queue for this database - let queues = QUEUES.read(); - let write_channel = queues.get(&self.uri).unwrap(); - - write_channel - .send(callback) - .expect("Could not send write action to backgorund thread"); - } - pub fn write( &self, callback: impl 'static + Send + FnOnce(&Connection) -> T, ) -> impl Future { + let queues = QUEUES.read(); + let write_channel = queues + .get(&self.uri) + .expect("Queues are inserted when build is called. This should always succeed"); + // Create a one shot channel for the result of the queued write // so we can await on the result let (sender, reciever) = oneshot::channel(); - self.queue_write_task(Box::new(move |connection| { + write_channel(Box::new(move |connection| { sender.send(callback(connection)).ok(); })); - reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) } From f68e8d4664e4322eb88add438d4ca015c0daaffc Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 16:19:46 -0800 Subject: [PATCH 74/86] Address some issues with the sqlez_macros --- Cargo.lock | 1 + crates/db/src/db.rs | 34 +++--- crates/db/src/kvp.rs | 28 ++--- crates/editor/src/items.rs | 40 ++++--- crates/editor/src/persistence.rs | 3 +- crates/sqlez/src/thread_safe_connection.rs | 87 ++++++++------ crates/sqlez_macros/Cargo.toml | 3 +- crates/sqlez_macros/src/sqlez_macros.rs | 23 ++-- crates/workspace/src/persistence.rs | 132 ++++++++++----------- crates/workspace/src/workspace.rs | 8 +- 10 files changed, 184 insertions(+), 175 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
fd1bb4ea0aee7468b180a1311cd9610b6fd0d25f..4312b7e830f8aa7d85171aeb6e3a5b8d764bcd1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5609,6 +5609,7 @@ dependencies = [ "proc-macro2", "quote", "sqlez", + "sqlformat", "syn", ] diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 1ac1d1604bbb8fc7d94c49db26a2dc0a62fa1acf..3fc069405d74119bff841465b3891d628c21c50c 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -4,7 +4,6 @@ pub mod kvp; pub use anyhow; pub use indoc::indoc; pub use lazy_static; -use parking_lot::Mutex; pub use smol; pub use sqlez; pub use sqlez_macros; @@ -34,7 +33,7 @@ lazy_static::lazy_static! { } /// Open or create a database at the given directory path. -pub async fn open_file_db() -> ThreadSafeConnection { +pub async fn open_db() -> ThreadSafeConnection { // Use 0 for now. Will implement incrementing and clearing of old db files soon TM let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); @@ -56,18 +55,15 @@ pub async fn open_file_db() -> ThreadSafeConnection { .await } -pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection { +#[cfg(any(test, feature = "test-support"))] +pub async fn open_test_db(db_name: &str) -> ThreadSafeConnection { + use sqlez::thread_safe_connection::locking_queue; + ThreadSafeConnection::::builder(db_name, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) // Serialize queued writes via a mutex and run them synchronously - .with_write_queue_constructor(Box::new(|connection| { - let connection = Mutex::new(connection); - Box::new(move |queued_write| { - let connection = connection.lock(); - queued_write(&connection) - }) - })) + .with_write_queue_constructor(locking_queue()) .build() .await } @@ -76,22 +72,24 @@ pub async fn open_memory_db(db_name: &str) -> ThreadSafeConnection< #[macro_export] macro_rules! 
connection { ($id:ident: $t:ident<$d:ty>) => { - pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); impl ::std::ops::Deref for $t { - type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; fn deref(&self) -> &Self::Target { &self.0 } } - ::db::lazy_static::lazy_static! { - pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) { - $crate::smol::block_on(::db::open_memory_db(stringify!($id))) - } else { - $crate::smol::block_on(::db::open_file_db()) - }); + #[cfg(any(test, feature = "test-support"))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id)))); + } + + #[cfg(not(any(test, feature = "test-support")))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db())); } }; } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index da796fa469bba761769bd7225f90c2c5843b9665..70ee9f64dafb2089fc8f2236d43dcd5fb19e4fcd 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,25 +1,9 @@ -use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; +use sqlez::domain::Domain; use sqlez_macros::sql; -use crate::{open_file_db, open_memory_db, query}; +use crate::{connection, query}; -pub struct KeyValueStore(ThreadSafeConnection); - -impl std::ops::Deref for KeyValueStore { - type Target = ThreadSafeConnection; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -lazy_static::lazy_static! 
{ - pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) { - smol::block_on(open_memory_db("KEY_VALUE_STORE")) - } else { - smol::block_on(open_file_db()) - }); -} +connection!(KEY_VALUE_STORE: KeyValueStore); impl Domain for KeyValueStore { fn name() -> &'static str { @@ -27,8 +11,10 @@ impl Domain for KeyValueStore { } fn migrations() -> &'static [&'static str] { + // Legacy migrations using rusqlite may have already created kv_store during alpha, + // migrations must be infallible so this must have 'IF NOT EXISTS' &[sql!( - CREATE TABLE kv_store( + CREATE TABLE IF NOT EXISTS kv_store( key TEXT PRIMARY KEY, value TEXT NOT NULL ) STRICT; @@ -62,7 +48,7 @@ mod tests { #[gpui::test] async fn test_kvp() { - let db = KeyValueStore(crate::open_memory_db("test_kvp").await); + let db = KeyValueStore(crate::open_test_db("test_kvp").await); assert_eq!(db.read_kvp("key-1").unwrap(), None); diff --git a/crates/editor/src/items.rs b/crates/editor/src/items.rs index e724156faea5d48b18270c7ac42bd730c6a3a660..afe659af61cafb164f6f6c6a7c8bff07f85d451d 100644 --- a/crates/editor/src/items.rs +++ b/crates/editor/src/items.rs @@ -602,31 +602,37 @@ impl Item for Editor { item_id: ItemId, cx: &mut ViewContext, ) -> Task>> { - if let Some(project_item) = project.update(cx, |project, cx| { + let project_item: Result<_> = project.update(cx, |project, cx| { // Look up the path with this key associated, create a self with that path - let path = DB.get_path(item_id, workspace_id).ok()?; + let path = DB + .get_path(item_id, workspace_id)? 
+ .context("No path stored for this editor")?; - let (worktree, path) = project.find_local_worktree(&path, cx)?; + let (worktree, path) = project + .find_local_worktree(&path, cx) + .with_context(|| format!("No worktree for path: {path:?}"))?; let project_path = ProjectPath { worktree_id: worktree.read(cx).id(), path: path.into(), }; - Some(project.open_path(project_path, cx)) - }) { - cx.spawn(|pane, mut cx| async move { - let (_, project_item) = project_item.await?; - let buffer = project_item - .downcast::() - .context("Project item at stored path was not a buffer")?; - - Ok(cx.update(|cx| { - cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) - })) + Ok(project.open_path(project_path, cx)) + }); + + project_item + .map(|project_item| { + cx.spawn(|pane, mut cx| async move { + let (_, project_item) = project_item.await?; + let buffer = project_item + .downcast::() + .context("Project item at stored path was not a buffer")?; + + Ok(cx.update(|cx| { + cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx)) + })) + }) }) - } else { - Task::ready(Err(anyhow!("Could not load file from stored path"))) - } + .unwrap_or_else(|error| Task::ready(Err(error))) } } diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 22b0f158c12bf9ff7408a141479553ea485724fc..3416f479e7e2a29a01f4abdc26081307e37a0b46 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -23,7 +23,6 @@ impl Domain for Editor { FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE - ) STRICT; )] } @@ -31,7 +30,7 @@ impl Domain for Editor { impl EditorDb { query! { - pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result { + pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result> { SELECT path FROM editors WHERE item_id = ? AND workspace_id = ? 
} diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index b17c87d63f6fc3fcb2f3153bb7abed96d8f7935c..82697d1f907729684ab6daf69b9d2c3beccc7055 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,6 +1,6 @@ use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; -use parking_lot::RwLock; +use parking_lot::{Mutex, RwLock}; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; @@ -73,37 +73,8 @@ impl ThreadSafeConnectionBuilder { } pub async fn build(self) -> ThreadSafeConnection { - if !QUEUES.read().contains_key(&self.connection.uri) { - let mut queues = QUEUES.write(); - if !queues.contains_key(&self.connection.uri) { - let mut write_connection = self.connection.create_connection(); - // Enable writes for this connection - write_connection.write = true; - if let Some(mut write_queue_constructor) = self.write_queue_constructor { - let write_channel = write_queue_constructor(write_connection); - queues.insert(self.connection.uri.clone(), write_channel); - } else { - use std::sync::mpsc::channel; - - let (sender, reciever) = channel::(); - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - - let sender = UnboundedSyncSender::new(sender); - queues.insert( - self.connection.uri.clone(), - Box::new(move |queued_write| { - sender - .send(queued_write) - .expect("Could not send write action to backgorund thread"); - }), - ); - } - } - } + self.connection + .initialize_queues(self.write_queue_constructor); let db_initialize_query = self.db_initialize_query; @@ -134,6 +105,40 @@ impl ThreadSafeConnectionBuilder { } impl ThreadSafeConnection { + fn initialize_queues(&self, write_queue_constructor: Option) { + if !QUEUES.read().contains_key(&self.uri) { + let mut queues = QUEUES.write(); + if !queues.contains_key(&self.uri) { + let 
mut write_connection = self.create_connection(); + // Enable writes for this connection + write_connection.write = true; + if let Some(mut write_queue_constructor) = write_queue_constructor { + let write_channel = write_queue_constructor(write_connection); + queues.insert(self.uri.clone(), write_channel); + } else { + use std::sync::mpsc::channel; + + let (sender, reciever) = channel::(); + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write(&write_connection) + } + }); + + let sender = UnboundedSyncSender::new(sender); + queues.insert( + self.uri.clone(), + Box::new(move |queued_write| { + sender + .send(queued_write) + .expect("Could not send write action to backgorund thread"); + }), + ); + } + } + } + } + pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { ThreadSafeConnectionBuilder:: { db_initialize_query: None, @@ -208,14 +213,18 @@ impl ThreadSafeConnection<()> { uri: &str, persistent: bool, connection_initialize_query: Option<&'static str>, + write_queue_constructor: Option, ) -> Self { - Self { + let connection = Self { uri: Arc::from(uri), persistent, connection_initialize_query, connections: Default::default(), _migrator: PhantomData, - } + }; + + connection.initialize_queues(write_queue_constructor); + connection } } @@ -243,6 +252,16 @@ impl Deref for ThreadSafeConnection { } } +pub fn locking_queue() -> WriteQueueConstructor { + Box::new(|connection| { + let connection = Mutex::new(connection); + Box::new(move |queued_write| { + let connection = connection.lock(); + queued_write(&connection) + }) + }) +} + #[cfg(test)] mod test { use indoc::indoc; diff --git a/crates/sqlez_macros/Cargo.toml b/crates/sqlez_macros/Cargo.toml index 413a3d30f50f3994aeb41e917c969283873f829a..423b4945005ec72a115b746be7d2f8f668a38be6 100644 --- a/crates/sqlez_macros/Cargo.toml +++ b/crates/sqlez_macros/Cargo.toml @@ -13,4 +13,5 @@ syn = "1.0" quote = "1.0" proc-macro2 = "1.0" lazy_static = "1.4" -sqlez = { path = "../sqlez" } \ 
No newline at end of file +sqlez = { path = "../sqlez" } +sqlformat = "0.2" \ No newline at end of file diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index 532503a3e61a1eeaddfac03012e069875dd2c16d..c937e704ae82c34d6a18fa69a772b517eb5e8f40 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -1,9 +1,11 @@ use proc_macro::{Delimiter, Span, TokenStream, TokenTree}; -use sqlez::thread_safe_connection::ThreadSafeConnection; +use sqlez::thread_safe_connection::{locking_queue, ThreadSafeConnection}; use syn::Error; lazy_static::lazy_static! { - static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None); + static ref SQLITE: ThreadSafeConnection = { + ThreadSafeConnection::new(":memory:", false, None, Some(locking_queue())) + }; } #[proc_macro] @@ -20,6 +22,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream { } let error = SQLITE.sql_has_syntax_error(sql.trim()); + let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); if let Some((error, error_offset)) = error { let error_span = spans @@ -29,10 +32,10 @@ pub fn sql(tokens: TokenStream) -> TokenStream { .next() .unwrap_or(Span::call_site()); - let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql); + let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) } else { - format!("r#\"{}\"#", &sql).parse().unwrap() + format!("r#\"{}\"#", &formatted_sql).parse().unwrap() } } @@ -61,18 +64,18 @@ fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) { fn open_delimiter(delimiter: Delimiter) -> String { match delimiter { - Delimiter::Parenthesis => "(".to_string(), - Delimiter::Brace => "[".to_string(), - Delimiter::Bracket => "{".to_string(), + Delimiter::Parenthesis => "( ".to_string(), + Delimiter::Brace => "[ 
".to_string(), + Delimiter::Bracket => "{ ".to_string(), Delimiter::None => "".to_string(), } } fn close_delimiter(delimiter: Delimiter) -> String { match delimiter { - Delimiter::Parenthesis => ")".to_string(), - Delimiter::Brace => "]".to_string(), - Delimiter::Bracket => "}".to_string(), + Delimiter::Parenthesis => " ) ".to_string(), + Delimiter::Brace => " ] ".to_string(), + Delimiter::Bracket => " } ".to_string(), Delimiter::None => "".to_string(), } } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index c8b31cd2543837cf611ff49617b18a6eac5bfd19..d08c9de9a00c1314146e7f585f10c8fa37893097 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -7,7 +7,6 @@ use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; -use indoc::indoc; use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; @@ -106,15 +105,15 @@ impl WorkspaceDb { DockPosition, ) = iife!({ if worktree_roots.len() == 0 { - self.select_row(indoc! {" + self.select_row(sql!( SELECT workspace_id, workspace_location, dock_visible, dock_anchor - FROM workspaces - ORDER BY timestamp DESC LIMIT 1"})?()? + FROM workspaces + ORDER BY timestamp DESC LIMIT 1))?()? } else { - self.select_row_bound(indoc! {" + self.select_row_bound(sql!( SELECT workspace_id, workspace_location, dock_visible, dock_anchor FROM workspaces - WHERE workspace_location = ?"})?(&workspace_location)? + WHERE workspace_location = ?))?(&workspace_location)? } .context("No workspaces found") }) @@ -142,19 +141,15 @@ impl WorkspaceDb { self.write(move |conn| { conn.with_savepoint("update_worktrees", || { // Clear out panes and pane_groups - conn.exec_bound(indoc! 
{" + conn.exec_bound(sql!( UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; - DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id) + DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id) .context("Clearing old panes")?; - conn.exec_bound(indoc! {" - DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?( - ( - &workspace.location, - workspace.id.clone(), - ) - ) + conn.exec_bound(sql!( + DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? + ))?((&workspace.location, workspace.id.clone())) .context("clearing out old locations")?; // Upsert @@ -184,10 +179,11 @@ impl WorkspaceDb { .context("save pane in save workspace")?; // Complete workspace initialization - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( UPDATE workspaces SET dock_pane = ? - WHERE workspace_id = ?"})?((dock_id, workspace.id)) + WHERE workspace_id = ? + ))?((dock_id, workspace.id)) .context("Finishing initialization with dock pane")?; Ok(()) @@ -203,20 +199,13 @@ impl WorkspaceDb { } } - /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots - pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> { - iife!({ - // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html - Ok::<_, anyhow::Error>( - self.select_bound::( - "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?", - )?(limit)? - .into_iter() - .collect::>(), - ) - }) - .log_err() - .unwrap_or_default() + query! { + pub fn recent_workspaces(limit: usize) -> Result> { + SELECT workspace_id, workspace_location + FROM workspaces + ORDER BY timestamp DESC + LIMIT ? 
+ } } fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result { @@ -233,7 +222,7 @@ impl WorkspaceDb { ) -> Result> { type GroupKey = (Option, WorkspaceId); type GroupOrPane = (Option, Option, Option, Option); - self.select_bound::(indoc! {" + self.select_bound::(sql!( SELECT group_id, axis, pane_id, active FROM (SELECT group_id, @@ -243,7 +232,7 @@ impl WorkspaceDb { position, parent_group_id, workspace_id - FROM pane_groups + FROM pane_groups UNION SELECT NULL, @@ -257,7 +246,7 @@ impl WorkspaceDb { JOIN panes ON center_panes.pane_id = panes.pane_id) WHERE parent_group_id IS ? AND workspace_id = ? ORDER BY position - "})?((group_id, workspace_id))? + ))?((group_id, workspace_id))? .into_iter() .map(|(group_id, axis, pane_id, active)| { if let Some((group_id, axis)) = group_id.zip(axis) { @@ -293,10 +282,11 @@ impl WorkspaceDb { SerializedPaneGroup::Group { axis, children } => { let (parent_id, position) = unzip_option(parent); - let group_id = conn.select_row_bound::<_, i64>(indoc! {" + let group_id = conn.select_row_bound::<_, i64>(sql!( INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?) - RETURNING group_id"})?(( + RETURNING group_id + ))?(( workspace_id, parent_id, position, @@ -318,10 +308,11 @@ impl WorkspaceDb { } fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result { - let (pane_id, active) = self.select_row_bound(indoc! {" + let (pane_id, active) = self.select_row_bound(sql!( SELECT pane_id, active FROM panes - WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?( + WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?) + ))?( workspace_id, )? .context("No dock pane for workspace")?; @@ -339,17 +330,19 @@ impl WorkspaceDb { parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane dock: bool, ) -> Result { - let pane_id = conn.select_row_bound::<_, i64>(indoc! 
{" + let pane_id = conn.select_row_bound::<_, i64>(sql!( INSERT INTO panes(workspace_id, active) VALUES (?, ?) - RETURNING pane_id"})?((workspace_id, pane.active))? + RETURNING pane_id + ))?((workspace_id, pane.active))? .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?; if !dock { let (parent_id, order) = unzip_option(parent); - conn.exec_bound(indoc! {" + conn.exec_bound(sql!( INSERT INTO center_panes(pane_id, parent_group_id, position) - VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?; + VALUES (?, ?, ?) + ))?((pane_id, parent_id, order))?; } Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?; @@ -358,10 +351,11 @@ impl WorkspaceDb { } fn get_items(&self, pane_id: PaneId) -> Result> { - Ok(self.select_bound(indoc! {" + Ok(self.select_bound(sql!( SELECT kind, item_id FROM items WHERE pane_id = ? - ORDER BY position"})?(pane_id)?) + ORDER BY position + ))?(pane_id)?) } fn save_items( @@ -370,10 +364,11 @@ impl WorkspaceDb { pane_id: PaneId, items: &[SerializedItem], ) -> Result<()> { - let mut insert = conn.exec_bound( - "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)", - ).context("Preparing insertion")?; + let mut insert = conn.exec_bound(sql!( + INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?) + )).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { + dbg!(item); insert((workspace_id, pane_id, position, item))?; } @@ -386,7 +381,7 @@ mod tests { use std::sync::Arc; - use db::open_memory_db; + use db::open_test_db; use settings::DockAnchor; use super::*; @@ -395,18 +390,19 @@ mod tests { async fn test_next_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_next_id_stability").await); + let db = WorkspaceDb(open_test_db("test_next_id_stability").await); db.write(|conn| { conn.migrate( "test_table", - &[indoc! 
{" + &[sql!( CREATE TABLE test_table( text TEXT, workspace_id INTEGER, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ) STRICT;"}], + ) STRICT; + )], ) .unwrap(); }) @@ -416,22 +412,22 @@ mod tests { // Assert the empty row got inserted assert_eq!( Some(id), - db.select_row_bound::( - "SELECT workspace_id FROM workspaces WHERE workspace_id = ?" - ) + db.select_row_bound::(sql!( + SELECT workspace_id FROM workspaces WHERE workspace_id = ? + )) .unwrap()(id) .unwrap() ); db.write(move |conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-1", id)) .unwrap() }) .await; let test_text_1 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(1) .unwrap() .unwrap(); @@ -442,19 +438,19 @@ mod tests { async fn test_workspace_id_stability() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await); + let db = WorkspaceDb(open_test_db("test_workspace_id_stability").await); db.write(|conn| { conn.migrate( "test_table", - &[indoc! 
{" + &[sql!( CREATE TABLE test_table( text TEXT, workspace_id INTEGER, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE - ) STRICT;"}], + ) STRICT;)], ) }) .await @@ -479,7 +475,7 @@ mod tests { db.save_workspace(workspace_1.clone()).await; db.write(|conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-1", 1)) .unwrap(); }) @@ -488,7 +484,7 @@ mod tests { db.save_workspace(workspace_2.clone()).await; db.write(|conn| { - conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)") + conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?))) .unwrap()(("test-text-2", 2)) .unwrap(); }) @@ -505,14 +501,14 @@ mod tests { db.save_workspace(workspace_2).await; let test_text_2 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(2) .unwrap() .unwrap(); assert_eq!(test_text_2, "test-text-2"); let test_text_1 = db - .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?") + .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?)) .unwrap()(1) .unwrap() .unwrap(); @@ -523,7 +519,7 @@ mod tests { async fn test_full_workspace_serialization() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await); + let db = WorkspaceDb(open_test_db("test_full_workspace_serialization").await); let dock_pane = crate::persistence::model::SerializedPane { children: vec![ @@ -597,7 +593,7 @@ mod tests { async fn test_workspace_assignment() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_basic_functionality").await); + let db = WorkspaceDb(open_test_db("test_basic_functionality").await); let workspace_1 = 
SerializedWorkspace { id: 1, @@ -689,7 +685,7 @@ mod tests { async fn test_basic_dock_pane() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("basic_dock_pane").await); + let db = WorkspaceDb(open_test_db("basic_dock_pane").await); let dock_pane = crate::persistence::model::SerializedPane::new( vec![ @@ -714,7 +710,7 @@ mod tests { async fn test_simple_split() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("simple_split").await); + let db = WorkspaceDb(open_test_db("simple_split").await); // ----------------- // | 1,2 | 5,6 | @@ -766,7 +762,7 @@ mod tests { async fn test_cleanup_panes() { env_logger::try_init().ok(); - let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await); + let db = WorkspaceDb(open_test_db("test_cleanup_panes").await); let center_pane = SerializedPaneGroup::Group { axis: gpui::Axis::Horizontal, diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 584f6392d197f94dc820c5e5c1604559ce1be42b..da796b5b44bcbc79ffceba168d2f10a729aa078d 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2365,7 +2365,6 @@ impl Workspace { .await; // Traverse the splits tree and add to things - let (root, active_pane) = serialized_workspace .center_group .deserialize(&project, serialized_workspace.id, &workspace, &mut cx) @@ -2384,6 +2383,10 @@ impl Workspace { cx.focus(active_pane); } + if workspace.items(cx).next().is_none() { + cx.dispatch_action(NewFile); + } + cx.notify(); }); } @@ -2636,13 +2639,10 @@ pub fn open_paths( pub fn open_new(app_state: &Arc, cx: &mut MutableAppContext) -> Task<()> { let task = Workspace::new_local(Vec::new(), app_state.clone(), cx); cx.spawn(|mut cx| async move { - eprintln!("Open new task spawned"); let (workspace, opened_paths) = task.await; - eprintln!("workspace and path items created"); workspace.update(&mut cx, |_, cx| { if opened_paths.is_empty() { - eprintln!("new file redispatched"); 
cx.dispatch_action(NewFile); } }) From 8a48567857cfd5fd77d9350ec53809ac68364076 Mon Sep 17 00:00:00 2001 From: Kay Simmons Date: Wed, 30 Nov 2022 17:28:49 -0800 Subject: [PATCH 75/86] Reactivate the correct item in each pane when deserializing --- crates/db/src/db.rs | 4 +- .../terminal/src/terminal_container_view.rs | 1 - crates/workspace/src/persistence.rs | 67 ++++++++++--------- crates/workspace/src/persistence/model.rs | 31 +++++++-- crates/workspace/src/workspace.rs | 40 ++++++----- 5 files changed, 84 insertions(+), 59 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 3fc069405d74119bff841465b3891d628c21c50c..ea355a91a6d5a87443f1ed252bd6fabc21a49523 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -113,7 +113,6 @@ macro_rules! query { $vis async fn $id(&self) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - self.write(|connection| { let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); @@ -143,7 +142,6 @@ macro_rules! query { $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { use $crate::anyhow::Context; - self.write(move |connection| { let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); @@ -186,7 +184,7 @@ macro_rules! 
query { )) } }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { pub async fn $id(&self) -> $crate::anyhow::Result> { use $crate::anyhow::Context; diff --git a/crates/terminal/src/terminal_container_view.rs b/crates/terminal/src/terminal_container_view.rs index a6c28d4baf944af937075a36b6aa1d32e9d38ef7..8f4bfeeb5364d6c3c49f20976230c17efb148379 100644 --- a/crates/terminal/src/terminal_container_view.rs +++ b/crates/terminal/src/terminal_container_view.rs @@ -137,7 +137,6 @@ impl TerminalContainer { TerminalContainerContent::Error(view) } }; - // cx.focus(content.handle()); TerminalContainer { content, diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index d08c9de9a00c1314146e7f585f10c8fa37893097..213033a90f588e8c3b257e6276dba1a96aeb2b14 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -76,6 +76,7 @@ impl Domain for Workspace { pane_id INTEGER NOT NULL, kind TEXT NOT NULL, position INTEGER NOT NULL, + active INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ON UPDATE CASCADE, @@ -352,7 +353,7 @@ impl WorkspaceDb { fn get_items(&self, pane_id: PaneId) -> Result> { Ok(self.select_bound(sql!( - SELECT kind, item_id FROM items + SELECT kind, item_id, active FROM items WHERE pane_id = ? ORDER BY position ))?(pane_id)?) @@ -365,10 +366,9 @@ impl WorkspaceDb { items: &[SerializedItem], ) -> Result<()> { let mut insert = conn.exec_bound(sql!( - INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?) + INSERT INTO items(workspace_id, pane_id, position, kind, item_id, active) VALUES (?, ?, ?, ?, ?, ?) 
)).context("Preparing insertion")?; for (position, item) in items.iter().enumerate() { - dbg!(item); insert((workspace_id, pane_id, position, item))?; } @@ -497,6 +497,7 @@ mod tests { workspace_2.dock_pane.children.push(SerializedItem { kind: Arc::from("Test"), item_id: 10, + active: true, }); db.save_workspace(workspace_2).await; @@ -523,10 +524,10 @@ mod tests { let dock_pane = crate::persistence::model::SerializedPane { children: vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), - SerializedItem::new("Terminal", 3), - SerializedItem::new("Terminal", 4), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, false), + SerializedItem::new("Terminal", 3, true), + SerializedItem::new("Terminal", 4, false), ], active: false, }; @@ -544,15 +545,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, false), + SerializedItem::new("Terminal", 6, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 7), - SerializedItem::new("Terminal", 8), + SerializedItem::new("Terminal", 7, true), + SerializedItem::new("Terminal", 8, false), ], false, )), @@ -560,8 +561,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 9), - SerializedItem::new("Terminal", 10), + SerializedItem::new("Terminal", 9, false), + SerializedItem::new("Terminal", 10, true), ], false, )), @@ -689,10 +690,10 @@ mod tests { let dock_pane = crate::persistence::model::SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 2), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 2, false), + 
SerializedItem::new("Terminal", 3, true), ], false, ); @@ -725,15 +726,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 3, true), ], true, )), @@ -741,8 +742,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, true), + SerializedItem::new("Terminal", 6, false), ], false, )), @@ -772,15 +773,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, false), + SerializedItem::new("Terminal", 3, true), ], true, )), @@ -788,8 +789,8 @@ mod tests { }, SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 5), - SerializedItem::new("Terminal", 6), + SerializedItem::new("Terminal", 5, false), + SerializedItem::new("Terminal", 6, true), ], false, )), @@ -807,15 +808,15 @@ mod tests { children: vec![ SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 1), - SerializedItem::new("Terminal", 2), + SerializedItem::new("Terminal", 1, false), + SerializedItem::new("Terminal", 2, true), ], false, )), SerializedPaneGroup::Pane(SerializedPane::new( vec![ - SerializedItem::new("Terminal", 4), - 
SerializedItem::new("Terminal", 3), + SerializedItem::new("Terminal", 4, true), + SerializedItem::new("Terminal", 3, false), ], true, )), diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index dc6d8ba8ee5b70bdd62adb3013207c0d45aacea7..c6943ab622d58ed0c024bbbb21a0655cd503222d 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -147,7 +147,8 @@ impl SerializedPane { workspace: &ViewHandle, cx: &mut AsyncAppContext, ) { - for item in self.children.iter() { + let mut active_item_index = None; + for (index, item) in self.children.iter().enumerate() { let project = project.clone(); let item_handle = pane_handle .update(cx, |_, cx| { @@ -174,6 +175,16 @@ impl SerializedPane { Pane::add_item(workspace, &pane_handle, item_handle, false, false, None, cx); }) } + + if item.active { + active_item_index = Some(index); + } + } + + if let Some(active_item_index) = active_item_index { + pane_handle.update(cx, |pane, cx| { + pane.activate_item(active_item_index, false, false, cx); + }) } } } @@ -186,13 +197,15 @@ pub type ItemId = usize; pub struct SerializedItem { pub kind: Arc, pub item_id: ItemId, + pub active: bool, } impl SerializedItem { - pub fn new(kind: impl AsRef, item_id: ItemId) -> Self { + pub fn new(kind: impl AsRef, item_id: ItemId, active: bool) -> Self { Self { kind: Arc::from(kind.as_ref()), item_id, + active, } } } @@ -203,6 +216,7 @@ impl Default for SerializedItem { SerializedItem { kind: Arc::from("Terminal"), item_id: 100000, + active: false, } } } @@ -210,7 +224,8 @@ impl Default for SerializedItem { impl Bind for &SerializedItem { fn bind(&self, statement: &Statement, start_index: i32) -> Result { let next_index = statement.bind(self.kind.clone(), start_index)?; - statement.bind(self.item_id, next_index) + let next_index = statement.bind(self.item_id, next_index)?; + statement.bind(self.active, next_index) } } @@ -218,7 +233,15 @@ impl Column for 
SerializedItem { fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { let (kind, next_index) = Arc::::column(statement, start_index)?; let (item_id, next_index) = ItemId::column(statement, next_index)?; - Ok((SerializedItem { kind, item_id }, next_index)) + let (active, next_index) = bool::column(statement, next_index)?; + Ok(( + SerializedItem { + kind, + item_id, + active, + }, + next_index, + )) } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index da796b5b44bcbc79ffceba168d2f10a729aa078d..82d95389d8822bf2f40986ccf12cc0d724566c9b 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2292,12 +2292,14 @@ impl Workspace { ) -> SerializedPane { let (items, active) = { let pane = pane_handle.read(cx); + let active_item_id = pane.active_item().map(|item| item.id()); ( pane.items() .filter_map(|item_handle| { Some(SerializedItem { kind: Arc::from(item_handle.serialized_item_kind()?), item_id: item_handle.id(), + active: Some(item_handle.id()) == active_item_id, }) }) .collect::>(), @@ -2308,8 +2310,6 @@ impl Workspace { SerializedPane::new(items, active) } - let dock_pane = serialize_pane_handle(self.dock.pane(), cx); - fn build_serialized_pane_group( pane_group: &Member, cx: &AppContext, @@ -2327,19 +2327,25 @@ impl Workspace { } } } - let center_group = build_serialized_pane_group(&self.center.root, cx); - - let serialized_workspace = SerializedWorkspace { - id: self.database_id, - location: self.location(cx), - dock_position: self.dock.position(), - dock_pane, - center_group, - }; - cx.background() - .spawn(persistence::DB.save_workspace(serialized_workspace)) - .detach(); + let location = self.location(cx); + + if !location.paths().is_empty() { + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + let center_group = build_serialized_pane_group(&self.center.root, cx); + + let serialized_workspace = SerializedWorkspace { + id: self.database_id, + 
location: self.location(cx), + dock_position: self.dock.position(), + dock_pane, + center_group, + }; + + cx.background() + .spawn(persistence::DB.save_workspace(serialized_workspace)) + .detach(); + } } fn load_from_serialized_workspace( @@ -2380,13 +2386,11 @@ impl Workspace { Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); if let Some(active_pane) = active_pane { + // Change the focus to the workspace first so that we retrigger focus in on the pane. + cx.focus_self(); cx.focus(active_pane); } - if workspace.items(cx).next().is_none() { - cx.dispatch_action(NewFile); - } - cx.notify(); }); } From b8d423555ba6aa1e965ef7c73a0fbe5a1a33f40b Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 12:02:38 -0800 Subject: [PATCH 76/86] Added side bar restoration --- crates/workspace/src/persistence.rs | 27 +++++++++++++++++------ crates/workspace/src/persistence/model.rs | 1 + crates/workspace/src/workspace.rs | 7 ++++++ 3 files changed, 28 insertions(+), 7 deletions(-) diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index 213033a90f588e8c3b257e6276dba1a96aeb2b14..db591410878ffb2c4ef1689bcc3dd68a0a5ccd2f 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -36,6 +36,7 @@ impl Domain for Workspace { dock_visible INTEGER, // Boolean dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet + project_panel_open INTEGER, //Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; @@ -100,19 +101,20 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, workspace_location, dock_position): ( + let (workspace_id, workspace_location, project_panel_open, dock_position): ( WorkspaceId, WorkspaceLocation, + bool, DockPosition, ) = 
iife!({ if worktree_roots.len() == 0 { self.select_row(sql!( - SELECT workspace_id, workspace_location, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1))?()? } else { self.select_row_bound(sql!( - SELECT workspace_id, workspace_location, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor FROM workspaces WHERE workspace_location = ?))?(&workspace_location)? } @@ -133,6 +135,7 @@ impl WorkspaceDb { .context("Getting center group") .log_err()?, dock_position, + project_panel_open }) } @@ -158,18 +161,20 @@ impl WorkspaceDb { INSERT INTO workspaces( workspace_id, workspace_location, + project_panel_open, dock_visible, dock_anchor, timestamp ) - VALUES (?1, ?2, ?3, ?4, CURRENT_TIMESTAMP) + VALUES (?1, ?2, ?3, ?4, ?5, CURRENT_TIMESTAMP) ON CONFLICT DO UPDATE SET workspace_location = ?2, - dock_visible = ?3, - dock_anchor = ?4, + project_panel_open = ?3, + dock_visible = ?4, + dock_anchor = ?5, timestamp = CURRENT_TIMESTAMP - ))?((workspace.id, &workspace.location, workspace.dock_position)) + ))?((workspace.id, &workspace.location, workspace.project_panel_open, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -273,6 +278,7 @@ impl WorkspaceDb { .collect::>() } + fn save_pane_group( conn: &Connection, workspace_id: WorkspaceId, @@ -462,6 +468,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: true }; let mut workspace_2 = SerializedWorkspace { @@ -470,6 +477,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false }; db.save_workspace(workspace_1.clone()).await; @@ -575,6 +583,7 @@ mod tests { 
dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, + project_panel_open: true }; db.save_workspace(workspace.clone()).await; @@ -602,6 +611,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: true, }; let mut workspace_2 = SerializedWorkspace { @@ -610,6 +620,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false, }; db.save_workspace(workspace_1.clone()).await; @@ -645,6 +656,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), + project_panel_open: false }; db.save_workspace(workspace_3.clone()).await; @@ -679,6 +691,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, + project_panel_open: true } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index c6943ab622d58ed0c024bbbb21a0655cd503222d..c57c992d7b2b05aaa71bdd5497082a24a158c1ea 100644 --- a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -65,6 +65,7 @@ pub struct SerializedWorkspace { pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, + pub project_panel_open: bool, } #[derive(Debug, PartialEq, Eq, Clone)] diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 82d95389d8822bf2f40986ccf12cc0d724566c9b..66ef63f27f7386e461a6c5e9c560506d9a503a81 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -2340,6 +2340,7 @@ impl Workspace { dock_position: self.dock.position(), dock_pane, center_group, + project_panel_open: self.left_sidebar.read(cx).is_open(), }; 
cx.background() @@ -2383,6 +2384,12 @@ impl Workspace { // Swap workspace center group workspace.center = PaneGroup::with_root(root); + // Note, if this is moved after 'set_dock_position' + // it causes an infinite loop. + if serialized_workspace.project_panel_open { + workspace.toggle_sidebar_item_focus(SidebarSide::Left, 0, cx) + } + Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); if let Some(active_pane) = active_pane { From 189a820113dd0409ee7736e370087d6b7792f9d0 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 14:16:38 -0800 Subject: [PATCH 77/86] First draft of graceful corruption restoration --- crates/db/src/db.rs | 79 ++++++++++++++++++++-- crates/sqlez/src/thread_safe_connection.rs | 43 ++++++------ crates/util/src/lib.rs | 7 ++ 3 files changed, 103 insertions(+), 26 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index ea355a91a6d5a87443f1ed252bd6fabc21a49523..6de51cb0e6818ed765c965679b6befb871411a27 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -2,6 +2,7 @@ pub mod kvp; // Re-export pub use anyhow; +use anyhow::Context; pub use indoc::indoc; pub use lazy_static; pub use smol; @@ -14,9 +15,13 @@ use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; use std::path::Path; use std::sync::atomic::{AtomicBool, Ordering}; +use std::time::{SystemTime, UNIX_EPOCH}; +use util::{async_iife, ResultExt}; use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; use util::paths::DB_DIR; +// TODO: Add a savepoint to the thread safe connection initialization and migrations + const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; PRAGMA busy_timeout=1; @@ -28,31 +33,90 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( PRAGMA journal_mode=WAL; ); +const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; + lazy_static::lazy_static! 
{ static ref DB_WIPED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. pub async fn open_db() -> ThreadSafeConnection { - // Use 0 for now. Will implement incrementing and clearing of old db files soon TM - let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + let db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); + // If WIPE_DB, delete 0-{channel} if *RELEASE_CHANNEL == ReleaseChannel::Dev && std::env::var("WIPE_DB").is_ok() && !DB_WIPED.load(Ordering::Acquire) { - remove_dir_all(¤t_db_dir).ok(); - DB_WIPED.store(true, Ordering::Relaxed); + remove_dir_all(&db_dir).ok(); + DB_WIPED.store(true, Ordering::Release); } - create_dir_all(¤t_db_dir).expect("Should be able to create the database directory"); - let db_path = current_db_dir.join(Path::new("db.sqlite")); + let connection = async_iife!({ + // If no db folder, create one at 0-{channel} + create_dir_all(&db_dir).context("Could not create db directory")?; + let db_path = db_dir.join(Path::new("db.sqlite")); + + // Try building a connection + if let Some(connection) = ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await + .log_err() { + return Ok(connection) + } + + let backup_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect( + "System clock is set before the unix timestamp, Zed does not support this region of spacetime" + ) + .as_millis(); + + // If failed, move 0-{channel} to {current unix timestamp}-{channel} + let backup_db_dir = (*DB_DIR).join(Path::new(&format!( + "{}{}", + backup_timestamp, + *RELEASE_CHANNEL_NAME + ))); + + std::fs::rename(&db_dir, backup_db_dir) + .context("Failed clean up corrupted database, panicking.")?; + + // TODO: Set a constant with the failed timestamp and error so we can notify the user + + 
// Create a new 0-{channel} + create_dir_all(&db_dir).context("Should be able to create the database directory")?; + let db_path = db_dir.join(Path::new("db.sqlite")); + + // Try again + ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await + }).await.log_err(); + + if let Some(connection) = connection { + return connection; + } + + // TODO: Set another constant so that we can escalate the notification + + // If still failed, create an in memory db with a known name + open_fallback_db().await +} - ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) +async fn open_fallback_db() -> ThreadSafeConnection { + ThreadSafeConnection::::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) .build() .await + .expect( + "Fallback in memory database failed. 
Likely initialization queries or migrations have fundamental errors", + ) } #[cfg(any(test, feature = "test-support"))] @@ -66,6 +130,7 @@ pub async fn open_test_db(db_name: &str) -> ThreadSafeConnection .with_write_queue_constructor(locking_queue()) .build() .await + .unwrap() } /// Implements a basic DB wrapper for a given domain diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 82697d1f907729684ab6daf69b9d2c3beccc7055..4849e785b556a8c4b45fdeb21128adc19d576bcf 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use futures::{channel::oneshot, Future, FutureExt}; use lazy_static::lazy_static; use parking_lot::{Mutex, RwLock}; @@ -72,7 +73,7 @@ impl ThreadSafeConnectionBuilder { self } - pub async fn build(self) -> ThreadSafeConnection { + pub async fn build(self) -> anyhow::Result> { self.connection .initialize_queues(self.write_queue_constructor); @@ -81,26 +82,33 @@ impl ThreadSafeConnectionBuilder { self.connection .write(move |connection| { if let Some(db_initialize_query) = db_initialize_query { - connection.exec(db_initialize_query).expect(&format!( - "Db initialize query failed to execute: {}", - db_initialize_query - ))() - .unwrap(); + connection.exec(db_initialize_query).with_context(|| { + format!( + "Db initialize query failed to execute: {}", + db_initialize_query + ) + })?()?; } - let mut failure_result = None; + // Retry failed migrations in case they were run in parallel from different + // processes. 
This gives a best attempt at migrating before bailing + let mut migration_result = + anyhow::Result::<()>::Err(anyhow::anyhow!("Migration never run")); + for _ in 0..MIGRATION_RETRIES { - failure_result = Some(M::migrate(connection)); - if failure_result.as_ref().unwrap().is_ok() { + migration_result = connection + .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); + + if migration_result.is_ok() { break; } } - failure_result.unwrap().expect("Migration failed"); + migration_result }) - .await; + .await?; - self.connection + Ok(self.connection) } } @@ -240,10 +248,6 @@ impl Clone for ThreadSafeConnection { } } -// TODO: -// 1. When migration or initialization fails, move the corrupted db to a holding place and create a new one -// 2. If the new db also fails, downgrade to a shared in memory db -// 3. In either case notify the user about what went wrong impl Deref for ThreadSafeConnection { type Target = Connection; @@ -265,7 +269,7 @@ pub fn locking_queue() -> WriteQueueConstructor { #[cfg(test)] mod test { use indoc::indoc; - use lazy_static::__Deref; + use std::ops::Deref; use std::thread; use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; @@ -295,7 +299,8 @@ mod test { PRAGMA foreign_keys=TRUE; PRAGMA case_sensitive_like=TRUE; "}); - let _ = smol::block_on(builder.build()).deref(); + + let _ = smol::block_on(builder.build()).unwrap().deref(); })); } @@ -341,6 +346,6 @@ mod test { ThreadSafeConnection::::builder("wild_zed_lost_failure", false) .with_connection_initialize_query("PRAGMA FOREIGN_KEYS=true"); - smol::block_on(builder.build()); + smol::block_on(builder.build()).unwrap(); } } diff --git a/crates/util/src/lib.rs b/crates/util/src/lib.rs index 78536f01d07fc9656e4246b33bc186d0412adf22..0e83bb5f19982f36254c5e7ae3f7c7e1e2e8a5a5 100644 --- a/crates/util/src/lib.rs +++ b/crates/util/src/lib.rs @@ -223,6 +223,13 @@ macro_rules! iife { }; } +#[macro_export] +macro_rules! 
async_iife { + ($block:block) => { + (|| async move { $block })() + }; +} + #[cfg(test)] mod tests { use super::*; From 5e240f98f0b80a5f2ebd902c690957e11a7d63b6 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Thu, 1 Dec 2022 18:31:05 -0800 Subject: [PATCH 78/86] Reworked thread safe connection be threadsafer,,,, again Co-Authored-By: kay@zed.dev --- crates/db/src/db.rs | 543 ++++++++------------- crates/db/src/kvp.rs | 29 +- crates/db/src/query.rs | 314 ++++++++++++ crates/editor/src/persistence.rs | 27 +- crates/sqlez/src/bindable.rs | 164 ++++--- crates/sqlez/src/connection.rs | 14 +- crates/sqlez/src/domain.rs | 4 +- crates/sqlez/src/migrations.rs | 3 + crates/sqlez/src/thread_safe_connection.rs | 141 +++--- crates/terminal/src/persistence.rs | 19 +- crates/workspace/src/persistence.rs | 44 +- crates/workspace/src/workspace.rs | 5 +- 12 files changed, 732 insertions(+), 575 deletions(-) create mode 100644 crates/db/src/query.rs diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6de51cb0e6818ed765c965679b6befb871411a27..6c6688b0d1ae6f5e6bfb120ea862012bb038190a 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -1,26 +1,27 @@ pub mod kvp; +pub mod query; // Re-export pub use anyhow; use anyhow::Context; pub use indoc::indoc; pub use lazy_static; +use parking_lot::{Mutex, RwLock}; pub use smol; pub use sqlez; pub use sqlez_macros; +pub use util::channel::{RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; +pub use util::paths::DB_DIR; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use sqlez_macros::sql; use std::fs::{create_dir_all, remove_dir_all}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; use util::{async_iife, ResultExt}; -use util::channel::{ReleaseChannel, RELEASE_CHANNEL, RELEASE_CHANNEL_NAME}; -use util::paths::DB_DIR; - -// TODO: Add a savepoint to the thread safe connection initialization and migrations 
+use util::channel::ReleaseChannel; const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( PRAGMA synchronous=NORMAL; @@ -36,79 +37,117 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; lazy_static::lazy_static! { - static ref DB_WIPED: AtomicBool = AtomicBool::new(false); + static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); + static ref DB_WIPED: RwLock = RwLock::new(false); + pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. -pub async fn open_db() -> ThreadSafeConnection { - let db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME))); +/// This will retry a couple times if there are failures. If opening fails once, the db directory +/// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. +/// In either case, static variables are set so that the user can be notified. +pub async fn open_db(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { + let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel.name()))); // If WIPE_DB, delete 0-{channel} - if *RELEASE_CHANNEL == ReleaseChannel::Dev - && std::env::var("WIPE_DB").is_ok() - && !DB_WIPED.load(Ordering::Acquire) + if release_channel == &ReleaseChannel::Dev + && wipe_db + && !*DB_WIPED.read() { - remove_dir_all(&db_dir).ok(); - DB_WIPED.store(true, Ordering::Release); + let mut db_wiped = DB_WIPED.write(); + if !*db_wiped { + remove_dir_all(&main_db_dir).ok(); + + *db_wiped = true; + } } let connection = async_iife!({ + // Note: This still has a race condition where 1 set of migrations succeeds + // (e.g. (Workspace, Editor)) and another fails (e.g. (Workspace, Terminal)) + // This will cause the first connection to have the database taken out + // from under it. 
This *should* be fine though. The second dabatase failure will + // cause errors in the log and so should be observed by developers while writing + // soon-to-be good migrations. If user databases are corrupted, we toss them out + // and try again from a blank. As long as running all migrations from start to end + // is ok, this race condition will never be triggered. + // + // Basically: Don't ever push invalid migrations to stable or everyone will have + // a bad time. + // If no db folder, create one at 0-{channel} - create_dir_all(&db_dir).context("Could not create db directory")?; - let db_path = db_dir.join(Path::new("db.sqlite")); - - // Try building a connection - if let Some(connection) = ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) - .with_db_initialization_query(DB_INITIALIZE_QUERY) - .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) - .build() - .await - .log_err() { - return Ok(connection) + create_dir_all(&main_db_dir).context("Could not create db directory")?; + let db_path = main_db_dir.join(Path::new("db.sqlite")); + + // Optimistically open databases in parallel + if !DB_FILE_OPERATIONS.is_locked() { + // Try building a connection + if let Some(connection) = open_main_db(&db_path).await { + return Ok(connection) + }; } + // Take a lock in the failure case so that we move the db once per process instead + // of potentially multiple times from different threads. 
This shouldn't happen in the + // normal path + let _lock = DB_FILE_OPERATIONS.lock(); + if let Some(connection) = open_main_db(&db_path).await { + return Ok(connection) + }; + let backup_timestamp = SystemTime::now() .duration_since(UNIX_EPOCH) - .expect( - "System clock is set before the unix timestamp, Zed does not support this region of spacetime" - ) + .expect("System clock is set before the unix timestamp, Zed does not support this region of spacetime") .as_millis(); // If failed, move 0-{channel} to {current unix timestamp}-{channel} - let backup_db_dir = (*DB_DIR).join(Path::new(&format!( - "{}{}", + let backup_db_dir = db_dir.join(Path::new(&format!( + "{}-{}", backup_timestamp, - *RELEASE_CHANNEL_NAME + release_channel.name(), ))); - std::fs::rename(&db_dir, backup_db_dir) + std::fs::rename(&main_db_dir, &backup_db_dir) .context("Failed clean up corrupted database, panicking.")?; - // TODO: Set a constant with the failed timestamp and error so we can notify the user - + // Set a static ref with the failed timestamp and error so we can notify the user + { + let mut guard = BACKUP_DB_PATH.write(); + *guard = Some(backup_db_dir); + } + // Create a new 0-{channel} - create_dir_all(&db_dir).context("Should be able to create the database directory")?; - let db_path = db_dir.join(Path::new("db.sqlite")); + create_dir_all(&main_db_dir).context("Should be able to create the database directory")?; + let db_path = main_db_dir.join(Path::new("db.sqlite")); // Try again - ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) - .with_db_initialization_query(DB_INITIALIZE_QUERY) - .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) - .build() - .await + open_main_db(&db_path).await.context("Could not newly created db") }).await.log_err(); - if let Some(connection) = connection { + if let Some(connection) = connection { return connection; } - // TODO: Set another constant so that we can escalate the notification + // Set another static ref 
so that we can escalate the notification + ALL_FILE_DB_FAILED.store(true, Ordering::Release); // If still failed, create an in memory db with a known name open_fallback_db().await } +async fn open_main_db(db_path: &PathBuf) -> Option> { + println!("Opening main db"); + ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) + .with_db_initialization_query(DB_INITIALIZE_QUERY) + .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) + .build() + .await + .log_err() +} + async fn open_fallback_db() -> ThreadSafeConnection { + println!("Opening fallback db"); ThreadSafeConnection::::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) @@ -135,17 +174,27 @@ pub async fn open_test_db(db_name: &str) -> ThreadSafeConnection /// Implements a basic DB wrapper for a given domain #[macro_export] -macro_rules! connection { - ($id:ident: $t:ident<$d:ty>) => { - pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>); +macro_rules! define_connection { + (pub static ref $id:ident: $t:ident<()> = $migrations:expr;) => { + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>); impl ::std::ops::Deref for $t { - type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>; + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>; fn deref(&self) -> &Self::Target { &self.0 } } + + impl $crate::sqlez::domain::Domain for $t { + fn name() -> &'static str { + stringify!($t) + } + + fn migrations() -> &'static [&'static str] { + $migrations + } + } #[cfg(any(test, feature = "test-support"))] $crate::lazy_static::lazy_static! { @@ -154,322 +203,124 @@ macro_rules! connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! { - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db())); - } - }; -} - -#[macro_export] -macro_rules! 
query { - ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.exec(sql_stmt)?().context(::std::format!( - "Error in {}, exec failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt, - )) + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; - ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + (pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => { + pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<( $($d),+, $t )>); - connection.exec(sql_stmt)?().context(::std::format!( - "Error in {}, exec failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.write(move |connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.exec_bound::<$arg_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse 
for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { - use $crate::anyhow::Context; - - self.write(move |connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select::<$return_type>(sql_stmt)?(()) - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { - pub async fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select::<$return_type>(sql_stmt)?(()) - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis 
async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + impl ::std::ops::Deref for $t { + type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<($($d),+, $t)>; - connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, exec_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await + fn deref(&self) -> &Self::Target { + &self.0 + } } - }; - ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await + + impl $crate::sqlez::domain::Domain for $t { + fn name() -> &'static str { + stringify!($t) + } + + fn migrations() -> &'static [&'static str] { + $migrations + } } - }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: 
{}", - ::std::stringify!($id), - sql_stmt - )) + #[cfg(any(test, feature = "test-support"))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id)))); } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - - } - }; - ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { - use $crate::anyhow::Context; - - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await + #[cfg(not(any(test, feature = "test-support")))] + $crate::lazy_static::lazy_static! { + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; - ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; +} - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); +#[cfg(test)] +mod tests { + use std::thread; - self.select_row::<$return_type>(indoc! { $sql })?() - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? 
- .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; + use sqlez::domain::Domain; + use sqlez_macros::sql; + use tempdir::TempDir; + use util::channel::ReleaseChannel; - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - connection.select_row::<$return_type>(sql_stmt)?() - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await + use crate::open_db; + + enum TestDB {} + + impl Domain for TestDB { + fn name() -> &'static str { + "db_tests" } - }; - ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? 
- .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) + fn migrations() -> &'static [&'static str] { + &[sql!( + CREATE TABLE test(value); + )] } - }; - ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); - - self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - } - }; - ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { - $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { - use $crate::anyhow::Context; - - - self.write(|connection| { - let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + } + + // Test that wipe_db exists and works and gives a new db + #[test] + fn test_wipe_db() { + env_logger::try_init().ok(); + + smol::block_on(async { + let tempdir = TempDir::new("DbTests").unwrap(); + + let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + test_db.write(|connection| + connection.exec(sql!( + INSERT INTO test(value) VALUES (10) + )).unwrap()().unwrap() + ).await; + drop(test_db); + + let mut guards = vec![]; + for _ in 0..5 { + let path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || smol::block_on(async { + let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; + + assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + 
})); + + guards.push(guard); + } + + for guard in guards { + guard.join().unwrap(); + } + }) + } - connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) - .context(::std::format!( - "Error in {}, select_row_bound failed to execute or parse for: {}", - ::std::stringify!($id), - sql_stmt - ))? - .context(::std::format!( - "Error in {}, select_row_bound expected single row result but found none for: {}", - ::std::stringify!($id), - sql_stmt - )) - }).await - } - }; + // Test a file system failure (like in create_dir_all()) + #[test] + fn test_file_system_failure() { + + } + + // Test happy path where everything exists and opens + #[test] + fn test_open_db() { + + } + + // Test bad migration panics + #[test] + fn test_bad_migration_panics() { + + } + + /// Test that DB exists but corrupted (causing recreate) + #[test] + fn test_db_corruption() { + + + // open_db(db_dir, release_channel) + } } diff --git a/crates/db/src/kvp.rs b/crates/db/src/kvp.rs index 70ee9f64dafb2089fc8f2236d43dcd5fb19e4fcd..0b0cdd9aa1117744bc554d0dfd26d37ac152371e 100644 --- a/crates/db/src/kvp.rs +++ b/crates/db/src/kvp.rs @@ -1,26 +1,15 @@ -use sqlez::domain::Domain; use sqlez_macros::sql; -use crate::{connection, query}; +use crate::{define_connection, query}; -connection!(KEY_VALUE_STORE: KeyValueStore); - -impl Domain for KeyValueStore { - fn name() -> &'static str { - "kvp" - } - - fn migrations() -> &'static [&'static str] { - // Legacy migrations using rusqlite may have already created kv_store during alpha, - // migrations must be infallible so this must have 'IF NOT EXISTS' - &[sql!( - CREATE TABLE IF NOT EXISTS kv_store( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ) STRICT; - )] - } -} +define_connection!(pub static ref KEY_VALUE_STORE: KeyValueStore<()> = + &[sql!( + CREATE TABLE IF NOT EXISTS kv_store( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ) STRICT; + )]; +); impl KeyValueStore { query! 
{ diff --git a/crates/db/src/query.rs b/crates/db/src/query.rs new file mode 100644 index 0000000000000000000000000000000000000000..731fca15cb5c47b58e89aac1eb2a7b42189829c2 --- /dev/null +++ b/crates/db/src/query.rs @@ -0,0 +1,314 @@ +#[macro_export] +macro_rules! query { + ($vis:vis fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec(sql_stmt)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt, + )) + } + }; + ($vis:vis async fn $id:ident() -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec(sql_stmt)?().context(::std::format!( + "Error in {}, exec failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident($arg:ident: $arg_type:ty) -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<$arg_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) 
+ }).await + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<()> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<()> { + use $crate::anyhow::Context; + + self.write(move |connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.exec_bound::<($($arg_type),+)>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select::<$return_type>(sql_stmt)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + pub async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select::<$return_type>(sql_stmt)?(()) + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> 
$crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, exec_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<($($arg_type),+), 
$return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + + } + }; + ($vis:vis async fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result> { + use $crate::anyhow::Context; + + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row_bound::<($($arg_type),+), $return_type>(indoc! { $sql })?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row::<$return_type>(indoc! { $sql })?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis async fn $id:ident() -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis async fn $id(&self) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row::<$return_type>(sql_stmt)?() + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; + ($vis:vis fn $id:ident($arg:ident: $arg_type:ty) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + pub fn $id(&self, $arg: $arg_type) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<$arg_type, $return_type>(sql_stmt)?($arg) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis fn $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + self.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? 
+ .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + } + }; + ($vis:vis fn async $id:ident($($arg:ident: $arg_type:ty),+) -> Result<$return_type:ty> { $($sql:tt)+ }) => { + $vis async fn $id(&self, $($arg: $arg_type),+) -> $crate::anyhow::Result<$return_type> { + use $crate::anyhow::Context; + + + self.write(|connection| { + let sql_stmt = $crate::sqlez_macros::sql!($($sql)+); + + connection.select_row_bound::<($($arg_type),+), $return_type>(sql_stmt)?(($($arg),+)) + .context(::std::format!( + "Error in {}, select_row_bound failed to execute or parse for: {}", + ::std::stringify!($id), + sql_stmt + ))? + .context(::std::format!( + "Error in {}, select_row_bound expected single row result but found none for: {}", + ::std::stringify!($id), + sql_stmt + )) + }).await + } + }; +} diff --git a/crates/editor/src/persistence.rs b/crates/editor/src/persistence.rs index 3416f479e7e2a29a01f4abdc26081307e37a0b46..31ada105af9e8220f80e03433152959ed688f4df 100644 --- a/crates/editor/src/persistence.rs +++ b/crates/editor/src/persistence.rs @@ -1,19 +1,11 @@ use std::path::PathBuf; -use crate::Editor; use db::sqlez_macros::sql; -use db::{connection, query}; -use sqlez::domain::Domain; -use workspace::{ItemId, Workspace, WorkspaceId}; +use db::{define_connection, query}; +use workspace::{ItemId, WorkspaceDb, WorkspaceId}; -connection!(DB: EditorDb<(Workspace, Editor)>); - -impl Domain for Editor { - fn name() -> &'static str { - "editor" - } - - fn migrations() -> &'static [&'static str] { +define_connection!( + pub static ref DB: EditorDb = &[sql! 
( CREATE TABLE editors( item_id INTEGER NOT NULL, @@ -21,12 +13,11 @@ impl Domain for Editor { path BLOB NOT NULL, PRIMARY KEY(item_id, workspace_id), FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE - ) STRICT; - )] - } -} + ON DELETE CASCADE + ON UPDATE CASCADE + ) STRICT; + )]; +); impl EditorDb { query! { diff --git a/crates/sqlez/src/bindable.rs b/crates/sqlez/src/bindable.rs index ffef7814f9327d758e0f8aa4bb36b8644f7b5bc8..3649037e502ca34b4a99b7a6f53de3ab4ba03ef3 100644 --- a/crates/sqlez/src/bindable.rs +++ b/crates/sqlez/src/bindable.rs @@ -137,13 +137,6 @@ impl Column for usize { } } -impl Bind for () { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - statement.bind_null(start_index)?; - Ok(start_index + 1) - } -} - impl Bind for &str { fn bind(&self, statement: &Statement, start_index: i32) -> Result { statement.bind_text(start_index, self)?; @@ -179,78 +172,6 @@ impl Column for String { } } -impl Bind for (T1, T2) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - self.1.bind(statement, next_index) - } -} - -impl Column for (T1, T2) { - fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - Ok(((first, second), next_index)) - } -} - -impl Bind for (T1, T2, T3) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - self.2.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, 
next_index) = T3::column(statement, next_index)?; - Ok(((first, second, third), next_index)) - } -} - -impl Bind for (T1, T2, T3, T4) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - let next_index = self.2.bind(statement, next_index)?; - self.3.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3, T4) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, next_index) = T3::column(statement, next_index)?; - let (fourth, next_index) = T4::column(statement, next_index)?; - Ok(((first, second, third, fourth), next_index)) - } -} - -impl Bind for (T1, T2, T3, T4, T5) { - fn bind(&self, statement: &Statement, start_index: i32) -> Result { - let next_index = self.0.bind(statement, start_index)?; - let next_index = self.1.bind(statement, next_index)?; - let next_index = self.2.bind(statement, next_index)?; - let next_index = self.3.bind(statement, next_index)?; - self.4.bind(statement, next_index) - } -} - -impl Column for (T1, T2, T3, T4, T5) { - fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { - let (first, next_index) = T1::column(statement, start_index)?; - let (second, next_index) = T2::column(statement, next_index)?; - let (third, next_index) = T3::column(statement, next_index)?; - let (fourth, next_index) = T4::column(statement, next_index)?; - let (fifth, next_index) = T5::column(statement, next_index)?; - Ok(((first, second, third, fourth, fifth), next_index)) - } -} - impl Bind for Option { fn bind(&self, statement: &Statement, start_index: i32) -> Result { if let Some(this) = self { @@ -344,3 +265,88 @@ impl Column for PathBuf { )) } } + +/// Unit impls do nothing. 
This simplifies query macros +impl Bind for () { + fn bind(&self, _statement: &Statement, start_index: i32) -> Result { + Ok(start_index) + } +} + +impl Column for () { + fn column(_statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + Ok(((), start_index)) + } +} + +impl Bind for (T1, T2) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + self.1.bind(statement, next_index) + } +} + +impl Column for (T1, T2) { + fn column<'a>(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + Ok(((first, second), next_index)) + } +} + +impl Bind for (T1, T2, T3) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + self.2.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + Ok(((first, second, third), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + self.3.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = 
T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + Ok(((first, second, third, fourth), next_index)) + } +} + +impl Bind for (T1, T2, T3, T4, T5) { + fn bind(&self, statement: &Statement, start_index: i32) -> Result { + let next_index = self.0.bind(statement, start_index)?; + let next_index = self.1.bind(statement, next_index)?; + let next_index = self.2.bind(statement, next_index)?; + let next_index = self.3.bind(statement, next_index)?; + self.4.bind(statement, next_index) + } +} + +impl Column for (T1, T2, T3, T4, T5) { + fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> { + let (first, next_index) = T1::column(statement, start_index)?; + let (second, next_index) = T2::column(statement, next_index)?; + let (third, next_index) = T3::column(statement, next_index)?; + let (fourth, next_index) = T4::column(statement, next_index)?; + let (fifth, next_index) = T5::column(statement, next_index)?; + Ok(((first, second, third, fourth, fifth), next_index)) + } +} diff --git a/crates/sqlez/src/connection.rs b/crates/sqlez/src/connection.rs index 0456266594a898d34fbd761d045963a94ff8635e..3342845d148ca2588c4650a8579b9ebd335640cd 100644 --- a/crates/sqlez/src/connection.rs +++ b/crates/sqlez/src/connection.rs @@ -1,4 +1,5 @@ use std::{ + cell::RefCell, ffi::{CStr, CString}, marker::PhantomData, path::Path, @@ -11,7 +12,7 @@ use libsqlite3_sys::*; pub struct Connection { pub(crate) sqlite3: *mut sqlite3, persistent: bool, - pub(crate) write: bool, + pub(crate) write: RefCell, _sqlite: PhantomData, } unsafe impl Send for Connection {} @@ -21,7 +22,7 @@ impl Connection { let mut connection = Self { sqlite3: 0 as *mut _, persistent, - write: true, + write: RefCell::new(true), _sqlite: PhantomData, }; @@ -64,7 +65,7 @@ impl Connection { } pub fn can_write(&self) -> bool { - self.write + *self.write.borrow() } pub fn backup_main(&self, destination: &Connection) -> Result<()> { @@ -152,6 +153,13 @@ impl 
Connection { )) } } + + pub(crate) fn with_write(&self, callback: impl FnOnce(&Connection) -> T) -> T { + *self.write.borrow_mut() = true; + let result = callback(self); + *self.write.borrow_mut() = false; + result + } } impl Drop for Connection { diff --git a/crates/sqlez/src/domain.rs b/crates/sqlez/src/domain.rs index 3a477b2bc9ad121bd8b7a024b867ecc0a3ee45eb..a83f4e18d6600ce4ac1cc3373b4b235695785522 100644 --- a/crates/sqlez/src/domain.rs +++ b/crates/sqlez/src/domain.rs @@ -1,11 +1,11 @@ use crate::connection::Connection; -pub trait Domain { +pub trait Domain: 'static { fn name() -> &'static str; fn migrations() -> &'static [&'static str]; } -pub trait Migrator { +pub trait Migrator: 'static { fn migrate(connection: &Connection) -> anyhow::Result<()>; } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index 41c505f85b11ea79ffcfbbcffeed29224c9fab63..aa8d5fe00b4615261c8bedf4a66f312c23cfe7aa 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -12,6 +12,7 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { self.with_savepoint("migrating", || { + println!("Processing domain"); // Setup the migrations table unconditionally self.exec(indoc! {" CREATE TABLE IF NOT EXISTS migrations ( @@ -43,11 +44,13 @@ impl Connection { {}", domain, index, completed_migration, migration})); } else { // Migration already run. 
Continue + println!("Migration already run"); continue; } } self.exec(migration)?()?; + println!("Ran migration"); store_completed_migration((domain, index, *migration))?; } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 4849e785b556a8c4b45fdeb21128adc19d576bcf..77ba3406a295085c3e41ce0bc09b7e259608e6b4 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -5,17 +5,13 @@ use parking_lot::{Mutex, RwLock}; use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread}; use thread_local::ThreadLocal; -use crate::{ - connection::Connection, - domain::{Domain, Migrator}, - util::UnboundedSyncSender, -}; +use crate::{connection::Connection, domain::Migrator, util::UnboundedSyncSender}; const MIGRATION_RETRIES: usize = 10; -type QueuedWrite = Box; +type QueuedWrite = Box; type WriteQueueConstructor = - Box Box>; + Box Box>; lazy_static! { /// List of queues of tasks by database uri. This lets us serialize writes to the database /// and have a single worker thread per db file. This means many thread safe connections @@ -28,18 +24,18 @@ lazy_static! { /// Thread safe connection to a given database file or in memory db. This can be cloned, shared, static, /// whatever. It derefs to a synchronous connection by thread that is read only. 
A write capable connection /// may be accessed by passing a callback to the `write` function which will queue the callback -pub struct ThreadSafeConnection { +pub struct ThreadSafeConnection { uri: Arc, persistent: bool, connection_initialize_query: Option<&'static str>, connections: Arc>, - _migrator: PhantomData, + _migrator: PhantomData<*mut M>, } -unsafe impl Send for ThreadSafeConnection {} -unsafe impl Sync for ThreadSafeConnection {} +unsafe impl Send for ThreadSafeConnection {} +unsafe impl Sync for ThreadSafeConnection {} -pub struct ThreadSafeConnectionBuilder { +pub struct ThreadSafeConnectionBuilder { db_initialize_query: Option<&'static str>, write_queue_constructor: Option, connection: ThreadSafeConnection, @@ -54,6 +50,13 @@ impl ThreadSafeConnectionBuilder { self } + /// Queues an initialization query for the database file. This must be infallible + /// but may cause changes to the database file such as with `PRAGMA journal_mode` + pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { + self.db_initialize_query = Some(initialize_query); + self + } + /// Specifies how the thread safe connection should serialize writes. If provided /// the connection will call the write_queue_constructor for each database file in /// this process. The constructor is responsible for setting up a background thread or @@ -66,13 +69,6 @@ impl ThreadSafeConnectionBuilder { self } - /// Queues an initialization query for the database file. 
This must be infallible - /// but may cause changes to the database file such as with `PRAGMA journal_mode` - pub fn with_db_initialization_query(mut self, initialize_query: &'static str) -> Self { - self.db_initialize_query = Some(initialize_query); - self - } - pub async fn build(self) -> anyhow::Result> { self.connection .initialize_queues(self.write_queue_constructor); @@ -100,6 +96,7 @@ impl ThreadSafeConnectionBuilder { .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); if migration_result.is_ok() { + println!("Migration succeded"); break; } } @@ -113,38 +110,17 @@ impl ThreadSafeConnectionBuilder { } impl ThreadSafeConnection { - fn initialize_queues(&self, write_queue_constructor: Option) { + fn initialize_queues(&self, write_queue_constructor: Option) -> bool { if !QUEUES.read().contains_key(&self.uri) { let mut queues = QUEUES.write(); if !queues.contains_key(&self.uri) { - let mut write_connection = self.create_connection(); - // Enable writes for this connection - write_connection.write = true; - if let Some(mut write_queue_constructor) = write_queue_constructor { - let write_channel = write_queue_constructor(write_connection); - queues.insert(self.uri.clone(), write_channel); - } else { - use std::sync::mpsc::channel; - - let (sender, reciever) = channel::(); - thread::spawn(move || { - while let Ok(write) = reciever.recv() { - write(&write_connection) - } - }); - - let sender = UnboundedSyncSender::new(sender); - queues.insert( - self.uri.clone(), - Box::new(move |queued_write| { - sender - .send(queued_write) - .expect("Could not send write action to backgorund thread"); - }), - ); - } + let mut write_queue_constructor = + write_queue_constructor.unwrap_or(background_thread_queue()); + queues.insert(self.uri.clone(), write_queue_constructor()); + return true; } } + return false; } pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder { @@ -163,20 +139,21 @@ impl ThreadSafeConnection { /// Opens a new db 
connection with the initialized file path. This is internal and only /// called from the deref function. - fn open_file(&self) -> Connection { - Connection::open_file(self.uri.as_ref()) + fn open_file(uri: &str) -> Connection { + Connection::open_file(uri) } /// Opens a shared memory connection using the file path as the identifier. This is internal /// and only called from the deref function. - fn open_shared_memory(&self) -> Connection { - Connection::open_memory(Some(self.uri.as_ref())) + fn open_shared_memory(uri: &str) -> Connection { + Connection::open_memory(Some(uri)) } pub fn write( &self, callback: impl 'static + Send + FnOnce(&Connection) -> T, ) -> impl Future { + // Check and invalidate queue and maybe recreate queue let queues = QUEUES.read(); let write_channel = queues .get(&self.uri) @@ -185,24 +162,32 @@ impl ThreadSafeConnection { // Create a one shot channel for the result of the queued write // so we can await on the result let (sender, reciever) = oneshot::channel(); - write_channel(Box::new(move |connection| { - sender.send(callback(connection)).ok(); + + let thread_safe_connection = (*self).clone(); + write_channel(Box::new(move || { + let connection = thread_safe_connection.deref(); + let result = connection.with_write(|connection| callback(connection)); + sender.send(result).ok(); })); reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) } - pub(crate) fn create_connection(&self) -> Connection { - let mut connection = if self.persistent { - self.open_file() + pub(crate) fn create_connection( + persistent: bool, + uri: &str, + connection_initialize_query: Option<&'static str>, + ) -> Connection { + let mut connection = if persistent { + Self::open_file(uri) } else { - self.open_shared_memory() + Self::open_shared_memory(uri) }; // Disallow writes on the connection. The only writes allowed for thread safe connections // are from the background thread that can serialize them. 
- connection.write = false; + *connection.write.get_mut() = false; - if let Some(initialize_query) = self.connection_initialize_query { + if let Some(initialize_query) = connection_initialize_query { connection.exec(initialize_query).expect(&format!( "Initialize query failed to execute: {}", initialize_query @@ -236,7 +221,7 @@ impl ThreadSafeConnection<()> { } } -impl Clone for ThreadSafeConnection { +impl Clone for ThreadSafeConnection { fn clone(&self) -> Self { Self { uri: self.uri.clone(), @@ -252,16 +237,41 @@ impl Deref for ThreadSafeConnection { type Target = Connection; fn deref(&self) -> &Self::Target { - self.connections.get_or(|| self.create_connection()) + self.connections.get_or(|| { + Self::create_connection(self.persistent, &self.uri, self.connection_initialize_query) + }) } } +pub fn background_thread_queue() -> WriteQueueConstructor { + use std::sync::mpsc::channel; + + Box::new(|| { + let (sender, reciever) = channel::(); + + thread::spawn(move || { + while let Ok(write) = reciever.recv() { + write() + } + }); + + let sender = UnboundedSyncSender::new(sender); + Box::new(move |queued_write| { + sender + .send(queued_write) + .expect("Could not send write action to background thread"); + }) + }) +} + pub fn locking_queue() -> WriteQueueConstructor { - Box::new(|connection| { - let connection = Mutex::new(connection); + Box::new(|| { + let mutex = Mutex::new(()); Box::new(move |queued_write| { - let connection = connection.lock(); - queued_write(&connection) + eprintln!("Write started"); + let _ = mutex.lock(); + queued_write(); + eprintln!("Write finished"); }) }) } @@ -269,7 +279,8 @@ pub fn locking_queue() -> WriteQueueConstructor { #[cfg(test)] mod test { use indoc::indoc; - use std::ops::Deref; + use lazy_static::__Deref; + use std::thread; use crate::{domain::Domain, thread_safe_connection::ThreadSafeConnection}; diff --git a/crates/terminal/src/persistence.rs b/crates/terminal/src/persistence.rs index 
f9cfb6fc010d1cc0b229ae3bfb8f727a83ec73c0..1669a3a546773fa461d94152953e962d4ac6ec7c 100644 --- a/crates/terminal/src/persistence.rs +++ b/crates/terminal/src/persistence.rs @@ -1,19 +1,11 @@ use std::path::PathBuf; -use db::{connection, query, sqlez::domain::Domain, sqlez_macros::sql}; +use db::{define_connection, query, sqlez_macros::sql}; -use workspace::{ItemId, Workspace, WorkspaceId}; +use workspace::{ItemId, WorkspaceDb, WorkspaceId}; -use crate::Terminal; - -connection!(TERMINAL_CONNECTION: TerminalDb<(Workspace, Terminal)>); - -impl Domain for Terminal { - fn name() -> &'static str { - "terminal" - } - - fn migrations() -> &'static [&'static str] { +define_connection! { + pub static ref TERMINAL_CONNECTION: TerminalDb = &[sql!( CREATE TABLE terminals ( workspace_id INTEGER, @@ -23,8 +15,7 @@ impl Domain for Terminal { FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE ) STRICT; - )] - } + )]; } impl TerminalDb { diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index db591410878ffb2c4ef1689bcc3dd68a0a5ccd2f..a0cc48ca1cc56db9f94e751add6afb6fd4baaa8c 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -5,30 +5,21 @@ pub mod model; use std::path::Path; use anyhow::{anyhow, bail, Context, Result}; -use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql}; +use db::{define_connection, query, sqlez::connection::Connection, sqlez_macros::sql}; use gpui::Axis; -use db::sqlez::domain::Domain; use util::{iife, unzip_option, ResultExt}; use crate::dock::DockPosition; use crate::WorkspaceId; -use super::Workspace; - use model::{ GroupId, PaneId, SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace, WorkspaceLocation, }; -connection!(DB: WorkspaceDb); - -impl Domain for Workspace { - fn name() -> &'static str { - "workspace" - } - - fn migrations() -> &'static [&'static str] { +define_connection! 
{ + pub static ref DB: WorkspaceDb<()> = &[sql!( CREATE TABLE workspaces( workspace_id INTEGER PRIMARY KEY, @@ -40,7 +31,7 @@ impl Domain for Workspace { timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; - + CREATE TABLE pane_groups( group_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, @@ -48,29 +39,29 @@ impl Domain for Workspace { position INTEGER, // NULL indicates that this is a root node axis TEXT NOT NULL, // Enum: 'Vertical' / 'Horizontal' FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE panes( pane_id INTEGER PRIMARY KEY, workspace_id INTEGER NOT NULL, active INTEGER NOT NULL, // Boolean FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE + ON DELETE CASCADE + ON UPDATE CASCADE ) STRICT; - + CREATE TABLE center_panes( pane_id INTEGER PRIMARY KEY, parent_group_id INTEGER, // NULL means that this is a root pane position INTEGER, // NULL means that this is a root pane FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, + ON DELETE CASCADE, FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE ) STRICT; - + CREATE TABLE items( item_id INTEGER NOT NULL, // This is the item's view id, so this is not unique workspace_id INTEGER NOT NULL, @@ -79,14 +70,13 @@ impl Domain for Workspace { position INTEGER NOT NULL, active INTEGER NOT NULL, FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) - ON DELETE CASCADE - ON UPDATE CASCADE, + ON DELETE CASCADE + ON UPDATE CASCADE, FOREIGN KEY(pane_id) REFERENCES panes(pane_id) - ON DELETE CASCADE, + ON DELETE CASCADE, PRIMARY KEY(item_id, workspace_id) ) STRICT; - )] - } + )]; } impl WorkspaceDb { @@ -149,7 +139,7 @@ impl WorkspaceDb { UPDATE workspaces SET 
dock_pane = NULL WHERE workspace_id = ?1; DELETE FROM pane_groups WHERE workspace_id = ?1; DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id) - .context("Clearing old panes")?; + .expect("Clearing old panes"); conn.exec_bound(sql!( DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ? diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 66ef63f27f7386e461a6c5e9c560506d9a503a81..8e9131839dc734f56391d3eec647b7c6ee86d694 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -44,8 +44,11 @@ use language::LanguageRegistry; use log::{error, warn}; pub use pane::*; pub use pane_group::*; -pub use persistence::model::{ItemId, WorkspaceLocation}; use persistence::{model::SerializedItem, DB}; +pub use persistence::{ + model::{ItemId, WorkspaceLocation}, + WorkspaceDb, +}; use postage::prelude::Stream; use project::{Project, ProjectEntryId, ProjectPath, ProjectStore, Worktree, WorktreeId}; use serde::Deserialize; From 5262e8c77ef2d453abb1e8922a2da6403986ff8a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 2 Dec 2022 12:43:02 -0800 Subject: [PATCH 79/86] CHANGE LOCK TO NOT BE DROPPED INSTANTLY. 
DANG U RUST co-authored-by: kay@zed.dev --- crates/db/Cargo.toml | 2 +- crates/db/src/db.rs | 9 ++-- crates/sqlez/Cargo.toml | 2 +- crates/sqlez/src/thread_safe_connection.rs | 6 +-- crates/sqlez_macros/src/sqlez_macros.rs | 48 ++++++++++++++-------- crates/workspace/Cargo.toml | 1 + 6 files changed, 39 insertions(+), 29 deletions(-) diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 69c90e02f911cec9252b582c009efc67270e0f42..8e12b06027a810fb85dd77c7d57f9365456018d4 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -27,5 +27,5 @@ smol = "1.2" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } +env_logger = "0.9.1" tempdir = { version = "0.3.7" } -env_logger = "0.9.1" \ No newline at end of file diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6c6688b0d1ae6f5e6bfb120ea862012bb038190a..7b214cb3beab593737d772d92c8aaf51cb0e285f 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -40,7 +40,7 @@ lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); - pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. @@ -58,7 +58,6 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas let mut db_wiped = DB_WIPED.write(); if !*db_wiped { remove_dir_all(&main_db_dir).ok(); - *db_wiped = true; } } @@ -71,7 +70,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // cause errors in the log and so should be observed by developers while writing // soon-to-be good migrations. If user databases are corrupted, we toss them out // and try again from a blank. As long as running all migrations from start to end - // is ok, this race condition will never be triggered. 
+ // on a blank database is ok, this race condition will never be triggered. // // Basically: Don't ever push invalid migrations to stable or everyone will have // a bad time. @@ -137,7 +136,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas } async fn open_main_db(db_path: &PathBuf) -> Option> { - println!("Opening main db"); + log::info!("Opening main db"); ThreadSafeConnection::::builder(db_path.to_string_lossy().as_ref(), true) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) @@ -147,7 +146,7 @@ async fn open_main_db(db_path: &PathBuf) -> Option() -> ThreadSafeConnection { - println!("Opening fallback db"); + log::info!("Opening fallback db"); ThreadSafeConnection::::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 8a7f1ba415649d26c776e50a61408eda7d9540c9..c6c018b9244aa38444a6ef320aa6facd7d13f046 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -13,4 +13,4 @@ smol = "1.2" thread_local = "1.1.4" lazy_static = "1.4" parking_lot = "0.11.1" -futures = "0.3" \ No newline at end of file +futures = "0.3" diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 77ba3406a295085c3e41ce0bc09b7e259608e6b4..7b89827979bd1824a3425f061c1e2ba4b866391d 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -266,12 +266,10 @@ pub fn background_thread_queue() -> WriteQueueConstructor { pub fn locking_queue() -> WriteQueueConstructor { Box::new(|| { - let mutex = Mutex::new(()); + let write_mutex = Mutex::new(()); Box::new(move |queued_write| { - eprintln!("Write started"); - let _ = mutex.lock(); + let _lock = write_mutex.lock(); queued_write(); - eprintln!("Write finished"); }) }) } diff --git 
a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index c937e704ae82c34d6a18fa69a772b517eb5e8f40..429f45db7e55442773fd1b6f5b92bdb577c1c5da 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -10,9 +10,37 @@ lazy_static::lazy_static! { #[proc_macro] pub fn sql(tokens: TokenStream) -> TokenStream { + let (spans, sql) = make_sql(tokens); + + let error = SQLITE.sql_has_syntax_error(sql.trim()); + let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); + + if let Some((error, error_offset)) = error { + create_error(spans, error_offset, error, &formatted_sql) + } else { + format!("r#\"{}\"#", &formatted_sql).parse().unwrap() + } +} + +fn create_error( + spans: Vec<(usize, Span)>, + error_offset: usize, + error: String, + formatted_sql: &String, +) -> TokenStream { + let error_span = spans + .into_iter() + .skip_while(|(offset, _)| offset <= &error_offset) + .map(|(_, span)| span) + .next() + .unwrap_or(Span::call_site()); + let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); + TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) +} + +fn make_sql(tokens: TokenStream) -> (Vec<(usize, Span)>, String) { let mut sql_tokens = vec![]; flatten_stream(tokens.clone(), &mut sql_tokens); - // Lookup of spans by offset at the end of the token let mut spans: Vec<(usize, Span)> = Vec::new(); let mut sql = String::new(); @@ -20,23 +48,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream { sql.push_str(&token_text); spans.push((sql.len(), span)); } - - let error = SQLITE.sql_has_syntax_error(sql.trim()); - let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); - - if let Some((error, error_offset)) = error { - let error_span = spans - .into_iter() - .skip_while(|(offset, _)| offset <= &error_offset) - .map(|(_, span)| span) - .next() - .unwrap_or(Span::call_site()); - - 
let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); - TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) - } else { - format!("r#\"{}\"#", &formatted_sql).parse().unwrap() - } + (spans, sql) } /// This method exists to normalize the representation of groups diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index b67ccdeeb73b22b680055287abdb835c5178d959..917f821e4a0478e24a89a700c29a37f654ab9325 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -54,3 +54,4 @@ gpui = { path = "../gpui", features = ["test-support"] } project = { path = "../project", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } fs = { path = "../fs", features = ["test-support"] } +db = { path = "../db", features = ["test-support"] } \ No newline at end of file From ffcad4e4e2cfd1f8514117357b3185fa4b414e0a Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Fri, 2 Dec 2022 14:30:26 -0800 Subject: [PATCH 80/86] WIP fixing dock problems --- crates/auto_update/src/update_notification.rs | 2 +- crates/client/src/telemetry.rs | 2 +- crates/db/src/db.rs | 175 +++++++++++------- crates/sqlez/src/migrations.rs | 3 - crates/sqlez/src/thread_safe_connection.rs | 1 - crates/util/src/channel.rs | 10 +- crates/workspace/Cargo.toml | 1 - crates/workspace/src/dock.rs | 8 + crates/workspace/src/persistence.rs | 33 ++-- crates/workspace/src/persistence/model.rs | 29 ++- crates/workspace/src/workspace.rs | 103 +++++++---- crates/zed/src/zed.rs | 2 +- 12 files changed, 233 insertions(+), 136 deletions(-) diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 9963ae65b80f3dec9677aac88e7ac888f23d980e..5fbdf174227cb7f1a1bc678107a0cb49e8791b92 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -30,7 +30,7 @@ impl View for UpdateNotification { let theme = 
cx.global::().theme.clone(); let theme = &theme.update_notification; - let app_name = cx.global::().name(); + let app_name = cx.global::().display_name(); MouseEventHandler::::new(0, cx, |state, cx| { Flex::column() diff --git a/crates/client/src/telemetry.rs b/crates/client/src/telemetry.rs index a81f33c604100afc6adceb13aa28a234cc1c1d0e..ce8b713996be0c3ce993cb7b2473bd874a8583b0 100644 --- a/crates/client/src/telemetry.rs +++ b/crates/client/src/telemetry.rs @@ -106,7 +106,7 @@ impl Telemetry { pub fn new(client: Arc, cx: &AppContext) -> Arc { let platform = cx.platform(); let release_channel = if cx.has_global::() { - Some(cx.global::().name()) + Some(cx.global::().display_name()) } else { None }; diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 7b214cb3beab593737d772d92c8aaf51cb0e285f..c146336132361947469d04453134e245e72348aa 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -36,6 +36,8 @@ const DB_INITIALIZE_QUERY: &'static str = sql!( const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; +const DB_FILE_NAME: &'static str = "db.sqlite"; + lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); static ref DB_WIPED: RwLock = RwLock::new(false); @@ -48,7 +50,8 @@ lazy_static::lazy_static! { /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. /// In either case, static variables are set so that the user can be notified. 
pub async fn open_db(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { - let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel.name()))); + let release_channel_name = release_channel.dev_name(); + let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); // If WIPE_DB, delete 0-{channel} if release_channel == &ReleaseChannel::Dev @@ -77,7 +80,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // If no db folder, create one at 0-{channel} create_dir_all(&main_db_dir).context("Could not create db directory")?; - let db_path = main_db_dir.join(Path::new("db.sqlite")); + let db_path = main_db_dir.join(Path::new(DB_FILE_NAME)); // Optimistically open databases in parallel if !DB_FILE_OPERATIONS.is_locked() { @@ -104,7 +107,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas let backup_db_dir = db_dir.join(Path::new(&format!( "{}-{}", backup_timestamp, - release_channel.name(), + release_channel_name, ))); std::fs::rename(&main_db_dir, &backup_db_dir) @@ -118,7 +121,7 @@ pub async fn open_db(wipe_db: bool, db_dir: &Path, releas // Create a new 0-{channel} create_dir_all(&main_db_dir).context("Should be able to create the database directory")?; - let db_path = main_db_dir.join(Path::new("db.sqlite")); + let db_path = main_db_dir.join(Path::new(DB_FILE_NAME)); // Try again open_main_db(&db_path).await.context("Could not newly created db") @@ -240,86 +243,130 @@ macro_rules! 
define_connection { #[cfg(test)] mod tests { - use std::thread; + use std::{thread, fs}; - use sqlez::domain::Domain; + use sqlez::{domain::Domain, connection::Connection}; use sqlez_macros::sql; use tempdir::TempDir; use util::channel::ReleaseChannel; - use crate::open_db; - - enum TestDB {} - - impl Domain for TestDB { - fn name() -> &'static str { - "db_tests" - } - - fn migrations() -> &'static [&'static str] { - &[sql!( - CREATE TABLE test(value); - )] - } - } + use crate::{open_db, DB_FILE_NAME}; // Test that wipe_db exists and works and gives a new db - #[test] - fn test_wipe_db() { - env_logger::try_init().ok(); + #[gpui::test] + async fn test_wipe_db() { + enum TestDB {} - smol::block_on(async { - let tempdir = TempDir::new("DbTests").unwrap(); - - let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - test_db.write(|connection| - connection.exec(sql!( - INSERT INTO test(value) VALUES (10) - )).unwrap()().unwrap() - ).await; - drop(test_db); - - let mut guards = vec![]; - for _ in 0..5 { - let path = tempdir.path().to_path_buf(); - let guard = thread::spawn(move || smol::block_on(async { - let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; - - assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) - })); - - guards.push(guard); + impl Domain for TestDB { + fn name() -> &'static str { + "db_tests" } - for guard in guards { - guard.join().unwrap(); + fn migrations() -> &'static [&'static str] { + &[sql!( + CREATE TABLE test(value); + )] } - }) - } - - // Test a file system failure (like in create_dir_all()) - #[test] - fn test_file_system_failure() { + } - } - - // Test happy path where everything exists and opens - #[test] - fn test_open_db() { + let tempdir = TempDir::new("DbTests").unwrap(); + // Create a db and insert a marker value + let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + test_db.write(|connection| + 
connection.exec(sql!( + INSERT INTO test(value) VALUES (10) + )).unwrap()().unwrap() + ).await; + drop(test_db); + + // Opening db with wipe clears once and removes the marker value + let mut guards = vec![]; + for _ in 0..5 { + let path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || smol::block_on(async { + let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; + + assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + })); + + guards.push(guard); + } + + for guard in guards { + guard.join().unwrap(); + } } - + // Test bad migration panics - #[test] - fn test_bad_migration_panics() { + #[gpui::test] + #[should_panic] + async fn test_bad_migration_panics() { + enum BadDB {} + impl Domain for BadDB { + fn name() -> &'static str { + "db_tests" + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);), + // failure because test already exists + sql!(CREATE TABLE test(value);)] + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + let _bad_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; } /// Test that DB exists but corrupted (causing recreate) - #[test] - fn test_db_corruption() { + #[gpui::test] + async fn test_db_corruption() { + enum CorruptedDB {} + + impl Domain for CorruptedDB { + fn name() -> &'static str { + "db_tests" + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);)] + } + } + + enum GoodDB {} + + impl Domain for GoodDB { + fn name() -> &'static str { + "db_tests" //Notice same name + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test2(value);)] //But different migration + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + { + let corrupt_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + assert!(corrupt_db.persistent()); + } + + let good_db = open_db::(false, tempdir.path(), 
&util::channel::ReleaseChannel::Dev).await; + assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); + + let mut corrupted_backup_dir = fs::read_dir( + tempdir.path() + ).unwrap().find(|entry| { + !entry.as_ref().unwrap().file_name().to_str().unwrap().starts_with("0") + } + ).unwrap().unwrap().path(); + corrupted_backup_dir.push(DB_FILE_NAME); + dbg!(&corrupted_backup_dir); - // open_db(db_dir, release_channel) + let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); + assert!(backup.select_row::("SELECT * FROM test").unwrap()().unwrap().is_none()); } } diff --git a/crates/sqlez/src/migrations.rs b/crates/sqlez/src/migrations.rs index aa8d5fe00b4615261c8bedf4a66f312c23cfe7aa..41c505f85b11ea79ffcfbbcffeed29224c9fab63 100644 --- a/crates/sqlez/src/migrations.rs +++ b/crates/sqlez/src/migrations.rs @@ -12,7 +12,6 @@ use crate::connection::Connection; impl Connection { pub fn migrate(&self, domain: &'static str, migrations: &[&'static str]) -> Result<()> { self.with_savepoint("migrating", || { - println!("Processing domain"); // Setup the migrations table unconditionally self.exec(indoc! {" CREATE TABLE IF NOT EXISTS migrations ( @@ -44,13 +43,11 @@ impl Connection { {}", domain, index, completed_migration, migration})); } else { // Migration already run. 
Continue - println!("Migration already run"); continue; } } self.exec(migration)?()?; - println!("Ran migration"); store_completed_migration((domain, index, *migration))?; } diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 7b89827979bd1824a3425f061c1e2ba4b866391d..51d0707fd8b951d7fd9ff33fceadfed137c425a6 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -96,7 +96,6 @@ impl ThreadSafeConnectionBuilder { .with_savepoint("thread_safe_multi_migration", || M::migrate(connection)); if migration_result.is_ok() { - println!("Migration succeded"); break; } } diff --git a/crates/util/src/channel.rs b/crates/util/src/channel.rs index ab5b53b4ab47a3b6b62fec513fb41b8c8631632b..3edf26dc95e4386f3f89d67e4b2c9ad13fbed83c 100644 --- a/crates/util/src/channel.rs +++ b/crates/util/src/channel.rs @@ -22,11 +22,19 @@ pub enum ReleaseChannel { } impl ReleaseChannel { - pub fn name(&self) -> &'static str { + pub fn display_name(&self) -> &'static str { match self { ReleaseChannel::Dev => "Zed Dev", ReleaseChannel::Preview => "Zed Preview", ReleaseChannel::Stable => "Zed", } } + + pub fn dev_name(&self) -> &'static str { + match self { + ReleaseChannel::Dev => "dev", + ReleaseChannel::Preview => "preview", + ReleaseChannel::Stable => "stable", + } + } } diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 917f821e4a0478e24a89a700c29a37f654ab9325..5894a2a44e6fffc2cb6566eb4c82edd90213d6f7 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -46,7 +46,6 @@ serde_json = { version = "1.0", features = ["preserve_order"] } smallvec = { version = "1.6", features = ["union"] } indoc = "1.0.4" - [dev-dependencies] call = { path = "../call", features = ["test-support"] } client = { path = "../client", features = ["test-support"] } diff --git a/crates/workspace/src/dock.rs b/crates/workspace/src/dock.rs index 
0879166bbe733faf5b9ee0e86695cf3bfe391e39..9b1342ecd922c62c97dc7c19a2f77a1548e8118b 100644 --- a/crates/workspace/src/dock.rs +++ b/crates/workspace/src/dock.rs @@ -175,16 +175,21 @@ impl Dock { new_position: DockPosition, cx: &mut ViewContext, ) { + dbg!("starting", &new_position); workspace.dock.position = new_position; // Tell the pane about the new anchor position workspace.dock.pane.update(cx, |pane, cx| { + dbg!("setting docked"); pane.set_docked(Some(new_position.anchor()), cx) }); if workspace.dock.position.is_visible() { + dbg!("dock is visible"); // Close the right sidebar if the dock is on the right side and the right sidebar is open if workspace.dock.position.anchor() == DockAnchor::Right { + dbg!("dock anchor is right"); if workspace.right_sidebar().read(cx).is_open() { + dbg!("Toggling right sidebar"); workspace.toggle_sidebar(SidebarSide::Right, cx); } } @@ -194,8 +199,10 @@ impl Dock { if pane.read(cx).items().next().is_none() { let item_to_add = (workspace.dock.default_item_factory)(workspace, cx); // Adding the item focuses the pane by default + dbg!("Adding item to dock"); Pane::add_item(workspace, &pane, item_to_add, true, true, None, cx); } else { + dbg!("just focusing dock"); cx.focus(pane); } } else if let Some(last_active_center_pane) = workspace @@ -207,6 +214,7 @@ impl Dock { } cx.emit(crate::Event::DockAnchorChanged); workspace.serialize_workspace(cx); + dbg!("Serializing workspace after dock position changed"); cx.notify(); } diff --git a/crates/workspace/src/persistence.rs b/crates/workspace/src/persistence.rs index a0cc48ca1cc56db9f94e751add6afb6fd4baaa8c..2d4ae919f95d4fcaeb8f0a7466e39098132e1003 100644 --- a/crates/workspace/src/persistence.rs +++ b/crates/workspace/src/persistence.rs @@ -27,7 +27,7 @@ define_connection! 
{ dock_visible INTEGER, // Boolean dock_anchor TEXT, // Enum: 'Bottom' / 'Right' / 'Expanded' dock_pane INTEGER, // NULL indicates that we don't have a dock pane yet - project_panel_open INTEGER, //Boolean + left_sidebar_open INTEGER, //Boolean timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL, FOREIGN KEY(dock_pane) REFERENCES panes(pane_id) ) STRICT; @@ -91,7 +91,7 @@ impl WorkspaceDb { // Note that we re-assign the workspace_id here in case it's empty // and we've grabbed the most recent workspace - let (workspace_id, workspace_location, project_panel_open, dock_position): ( + let (workspace_id, workspace_location, left_sidebar_open, dock_position): ( WorkspaceId, WorkspaceLocation, bool, @@ -99,12 +99,12 @@ impl WorkspaceDb { ) = iife!({ if worktree_roots.len() == 0 { self.select_row(sql!( - SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, left_sidebar_open, dock_visible, dock_anchor FROM workspaces ORDER BY timestamp DESC LIMIT 1))?()? } else { self.select_row_bound(sql!( - SELECT workspace_id, workspace_location, project_panel_open, dock_visible, dock_anchor + SELECT workspace_id, workspace_location, left_sidebar_open, dock_visible, dock_anchor FROM workspaces WHERE workspace_location = ?))?(&workspace_location)? 
} @@ -125,7 +125,7 @@ impl WorkspaceDb { .context("Getting center group") .log_err()?, dock_position, - project_panel_open + left_sidebar_open }) } @@ -151,7 +151,7 @@ impl WorkspaceDb { INSERT INTO workspaces( workspace_id, workspace_location, - project_panel_open, + left_sidebar_open, dock_visible, dock_anchor, timestamp @@ -160,11 +160,11 @@ impl WorkspaceDb { ON CONFLICT DO UPDATE SET workspace_location = ?2, - project_panel_open = ?3, + left_sidebar_open = ?3, dock_visible = ?4, dock_anchor = ?5, timestamp = CURRENT_TIMESTAMP - ))?((workspace.id, &workspace.location, workspace.project_panel_open, workspace.dock_position)) + ))?((workspace.id, &workspace.location, workspace.left_sidebar_open, workspace.dock_position)) .context("Updating workspace")?; // Save center pane group and dock pane @@ -198,7 +198,8 @@ impl WorkspaceDb { query! { pub fn recent_workspaces(limit: usize) -> Result> { SELECT workspace_id, workspace_location - FROM workspaces + FROM workspaces + WHERE workspace_location IS NOT NULL ORDER BY timestamp DESC LIMIT ? 
} @@ -458,7 +459,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: true + left_sidebar_open: true }; let mut workspace_2 = SerializedWorkspace { @@ -467,7 +468,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false + left_sidebar_open: false }; db.save_workspace(workspace_1.clone()).await; @@ -573,7 +574,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Bottom), center_group, dock_pane, - project_panel_open: true + left_sidebar_open: true }; db.save_workspace(workspace.clone()).await; @@ -601,7 +602,7 @@ mod tests { dock_position: crate::dock::DockPosition::Shown(DockAnchor::Bottom), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: true, + left_sidebar_open: true, }; let mut workspace_2 = SerializedWorkspace { @@ -610,7 +611,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Expanded), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false, + left_sidebar_open: false, }; db.save_workspace(workspace_1.clone()).await; @@ -646,7 +647,7 @@ mod tests { dock_position: DockPosition::Shown(DockAnchor::Right), center_group: Default::default(), dock_pane: Default::default(), - project_panel_open: false + left_sidebar_open: false }; db.save_workspace(workspace_3.clone()).await; @@ -681,7 +682,7 @@ mod tests { dock_position: crate::dock::DockPosition::Hidden(DockAnchor::Right), center_group: center_group.clone(), dock_pane, - project_panel_open: true + left_sidebar_open: true } } diff --git a/crates/workspace/src/persistence/model.rs b/crates/workspace/src/persistence/model.rs index c57c992d7b2b05aaa71bdd5497082a24a158c1ea..c75488561f50735ae98c0c4e08a256995a9cba55 100644 --- 
a/crates/workspace/src/persistence/model.rs +++ b/crates/workspace/src/persistence/model.rs @@ -65,7 +65,7 @@ pub struct SerializedWorkspace { pub dock_position: DockPosition, pub center_group: SerializedPaneGroup, pub dock_pane: SerializedPane, - pub project_panel_open: bool, + pub left_sidebar_open: bool, } #[derive(Debug, PartialEq, Eq, Clone)] @@ -95,26 +95,33 @@ impl SerializedPaneGroup { workspace_id: WorkspaceId, workspace: &ViewHandle, cx: &mut AsyncAppContext, - ) -> (Member, Option>) { + ) -> Option<(Member, Option>)> { match self { SerializedPaneGroup::Group { axis, children } => { let mut current_active_pane = None; let mut members = Vec::new(); for child in children { - let (new_member, active_pane) = child + if let Some((new_member, active_pane)) = child .deserialize(project, workspace_id, workspace, cx) - .await; - members.push(new_member); + .await + { + members.push(new_member); - current_active_pane = current_active_pane.or(active_pane); + current_active_pane = current_active_pane.or(active_pane); + } + } + + if members.is_empty() { + return None; } - ( + + Some(( Member::Axis(PaneAxis { axis: *axis, members, }), current_active_pane, - ) + )) } SerializedPaneGroup::Pane(serialized_pane) => { let pane = workspace.update(cx, |workspace, cx| workspace.add_pane(cx)); @@ -123,7 +130,11 @@ impl SerializedPaneGroup { .deserialize_to(project, &pane, workspace_id, workspace, cx) .await; - (Member::Pane(pane.clone()), active.then(|| pane)) + if pane.read_with(cx, |pane, _| pane.items().next().is_some()) { + Some((Member::Pane(pane.clone()), active.then(|| pane))) + } else { + None + } } } } diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 8e9131839dc734f56391d3eec647b7c6ee86d694..5fb804e66dca4870dfa708b70bd3382bdd1762ae 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -1244,6 +1244,8 @@ impl Workspace { Dock::hide_on_sidebar_shown(self, sidebar_side, cx); } + 
self.serialize_workspace(cx); + cx.focus_self(); cx.notify(); } @@ -1275,6 +1277,9 @@ impl Workspace { } else { cx.focus_self(); } + + self.serialize_workspace(cx); + cx.notify(); } @@ -1302,6 +1307,9 @@ impl Workspace { cx.focus(active_item.to_any()); } } + + self.serialize_workspace(cx); + cx.notify(); } @@ -2268,13 +2276,20 @@ impl Workspace { self.database_id } - fn location(&self, cx: &AppContext) -> WorkspaceLocation { - self.project() - .read(cx) - .visible_worktrees(cx) - .map(|worktree| worktree.read(cx).abs_path()) - .collect::>() - .into() + fn location(&self, cx: &AppContext) -> Option { + let project = self.project().read(cx); + + if project.is_local() { + Some( + project + .visible_worktrees(cx) + .map(|worktree| worktree.read(cx).abs_path()) + .collect::>() + .into(), + ) + } else { + None + } } fn remove_panes(&mut self, member: Member, cx: &mut ViewContext) { @@ -2331,24 +2346,24 @@ impl Workspace { } } - let location = self.location(cx); - - if !location.paths().is_empty() { - let dock_pane = serialize_pane_handle(self.dock.pane(), cx); - let center_group = build_serialized_pane_group(&self.center.root, cx); - - let serialized_workspace = SerializedWorkspace { - id: self.database_id, - location: self.location(cx), - dock_position: self.dock.position(), - dock_pane, - center_group, - project_panel_open: self.left_sidebar.read(cx).is_open(), - }; - - cx.background() - .spawn(persistence::DB.save_workspace(serialized_workspace)) - .detach(); + if let Some(location) = self.location(cx) { + if !location.paths().is_empty() { + let dock_pane = serialize_pane_handle(self.dock.pane(), cx); + let center_group = build_serialized_pane_group(&self.center.root, cx); + + let serialized_workspace = SerializedWorkspace { + id: self.database_id, + location, + dock_position: self.dock.position(), + dock_pane, + center_group, + left_sidebar_open: self.left_sidebar.read(cx).is_open(), + }; + + cx.background() + 
.spawn(persistence::DB.save_workspace(serialized_workspace)) + .detach(); + } } } @@ -2375,34 +2390,46 @@ impl Workspace { .await; // Traverse the splits tree and add to things - let (root, active_pane) = serialized_workspace + let center_group = serialized_workspace .center_group .deserialize(&project, serialized_workspace.id, &workspace, &mut cx) .await; // Remove old panes from workspace panes list workspace.update(&mut cx, |workspace, cx| { - workspace.remove_panes(workspace.center.root.clone(), cx); + if let Some((center_group, active_pane)) = center_group { + workspace.remove_panes(workspace.center.root.clone(), cx); + + // Swap workspace center group + workspace.center = PaneGroup::with_root(center_group); + + // Change the focus to the workspace first so that we retrigger focus in on the pane. + cx.focus_self(); - // Swap workspace center group - workspace.center = PaneGroup::with_root(root); + if let Some(active_pane) = active_pane { + cx.focus(active_pane); + } else { + cx.focus(workspace.panes.last().unwrap().clone()); + } + } else { + cx.focus_self(); + } // Note, if this is moved after 'set_dock_position' // it causes an infinite loop. - if serialized_workspace.project_panel_open { - workspace.toggle_sidebar_item_focus(SidebarSide::Left, 0, cx) + if workspace.left_sidebar().read(cx).is_open() + != serialized_workspace.left_sidebar_open + { + workspace.toggle_sidebar(SidebarSide::Left, cx); } - Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); - - if let Some(active_pane) = active_pane { - // Change the focus to the workspace first so that we retrigger focus in on the pane. 
- cx.focus_self(); - cx.focus(active_pane); - } + // Dock::set_dock_position(workspace, serialized_workspace.dock_position, cx); cx.notify(); }); + + // Serialize ourself to make sure our timestamps and any pane / item changes are replicated + workspace.read_with(&cx, |workspace, cx| workspace.serialize_workspace(cx)) } }) .detach(); diff --git a/crates/zed/src/zed.rs b/crates/zed/src/zed.rs index 0a25cfb66f5ac142068f1c5562791a2eecbcd44b..d86e449ff2307a9843026f1e1366fe1054556cae 100644 --- a/crates/zed/src/zed.rs +++ b/crates/zed/src/zed.rs @@ -377,7 +377,7 @@ fn quit(_: &Quit, cx: &mut gpui::MutableAppContext) { } fn about(_: &mut Workspace, _: &About, cx: &mut gpui::ViewContext) { - let app_name = cx.global::().name(); + let app_name = cx.global::().display_name(); let version = env!("CARGO_PKG_VERSION"); cx.prompt( gpui::PromptLevel::Info, From a1f273278b758bd4837eafd2517042751b7fc654 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:03:46 -0800 Subject: [PATCH 81/86] Added user notifications --- crates/auto_update/src/update_notification.rs | 4 +- crates/collab_ui/src/contact_notification.rs | 2 +- crates/theme/src/theme.rs | 8 + crates/workspace/src/notifications.rs | 280 ++++++++++++++++++ crates/workspace/src/workspace.rs | 116 ++++---- styles/src/styleTree/app.ts | 2 + .../styleTree/simpleMessageNotification.ts | 31 ++ 7 files changed, 375 insertions(+), 68 deletions(-) create mode 100644 crates/workspace/src/notifications.rs create mode 100644 styles/src/styleTree/simpleMessageNotification.ts diff --git a/crates/auto_update/src/update_notification.rs b/crates/auto_update/src/update_notification.rs index 5fbdf174227cb7f1a1bc678107a0cb49e8791b92..d6f94c708d24dad6cccb6d918ac6074043567906 100644 --- a/crates/auto_update/src/update_notification.rs +++ b/crates/auto_update/src/update_notification.rs @@ -7,7 +7,7 @@ use gpui::{ use menu::Cancel; use settings::Settings; use util::channel::ReleaseChannel; -use workspace::Notification; +use 
workspace::notifications::Notification; pub struct UpdateNotification { version: AppVersion, @@ -28,7 +28,7 @@ impl View for UpdateNotification { fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> gpui::ElementBox { let theme = cx.global::().theme.clone(); - let theme = &theme.update_notification; + let theme = &theme.simple_message_notification; let app_name = cx.global::().display_name(); diff --git a/crates/collab_ui/src/contact_notification.rs b/crates/collab_ui/src/contact_notification.rs index f543a0144610f5fc1f64d568720a3bb19f70bed0..6f0cfc68c76569aaf94abe155a1df43abd57670f 100644 --- a/crates/collab_ui/src/contact_notification.rs +++ b/crates/collab_ui/src/contact_notification.rs @@ -6,7 +6,7 @@ use gpui::{ elements::*, impl_internal_actions, Entity, ModelHandle, MutableAppContext, RenderContext, View, ViewContext, }; -use workspace::Notification; +use workspace::notifications::Notification; impl_internal_actions!(contact_notifications, [Dismiss, RespondToContactRequest]); diff --git a/crates/theme/src/theme.rs b/crates/theme/src/theme.rs index 8d2a2df18efd4821b0150eeaa6097b375248dfc2..bf6cb57adb3ce6378fed2bc5db3f4ed2d8b22962 100644 --- a/crates/theme/src/theme.rs +++ b/crates/theme/src/theme.rs @@ -31,6 +31,7 @@ pub struct Theme { pub shared_screen: ContainerStyle, pub contact_notification: ContactNotification, pub update_notification: UpdateNotification, + pub simple_message_notification: MessageNotification, pub project_shared_notification: ProjectSharedNotification, pub incoming_call_notification: IncomingCallNotification, pub tooltip: TooltipStyle, @@ -478,6 +479,13 @@ pub struct UpdateNotification { pub dismiss_button: Interactive, } +#[derive(Deserialize, Default)] +pub struct MessageNotification { + pub message: ContainedText, + pub action_message: Interactive, + pub dismiss_button: Interactive, +} + #[derive(Deserialize, Default)] pub struct ProjectSharedNotification { pub window_height: f32, diff --git 
a/crates/workspace/src/notifications.rs b/crates/workspace/src/notifications.rs new file mode 100644 index 0000000000000000000000000000000000000000..91656727d0efc6f161b24490f29eb99c326a8d94 --- /dev/null +++ b/crates/workspace/src/notifications.rs @@ -0,0 +1,280 @@ +use std::{any::TypeId, ops::DerefMut}; + +use collections::HashSet; +use gpui::{AnyViewHandle, Entity, MutableAppContext, View, ViewContext, ViewHandle}; + +use crate::Workspace; + +pub fn init(cx: &mut MutableAppContext) { + cx.set_global(NotificationTracker::new()); + simple_message_notification::init(cx); +} + +pub trait Notification: View { + fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; +} + +pub trait NotificationHandle { + fn id(&self) -> usize; + fn to_any(&self) -> AnyViewHandle; +} + +impl NotificationHandle for ViewHandle { + fn id(&self) -> usize { + self.id() + } + + fn to_any(&self) -> AnyViewHandle { + self.into() + } +} + +impl From<&dyn NotificationHandle> for AnyViewHandle { + fn from(val: &dyn NotificationHandle) -> Self { + val.to_any() + } +} + +struct NotificationTracker { + notifications_sent: HashSet, +} + +impl std::ops::Deref for NotificationTracker { + type Target = HashSet; + + fn deref(&self) -> &Self::Target { + &self.notifications_sent + } +} + +impl DerefMut for NotificationTracker { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.notifications_sent + } +} + +impl NotificationTracker { + fn new() -> Self { + Self { + notifications_sent: HashSet::default(), + } + } +} + +impl Workspace { + pub fn show_notification_once( + &mut self, + id: usize, + cx: &mut ViewContext, + build_notification: impl FnOnce(&mut ViewContext) -> ViewHandle, + ) { + if !cx + .global::() + .contains(&TypeId::of::()) + { + cx.update_global::(|tracker, _| { + tracker.insert(TypeId::of::()) + }); + + self.show_notification::(id, cx, build_notification) + } + } + + pub fn show_notification( + &mut self, + id: usize, + cx: &mut ViewContext, + build_notification: 
impl FnOnce(&mut ViewContext) -> ViewHandle, + ) { + let type_id = TypeId::of::(); + if self + .notifications + .iter() + .all(|(existing_type_id, existing_id, _)| { + (*existing_type_id, *existing_id) != (type_id, id) + }) + { + let notification = build_notification(cx); + cx.subscribe(¬ification, move |this, handle, event, cx| { + if handle.read(cx).should_dismiss_notification_on_event(event) { + this.dismiss_notification(type_id, id, cx); + } + }) + .detach(); + self.notifications + .push((type_id, id, Box::new(notification))); + cx.notify(); + } + } + + fn dismiss_notification(&mut self, type_id: TypeId, id: usize, cx: &mut ViewContext) { + self.notifications + .retain(|(existing_type_id, existing_id, _)| { + if (*existing_type_id, *existing_id) == (type_id, id) { + cx.notify(); + false + } else { + true + } + }); + } +} + +pub mod simple_message_notification { + use std::process::Command; + + use gpui::{ + actions, + elements::{Flex, MouseEventHandler, Padding, ParentElement, Svg, Text}, + impl_actions, Action, CursorStyle, Element, Entity, MouseButton, MutableAppContext, View, + ViewContext, + }; + use menu::Cancel; + use serde::Deserialize; + use settings::Settings; + + use crate::Workspace; + + use super::Notification; + + actions!(message_notifications, [CancelMessageNotification]); + + #[derive(Clone, Default, Deserialize, PartialEq)] + pub struct OsOpen(pub String); + + impl_actions!(message_notifications, [OsOpen]); + + pub fn init(cx: &mut MutableAppContext) { + cx.add_action(MessageNotification::dismiss); + cx.add_action( + |_workspace: &mut Workspace, open_action: &OsOpen, _cx: &mut ViewContext| { + #[cfg(target_os = "macos")] + { + let mut command = Command::new("open"); + command.arg(open_action.0.clone()); + + command.spawn().ok(); + } + }, + ) + } + + pub struct MessageNotification { + message: String, + click_action: Box, + click_message: String, + } + + pub enum MessageNotificationEvent { + Dismiss, + } + + impl Entity for MessageNotification { 
+ type Event = MessageNotificationEvent; + } + + impl MessageNotification { + pub fn new, A: Action, S2: AsRef>( + message: S1, + click_action: A, + click_message: S2, + ) -> Self { + Self { + message: message.as_ref().to_string(), + click_action: Box::new(click_action) as Box, + click_message: click_message.as_ref().to_string(), + } + } + + pub fn dismiss(&mut self, _: &CancelMessageNotification, cx: &mut ViewContext) { + cx.emit(MessageNotificationEvent::Dismiss); + } + } + + impl View for MessageNotification { + fn ui_name() -> &'static str { + "MessageNotification" + } + + fn render(&mut self, cx: &mut gpui::RenderContext<'_, Self>) -> gpui::ElementBox { + let theme = cx.global::().theme.clone(); + let theme = &theme.update_notification; + + enum MessageNotificationTag {} + + let click_action = self.click_action.boxed_clone(); + let click_message = self.click_message.clone(); + let message = self.message.clone(); + + MouseEventHandler::::new(0, cx, |state, cx| { + Flex::column() + .with_child( + Flex::row() + .with_child( + Text::new(message, theme.message.text.clone()) + .contained() + .with_style(theme.message.container) + .aligned() + .top() + .left() + .flex(1., true) + .boxed(), + ) + .with_child( + MouseEventHandler::::new(0, cx, |state, _| { + let style = theme.dismiss_button.style_for(state, false); + Svg::new("icons/x_mark_8.svg") + .with_color(style.color) + .constrained() + .with_width(style.icon_width) + .aligned() + .contained() + .with_style(style.container) + .constrained() + .with_width(style.button_width) + .with_height(style.button_width) + .boxed() + }) + .with_padding(Padding::uniform(5.)) + .on_click(MouseButton::Left, move |_, cx| { + cx.dispatch_action(CancelMessageNotification) + }) + .aligned() + .constrained() + .with_height( + cx.font_cache().line_height(theme.message.text.font_size), + ) + .aligned() + .top() + .flex_float() + .boxed(), + ) + .boxed(), + ) + .with_child({ + let style = theme.action_message.style_for(state, false); + 
+ Text::new(click_message, style.text.clone()) + .contained() + .with_style(style.container) + .boxed() + }) + .contained() + .boxed() + }) + .with_cursor_style(CursorStyle::PointingHand) + .on_click(MouseButton::Left, move |_, cx| { + cx.dispatch_any_action(click_action.boxed_clone()) + }) + .boxed() + } + } + + impl Notification for MessageNotification { + fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool { + match event { + MessageNotificationEvent::Dismiss => true, + } + } + } +} diff --git a/crates/workspace/src/workspace.rs b/crates/workspace/src/workspace.rs index 5fb804e66dca4870dfa708b70bd3382bdd1762ae..ed00e4f14d0d8e6e381fdafd19785244c20d3650 100644 --- a/crates/workspace/src/workspace.rs +++ b/crates/workspace/src/workspace.rs @@ -4,6 +4,7 @@ /// specific locations. pub mod dock; pub mod item; +pub mod notifications; pub mod pane; pub mod pane_group; mod persistence; @@ -41,7 +42,9 @@ use gpui::{ }; use item::{FollowableItem, FollowableItemHandle, Item, ItemHandle, ProjectItem}; use language::LanguageRegistry; + use log::{error, warn}; +use notifications::NotificationHandle; pub use pane::*; pub use pane_group::*; use persistence::{model::SerializedItem, DB}; @@ -61,7 +64,10 @@ use theme::{Theme, ThemeRegistry}; pub use toolbar::{ToolbarItemLocation, ToolbarItemView}; use util::ResultExt; -use crate::persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}; +use crate::{ + notifications::simple_message_notification::{MessageNotification, OsOpen}, + persistence::model::{SerializedPane, SerializedPaneGroup, SerializedWorkspace}, +}; #[derive(Clone, PartialEq)] pub struct RemoveWorktreeFromProject(pub WorktreeId); @@ -151,6 +157,7 @@ impl_actions!(workspace, [ActivatePane]); pub fn init(app_state: Arc, cx: &mut MutableAppContext) { pane::init(cx); dock::init(cx); + notifications::init(cx); cx.add_global_action(open); cx.add_global_action({ @@ -453,31 +460,6 @@ impl DelayedDebouncedEditAction { } } -pub trait 
Notification: View { - fn should_dismiss_notification_on_event(&self, event: &::Event) -> bool; -} - -pub trait NotificationHandle { - fn id(&self) -> usize; - fn to_any(&self) -> AnyViewHandle; -} - -impl NotificationHandle for ViewHandle { - fn id(&self) -> usize { - self.id() - } - - fn to_any(&self) -> AnyViewHandle { - self.into() - } -} - -impl From<&dyn NotificationHandle> for AnyViewHandle { - fn from(val: &dyn NotificationHandle) -> Self { - val.to_any() - } -} - #[derive(Default)] struct LeaderState { followers: HashSet, @@ -732,6 +714,8 @@ impl Workspace { workspace }); + notify_if_database_failed(&workspace, &mut cx); + // Call open path for each of the project paths // (this will bring them to the front if they were in the serialized workspace) debug_assert!(paths_to_open.len() == project_paths.len()); @@ -1115,45 +1099,6 @@ impl Workspace { } } - pub fn show_notification( - &mut self, - id: usize, - cx: &mut ViewContext, - build_notification: impl FnOnce(&mut ViewContext) -> ViewHandle, - ) { - let type_id = TypeId::of::(); - if self - .notifications - .iter() - .all(|(existing_type_id, existing_id, _)| { - (*existing_type_id, *existing_id) != (type_id, id) - }) - { - let notification = build_notification(cx); - cx.subscribe(¬ification, move |this, handle, event, cx| { - if handle.read(cx).should_dismiss_notification_on_event(event) { - this.dismiss_notification(type_id, id, cx); - } - }) - .detach(); - self.notifications - .push((type_id, id, Box::new(notification))); - cx.notify(); - } - } - - fn dismiss_notification(&mut self, type_id: TypeId, id: usize, cx: &mut ViewContext) { - self.notifications - .retain(|(existing_type_id, existing_id, _)| { - if (*existing_type_id, *existing_id) == (type_id, id) { - cx.notify(); - false - } else { - true - } - }); - } - pub fn items<'a>( &'a self, cx: &'a AppContext, @@ -2436,6 +2381,47 @@ impl Workspace { } } +fn notify_if_database_failed(workspace: &ViewHandle, cx: &mut AsyncAppContext) { + if 
(*db::ALL_FILE_DB_FAILED).load(std::sync::atomic::Ordering::Acquire) { + workspace.update(cx, |workspace, cx| { + workspace.show_notification_once(0, cx, |cx| { + cx.add_view(|_| { + MessageNotification::new( + indoc::indoc! {" + Failed to load any database file :( + "}, + OsOpen("https://github.com/zed-industries/feedback/issues/new?assignees=&labels=defect%2Ctriage&template=2_bug_report.yml".to_string()), + "Click to let us know about this error" + ) + }) + }); + }); + } else { + let backup_path = (*db::BACKUP_DB_PATH).read(); + if let Some(backup_path) = &*backup_path { + workspace.update(cx, |workspace, cx| { + workspace.show_notification_once(0, cx, |cx| { + cx.add_view(|_| { + let backup_path = backup_path.to_string_lossy(); + MessageNotification::new( + format!( + indoc::indoc! {" + Database file was corrupted :( + Old database backed up to: + {} + "}, + backup_path + ), + OsOpen(backup_path.to_string()), + "Click to show old database in finder", + ) + }) + }); + }); + } + } +} + impl Entity for Workspace { type Event = Event; } diff --git a/styles/src/styleTree/app.ts b/styles/src/styleTree/app.ts index bd3d1571682403a701e52cd5a5457f0342c5c157..267d83050667ccb130a8f0c4b20cf37574aaf2d7 100644 --- a/styles/src/styleTree/app.ts +++ b/styles/src/styleTree/app.ts @@ -12,6 +12,7 @@ import sharedScreen from "./sharedScreen"; import projectDiagnostics from "./projectDiagnostics"; import contactNotification from "./contactNotification"; import updateNotification from "./updateNotification"; +import simpleMessageNotification from "./simpleMessageNotification"; import projectSharedNotification from "./projectSharedNotification"; import tooltip from "./tooltip"; import terminal from "./terminal"; @@ -47,6 +48,7 @@ export default function app(colorScheme: ColorScheme): Object { }, }, updateNotification: updateNotification(colorScheme), + simpleMessageNotification: simpleMessageNotification(colorScheme), tooltip: tooltip(colorScheme), terminal: terminal(colorScheme), 
colorScheme: { diff --git a/styles/src/styleTree/simpleMessageNotification.ts b/styles/src/styleTree/simpleMessageNotification.ts new file mode 100644 index 0000000000000000000000000000000000000000..76ff5e1ca5f3ecd30498b59f7035899a61a9d226 --- /dev/null +++ b/styles/src/styleTree/simpleMessageNotification.ts @@ -0,0 +1,31 @@ +import { ColorScheme } from "../themes/common/colorScheme"; +import { foreground, text } from "./components"; + +const headerPadding = 8; + +export default function simpleMessageNotification(colorScheme: ColorScheme): Object { + let layer = colorScheme.middle; + return { + message: { + ...text(layer, "sans", { size: "md" }), + margin: { left: headerPadding, right: headerPadding }, + }, + actionMessage: { + ...text(layer, "sans", { size: "md" }), + margin: { left: headerPadding, top: 6, bottom: 6 }, + hover: { + color: foreground(layer, "hovered"), + }, + }, + dismissButton: { + color: foreground(layer), + iconWidth: 8, + iconHeight: 8, + buttonWidth: 8, + buttonHeight: 8, + hover: { + color: foreground(layer, "hovered"), + }, + }, + }; +} From 80e035cc2cc015ac2d95adffa0857e1fce4de123 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:12:07 -0800 Subject: [PATCH 82/86] Fixed bad rebase --- crates/zed/src/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index c95b7c4071e3458654dfdaca5b99b06bd5de7e0b..97a19b6d86f2aac54dc2520c22ed3fbff90f06b9 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -204,10 +204,10 @@ fn main() { } fn init_paths() { - std::fs::create_dir_all(&*zed::paths::CONFIG_DIR).expect("could not create config path"); - std::fs::create_dir_all(&*zed::paths::LANGUAGES_DIR).expect("could not create languages path"); - std::fs::create_dir_all(&*zed::paths::DB_DIR).expect("could not create database path"); - std::fs::create_dir_all(&*zed::paths::LOGS_DIR).expect("could not create logs path"); + 
std::fs::create_dir_all(&*util::paths::CONFIG_DIR).expect("could not create config path"); + std::fs::create_dir_all(&*util::paths::LANGUAGES_DIR).expect("could not create languages path"); + std::fs::create_dir_all(&*util::paths::DB_DIR).expect("could not create database path"); + std::fs::create_dir_all(&*util::paths::LOGS_DIR).expect("could not create logs path"); } fn init_logger() { From 4288f1087355ace3f071f6822404db56a24d111c Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:13:02 -0800 Subject: [PATCH 83/86] And library change --- crates/journal/src/journal.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/journal/src/journal.rs b/crates/journal/src/journal.rs index 3cde8e504e92090ffc8f4c4ae94605a47f37fe46..ef1dbdc15cb69dd1d9abe9151b996a57d0d007af 100644 --- a/crates/journal/src/journal.rs +++ b/crates/journal/src/journal.rs @@ -115,7 +115,7 @@ mod tests { #[test] fn test_heading_entry_defaults_to_hour_12() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &None); let expected_heading_entry = "# 3:00 PM"; @@ -124,7 +124,7 @@ mod tests { #[test] fn test_heading_entry_is_hour_12() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &Some(HourFormat::Hour12)); let expected_heading_entry = "# 3:00 PM"; @@ -133,7 +133,7 @@ mod tests { #[test] fn test_heading_entry_is_hour_24() { - let naive_time = NaiveTime::from_hms_milli(15, 0, 0, 0); + let naive_time = NaiveTime::from_hms_milli_opt(15, 0, 0, 0).unwrap(); let actual_heading_entry = heading_entry(naive_time, &Some(HourFormat::Hour24)); let expected_heading_entry = "# 15:00"; From d609237c32ea310b08c9971a223e1014747d1f8e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:26:37 
-0800 Subject: [PATCH 84/86] Found db parallelism problem :( --- crates/db/src/db.rs | 165 ++++++++++++++++++++++++++++---------------- 1 file changed, 107 insertions(+), 58 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index c146336132361947469d04453134e245e72348aa..9712f2e375b980b19ed1c3c4ec08541aecdf6115 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -16,7 +16,7 @@ pub use util::paths::DB_DIR; use sqlez::domain::Migrator; use sqlez::thread_safe_connection::ThreadSafeConnection; use sqlez_macros::sql; -use std::fs::{create_dir_all, remove_dir_all}; +use std::fs::create_dir_all; use std::path::{Path, PathBuf}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{SystemTime, UNIX_EPOCH}; @@ -40,7 +40,7 @@ const DB_FILE_NAME: &'static str = "db.sqlite"; lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); - static ref DB_WIPED: RwLock = RwLock::new(false); + // static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } @@ -49,21 +49,21 @@ lazy_static::lazy_static! { /// This will retry a couple times if there are failures. If opening fails once, the db directory /// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created. /// In either case, static variables are set so that the user can be notified. 
-pub async fn open_db(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { +pub async fn open_db(db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection { let release_channel_name = release_channel.dev_name(); let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); - // If WIPE_DB, delete 0-{channel} - if release_channel == &ReleaseChannel::Dev - && wipe_db - && !*DB_WIPED.read() - { - let mut db_wiped = DB_WIPED.write(); - if !*db_wiped { - remove_dir_all(&main_db_dir).ok(); - *db_wiped = true; - } - } + // // If WIPE_DB, delete 0-{channel} + // if release_channel == &ReleaseChannel::Dev + // && wipe_db + // && !*DB_WIPED.read() + // { + // let mut db_wiped = DB_WIPED.write(); + // if !*db_wiped { + // remove_dir_all(&main_db_dir).ok(); + // *db_wiped = true; + // } + // } let connection = async_iife!({ // Note: This still has a race condition where 1 set of migrations succeeds @@ -205,7 +205,7 @@ macro_rules! define_connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! { - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; (pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => { @@ -236,67 +236,66 @@ macro_rules! define_connection { #[cfg(not(any(test, feature = "test-support")))] $crate::lazy_static::lazy_static! 
{ - pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); + pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(&$crate::DB_DIR, &$crate::RELEASE_CHANNEL))); } }; } #[cfg(test)] mod tests { - use std::{thread, fs}; + use std::{fs, thread}; use sqlez::{domain::Domain, connection::Connection}; use sqlez_macros::sql; use tempdir::TempDir; - use util::channel::ReleaseChannel; use crate::{open_db, DB_FILE_NAME}; - // Test that wipe_db exists and works and gives a new db - #[gpui::test] - async fn test_wipe_db() { - enum TestDB {} + // // Test that wipe_db exists and works and gives a new db + // #[gpui::test] + // async fn test_wipe_db() { + // enum TestDB {} - impl Domain for TestDB { - fn name() -> &'static str { - "db_tests" - } + // impl Domain for TestDB { + // fn name() -> &'static str { + // "db_tests" + // } - fn migrations() -> &'static [&'static str] { - &[sql!( - CREATE TABLE test(value); - )] - } - } + // fn migrations() -> &'static [&'static str] { + // &[sql!( + // CREATE TABLE test(value); + // )] + // } + // } - let tempdir = TempDir::new("DbTests").unwrap(); + // let tempdir = TempDir::new("DbTests").unwrap(); - // Create a db and insert a marker value - let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - test_db.write(|connection| - connection.exec(sql!( - INSERT INTO test(value) VALUES (10) - )).unwrap()().unwrap() - ).await; - drop(test_db); + // // Create a db and insert a marker value + // let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + // test_db.write(|connection| + // connection.exec(sql!( + // INSERT INTO test(value) VALUES (10) + // )).unwrap()().unwrap() + // ).await; + // drop(test_db); - // Opening db with wipe clears once and removes the marker value - let mut guards = vec![]; - for _ in 0..5 { - let path = tempdir.path().to_path_buf(); - let guard 
= thread::spawn(move || smol::block_on(async { - let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; + // // Opening db with wipe clears once and removes the marker value + // let mut guards = vec![]; + // for _ in 0..5 { + // let path = tempdir.path().to_path_buf(); + // let guard = thread::spawn(move || smol::block_on(async { + // let test_db = open_db::(true, &path, &ReleaseChannel::Dev).await; - assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) - })); + // assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) + // })); - guards.push(guard); - } + // guards.push(guard); + // } - for guard in guards { - guard.join().unwrap(); - } - } + // for guard in guards { + // guard.join().unwrap(); + // } + // } // Test bad migration panics #[gpui::test] @@ -317,7 +316,7 @@ mod tests { } let tempdir = TempDir::new("DbTests").unwrap(); - let _bad_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let _bad_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; } /// Test that DB exists but corrupted (causing recreate) @@ -349,11 +348,11 @@ mod tests { let tempdir = TempDir::new("DbTests").unwrap(); { - let corrupt_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(corrupt_db.persistent()); } - let good_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + let good_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); let mut corrupted_backup_dir = fs::read_dir( @@ -369,4 +368,54 @@ mod tests { let backup = Connection::open_file(&corrupted_backup_dir.to_string_lossy()); assert!(backup.select_row::("SELECT * FROM test").unwrap()().unwrap().is_none()); } + + /// Test that 
DB exists but corrupted (causing recreate) + #[gpui::test] + async fn test_simultaneous_db_corruption() { + enum CorruptedDB {} + + impl Domain for CorruptedDB { + fn name() -> &'static str { + "db_tests" + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test(value);)] + } + } + + enum GoodDB {} + + impl Domain for GoodDB { + fn name() -> &'static str { + "db_tests" //Notice same name + } + + fn migrations() -> &'static [&'static str] { + &[sql!(CREATE TABLE test2(value);)] //But different migration + } + } + + let tempdir = TempDir::new("DbTests").unwrap(); + { + let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; + assert!(corrupt_db.persistent()); + } + + let mut guards = vec![]; + for _ in 0..10 { + let tmp_path = tempdir.path().to_path_buf(); + let guard = thread::spawn(move || { + let good_db = smol::block_on(open_db::(tmp_path.as_path(), &util::channel::ReleaseChannel::Dev)); + assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); + }); + + guards.push(guard); + + } + + for guard in guards.into_iter() { + assert!(guard.join().is_ok()); + } + } } From 1ce08631580d8897de92a1357342284159e2b46e Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:27:45 -0800 Subject: [PATCH 85/86] Removed old code --- crates/db/src/db.rs | 59 --------------------------------------------- 1 file changed, 59 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 9712f2e375b980b19ed1c3c4ec08541aecdf6115..878d2430e211377032c45cda55e482afe2e1c348 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -40,7 +40,6 @@ const DB_FILE_NAME: &'static str = "db.sqlite"; lazy_static::lazy_static! 
{ static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); - // static ref DB_WIPED: RwLock = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock> = RwLock::new(None); pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } @@ -53,18 +52,6 @@ pub async fn open_db(db_dir: &Path, release_channel: &Rel let release_channel_name = release_channel.dev_name(); let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel_name))); - // // If WIPE_DB, delete 0-{channel} - // if release_channel == &ReleaseChannel::Dev - // && wipe_db - // && !*DB_WIPED.read() - // { - // let mut db_wiped = DB_WIPED.write(); - // if !*db_wiped { - // remove_dir_all(&main_db_dir).ok(); - // *db_wiped = true; - // } - // } - let connection = async_iife!({ // Note: This still has a race condition where 1 set of migrations succeeds // (e.g. (Workspace, Editor)) and another fails (e.g. (Workspace, Terminal)) @@ -250,52 +237,6 @@ mod tests { use tempdir::TempDir; use crate::{open_db, DB_FILE_NAME}; - - // // Test that wipe_db exists and works and gives a new db - // #[gpui::test] - // async fn test_wipe_db() { - // enum TestDB {} - - // impl Domain for TestDB { - // fn name() -> &'static str { - // "db_tests" - // } - - // fn migrations() -> &'static [&'static str] { - // &[sql!( - // CREATE TABLE test(value); - // )] - // } - // } - - // let tempdir = TempDir::new("DbTests").unwrap(); - - // // Create a db and insert a marker value - // let test_db = open_db::(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await; - // test_db.write(|connection| - // connection.exec(sql!( - // INSERT INTO test(value) VALUES (10) - // )).unwrap()().unwrap() - // ).await; - // drop(test_db); - - // // Opening db with wipe clears once and removes the marker value - // let mut guards = vec![]; - // for _ in 0..5 { - // let path = tempdir.path().to_path_buf(); - // let guard = thread::spawn(move || smol::block_on(async { - // let test_db = open_db::(true, &path, 
&ReleaseChannel::Dev).await; - - // assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none()) - // })); - - // guards.push(guard); - // } - - // for guard in guards { - // guard.join().unwrap(); - // } - // } // Test bad migration panics #[gpui::test] From 55eb0a37424e4e756f10b25a9fbae4f33f4fa638 Mon Sep 17 00:00:00 2001 From: Mikayla Maki Date: Sat, 3 Dec 2022 16:46:35 -0800 Subject: [PATCH 86/86] Fixed and error message and properly initialized the DB --- crates/db/src/db.rs | 9 ++++++--- crates/sqlez/src/thread_safe_connection.rs | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 878d2430e211377032c45cda55e482afe2e1c348..704ac4049df5790b16e6d41b87142f1b619be6e2 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -24,14 +24,14 @@ use util::{async_iife, ResultExt}; use util::channel::ReleaseChannel; const CONNECTION_INITIALIZE_QUERY: &'static str = sql!( - PRAGMA synchronous=NORMAL; - PRAGMA busy_timeout=1; PRAGMA foreign_keys=TRUE; - PRAGMA case_sensitive_like=TRUE; ); const DB_INITIALIZE_QUERY: &'static str = sql!( PRAGMA journal_mode=WAL; + PRAGMA busy_timeout=1; + PRAGMA case_sensitive_like=TRUE; + PRAGMA synchronous=NORMAL; ); const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB"; @@ -293,6 +293,7 @@ mod tests { assert!(corrupt_db.persistent()); } + let good_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(good_db.select_row::("SELECT * FROM test2").unwrap()().unwrap().is_none()); @@ -339,10 +340,12 @@ mod tests { let tempdir = TempDir::new("DbTests").unwrap(); { + // Setup the bad database let corrupt_db = open_db::(tempdir.path(), &util::channel::ReleaseChannel::Dev).await; assert!(corrupt_db.persistent()); } + // Try to connect to it a bunch of times at once let mut guards = vec![]; for _ in 0..10 { let tmp_path = tempdir.path().to_path_buf(); diff --git a/crates/sqlez/src/thread_safe_connection.rs 
b/crates/sqlez/src/thread_safe_connection.rs index 51d0707fd8b951d7fd9ff33fceadfed137c425a6..2c51b776edc73ca941d75a9fcb85ce0630acbe7d 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -168,7 +168,7 @@ impl ThreadSafeConnection { let result = connection.with_write(|connection| callback(connection)); sender.send(result).ok(); })); - reciever.map(|response| response.expect("Background writer thread unexpectedly closed")) + reciever.map(|response| response.expect("Write queue unexpectedly closed")) } pub(crate) fn create_connection(