Figured out a good schema for the pane serialization stuff

Created by Mikayla Maki

Change summary

Cargo.lock                                |   1 
crates/db/Cargo.toml                      |   1 
crates/db/examples/serialize-pane.rs      |  27 +++++
crates/db/examples/serialize_workspace.rs |  14 +-
crates/db/src/items.rs                    |   9 -
crates/db/src/kvp.rs                      |   4 
crates/db/src/migrations.rs               |   5 
crates/db/src/pane.rs                     | 113 +++++++++++++++++-------
crates/db/src/workspace.rs                |  47 ++++++---
9 files changed, 156 insertions(+), 65 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -1560,6 +1560,7 @@ dependencies = [
  "rusqlite_migration",
  "serde",
  "serde_rusqlite",
+ "settings",
  "tempdir",
 ]
 

crates/db/Cargo.toml 🔗

@@ -13,6 +13,7 @@ test-support = []
 [dependencies]
 collections = { path = "../collections" }
 gpui = { path = "../gpui" }
+settings = { path = "../settings" }
 anyhow = "1.0.57"
 async-trait = "0.1"
 lazy_static = "1.4.0"

crates/db/examples/serialize-pane.rs 🔗

@@ -0,0 +1,27 @@
+use std::{fs::File, path::Path, thread::sleep, time::Duration};
+
+const TEST_FILE: &'static str = "test-db.db";
+
+fn main() -> anyhow::Result<()> {
+    let db = db::Db::open_in_memory();
+    if db.real().is_none() {
+        return Err(anyhow::anyhow!("Migrations failed"));
+    }
+    let file = Path::new(TEST_FILE);
+
+    let f = File::create(file)?;
+    drop(f);
+
+    let workspace = db.make_new_workspace();
+
+    db.update_worktree_roots(&workspace.workspace_id, &["/tmp"]);
+
+    db.save_pane_splits(center_pane_group);
+    db.save_dock_pane();
+
+    db.write_to(file).ok();
+
+    println!("Wrote database!");
+
+    Ok(())
+}

crates/db/examples/serialize_workspace.rs 🔗

@@ -15,13 +15,13 @@ fn main() -> anyhow::Result<()> {
     db.write_kvp("test", "1")?;
     db.write_kvp("test-2", "2")?;
 
-    let workspace_1 = db.workspace_for_worktree_roots(&[]);
-    let workspace_2 = db.workspace_for_worktree_roots(&[]);
-    let workspace_3 = db.workspace_for_worktree_roots(&[]);
-    let workspace_4 = db.workspace_for_worktree_roots(&[]);
-    let workspace_5 = db.workspace_for_worktree_roots(&[]);
-    let workspace_6 = db.workspace_for_worktree_roots(&[]);
-    let workspace_7 = db.workspace_for_worktree_roots(&[]);
+    let workspace_1 = db.make_new_workspace();
+    let workspace_2 = db.make_new_workspace();
+    let workspace_3 = db.make_new_workspace();
+    let workspace_4 = db.make_new_workspace();
+    let workspace_5 = db.make_new_workspace();
+    let workspace_6 = db.make_new_workspace();
+    let workspace_7 = db.make_new_workspace();
 
     // Order scrambled + sleeps added because sqlite only has 1 second resolution on
     // their timestamps

crates/db/src/items.rs 🔗

@@ -46,15 +46,8 @@ use super::Db;
 // Items
 // Sidebars
 
+// Things I'm doing: finding about nullability for foreign keys
 pub(crate) const ITEMS_M_1: &str = "
-CREATE TABLE items(
-    workspace_id INTEGER,
-    item_id INTEGER,
-    kind TEXT NOT NULL,
-    PRIMARY KEY (workspace_id, item_id)
-    FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id)
-) STRICT;
-
 CREATE TABLE project_searches(
     workspace_id INTEGER,
     item_id INTEGER,

crates/db/src/kvp.rs 🔗

@@ -4,10 +4,14 @@ use rusqlite::OptionalExtension;
 use super::Db;
 
 pub(crate) const KVP_M_1: &str = "
+BEGIN TRANSACTION; 
+
 CREATE TABLE kv_store(
     key TEXT PRIMARY KEY,
     value TEXT NOT NULL
 ) STRICT;
+
+COMMIT;
 ";
 
 impl Db {

crates/db/src/migrations.rs 🔗

@@ -1,7 +1,7 @@
 use rusqlite_migration::{Migrations, M};
 
 // use crate::items::ITEMS_M_1;
-use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
+use crate::{kvp::KVP_M_1, pane::PANE_M_1, WORKSPACE_M_1};
 
 // This must be ordered by development time! Only ever add new migrations to the end!!
 // Bad things will probably happen if you don't monotonically edit this vec!!!!
@@ -10,6 +10,7 @@ use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
 lazy_static::lazy_static! {
     pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
         M::up(KVP_M_1),
-        M::up(WORKSPACE_M_1)
+        M::up(WORKSPACE_M_1),
+        M::up(PANE_M_1)
     ]);
 }

crates/db/src/pane.rs 🔗

@@ -1,41 +1,23 @@
 use gpui::Axis;
+use settings::DockAnchor;
 
 use crate::{items::ItemId, workspace::WorkspaceId};
 
 use super::Db;
 
-pub(crate) const PANE_M_1: &str = "
-CREATE TABLE pane_groups(
-    workspace_id INTEGER,
-    group_id INTEGER,
-    axis STRING NOT NULL, -- 'Vertical' / 'Horizontal'
-    PRIMARY KEY (workspace_id, group_id)
-) STRICT;
-
-CREATE TABLE pane_group_children(
-    workspace_id INTEGER,
-    group_id INTEGER,
-    child_pane_id INTEGER,  -- Nullable
-    child_group_id INTEGER, -- Nullable
-    index INTEGER,
-    PRIMARY KEY (workspace_id, group_id)
-) STRICT;
-
-CREATE TABLE pane_items(
-    workspace_id INTEGER,
-    pane_id INTEGER,
-    item_id INTEGER, -- Array
-    index INTEGER,
-    KEY (workspace_id, pane_id)
-) STRICT;
+// We have a many-branched, unbalanced tree with three types:
+// Pane Groups
+// Panes
+// Items
 
-ALTER TABLE WORKSPACE
-ADD THESE COLS:
-center_group INTEGER NOT NULL,
-dock_pane INTEGER NOT NULL,
---    FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
---    FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
-";
+// The root is always a Pane Group
+// Pane Groups can have 0 (or more) Panes and/or Pane Groups as children
+// Panes can have 0 or more items as children
+// Panes can be their own root
+// Items cannot have children
+// References pointing down is hard (SQL doesn't like arrays)
+// References pointing up is easy (1-1 item / parent relationship) but is harder to query
+//
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct PaneId {
@@ -93,6 +75,71 @@ pub struct SerializedPane {
     children: Vec<ItemId>,
 }
 
+pub(crate) const PANE_M_1: &str = "
+BEGIN TRANSACTION;
+
+CREATE TABLE dock_panes(
+    dock_pane_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    anchor_position TEXT NOT NULL, -- Enum: 'Bottom' / 'Right' / 'Expanded'
+    shown INTEGER NOT NULL, -- Boolean
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE pane_groups(
+    group_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    parent_group INTEGER, -- NULL indicates that this is a root node
+    axis TEXT NOT NULL, -- Enum:  'Vertical' / 'Horizontal'
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE grouped_panes(
+    pane_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    group_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE items(
+    item_id INTEGER PRIMARY KEY,
+    workspace_id INTEGER NOT NULL,
+    kind TEXT NOT NULL,
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE group_items(
+    workspace_id INTEGER NOT NULL,
+    pane_id INTEGER NOT NULL,
+    item_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    PRIMARY KEY (workspace_id, pane_id, item_id),
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(pane_id) REFERENCES grouped_panes(pane_id) ON DELETE CASCADE,
+    FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE
+) STRICT;
+
+CREATE TABLE dock_items(
+    workspace_id INTEGER NOT NULL,
+    dock_pane_id INTEGER NOT NULL,
+    item_id INTEGER NOT NULL,
+    idx INTEGER NOT NULL,
+    PRIMARY KEY (workspace_id, dock_pane_id, item_id),
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
+    FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE,
+    FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE
+) STRICT;
+
+COMMIT;
+";
+
+struct SerializedDockPane {
+    //Cols
+}
+
 impl Db {
     pub(crate) fn get_pane_group(&self, pane_group_id: PaneGroupId) -> SerializedPaneGroup {
         let axis = self.get_pane_group_axis(pane_group_id);
@@ -147,5 +194,7 @@ impl Db {
         unimplemented!();
     }
 
-    pub fn save_pane(&self, pane: SerializedPane) {}
+    fn save_dock_pane() {}
 }
+
+mod tests {}

crates/db/src/workspace.rs 🔗

@@ -13,10 +13,15 @@ use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
 
 use super::Db;
 
+// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging
+// you might want to update some of the parsing code as well, I've left the variations in but commented
+// out
 pub(crate) const WORKSPACE_M_1: &str = "
+BEGIN TRANSACTION; 
+
 CREATE TABLE workspaces(
-    workspace_id INTEGER PRIMARY KEY AUTOINCREMENT,
-    timestamp TEXT DEFAULT CURRENT_TIMESTAMP
+    workspace_id INTEGER PRIMARY KEY,
+    timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
 ) STRICT;
 
 CREATE TABLE worktree_roots(
@@ -25,16 +30,13 @@ CREATE TABLE worktree_roots(
     FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
     PRIMARY KEY(worktree_root, workspace_id)
 ) STRICT;
+
+COMMIT;
 ";
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
 pub struct WorkspaceId(i64);
 
-struct WorkspaceRow {
-    pub center_group_id: PaneGroupId,
-    pub dock_pane_id: PaneId,
-}
-
 #[derive(Default, Debug)]
 pub struct SerializedWorkspace {
     pub workspace_id: WorkspaceId,
@@ -72,7 +74,7 @@ impl Db {
         }
     }
 
-    fn make_new_workspace(&self) -> SerializedWorkspace {
+    pub fn make_new_workspace(&self) -> SerializedWorkspace {
         self.real()
             .map(|db| {
                 let lock = db.connection.lock();
@@ -140,6 +142,8 @@ impl Db {
 
                 for root in worktree_roots {
                     let path = root.as_ref().as_os_str().as_bytes();
+                    // If you need to debug this, here's the string parsing:
+                    // let path = root.as_ref().to_string_lossy().to_string();
 
                     tx.execute(
                         "INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)",
@@ -162,6 +166,7 @@ impl Db {
             match logic(&mut lock, worktree_roots, workspace_id) {
                 Ok(_) => {}
                 Err(err) => {
+                    dbg!(&err);
                     log::error!(
                         "Failed to update the worktree roots for {:?}, roots: {:?}, error: {}",
                         workspace_id,
@@ -222,6 +227,9 @@ impl Db {
                         .query_map([workspace_id.0], |row| {
                             let row = row.get::<_, Vec<u8>>(0)?;
                             Ok(PathBuf::from(OsStr::from_bytes(&row)).into())
+                            // If you need to debug this, here's the string parsing:
+                            // let row = row.get::<_, String>(0)?;
+                            // Ok(PathBuf::from(row).into())
                         })?
                         .collect::<Result<Vec<_>, rusqlite::Error>>()?;
                     result.push((workspace_id, roots))
@@ -260,6 +268,7 @@ where
     where
         P: AsRef<Path> + Debug,
     {
+        // Short circuit if we can
         if worktree_roots.len() == 0 {
             return Ok(None);
         }
@@ -297,7 +306,7 @@ where
         // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
         //  - We start with a join of this table on itself, generating every possible
         //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
-        //    *overlapping but incorrect* workspace IDs. For this small data set,
+        //    *overlapping but non-matching* workspace IDs. For this small data set,
         //    this would look like:
         //
         //    wt1.ID wt1.PATH | wt2.ID wt2.PATH
@@ -349,6 +358,8 @@ where
 
         for i in 0..worktree_roots.len() {
             let path = &worktree_roots[i].as_ref().as_os_str().as_bytes();
+            // If you need to debug this, here's the string parsing:
+            // let path = &worktree_roots[i].as_ref().to_string_lossy().to_string()
             stmt.raw_bind_parameter(i + 1, path)?
         }
         // No -1, because SQLite is 1 based
@@ -402,22 +413,26 @@ mod tests {
 
         assert_eq!(None, db.workspace_id::<String>(&[]));
 
-        db.make_new_workspace();
+        db.make_new_workspace(); //ID 1
+        db.make_new_workspace(); //ID 2
         db.update_worktree_roots(&WorkspaceId(1), &["/tmp", "/tmp2"]);
 
         // Sanity check
-        assert_eq!(Some(WorkspaceId(1)), db.workspace_id(&["/tmp", "/tmp2"]));
+        assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
 
         db.update_worktree_roots::<String>(&WorkspaceId(1), &[]);
 
-        // Make sure DB doesn't consider 'no worktrees' to be a query it can answer
-        assert_eq!(None, db.workspace_id::<String>(&[]));
+        // Make sure 'no worktrees' fails correctly. Returning [1, 2] from this
+        // call would be semantically correct (as those are the workspaces that
+        // don't have roots), but I'd prefer that this API either return exactly
+        // one workspace, or None otherwise
+        assert_eq!(db.workspace_id::<String>(&[]), None,);
 
-        assert_eq!(Some(WorkspaceId(1)), db.last_workspace_id());
+        assert_eq!(db.last_workspace_id(), Some(WorkspaceId(1)));
 
         assert_eq!(
-            &(WorkspaceId(1), vec![]),
-            db.recent_workspaces(1).get(0).unwrap()
+            db.recent_workspaces(2),
+            vec![(WorkspaceId(1), vec![]), (WorkspaceId(2), vec![]),],
         )
     }