Rebase fix + Started writing the real SQL we're going to need

Created by Mikayla Maki

Change summary

crates/db/Cargo.toml                      |  3 
crates/db/README.md                       |  5 +
crates/db/examples/serialize_workspace.rs | 22 ++++++
crates/db/src/db.rs                       | 56 ++++++++++-----
crates/db/src/migrations.rs               |  3 
crates/db/src/pane.rs                     |  7 +
crates/db/src/workspace.rs                | 90 +++++++++---------------
test.rs                                   |  0 
8 files changed, 107 insertions(+), 79 deletions(-)

Detailed changes

crates/db/Cargo.toml 🔗

@@ -18,8 +18,9 @@ async-trait = "0.1"
 lazy_static = "1.4.0"
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
-rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
+rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] }
 rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
+>>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need)

(NOTE from review: the `>>>>>>>` line added above is a leftover merge-conflict marker committed into `crates/db/Cargo.toml`. It will make the manifest unparseable by `cargo` and should be removed in a follow-up commit.)
 serde = { workspace = true }
 serde_rusqlite = "0.31.0"
 

crates/db/README.md 🔗

@@ -0,0 +1,5 @@
+# Building Queries
+
+First, craft your test data. The examples folder shows a template for building a test DB, and can be run with `cargo run --example [your-example]`.
+
+To actually use and test your queries, import the generated DB file into https://sqliteonline.com/

crates/db/examples/serialize_workspace.rs 🔗

@@ -0,0 +1,22 @@
+use std::{fs::File, path::Path};
+
+const TEST_FILE: &'static str = "test-db.db";
+
+fn main() -> anyhow::Result<()> {
+    let db = db::Db::open_in_memory();
+    if db.real().is_none() {
+        return Err(anyhow::anyhow!("Migrations failed"));
+    }
+    let file = Path::new(TEST_FILE);
+
+    let f = File::create(file)?;
+    drop(f);
+
+    db.write_kvp("test", "1")?;
+    db.write_kvp("test", "2")?;
+    db.write_to(file).ok();
+
+    println!("Wrote database!");
+
+    Ok(())
+}

crates/db/src/db.rs 🔗

@@ -11,7 +11,7 @@ use std::sync::Arc;
 use anyhow::Result;
 use log::error;
 use parking_lot::Mutex;
-use rusqlite::Connection;
+use rusqlite::{backup, Connection};
 
 use migrations::MIGRATIONS;
 pub use workspace::*;
@@ -54,8 +54,29 @@ impl Db {
             })
     }
 
+    fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
+        MIGRATIONS.to_latest(&mut conn)?;
+
+        conn.pragma_update(None, "journal_mode", "WAL")?;
+        conn.pragma_update(None, "synchronous", "NORMAL")?;
+        conn.pragma_update(None, "foreign_keys", true)?;
+        conn.pragma_update(None, "case_sensitive_like", true)?;
+
+        Ok(Mutex::new(conn))
+    }
+
+    pub fn persisting(&self) -> bool {
+        self.real().and_then(|db| db.path.as_ref()).is_some()
+    }
+
+    pub fn real(&self) -> Option<&RealDb> {
+        match self {
+            Db::Real(db) => Some(&db),
+            _ => None,
+        }
+    }
+
     /// Open a in memory database for testing and as a fallback.
-    #[cfg(any(test, feature = "test-support"))]
     pub fn open_in_memory() -> Self {
         Connection::open_in_memory()
             .map_err(Into::into)
@@ -75,26 +96,21 @@ impl Db {
             })
     }
 
-    fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
-        MIGRATIONS.to_latest(&mut conn)?;
-
-        conn.pragma_update(None, "journal_mode", "WAL")?;
-        conn.pragma_update(None, "synchronous", "NORMAL")?;
-        conn.pragma_update(None, "foreign_keys", true)?;
-        conn.pragma_update(None, "case_sensitive_like", true)?;
-
-        Ok(Mutex::new(conn))
-    }
+    pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
+        self.real()
+            .map(|db| {
+                if db.path.is_some() {
+                    panic!("DB already exists");
+                }
 
-    pub fn persisting(&self) -> bool {
-        self.real().and_then(|db| db.path.as_ref()).is_some()
-    }
+                let lock = db.connection.lock();
+                let mut dst = Connection::open(dest)?;
+                let backup = backup::Backup::new(&lock, &mut dst)?;
+                backup.step(-1)?;
 
-    pub fn real(&self) -> Option<&RealDb> {
-        match self {
-            Db::Real(db) => Some(&db),
-            _ => None,
-        }
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
     }
 }
 

crates/db/src/migrations.rs 🔗

@@ -1,7 +1,7 @@
 use rusqlite_migration::{Migrations, M};
 
 // use crate::items::ITEMS_M_1;
-use crate::kvp::KVP_M_1;
+use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
 
 // This must be ordered by development time! Only ever add new migrations to the end!!
 // Bad things will probably happen if you don't monotonically edit this vec!!!!
@@ -10,5 +10,6 @@ use crate::kvp::KVP_M_1;
 lazy_static::lazy_static! {
     pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
         M::up(KVP_M_1),
+        M::up(WORKSPACE_M_1)
     ]);
 }

crates/db/src/pane.rs 🔗

@@ -28,6 +28,13 @@ CREATE TABLE pane_items(
     index INTEGER,
     KEY (workspace_id, pane_id)
 ) STRICT;
+
+ALTER TABLE WORKSPACE
+ADD THESE COLS:
+center_group INTEGER NOT NULL,
+dock_pane INTEGER NOT NULL,
+--    FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
+--    FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
 ";
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]

crates/db/src/workspace.rs 🔗

@@ -1,7 +1,6 @@
-use std::{
-    path::{Path, PathBuf},
-    sync::Arc,
-};
+use anyhow::Result;
+
+use std::{path::Path, sync::Arc};
 
 use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
 
@@ -9,18 +8,15 @@ use super::Db;
 
 pub(crate) const WORKSPACE_M_1: &str = "
 CREATE TABLE workspaces(
-    workspace_id INTEGER PRIMARY KEY,
-    center_group INTEGER NOT NULL,
-    dock_pane INTEGER NOT NULL,
-    timestamp INTEGER,
-    FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
-    FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
+    workspace_id INTEGER PRIMARY KEY AUTOINCREMENT,
+    timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
+    dummy_data INTEGER
 ) STRICT;
 
 CREATE TABLE worktree_roots(
     worktree_root BLOB NOT NULL,
     workspace_id INTEGER NOT NULL,
-    FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id)
+    FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
 ) STRICT;
 ";
 
@@ -35,18 +31,19 @@ CREATE TABLE worktree_roots(
 //      Case 4: Starting Zed with multiple project folders
 //          > Zed ~/projects/Zed ~/projects/Zed.dev
 
-#[derive(Debug, PartialEq, Eq, Copy, Clone)]
-pub struct WorkspaceId(usize);
+#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
+pub struct WorkspaceId(i64);
 
 struct WorkspaceRow {
     pub center_group_id: PaneGroupId,
     pub dock_pane_id: PaneId,
 }
 
+#[derive(Default)]
 pub struct SerializedWorkspace {
     pub workspace_id: WorkspaceId,
-    pub center_group: SerializedPaneGroup,
-    pub dock_pane: Option<SerializedPane>,
+    // pub center_group: SerializedPaneGroup,
+    // pub dock_pane: Option<SerializedPane>,
 }
 
 impl Db {
@@ -58,28 +55,33 @@ impl Db {
     ) -> SerializedWorkspace {
         // Find the workspace id which is uniquely identified by this set of paths return it if found
         if let Some(workspace_id) = self.workspace_id(worktree_roots) {
-            let workspace_row = self.get_workspace_row(workspace_id);
-            let center_group = self.get_pane_group(workspace_row.center_group_id);
-            let dock_pane = self.get_pane(workspace_row.dock_pane_id);
+            // TODO
+            // let workspace_row = self.get_workspace_row(workspace_id);
+            // let center_group = self.get_pane_group(workspace_row.center_group_id);
+            // let dock_pane = self.get_pane(workspace_row.dock_pane_id);
 
             SerializedWorkspace {
                 workspace_id,
-                center_group,
-                dock_pane: Some(dock_pane),
+                // center_group,
+                // dock_pane: Some(dock_pane),
             }
         } else {
-            let workspace_id = self.get_next_workspace_id();
-
-            SerializedWorkspace {
-                workspace_id,
-                center_group: SerializedPaneGroup::empty_root(workspace_id),
-                dock_pane: None,
-            }
+            self.make_new_workspace()
         }
     }
 
-    fn get_next_workspace_id(&self) -> WorkspaceId {
-        unimplemented!()
+    fn make_new_workspace(&self) -> SerializedWorkspace {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+                match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) {
+                    Ok(_) => SerializedWorkspace {
+                        workspace_id: WorkspaceId(lock.last_insert_rowid()),
+                    },
+                    Err(_) => Default::default(),
+                }
+            })
+            .unwrap_or_default()
     }
 
     fn workspace_id(&self, worktree_roots: &[Arc<Path>]) -> Option<WorkspaceId> {
@@ -128,6 +130,7 @@ mod tests {
         PathBuf::from(path).into()
     }
 
+    #[test]
     fn test_detect_workspace_id() {
         let data = &[
             (WorkspaceId(1), vec![arc_path("/tmp")]),
@@ -160,6 +163,7 @@ mod tests {
         );
     }
 
+    #[test]
     fn test_tricky_overlapping_updates() {
         // DB state:
         // (/tree) -> ID: 1
@@ -202,31 +206,3 @@ mod tests {
         assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1));
     }
 }
-
-// [/tmp, /tmp2] -> ID1?
-// [/tmp] -> ID2?
-
-/*
-path | id
-/tmp   ID1
-/tmp   ID2
-/tmp2  ID1
-
-
-SELECT id
-FROM workspace_ids
-WHERE path IN (path1, path2)
-INTERSECT
-SELECT id
-FROM workspace_ids
-WHERE path = path_2
-... and etc. for each element in path array
-
-If contains row, yay! If not,
-SELECT max(id) FROm workspace_ids
-
-Select id WHERE path IN paths
-
-SELECT MAX(id)
-
-*/