Merge pull request #1773 from zed-industries/rusqlite

Created by Kay Simmons

Swap to sqlite for client persistence

Change summary

Cargo.lock                            |  59 +++++++
crates/auto_update/src/auto_update.rs |  15 
crates/client/src/client.rs           |   2 
crates/client/src/telemetry.rs        |  12 -
crates/db/Cargo.toml                  |   7 
crates/db/src/db.rs                   | 220 ++++++++++----------------
crates/db/src/items.rs                | 236 +++++++++++++++++++++++++++++
crates/db/src/kvp.rs                  |  82 ++++++++++
crates/db/src/migrations.rs           |  15 +
crates/zed/src/main.rs                |   8 
crates/zed/src/paths.rs               |   1 
crates/zed/src/zed.rs                 |   1 
12 files changed, 501 insertions(+), 157 deletions(-)

Detailed changes

Cargo.lock

@@ -1602,8 +1602,13 @@ dependencies = [
  "async-trait",
  "collections",
  "gpui",
+ "lazy_static",
+ "log",
  "parking_lot 0.11.2",
- "rocksdb",
+ "rusqlite",
+ "rusqlite_migration",
+ "serde",
+ "serde_rusqlite",
  "tempdir",
 ]
 
@@ -1926,6 +1931,12 @@ version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"
 
+[[package]]
+name = "fallible-streaming-iterator"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
+
 [[package]]
 name = "fastrand"
 version = "1.8.0"
@@ -3134,6 +3145,17 @@ dependencies = [
  "zstd-sys",
 ]
 
+[[package]]
+name = "libsqlite3-sys"
+version = "0.25.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35"
+dependencies = [
+ "cc",
+ "pkg-config",
+ "vcpkg",
+]
+
 [[package]]
 name = "libz-sys"
 version = "1.1.8"
@@ -4827,6 +4849,31 @@ dependencies = [
  "zeroize",
 ]
 
+[[package]]
+name = "rusqlite"
+version = "0.28.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a"
+dependencies = [
+ "bitflags",
+ "fallible-iterator",
+ "fallible-streaming-iterator",
+ "hashlink",
+ "libsqlite3-sys",
+ "serde_json",
+ "smallvec",
+]
+
+[[package]]
+name = "rusqlite_migration"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eda44233be97aea786691f9f6f7ef230bcf905061f4012e90f4f39e6dcf31163"
+dependencies = [
+ "log",
+ "rusqlite",
+]
+
 [[package]]
 name = "rust-embed"
 version = "6.4.1"
@@ -5222,6 +5269,16 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "serde_rusqlite"
+version = "0.31.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "538b51f10ee271375cbd9caa04fa6e3e50af431a21db97caae48da92a074244a"
+dependencies = [
+ "rusqlite",
+ "serde",
+]
+
 [[package]]
 name = "serde_urlencoded"
 version = "0.7.1"

crates/auto_update/src/auto_update.rs

@@ -40,7 +40,7 @@ pub struct AutoUpdater {
     current_version: AppVersion,
     http_client: Arc<dyn HttpClient>,
     pending_poll: Option<Task<()>>,
-    db: Arc<project::Db>,
+    db: project::Db,
     server_url: String,
 }
 
@@ -55,7 +55,7 @@ impl Entity for AutoUpdater {
 }
 
 pub fn init(
-    db: Arc<project::Db>,
+    db: project::Db,
     http_client: Arc<dyn HttpClient>,
     server_url: String,
     cx: &mut MutableAppContext,
@@ -116,7 +116,7 @@ impl AutoUpdater {
 
     fn new(
         current_version: AppVersion,
-        db: Arc<project::Db>,
+        db: project::Db,
         http_client: Arc<dyn HttpClient>,
         server_url: String,
     ) -> Self {
@@ -283,9 +283,9 @@ impl AutoUpdater {
         let db = self.db.clone();
         cx.background().spawn(async move {
             if should_show {
-                db.write([(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")])?;
+                db.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?;
             } else {
-                db.delete([(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)])?;
+                db.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?;
             }
             Ok(())
         })
@@ -293,8 +293,7 @@ impl AutoUpdater {
 
     fn should_show_update_notification(&self, cx: &AppContext) -> Task<Result<bool>> {
         let db = self.db.clone();
-        cx.background().spawn(async move {
-            Ok(db.read([(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)])?[0].is_some())
-        })
+        cx.background()
+            .spawn(async move { Ok(db.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?.is_some()) })
     }
 }
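
The updater above treats the presence of a key as a boolean flag. Below is a minimal sketch of that pattern with the gpui task plumbing stripped away; the key string and function names are placeholders, since the real constant's value is not shown in this diff, and `db::Db` stands in for the `project::Db` re-export used above.

    use anyhow::Result;
    use db::Db;

    // Placeholder key; the real SHOULD_SHOW_UPDATE_NOTIFICATION_KEY value is not shown here.
    const SOME_FLAG_KEY: &str = "some-flag";

    fn set_flag(db: &Db, on: bool) -> Result<()> {
        if on {
            // Presence of the key means "true"; the value itself is unused.
            db.write_kvp(SOME_FLAG_KEY, "")
        } else {
            db.delete_kvp(SOME_FLAG_KEY)
        }
    }

    fn flag_is_set(db: &Db) -> Result<bool> {
        Ok(db.read_kvp(SOME_FLAG_KEY)?.is_some())
    }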

crates/client/src/client.rs

@@ -1142,7 +1142,7 @@ impl Client {
         self.peer.respond_with_error(receipt, error)
     }
 
-    pub fn start_telemetry(&self, db: Arc<Db>) {
+    pub fn start_telemetry(&self, db: Db) {
         self.telemetry.start(db);
     }
 

crates/client/src/telemetry.rs

@@ -135,22 +135,16 @@ impl Telemetry {
         Some(self.state.lock().log_file.as_ref()?.path().to_path_buf())
     }
 
-    pub fn start(self: &Arc<Self>, db: Arc<Db>) {
+    pub fn start(self: &Arc<Self>, db: Db) {
         let this = self.clone();
         self.executor
             .spawn(
                 async move {
-                    let device_id = if let Some(device_id) = db
-                        .read(["device_id"])?
-                        .into_iter()
-                        .flatten()
-                        .next()
-                        .and_then(|bytes| String::from_utf8(bytes).ok())
-                    {
+                    let device_id = if let Ok(Some(device_id)) = db.read_kvp("device_id") {
                         device_id
                     } else {
                         let device_id = Uuid::new_v4().to_string();
-                        db.write([("device_id", device_id.as_bytes())])?;
+                        db.write_kvp("device_id", &device_id)?;
                         device_id
                     };
 

crates/db/Cargo.toml

@@ -14,8 +14,13 @@ test-support = []
 collections = { path = "../collections" }
 anyhow = "1.0.57"
 async-trait = "0.1"
+lazy_static = "1.4.0"
+log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 parking_lot = "0.11.1"
-rocksdb = "0.18"
+rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
+rusqlite_migration = "1.0.0"
+serde = { workspace = true }
+serde_rusqlite = "0.31.0"
 
 [dev-dependencies]
 gpui = { path = "../gpui", features = ["test-support"] }

crates/db/src/db.rs

@@ -1,161 +1,119 @@
-use anyhow::Result;
-use std::path::Path;
+mod kvp;
+mod migrations;
+
+use std::fs;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 
-pub struct Db(DbStore);
+use anyhow::Result;
+use log::error;
+use parking_lot::Mutex;
+use rusqlite::Connection;
+
+use migrations::MIGRATIONS;
 
-enum DbStore {
+#[derive(Clone)]
+pub enum Db {
+    Real(Arc<RealDb>),
     Null,
-    Real(rocksdb::DB),
+}
 
-    #[cfg(any(test, feature = "test-support"))]
-    Fake {
-        data: parking_lot::Mutex<collections::HashMap<Vec<u8>, Vec<u8>>>,
-    },
+pub struct RealDb {
+    connection: Mutex<Connection>,
+    path: Option<PathBuf>,
 }
 
 impl Db {
-    /// Open or create a database at the given file path.
-    pub fn open(path: &Path) -> Result<Arc<Self>> {
-        let db = rocksdb::DB::open_default(path)?;
-        Ok(Arc::new(Self(DbStore::Real(db))))
+    /// Open or create a database at the given directory path.
+    pub fn open(db_dir: &Path) -> Self {
+        // Use 0 for now. Will implement incrementing and clearing of old db files soon TM
+        let current_db_dir = db_dir.join(Path::new("0"));
+        fs::create_dir_all(&current_db_dir)
+            .expect("Should be able to create the database directory");
+        let db_path = current_db_dir.join(Path::new("db.sqlite"));
+
+        Connection::open(db_path)
+            .map_err(Into::into)
+            .and_then(|connection| Self::initialize(connection))
+            .map(|connection| {
+                Db::Real(Arc::new(RealDb {
+                    connection,
+                    path: Some(db_dir.to_path_buf()),
+                }))
+            })
+            .unwrap_or_else(|e| {
+                error!(
+                    "Connecting to file backed db failed. Reverting to null db. {}",
+                    e
+                );
+                Self::Null
+            })
     }
 
-    /// Open a null database that stores no data, for use as a fallback
-    /// when there is an error opening the real database.
-    pub fn null() -> Arc<Self> {
-        Arc::new(Self(DbStore::Null))
-    }
-
-    /// Open a fake database for testing.
+    /// Open an in-memory database for testing and as a fallback.
     #[cfg(any(test, feature = "test-support"))]
-    pub fn open_fake() -> Arc<Self> {
-        Arc::new(Self(DbStore::Fake {
-            data: Default::default(),
-        }))
+    pub fn open_in_memory() -> Self {
+        Connection::open_in_memory()
+            .map_err(Into::into)
+            .and_then(|connection| Self::initialize(connection))
+            .map(|connection| {
+                Db::Real(Arc::new(RealDb {
+                    connection,
+                    path: None,
+                }))
+            })
+            .unwrap_or_else(|e| {
+                error!(
+                    "Connecting to in memory db failed. Reverting to null db. {}",
+                    e
+                );
+                Self::Null
+            })
     }
 
-    pub fn read<K, I>(&self, keys: I) -> Result<Vec<Option<Vec<u8>>>>
-    where
-        K: AsRef<[u8]>,
-        I: IntoIterator<Item = K>,
-    {
-        match &self.0 {
-            DbStore::Real(db) => db
-                .multi_get(keys)
-                .into_iter()
-                .map(|e| e.map_err(Into::into))
-                .collect(),
-
-            DbStore::Null => Ok(keys.into_iter().map(|_| None).collect()),
-
-            #[cfg(any(test, feature = "test-support"))]
-            DbStore::Fake { data: db } => {
-                let db = db.lock();
-                Ok(keys
-                    .into_iter()
-                    .map(|key| db.get(key.as_ref()).cloned())
-                    .collect())
-            }
-        }
-    }
+    fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
+        MIGRATIONS.to_latest(&mut conn)?;
 
-    pub fn delete<K, I>(&self, keys: I) -> Result<()>
-    where
-        K: AsRef<[u8]>,
-        I: IntoIterator<Item = K>,
-    {
-        match &self.0 {
-            DbStore::Real(db) => {
-                let mut batch = rocksdb::WriteBatch::default();
-                for key in keys {
-                    batch.delete(key);
-                }
-                db.write(batch)?;
-            }
+        conn.pragma_update(None, "journal_mode", "WAL")?;
+        conn.pragma_update(None, "synchronous", "NORMAL")?;
+        conn.pragma_update(None, "foreign_keys", true)?;
+        conn.pragma_update(None, "case_sensitive_like", true)?;
 
-            DbStore::Null => {}
+        Ok(Mutex::new(conn))
+    }
 
-            #[cfg(any(test, feature = "test-support"))]
-            DbStore::Fake { data: db } => {
-                let mut db = db.lock();
-                for key in keys {
-                    db.remove(key.as_ref());
-                }
-            }
-        }
-        Ok(())
+    pub fn persisting(&self) -> bool {
+        self.real().and_then(|db| db.path.as_ref()).is_some()
     }
 
-    pub fn write<K, V, I>(&self, entries: I) -> Result<()>
-    where
-        K: AsRef<[u8]>,
-        V: AsRef<[u8]>,
-        I: IntoIterator<Item = (K, V)>,
-    {
-        match &self.0 {
-            DbStore::Real(db) => {
-                let mut batch = rocksdb::WriteBatch::default();
-                for (key, value) in entries {
-                    batch.put(key, value);
-                }
-                db.write(batch)?;
-            }
+    pub fn real(&self) -> Option<&RealDb> {
+        match self {
+            Db::Real(db) => Some(&db),
+            _ => None,
+        }
+    }
+}
 
-            DbStore::Null => {}
+impl Drop for Db {
+    fn drop(&mut self) {
+        match self {
+            Db::Real(real_db) => {
+                let lock = real_db.connection.lock();
 
-            #[cfg(any(test, feature = "test-support"))]
-            DbStore::Fake { data: db } => {
-                let mut db = db.lock();
-                for (key, value) in entries {
-                    db.insert(key.as_ref().into(), value.as_ref().into());
-                }
+                let _ = lock.pragma_update(None, "analysis_limit", "500");
+                let _ = lock.pragma_update(None, "optimize", "");
             }
+            Db::Null => {}
         }
-        Ok(())
     }
 }
 
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use tempdir::TempDir;
-
-    #[gpui::test]
-    fn test_db() {
-        let dir = TempDir::new("db-test").unwrap();
-        let fake_db = Db::open_fake();
-        let real_db = Db::open(&dir.path().join("test.db")).unwrap();
-
-        for db in [&real_db, &fake_db] {
-            assert_eq!(
-                db.read(["key-1", "key-2", "key-3"]).unwrap(),
-                &[None, None, None]
-            );
-
-            db.write([("key-1", "one"), ("key-3", "three")]).unwrap();
-            assert_eq!(
-                db.read(["key-1", "key-2", "key-3"]).unwrap(),
-                &[
-                    Some("one".as_bytes().to_vec()),
-                    None,
-                    Some("three".as_bytes().to_vec())
-                ]
-            );
-
-            db.delete(["key-3", "key-4"]).unwrap();
-            assert_eq!(
-                db.read(["key-1", "key-2", "key-3"]).unwrap(),
-                &[Some("one".as_bytes().to_vec()), None, None,]
-            );
-        }
-
-        drop(real_db);
+    use crate::migrations::MIGRATIONS;
 
-        let real_db = Db::open(&dir.path().join("test.db")).unwrap();
-        assert_eq!(
-            real_db.read(["key-1", "key-2", "key-3"]).unwrap(),
-            &[Some("one".as_bytes().to_vec()), None, None,]
-        );
+    #[test]
+    fn test_migrations() {
+        assert!(MIGRATIONS.validate().is_ok());
     }
 }
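
A minimal usage sketch (not part of this PR) of the new handle, relying on the key-value helpers from crates/db/src/kvp.rs further down; the `demo` function and the `use db::Db` path are assumptions.

    use std::path::Path;

    use db::Db; // re-exported as `project::Db` elsewhere in this PR

    fn demo(db_dir: &Path) {
        // `open` is now infallible: on a connection or migration error it logs
        // and falls back to `Db::Null`, which ignores writes and reads back nothing.
        let db = Db::open(db_dir);

        if !db.persisting() {
            // Either the in-memory variant or the Null fallback; nothing survives a restart.
            log::warn!("database is not persisting to disk");
        }

        // The KVP helpers are safe to call on every variant.
        db.write_kvp("device_id", "abc-123").ok();
        let _device_id = db.read_kvp("device_id").ok().flatten();
    }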

crates/db/src/items.rs

@@ -0,0 +1,236 @@
+use std::{ffi::OsStr, os::unix::prelude::OsStrExt, path::PathBuf, sync::Arc};
+
+use anyhow::Result;
+use rusqlite::{
+    named_params, params,
+    types::{FromSql, FromSqlError, FromSqlResult, ValueRef},
+};
+
+use super::Db;
+
+pub(crate) const ITEMS_M_1: &str = "
+CREATE TABLE items(
+    id INTEGER PRIMARY KEY,
+    kind TEXT
+) STRICT;
+CREATE TABLE item_path(
+    item_id INTEGER PRIMARY KEY,
+    path BLOB
+) STRICT;
+CREATE TABLE item_query(
+    item_id INTEGER PRIMARY KEY,
+    query TEXT
+) STRICT;
+";
+
+#[derive(PartialEq, Eq, Hash, Debug)]
+pub enum SerializedItemKind {
+    Editor,
+    Terminal,
+    ProjectSearch,
+    Diagnostics,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum SerializedItem {
+    Editor(usize, PathBuf),
+    Terminal(usize),
+    ProjectSearch(usize, String),
+    Diagnostics(usize),
+}
+
+impl FromSql for SerializedItemKind {
+    fn column_result(value: ValueRef<'_>) -> FromSqlResult<Self> {
+        match value {
+            ValueRef::Null => Err(FromSqlError::InvalidType),
+            ValueRef::Integer(_) => Err(FromSqlError::InvalidType),
+            ValueRef::Real(_) => Err(FromSqlError::InvalidType),
+            ValueRef::Text(bytes) => {
+                let str = std::str::from_utf8(bytes).map_err(|_| FromSqlError::InvalidType)?;
+                match str {
+                    "Editor" => Ok(SerializedItemKind::Editor),
+                    "Terminal" => Ok(SerializedItemKind::Terminal),
+                    "ProjectSearch" => Ok(SerializedItemKind::ProjectSearch),
+                    "Diagnostics" => Ok(SerializedItemKind::Diagnostics),
+                    _ => Err(FromSqlError::InvalidType),
+                }
+            }
+            ValueRef::Blob(_) => Err(FromSqlError::InvalidType),
+        }
+    }
+}
+
+impl SerializedItem {
+    fn kind(&self) -> SerializedItemKind {
+        match self {
+            SerializedItem::Editor(_, _) => SerializedItemKind::Editor,
+            SerializedItem::Terminal(_) => SerializedItemKind::Terminal,
+            SerializedItem::ProjectSearch(_, _) => SerializedItemKind::ProjectSearch,
+            SerializedItem::Diagnostics(_) => SerializedItemKind::Diagnostics,
+        }
+    }
+
+    fn id(&self) -> usize {
+        match self {
+            SerializedItem::Editor(id, _)
+            | SerializedItem::Terminal(id)
+            | SerializedItem::ProjectSearch(id, _)
+            | SerializedItem::Diagnostics(id) => *id,
+        }
+    }
+}
+
+impl Db {
+    fn write_item(&self, serialized_item: SerializedItem) -> Result<()> {
+        let mut lock = self.connection.lock();
+        let tx = lock.transaction()?;
+
+        // Serialize the item
+        let id = serialized_item.id();
+        {
+            let kind = format!("{:?}", serialized_item.kind());
+
+            let mut stmt =
+                tx.prepare_cached("INSERT OR REPLACE INTO items(id, kind) VALUES ((?), (?))")?;
+
+            stmt.execute(params![id, kind])?;
+        }
+
+        // Serialize item data
+        match &serialized_item {
+            SerializedItem::Editor(_, path) => {
+                let mut stmt = tx.prepare_cached(
+                    "INSERT OR REPLACE INTO item_path(item_id, path) VALUES ((?), (?))",
+                )?;
+
+                let path_bytes = path.as_os_str().as_bytes();
+                stmt.execute(params![id, path_bytes])?;
+            }
+            SerializedItem::ProjectSearch(_, query) => {
+                let mut stmt = tx.prepare_cached(
+                    "INSERT OR REPLACE INTO item_query(item_id, query) VALUES ((?), (?))",
+                )?;
+
+                stmt.execute(params![id, query])?;
+            }
+            _ => {}
+        }
+
+        tx.commit()?;
+
+        Ok(())
+    }
+
+    fn delete_item(&self, item_id: usize) -> Result<()> {
+        let lock = self.connection.lock();
+
+        let mut stmt = lock.prepare_cached(
+            "
+            DELETE FROM items WHERE id = (:id);
+            DELETE FROM item_path WHERE item_id = (:id);
+            DELETE FROM item_query WHERE item_id = (:id);
+        ",
+        )?;
+
+        stmt.execute(named_params! {":id": item_id})?;
+
+        Ok(())
+    }
+
+    fn take_items(&self) -> Result<Vec<SerializedItem>> {
+        let mut lock = self.connection.lock();
+
+        let tx = lock.transaction()?;
+
+        // Statements prepared from `tx` borrow the transaction, so this scope is
+        // needed to drop them before `tx.commit()` can take `tx` by value.
+        let result = {
+            let mut read_stmt = tx.prepare_cached(
+                "
+                    SELECT items.id, items.kind, item_path.path, item_query.query
+                    FROM items
+                    LEFT JOIN item_path
+                        ON items.id = item_path.item_id
+                    LEFT JOIN item_query
+                        ON items.id = item_query.item_id
+                    ORDER BY items.id
+            ",
+            )?;
+
+            let result = read_stmt
+                .query_map([], |row| {
+                    let id: usize = row.get(0)?;
+                    let kind: SerializedItemKind = row.get(1)?;
+
+                    match kind {
+                        SerializedItemKind::Editor => {
+                            let buf: Vec<u8> = row.get(2)?;
+                            let path: PathBuf = OsStr::from_bytes(&buf).into();
+
+                            Ok(SerializedItem::Editor(id, path))
+                        }
+                        SerializedItemKind::Terminal => Ok(SerializedItem::Terminal(id)),
+                        SerializedItemKind::ProjectSearch => {
+                            let query: Arc<str> = row.get(3)?;
+                            Ok(SerializedItem::ProjectSearch(id, query.to_string()))
+                        }
+                        SerializedItemKind::Diagnostics => Ok(SerializedItem::Diagnostics(id)),
+                    }
+                })?
+                .collect::<Result<Vec<SerializedItem>, rusqlite::Error>>()?;
+
+            let mut delete_stmt = tx.prepare_cached(
+                "DELETE FROM items;
+                DELETE FROM item_path;
+                DELETE FROM item_query;",
+            )?;
+
+            delete_stmt.execute([])?;
+
+            result
+        };
+
+        tx.commit()?;
+
+        Ok(result)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use anyhow::Result;
+
+    use super::*;
+
+    #[test]
+    fn test_items_round_trip() -> Result<()> {
+        let db = Db::open_in_memory();
+
+        let mut items = vec![
+            SerializedItem::Editor(0, PathBuf::from("/tmp/test.txt")),
+            SerializedItem::Terminal(1),
+            SerializedItem::ProjectSearch(2, "Test query!".to_string()),
+            SerializedItem::Diagnostics(3),
+        ];
+
+        for item in items.iter() {
+            db.write_item(item.clone())?;
+        }
+
+        assert_eq!(items, db.take_items()?);
+
+        // Check that it's empty, as expected
+        assert_eq!(Vec::<SerializedItem>::new(), db.take_items()?);
+
+        for item in items.iter() {
+            db.write_item(item.clone())?;
+        }
+
+        items.remove(2);
+        db.delete_item(2)?;
+
+        assert_eq!(items, db.take_items()?);
+
+        Ok(())
+    }
+}

crates/db/src/kvp.rs

@@ -0,0 +1,82 @@
+use anyhow::Result;
+use rusqlite::OptionalExtension;
+
+use super::Db;
+
+pub(crate) const KVP_M_1_UP: &str = "
+CREATE TABLE kv_store(
+    key TEXT PRIMARY KEY,
+    value TEXT NOT NULL
+) STRICT;
+";
+
+impl Db {
+    pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+                let mut stmt = lock.prepare_cached("SELECT value FROM kv_store WHERE key = (?)")?;
+
+                Ok(stmt.query_row([key], |row| row.get(0)).optional()?)
+            })
+            .unwrap_or(Ok(None))
+    }
+
+    pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+
+                let mut stmt = lock.prepare_cached(
+                    "INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))",
+                )?;
+
+                stmt.execute([key, value])?;
+
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
+    }
+
+    pub fn delete_kvp(&self, key: &str) -> Result<()> {
+        self.real()
+            .map(|db| {
+                let lock = db.connection.lock();
+
+                let mut stmt = lock.prepare_cached("DELETE FROM kv_store WHERE key = (?)")?;
+
+                stmt.execute([key])?;
+
+                Ok(())
+            })
+            .unwrap_or(Ok(()))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use anyhow::Result;
+
+    use super::*;
+
+    #[test]
+    fn test_kvp() -> Result<()> {
+        let db = Db::open_in_memory();
+
+        assert_eq!(db.read_kvp("key-1")?, None);
+
+        db.write_kvp("key-1", "one")?;
+        assert_eq!(db.read_kvp("key-1")?, Some("one".to_string()));
+
+        db.write_kvp("key-1", "one-2")?;
+        assert_eq!(db.read_kvp("key-1")?, Some("one-2".to_string()));
+
+        db.write_kvp("key-2", "two")?;
+        assert_eq!(db.read_kvp("key-2")?, Some("two".to_string()));
+
+        db.delete_kvp("key-1")?;
+        assert_eq!(db.read_kvp("key-1")?, None);
+
+        Ok(())
+    }
+}
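
These helpers only store strings. A hedged sketch of layering typed values on top of them follows; the wrapper functions are hypothetical, and serde_json would have to be added as a direct dependency (it currently appears in Cargo.lock only via rusqlite's serde_json feature).

    use anyhow::Result;
    use db::Db;
    use serde::{de::DeserializeOwned, Serialize};

    // Hypothetical convenience wrappers over the string-based KVP helpers.
    fn write_kvp_json<T: Serialize>(db: &Db, key: &str, value: &T) -> Result<()> {
        db.write_kvp(key, &serde_json::to_string(value)?)
    }

    fn read_kvp_json<T: DeserializeOwned>(db: &Db, key: &str) -> Result<Option<T>> {
        Ok(match db.read_kvp(key)? {
            Some(text) => Some(serde_json::from_str(&text)?),
            None => None,
        })
    }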

crates/db/src/migrations.rs

@@ -0,0 +1,15 @@
+use rusqlite_migration::{Migrations, M};
+
+// use crate::items::ITEMS_M_1;
+use crate::kvp::KVP_M_1_UP;
+
+// This list must stay in development order: only ever append new migrations to the
+// end, and never edit or re-order existing entries. The results of these migrations
+// live on the user's file system, so everything added here is locked in forever.
+lazy_static::lazy_static! {
+    pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
+        M::up(KVP_M_1_UP),
+        // M::up(ITEMS_M_1),
+    ]);
+}
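
For illustration only, this is roughly what the vector would look like once a second migration is appended; the ALTER TABLE statement is invented and not part of this PR.

    lazy_static::lazy_static! {
        pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
            M::up(KVP_M_1_UP),
            // Appended later; the existing entries above it are never edited or reordered.
            M::up("ALTER TABLE kv_store ADD COLUMN updated_at TEXT;"),
        ]);
    }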

crates/zed/src/main.rs

@@ -52,11 +52,9 @@ fn main() {
         .or_else(|| app.platform().app_version().ok())
         .map_or("dev".to_string(), |v| v.to_string());
     init_panic_hook(app_version, http.clone(), app.background());
-    let db = app.background().spawn(async move {
-        project::Db::open(&*zed::paths::DB)
-            .log_err()
-            .unwrap_or_else(project::Db::null)
-    });
+    let db = app
+        .background()
+        .spawn(async move { project::Db::open(&*zed::paths::DB_DIR) });
 
     load_embedded_fonts(&app);
 

crates/zed/src/paths.rs

@@ -6,7 +6,6 @@ lazy_static::lazy_static! {
     pub static ref LOGS_DIR: PathBuf = HOME.join("Library/Logs/Zed");
     pub static ref LANGUAGES_DIR: PathBuf = HOME.join("Library/Application Support/Zed/languages");
     pub static ref DB_DIR: PathBuf = HOME.join("Library/Application Support/Zed/db");
-    pub static ref DB: PathBuf = DB_DIR.join("zed.db");
     pub static ref SETTINGS: PathBuf = CONFIG_DIR.join("settings.json");
     pub static ref KEYMAP: PathBuf = CONFIG_DIR.join("keymap.json");
     pub static ref LAST_USERNAME: PathBuf = CONFIG_DIR.join("last-username.txt");

crates/zed/src/zed.rs

@@ -63,6 +63,7 @@ actions!(
         DecreaseBufferFontSize,
         ResetBufferFontSize,
         InstallCommandLineInterface,
+        ResetDatabase,
     ]
 );