Address some issues with the sqlez_macros crate

Created by Kay Simmons

Change summary

Cargo.lock                                 |   1 
crates/db/src/db.rs                        |  34 ++---
crates/db/src/kvp.rs                       |  28 +---
crates/editor/src/items.rs                 |  40 ++++---
crates/editor/src/persistence.rs           |   3 
crates/sqlez/src/thread_safe_connection.rs |  87 +++++++++------
crates/sqlez_macros/Cargo.toml             |   3 
crates/sqlez_macros/src/sqlez_macros.rs    |  23 ++-
crates/workspace/src/persistence.rs        | 132 +++++++++++------------
crates/workspace/src/workspace.rs          |   8 
10 files changed, 184 insertions(+), 175 deletions(-)

Detailed changes

Cargo.lock 🔗

@@ -5609,6 +5609,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "sqlez",
+ "sqlformat",
  "syn",
 ]
 

crates/db/src/db.rs 🔗

@@ -4,7 +4,6 @@ pub mod kvp;
 pub use anyhow;
 pub use indoc::indoc;
 pub use lazy_static;
-use parking_lot::Mutex;
 pub use smol;
 pub use sqlez;
 pub use sqlez_macros;
@@ -34,7 +33,7 @@ lazy_static::lazy_static! {
 }
 
 /// Open or create a database at the given directory path.
-pub async fn open_file_db<M: Migrator>() -> ThreadSafeConnection<M> {
+pub async fn open_db<M: Migrator>() -> ThreadSafeConnection<M> {
     // Use 0 for now. Will implement incrementing and clearing of old db files soon TM
     let current_db_dir = (*DB_DIR).join(Path::new(&format!("0-{}", *RELEASE_CHANNEL_NAME)));
 
@@ -56,18 +55,15 @@ pub async fn open_file_db<M: Migrator>() -> ThreadSafeConnection<M> {
         .await
 }
 
-pub async fn open_memory_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
+#[cfg(any(test, feature = "test-support"))]
+pub async fn open_test_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
+    use sqlez::thread_safe_connection::locking_queue;
+
     ThreadSafeConnection::<M>::builder(db_name, false)
         .with_db_initialization_query(DB_INITIALIZE_QUERY)
         .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
         // Serialize queued writes via a mutex and run them synchronously
-        .with_write_queue_constructor(Box::new(|connection| {
-            let connection = Mutex::new(connection);
-            Box::new(move |queued_write| {
-                let connection = connection.lock();
-                queued_write(&connection)
-            })
-        }))
+        .with_write_queue_constructor(locking_queue())
         .build()
         .await
 }
@@ -76,22 +72,24 @@ pub async fn open_memory_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<
 #[macro_export]
 macro_rules! connection {
     ($id:ident: $t:ident<$d:ty>) => {
-        pub struct $t(::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>);
+        pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>);
 
         impl ::std::ops::Deref for $t {
-            type Target = ::db::sqlez::thread_safe_connection::ThreadSafeConnection<$d>;
+            type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$d>;
 
             fn deref(&self) -> &Self::Target {
                 &self.0
             }
         }
 
-        ::db::lazy_static::lazy_static! {
-            pub static ref $id: $t = $t(if cfg!(any(test, feature = "test-support")) {
-                $crate::smol::block_on(::db::open_memory_db(stringify!($id)))
-            } else {
-                $crate::smol::block_on(::db::open_file_db())
-            });
+        #[cfg(any(test, feature = "test-support"))]
+        $crate::lazy_static::lazy_static! {
+            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
+        }
+
+        #[cfg(not(any(test, feature = "test-support")))]
+        $crate::lazy_static::lazy_static! {
+            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db()));
         }
     };
 }
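
For reference, in a build with test or the test-support feature enabled, connection!(KEY_VALUE_STORE: KeyValueStore<KeyValueStore>) (as used in kvp.rs below) expands to roughly the sketch below; the non-test branch calls open_db() instead, and the $crate paths let the macro resolve db's own re-exports rather than requiring callers to reference ::db by name. This is an approximation of the expansion, not the literal macro output.

    pub struct KeyValueStore(
        db::sqlez::thread_safe_connection::ThreadSafeConnection<KeyValueStore>,
    );

    impl std::ops::Deref for KeyValueStore {
        type Target = db::sqlez::thread_safe_connection::ThreadSafeConnection<KeyValueStore>;

        fn deref(&self) -> &Self::Target {
            &self.0
        }
    }

    db::lazy_static::lazy_static! {
        pub static ref KEY_VALUE_STORE: KeyValueStore =
            KeyValueStore(db::smol::block_on(db::open_test_db("KEY_VALUE_STORE")));
    }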

crates/db/src/kvp.rs 🔗

@@ -1,25 +1,9 @@
-use sqlez::{domain::Domain, thread_safe_connection::ThreadSafeConnection};
+use sqlez::domain::Domain;
 use sqlez_macros::sql;
 
-use crate::{open_file_db, open_memory_db, query};
+use crate::{connection, query};
 
-pub struct KeyValueStore(ThreadSafeConnection<KeyValueStore>);
-
-impl std::ops::Deref for KeyValueStore {
-    type Target = ThreadSafeConnection<KeyValueStore>;
-
-    fn deref(&self) -> &Self::Target {
-        &self.0
-    }
-}
-
-lazy_static::lazy_static! {
-    pub static ref KEY_VALUE_STORE: KeyValueStore = KeyValueStore(if cfg!(any(test, feature = "test-support")) {
-        smol::block_on(open_memory_db("KEY_VALUE_STORE"))
-    } else {
-        smol::block_on(open_file_db())
-    });
-}
+connection!(KEY_VALUE_STORE: KeyValueStore<KeyValueStore>);
 
 impl Domain for KeyValueStore {
     fn name() -> &'static str {
@@ -27,8 +11,10 @@ impl Domain for KeyValueStore {
     }
 
     fn migrations() -> &'static [&'static str] {
+        // Legacy migrations using rusqlite may have already created kv_store during alpha,
+        // migrations must be infallible so this must have 'IF NOT EXISTS'
         &[sql!(
-            CREATE TABLE kv_store(
+            CREATE TABLE IF NOT EXISTS kv_store(
                 key TEXT PRIMARY KEY,
                 value TEXT NOT NULL
             ) STRICT;
@@ -62,7 +48,7 @@ mod tests {
 
     #[gpui::test]
     async fn test_kvp() {
-        let db = KeyValueStore(crate::open_memory_db("test_kvp").await);
+        let db = KeyValueStore(crate::open_test_db("test_kvp").await);
 
         assert_eq!(db.read_kvp("key-1").unwrap(), None);
 

crates/editor/src/items.rs 🔗

@@ -602,31 +602,37 @@ impl Item for Editor {
         item_id: ItemId,
         cx: &mut ViewContext<Pane>,
     ) -> Task<Result<ViewHandle<Self>>> {
-        if let Some(project_item) = project.update(cx, |project, cx| {
+        let project_item: Result<_> = project.update(cx, |project, cx| {
             // Look up the path with this key associated, create a self with that path
-            let path = DB.get_path(item_id, workspace_id).ok()?;
+            let path = DB
+                .get_path(item_id, workspace_id)?
+                .context("No path stored for this editor")?;
 
-            let (worktree, path) = project.find_local_worktree(&path, cx)?;
+            let (worktree, path) = project
+                .find_local_worktree(&path, cx)
+                .with_context(|| format!("No worktree for path: {path:?}"))?;
             let project_path = ProjectPath {
                 worktree_id: worktree.read(cx).id(),
                 path: path.into(),
             };
 
-            Some(project.open_path(project_path, cx))
-        }) {
-            cx.spawn(|pane, mut cx| async move {
-                let (_, project_item) = project_item.await?;
-                let buffer = project_item
-                    .downcast::<Buffer>()
-                    .context("Project item at stored path was not a buffer")?;
-
-                Ok(cx.update(|cx| {
-                    cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx))
-                }))
+            Ok(project.open_path(project_path, cx))
+        });
+
+        project_item
+            .map(|project_item| {
+                cx.spawn(|pane, mut cx| async move {
+                    let (_, project_item) = project_item.await?;
+                    let buffer = project_item
+                        .downcast::<Buffer>()
+                        .context("Project item at stored path was not a buffer")?;
+
+                    Ok(cx.update(|cx| {
+                        cx.add_view(pane, |cx| Editor::for_buffer(buffer, Some(project), cx))
+                    }))
+                })
             })
-        } else {
-            Task::ready(Err(anyhow!("Could not load file from stored path")))
-        }
+            .unwrap_or_else(|error| Task::ready(Err(error)))
     }
 }
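
The rewrite leans on anyhow's Context trait, which is implemented for Option as well as Result, so a missing value becomes a descriptive error to hand to Task::ready rather than a silent None. A minimal sketch of that pattern:

    use anyhow::{Context, Result};
    use std::path::PathBuf;

    fn stored_path(path: Option<PathBuf>) -> Result<PathBuf> {
        // None becomes Err("No path stored for this editor").
        path.context("No path stored for this editor")
    }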
 

crates/editor/src/persistence.rs 🔗

@@ -23,7 +23,6 @@ impl Domain for Editor {
                 FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                     ON DELETE CASCADE
                     ON UPDATE CASCADE
-
             ) STRICT;
         )]
     }
@@ -31,7 +30,7 @@ impl Domain for Editor {
 
 impl EditorDb {
     query! {
-        pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<PathBuf> {
+        pub fn get_path(item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>> {
             SELECT path FROM editors
             WHERE item_id = ? AND workspace_id = ?
         }
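
The query! macro hides the statement plumbing; as a rough idea of what the generated method does (an assumption about the expansion, following the select_row_bound pattern used directly in workspace/src/persistence.rs below), it is equivalent to something like:

    // Sketch only; the real body is produced by db's query! macro.
    pub fn get_path(&self, item_id: ItemId, workspace_id: WorkspaceId) -> Result<Option<PathBuf>> {
        self.select_row_bound::<(ItemId, WorkspaceId), PathBuf>(sql!(
            SELECT path FROM editors
            WHERE item_id = ? AND workspace_id = ?
        ))?((item_id, workspace_id))
    }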

crates/sqlez/src/thread_safe_connection.rs 🔗

@@ -1,6 +1,6 @@
 use futures::{channel::oneshot, Future, FutureExt};
 use lazy_static::lazy_static;
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 use std::{collections::HashMap, marker::PhantomData, ops::Deref, sync::Arc, thread};
 use thread_local::ThreadLocal;
 
@@ -73,37 +73,8 @@ impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
     }
 
     pub async fn build(self) -> ThreadSafeConnection<M> {
-        if !QUEUES.read().contains_key(&self.connection.uri) {
-            let mut queues = QUEUES.write();
-            if !queues.contains_key(&self.connection.uri) {
-                let mut write_connection = self.connection.create_connection();
-                // Enable writes for this connection
-                write_connection.write = true;
-                if let Some(mut write_queue_constructor) = self.write_queue_constructor {
-                    let write_channel = write_queue_constructor(write_connection);
-                    queues.insert(self.connection.uri.clone(), write_channel);
-                } else {
-                    use std::sync::mpsc::channel;
-
-                    let (sender, reciever) = channel::<QueuedWrite>();
-                    thread::spawn(move || {
-                        while let Ok(write) = reciever.recv() {
-                            write(&write_connection)
-                        }
-                    });
-
-                    let sender = UnboundedSyncSender::new(sender);
-                    queues.insert(
-                        self.connection.uri.clone(),
-                        Box::new(move |queued_write| {
-                            sender
-                                .send(queued_write)
-                                .expect("Could not send write action to backgorund thread");
-                        }),
-                    );
-                }
-            }
-        }
+        self.connection
+            .initialize_queues(self.write_queue_constructor);
 
         let db_initialize_query = self.db_initialize_query;
 
@@ -134,6 +105,40 @@ impl<M: Migrator> ThreadSafeConnectionBuilder<M> {
 }
 
 impl<M: Migrator> ThreadSafeConnection<M> {
+    fn initialize_queues(&self, write_queue_constructor: Option<WriteQueueConstructor>) {
+        if !QUEUES.read().contains_key(&self.uri) {
+            let mut queues = QUEUES.write();
+            if !queues.contains_key(&self.uri) {
+                let mut write_connection = self.create_connection();
+                // Enable writes for this connection
+                write_connection.write = true;
+                if let Some(mut write_queue_constructor) = write_queue_constructor {
+                    let write_channel = write_queue_constructor(write_connection);
+                    queues.insert(self.uri.clone(), write_channel);
+                } else {
+                    use std::sync::mpsc::channel;
+
+                    let (sender, reciever) = channel::<QueuedWrite>();
+                    thread::spawn(move || {
+                        while let Ok(write) = reciever.recv() {
+                            write(&write_connection)
+                        }
+                    });
+
+                    let sender = UnboundedSyncSender::new(sender);
+                    queues.insert(
+                        self.uri.clone(),
+                        Box::new(move |queued_write| {
+                            sender
+                                .send(queued_write)
+                                .expect("Could not send write action to backgorund thread");
+                        }),
+                    );
+                }
+            }
+        }
+    }
+
     pub fn builder(uri: &str, persistent: bool) -> ThreadSafeConnectionBuilder<M> {
         ThreadSafeConnectionBuilder::<M> {
             db_initialize_query: None,
@@ -208,14 +213,18 @@ impl ThreadSafeConnection<()> {
         uri: &str,
         persistent: bool,
         connection_initialize_query: Option<&'static str>,
+        write_queue_constructor: Option<WriteQueueConstructor>,
     ) -> Self {
-        Self {
+        let connection = Self {
             uri: Arc::from(uri),
             persistent,
             connection_initialize_query,
             connections: Default::default(),
             _migrator: PhantomData,
-        }
+        };
+
+        connection.initialize_queues(write_queue_constructor);
+        connection
     }
 }
 
@@ -243,6 +252,16 @@ impl<M: Migrator> Deref for ThreadSafeConnection<M> {
     }
 }
 
+pub fn locking_queue() -> WriteQueueConstructor {
+    Box::new(|connection| {
+        let connection = Mutex::new(connection);
+        Box::new(move |queued_write| {
+            let connection = connection.lock();
+            queued_write(&connection)
+        })
+    })
+}
+
 #[cfg(test)]
 mod test {
     use indoc::indoc;
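
For orientation, the write-queue types that locking_queue() and initialize_queues() trade in have roughly the following shape (an approximation, not copied from sqlez; the exact bounds may differ): a constructor receives the dedicated write connection and returns the callback that gets stored in the QUEUES map.

    use sqlez::connection::Connection;

    // Approximate shapes of the aliases referenced above.
    type QueuedWrite = Box<dyn FnOnce(&Connection) + Send + 'static>;
    type WriteQueue = Box<dyn Fn(QueuedWrite) + Send + Sync + 'static>;
    type WriteQueueConstructor = Box<dyn FnMut(Connection) -> WriteQueue>;

With locking_queue(), queued writes are serialized by a Mutex and executed synchronously when submitted, while the default constructor spawns a background thread and forwards writes to it over a channel.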

crates/sqlez_macros/Cargo.toml 🔗

@@ -13,4 +13,5 @@ syn = "1.0"
 quote = "1.0"
 proc-macro2 = "1.0"
 lazy_static = "1.4"
-sqlez = { path = "../sqlez" }
+sqlez = { path = "../sqlez" }
+sqlformat = "0.2"

crates/sqlez_macros/src/sqlez_macros.rs 🔗

@@ -1,9 +1,11 @@
 use proc_macro::{Delimiter, Span, TokenStream, TokenTree};
-use sqlez::thread_safe_connection::ThreadSafeConnection;
+use sqlez::thread_safe_connection::{locking_queue, ThreadSafeConnection};
 use syn::Error;
 
 lazy_static::lazy_static! {
-    static ref SQLITE: ThreadSafeConnection = ThreadSafeConnection::new(":memory:", false, None);
+    static ref SQLITE: ThreadSafeConnection =  {
+        ThreadSafeConnection::new(":memory:", false, None, Some(locking_queue()))
+    };
 }
 
 #[proc_macro]
@@ -20,6 +22,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream {
     }
 
     let error = SQLITE.sql_has_syntax_error(sql.trim());
+    let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default());
 
     if let Some((error, error_offset)) = error {
         let error_span = spans
@@ -29,10 +32,10 @@ pub fn sql(tokens: TokenStream) -> TokenStream {
             .next()
             .unwrap_or(Span::call_site());
 
-        let error_text = format!("Sql Error: {}\nFor Query: {}", error, sql);
+        let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql);
         TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error())
     } else {
-        format!("r#\"{}\"#", &sql).parse().unwrap()
+        format!("r#\"{}\"#", &formatted_sql).parse().unwrap()
     }
 }
 
@@ -61,18 +64,18 @@ fn flatten_stream(tokens: TokenStream, result: &mut Vec<(String, Span)>) {
 
 fn open_delimiter(delimiter: Delimiter) -> String {
     match delimiter {
-        Delimiter::Parenthesis => "(".to_string(),
-        Delimiter::Brace => "[".to_string(),
-        Delimiter::Bracket => "{".to_string(),
+        Delimiter::Parenthesis => "( ".to_string(),
+        Delimiter::Brace => "[ ".to_string(),
+        Delimiter::Bracket => "{ ".to_string(),
         Delimiter::None => "".to_string(),
     }
 }
 
 fn close_delimiter(delimiter: Delimiter) -> String {
     match delimiter {
-        Delimiter::Parenthesis => ")".to_string(),
-        Delimiter::Brace => "]".to_string(),
-        Delimiter::Bracket => "}".to_string(),
+        Delimiter::Parenthesis => " ) ".to_string(),
+        Delimiter::Brace => " ] ".to_string(),
+        Delimiter::Bracket => " } ".to_string(),
         Delimiter::None => "".to_string(),
     }
 }
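
The formatting step can be exercised on its own; sqlformat 0.2's format takes the query text, the bind parameters, and formatting options (the macro passes QueryParams::None and the default options), and the macro then emits the formatted text as a raw string literal:

    fn main() {
        let formatted = sqlformat::format(
            "SELECT key, value FROM kv_store WHERE key = ?",
            &sqlformat::QueryParams::None,
            Default::default(),
        );
        // Printed with sqlformat's default indentation and line-breaking style.
        println!("{formatted}");
    }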

crates/workspace/src/persistence.rs 🔗

@@ -7,7 +7,6 @@ use std::path::Path;
 use anyhow::{anyhow, bail, Context, Result};
 use db::{connection, query, sqlez::connection::Connection, sqlez_macros::sql};
 use gpui::Axis;
-use indoc::indoc;
 
 use db::sqlez::domain::Domain;
 use util::{iife, unzip_option, ResultExt};
@@ -106,15 +105,15 @@ impl WorkspaceDb {
             DockPosition,
         ) = iife!({
             if worktree_roots.len() == 0 {
-                self.select_row(indoc! {"
+                self.select_row(sql!(
                     SELECT workspace_id, workspace_location, dock_visible, dock_anchor
-                    FROM workspaces 
-                    ORDER BY timestamp DESC LIMIT 1"})?()?
+                    FROM workspaces
+                    ORDER BY timestamp DESC LIMIT 1))?()?
             } else {
-                self.select_row_bound(indoc! {"
+                self.select_row_bound(sql!(
                     SELECT workspace_id, workspace_location, dock_visible, dock_anchor
                     FROM workspaces 
-                    WHERE workspace_location = ?"})?(&workspace_location)?
+                    WHERE workspace_location = ?))?(&workspace_location)?
             }
             .context("No workspaces found")
         })
@@ -142,19 +141,15 @@ impl WorkspaceDb {
         self.write(move |conn| {
             conn.with_savepoint("update_worktrees", || {
                 // Clear out panes and pane_groups
-                conn.exec_bound(indoc! {"
+                conn.exec_bound(sql!(
                     UPDATE workspaces SET dock_pane = NULL WHERE workspace_id = ?1;
                     DELETE FROM pane_groups WHERE workspace_id = ?1;
-                    DELETE FROM panes WHERE workspace_id = ?1;"})?(workspace.id)
+                    DELETE FROM panes WHERE workspace_id = ?1;))?(workspace.id)
                 .context("Clearing old panes")?;
 
-                conn.exec_bound(indoc! {"
-                DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?"})?(
-                    (
-                    &workspace.location,
-                    workspace.id.clone(),
-                )
-                )
+                conn.exec_bound(sql!(
+                    DELETE FROM workspaces WHERE workspace_location = ? AND workspace_id != ?
+                ))?((&workspace.location, workspace.id.clone()))
                 .context("clearing out old locations")?;
 
                 // Upsert
@@ -184,10 +179,11 @@ impl WorkspaceDb {
                     .context("save pane in save workspace")?;
 
                 // Complete workspace initialization
-                conn.exec_bound(indoc! {"
+                conn.exec_bound(sql!(
                     UPDATE workspaces
                     SET dock_pane = ?
-                    WHERE workspace_id = ?"})?((dock_id, workspace.id))
+                    WHERE workspace_id = ?
+                ))?((dock_id, workspace.id))
                 .context("Finishing initialization with dock pane")?;
 
                 Ok(())
@@ -203,20 +199,13 @@ impl WorkspaceDb {
         }
     }
 
-    /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
-    pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> {
-        iife!({
-            // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html
-            Ok::<_, anyhow::Error>(
-                self.select_bound::<usize, (WorkspaceId, WorkspaceLocation)>(
-                    "SELECT workspace_id, workspace_location FROM workspaces ORDER BY timestamp DESC LIMIT ?",
-                )?(limit)?
-                .into_iter()
-                .collect::<Vec<(WorkspaceId, WorkspaceLocation)>>(),
-            )
-        })
-        .log_err()
-        .unwrap_or_default()
+    query! {
+        pub fn recent_workspaces(limit: usize) -> Result<Vec<(WorkspaceId, WorkspaceLocation)>> {
+            SELECT workspace_id, workspace_location 
+            FROM workspaces 
+            ORDER BY timestamp DESC 
+            LIMIT ?
+        }
     }
 
     fn get_center_pane_group(&self, workspace_id: WorkspaceId) -> Result<SerializedPaneGroup> {
@@ -233,7 +222,7 @@ impl WorkspaceDb {
     ) -> Result<Vec<SerializedPaneGroup>> {
         type GroupKey = (Option<GroupId>, WorkspaceId);
         type GroupOrPane = (Option<GroupId>, Option<Axis>, Option<PaneId>, Option<bool>);
-        self.select_bound::<GroupKey, GroupOrPane>(indoc! {"
+        self.select_bound::<GroupKey, GroupOrPane>(sql!(
             SELECT group_id, axis, pane_id, active
                 FROM (SELECT 
                         group_id,
@@ -243,7 +232,7 @@ impl WorkspaceDb {
                         position,
                         parent_group_id,
                         workspace_id
-                      FROM pane_groups
+                      FROM pane_groups 
                      UNION
                       SELECT 
                         NULL,
@@ -257,7 +246,7 @@ impl WorkspaceDb {
                       JOIN panes ON center_panes.pane_id = panes.pane_id) 
             WHERE parent_group_id IS ? AND workspace_id = ?
             ORDER BY position
-            "})?((group_id, workspace_id))?
+        ))?((group_id, workspace_id))?
         .into_iter()
         .map(|(group_id, axis, pane_id, active)| {
             if let Some((group_id, axis)) = group_id.zip(axis) {
@@ -293,10 +282,11 @@ impl WorkspaceDb {
             SerializedPaneGroup::Group { axis, children } => {
                 let (parent_id, position) = unzip_option(parent);
 
-                let group_id = conn.select_row_bound::<_, i64>(indoc! {"
+                let group_id = conn.select_row_bound::<_, i64>(sql!(
                         INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) 
                         VALUES (?, ?, ?, ?) 
-                        RETURNING group_id"})?((
+                        RETURNING group_id
+                ))?((
                     workspace_id,
                     parent_id,
                     position,
@@ -318,10 +308,11 @@ impl WorkspaceDb {
     }
 
     fn get_dock_pane(&self, workspace_id: WorkspaceId) -> Result<SerializedPane> {
-        let (pane_id, active) = self.select_row_bound(indoc! {"
+        let (pane_id, active) = self.select_row_bound(sql!(
             SELECT pane_id, active
             FROM panes
-            WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)"})?(
+            WHERE pane_id = (SELECT dock_pane FROM workspaces WHERE workspace_id = ?)
+        ))?(
             workspace_id,
         )?
         .context("No dock pane for workspace")?;
@@ -339,17 +330,19 @@ impl WorkspaceDb {
         parent: Option<(GroupId, usize)>, // None indicates BOTH dock pane AND center_pane
         dock: bool,
     ) -> Result<PaneId> {
-        let pane_id = conn.select_row_bound::<_, i64>(indoc! {"
+        let pane_id = conn.select_row_bound::<_, i64>(sql!(
             INSERT INTO panes(workspace_id, active) 
             VALUES (?, ?) 
-            RETURNING pane_id"})?((workspace_id, pane.active))?
+            RETURNING pane_id
+        ))?((workspace_id, pane.active))?
         .ok_or_else(|| anyhow!("Could not retrieve inserted pane_id"))?;
 
         if !dock {
             let (parent_id, order) = unzip_option(parent);
-            conn.exec_bound(indoc! {"
+            conn.exec_bound(sql!(
                 INSERT INTO center_panes(pane_id, parent_group_id, position)
-                VALUES (?, ?, ?)"})?((pane_id, parent_id, order))?;
+                VALUES (?, ?, ?)
+            ))?((pane_id, parent_id, order))?;
         }
 
         Self::save_items(conn, workspace_id, pane_id, &pane.children).context("Saving items")?;
@@ -358,10 +351,11 @@ impl WorkspaceDb {
     }
 
     fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
-        Ok(self.select_bound(indoc! {"
+        Ok(self.select_bound(sql!(
             SELECT kind, item_id FROM items
             WHERE pane_id = ?
-            ORDER BY position"})?(pane_id)?)
+            ORDER BY position
+        ))?(pane_id)?)
     }
 
     fn save_items(
@@ -370,10 +364,11 @@ impl WorkspaceDb {
         pane_id: PaneId,
         items: &[SerializedItem],
     ) -> Result<()> {
-        let mut insert = conn.exec_bound(
-            "INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)",
-        ).context("Preparing insertion")?;
+        let mut insert = conn.exec_bound(sql!(
+            INSERT INTO items(workspace_id, pane_id, position, kind, item_id) VALUES (?, ?, ?, ?, ?)
+        )).context("Preparing insertion")?;
         for (position, item) in items.iter().enumerate() {
+            dbg!(item);
             insert((workspace_id, pane_id, position, item))?;
         }
 
@@ -386,7 +381,7 @@ mod tests {
 
     use std::sync::Arc;
 
-    use db::open_memory_db;
+    use db::open_test_db;
     use settings::DockAnchor;
 
     use super::*;
@@ -395,18 +390,19 @@ mod tests {
     async fn test_next_id_stability() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_next_id_stability").await);
+        let db = WorkspaceDb(open_test_db("test_next_id_stability").await);
 
         db.write(|conn| {
             conn.migrate(
                 "test_table",
-                &[indoc! {"
+                &[sql!(
                     CREATE TABLE test_table(
                         text TEXT,
                         workspace_id INTEGER,
                         FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
                             ON DELETE CASCADE
-                    ) STRICT;"}],
+                    ) STRICT;
+                )],
             )
             .unwrap();
         })
@@ -416,22 +412,22 @@ mod tests {
         // Assert the empty row got inserted
         assert_eq!(
             Some(id),
-            db.select_row_bound::<WorkspaceId, WorkspaceId>(
-                "SELECT workspace_id FROM workspaces WHERE workspace_id = ?"
-            )
+            db.select_row_bound::<WorkspaceId, WorkspaceId>(sql!(
+                SELECT workspace_id FROM workspaces WHERE workspace_id = ?
+            ))
             .unwrap()(id)
             .unwrap()
         );
 
         db.write(move |conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-1", id))
             .unwrap()
         })
         .await;
 
         let test_text_1 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(1)
         .unwrap()
         .unwrap();
@@ -442,19 +438,19 @@ mod tests {
     async fn test_workspace_id_stability() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_workspace_id_stability").await);
+        let db = WorkspaceDb(open_test_db("test_workspace_id_stability").await);
 
         db.write(|conn| {
             conn.migrate(
                 "test_table",
-                &[indoc! {"
+                &[sql!(
                     CREATE TABLE test_table(
                         text TEXT,
                         workspace_id INTEGER,
                         FOREIGN KEY(workspace_id) 
                             REFERENCES workspaces(workspace_id)
                             ON DELETE CASCADE
-                    ) STRICT;"}],
+                    ) STRICT;)],
             )
         })
         .await
@@ -479,7 +475,7 @@ mod tests {
         db.save_workspace(workspace_1.clone()).await;
 
         db.write(|conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-1", 1))
             .unwrap();
         })
@@ -488,7 +484,7 @@ mod tests {
         db.save_workspace(workspace_2.clone()).await;
 
         db.write(|conn| {
-            conn.exec_bound("INSERT INTO test_table(text, workspace_id) VALUES (?, ?)")
+            conn.exec_bound(sql!(INSERT INTO test_table(text, workspace_id) VALUES (?, ?)))
                 .unwrap()(("test-text-2", 2))
             .unwrap();
         })
@@ -505,14 +501,14 @@ mod tests {
         db.save_workspace(workspace_2).await;
 
         let test_text_2 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(2)
         .unwrap()
         .unwrap();
         assert_eq!(test_text_2, "test-text-2");
 
         let test_text_1 = db
-            .select_row_bound::<_, String>("SELECT text FROM test_table WHERE workspace_id = ?")
+            .select_row_bound::<_, String>(sql!(SELECT text FROM test_table WHERE workspace_id = ?))
             .unwrap()(1)
         .unwrap()
         .unwrap();
@@ -523,7 +519,7 @@ mod tests {
     async fn test_full_workspace_serialization() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_full_workspace_serialization").await);
+        let db = WorkspaceDb(open_test_db("test_full_workspace_serialization").await);
 
         let dock_pane = crate::persistence::model::SerializedPane {
             children: vec![
@@ -597,7 +593,7 @@ mod tests {
     async fn test_workspace_assignment() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_basic_functionality").await);
+        let db = WorkspaceDb(open_test_db("test_basic_functionality").await);
 
         let workspace_1 = SerializedWorkspace {
             id: 1,
@@ -689,7 +685,7 @@ mod tests {
     async fn test_basic_dock_pane() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("basic_dock_pane").await);
+        let db = WorkspaceDb(open_test_db("basic_dock_pane").await);
 
         let dock_pane = crate::persistence::model::SerializedPane::new(
             vec![
@@ -714,7 +710,7 @@ mod tests {
     async fn test_simple_split() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("simple_split").await);
+        let db = WorkspaceDb(open_test_db("simple_split").await);
 
         //  -----------------
         //  | 1,2   | 5,6   |
@@ -766,7 +762,7 @@ mod tests {
     async fn test_cleanup_panes() {
         env_logger::try_init().ok();
 
-        let db = WorkspaceDb(open_memory_db("test_cleanup_panes").await);
+        let db = WorkspaceDb(open_test_db("test_cleanup_panes").await);
 
         let center_pane = SerializedPaneGroup::Group {
             axis: gpui::Axis::Horizontal,
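
One consequence of converting recent_workspaces to query!: it now returns a Result instead of logging and defaulting internally, so callers decide how to handle failure. A hypothetical call site (the DB static name is assumed; ResultExt::log_err comes from the util crate already imported in this file):

    fn recent(limit: usize) -> Vec<(WorkspaceId, WorkspaceLocation)> {
        // DB stands for whatever connection! static the workspace crate declares
        // for WorkspaceDb; name assumed for illustration.
        DB.recent_workspaces(limit).log_err().unwrap_or_default()
    }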

crates/workspace/src/workspace.rs 🔗

@@ -2365,7 +2365,6 @@ impl Workspace {
                     .await;
 
                 // Traverse the splits tree and add to things
-
                 let (root, active_pane) = serialized_workspace
                     .center_group
                     .deserialize(&project, serialized_workspace.id, &workspace, &mut cx)
@@ -2384,6 +2383,10 @@ impl Workspace {
                         cx.focus(active_pane);
                     }
 
+                    if workspace.items(cx).next().is_none() {
+                        cx.dispatch_action(NewFile);
+                    }
+
                     cx.notify();
                 });
             }
@@ -2636,13 +2639,10 @@ pub fn open_paths(
 pub fn open_new(app_state: &Arc<AppState>, cx: &mut MutableAppContext) -> Task<()> {
     let task = Workspace::new_local(Vec::new(), app_state.clone(), cx);
     cx.spawn(|mut cx| async move {
-        eprintln!("Open new task spawned");
         let (workspace, opened_paths) = task.await;
-        eprintln!("workspace and path items created");
 
         workspace.update(&mut cx, |_, cx| {
             if opened_paths.is_empty() {
-                eprintln!("new file redispatched");
                 cx.dispatch_action(NewFile);
             }
         })