use anyhow::{bail, Context, Result};
use util::{iife, ResultExt};

use std::{
    fmt::Debug,
    os::unix::prelude::OsStrExt,
    path::{Path, PathBuf},
    sync::Arc,
};

use indoc::indoc;
use sqlez::{
    bindable::{Bind, Column},
    connection::Connection,
    migrations::Migration,
    statement::Statement,
};

use crate::pane::{SerializedDockPane, SerializedPaneGroup};

use super::Db;

// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging.
// You might also want to update some of the parsing code; the variations are left in, but commented
// out. Note that this will panic if run on an existing db that has already been migrated.
pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
    "workspace",
    &[indoc! {"
        CREATE TABLE workspaces(
            workspace_id INTEGER PRIMARY KEY,
            dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded'
            dock_visible INTEGER, -- Boolean
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
        ) STRICT;

        CREATE TABLE worktree_roots(
            worktree_root BLOB NOT NULL,
            workspace_id INTEGER NOT NULL,
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            PRIMARY KEY(worktree_root, workspace_id)
        ) STRICT;"}],
);

#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
pub struct WorkspaceId(i64);

impl Bind for WorkspaceId {
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        self.0.bind(statement, start_index)
    }
}

impl Column for WorkspaceId {
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        i64::column(statement, start_index).map(|(id, next_index)| (Self(id), next_index))
    }
}

#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
    #[default]
    Bottom,
    Right,
    Expanded,
}

impl Bind for DockAnchor {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        match self {
            DockAnchor::Bottom => "Bottom",
            DockAnchor::Right => "Right",
            DockAnchor::Expanded => "Expanded",
        }
        .bind(statement, start_index)
    }
}

impl Column for DockAnchor {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
            Ok((
                match anchor_text.as_ref() {
                    "Bottom" => DockAnchor::Bottom,
                    "Right" => DockAnchor::Right,
                    "Expanded" => DockAnchor::Expanded,
                    _ => bail!("Stored dock anchor is incorrect"),
                },
                next_index,
            ))
        })
    }
}

type WorkspaceRow = (WorkspaceId, DockAnchor, bool);

#[derive(Default, Debug)]
pub struct SerializedWorkspace {
    pub worktree_roots: Vec<Arc<Path>>,
    pub center_group: SerializedPaneGroup,
    pub dock_anchor: DockAnchor,
    pub dock_visible: bool,
    pub dock_pane: SerializedDockPane,
}

impl Db {
    /// Finds the workspace that matches the given set of worktree roots. If the passed set of
    /// worktree roots is empty, returns the most recently updated workspace instead.
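    ///
    /// A minimal usage sketch (the paths and the in-memory db name are illustrative only):
    ///
    /// ```ignore
    /// let db = Db::open_in_memory("docs_example");
    /// if let Some(workspace) = db.workspace_for_roots(&["/tmp", "/tmp2"]) {
    ///     println!("dock: {:?}, visible: {}", workspace.dock_anchor, workspace.dock_visible);
    /// }
    /// ```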
    pub fn workspace_for_roots<P>(&self, worktree_roots: &[P]) -> Option<SerializedWorkspace>
    where
        P: AsRef<Path> + Debug,
    {
        // Find the workspace id which is uniquely identified by this set of paths
        // and return it if found
        let mut workspace_row = self.workspace(worktree_roots);
        if workspace_row.is_none() && worktree_roots.is_empty() {
            workspace_row = self.last_workspace_id();
        }

        workspace_row.and_then(|(workspace_id, dock_anchor, dock_visible)| {
            Some(SerializedWorkspace {
                // Echo back the roots we were queried with (empty when we fell back to the
                // most recent workspace above).
                worktree_roots: worktree_roots
                    .iter()
                    .map(|root| Arc::from(root.as_ref()))
                    .collect(),
                dock_pane: self.get_dock_pane(workspace_id)?,
                center_group: self.get_center_group(workspace_id),
                dock_anchor,
                dock_visible,
            })
        })
    }

    fn workspace<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceRow>
    where
        P: AsRef<Path> + Debug,
    {
        get_workspace(worktree_roots, self).log_err().flatten()
    }

    // fn get_workspace_row(&self, workspace_id: WorkspaceId) -> WorkspaceRow {
    //     unimplemented!()
    // }

    /// Updates the open paths for the given workspace id. Garbage collects any other workspace
    /// whose roots are now owned by the given workspace id, and updates the timestamp in the
    /// workspaces table.
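    ///
    /// A minimal usage sketch (the workspace id and the paths are illustrative only):
    ///
    /// ```ignore
    /// let db = Db::open_in_memory("docs_example");
    /// db.make_new_workspace::<String>(&[]); // allocates WorkspaceId(1)
    /// db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
    /// ```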
    pub fn update_worktrees<P>(&self, workspace_id: &WorkspaceId, worktree_roots: &[P])
    where
        P: AsRef<Path> + Debug,
    {
        self.with_savepoint("update_worktrees", |conn| {
            update_worktree_roots(conn, workspace_id, worktree_roots)
        })
        .with_context(|| {
            format!("Update workspace {workspace_id:?} with roots {worktree_roots:?}")
        })
        .log_err();
    }

    fn last_workspace_id(&self) -> Option<WorkspaceRow> {
        iife!({
            self.prepare("SELECT workspace_id, dock_anchor, dock_visible FROM workspaces ORDER BY timestamp DESC LIMIT 1")?
                .maybe_row::<WorkspaceRow>()
        })
        .log_err()?
    }

    /// Returns the worktree roots of the `limit` most recently updated workspaces, ordered
    /// from most to least recently modified.
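    ///
    /// A minimal usage sketch (the limit of 5 is arbitrary):
    ///
    /// ```ignore
    /// let db = Db::open_in_memory("docs_example");
    /// for roots in db.recent_workspaces(5) {
    ///     println!("{roots:?}");
    /// }
    /// ```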
    pub fn recent_workspaces(&self, limit: usize) -> Vec<Vec<PathBuf>> {
        self.with_savepoint("recent_workspaces", |conn| {
            let mut stmt =
                conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?;

            conn.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
                .with_bindings(limit)?
                .rows::<WorkspaceId>()?
                .iter()
                .map(|workspace_id| stmt.with_bindings(workspace_id.0)?.rows::<PathBuf>())
                .collect::<Result<_>>()
        })
        .log_err()
        .unwrap_or_default()
    }
}

fn update_worktree_roots<P>(
    connection: &Connection,
    workspace_id: &WorkspaceId,
    worktree_roots: &[P],
) -> Result<()>
where
    P: AsRef<Path> + Debug,
{
    // Lookup any old WorkspaceIds which have the same set of roots, and delete them.
    let preexisting_workspace = get_workspace(worktree_roots, connection)?;
    if let Some((preexisting_workspace_id, _, _)) = preexisting_workspace {
        if preexisting_workspace_id != *workspace_id {
            // Should also delete fields in other tables with cascading updates
            connection
                .prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
                .with_bindings(preexisting_workspace_id)?
                .exec()?;
        }
    }

    connection
        .prepare("DELETE FROM worktree_roots WHERE workspace_id = ?")?
        .with_bindings(workspace_id.0)?
        .exec()?;

    for root in worktree_roots {
        let path = root.as_ref().as_os_str().as_bytes();
        // If you need to debug this, here's the string parsing:
        // let path = root.as_ref().to_string_lossy().to_string();

        connection
            .prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?
            .with_bindings((workspace_id.0, path))?
            .exec()?;
    }

    connection
        .prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?
        .with_bindings(workspace_id.0)?
        .exec()?;

    Ok(())
}

fn get_workspace<P>(worktree_roots: &[P], connection: &Connection) -> Result<Option<WorkspaceRow>>
where
    P: AsRef<Path> + Debug,
{
    // Short circuit if we can
    if worktree_roots.is_empty() {
        return Ok(None);
    }

    // Prepare the array binding string. SQL doesn't have syntax for this, so
    // we have to do it ourselves.
    let array_binding_stmt = format!(
        "({})",
        (0..worktree_roots.len())
            .map(|index| format!("?{}", index + 1))
            .collect::<Vec<_>>()
            .join(", ")
    );
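    // For example, with three worktree roots this yields the string "(?1, ?2, ?3)": numbered
    // placeholders that can be reused in more than one spot in the query below.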

    // Any workspace can have multiple independent paths, and these paths
    // can overlap in the database. Take this test data for example:
    //
    // [/tmp, /tmp2] -> 1
    // [/tmp] -> 2
    // [/tmp2, /tmp3] -> 3
    //
    // This would be stored in the database like so:
    //
    // ID PATH
    // 1  /tmp
    // 1  /tmp2
    // 2  /tmp
    // 3  /tmp2
    // 3  /tmp3
    //
    // Note how both /tmp and /tmp2 are associated with multiple workspace IDs.
    // So, given an array of worktree roots, how can we find the exact matching ID?
    // Let's analyze what happens when querying for [/tmp, /tmp2], from the inside out:
    //  - We start with a join of this table on itself, generating every possible
    //    pair of ((path, ID), (path, ID)), and filtering the join down to just the
    //    *overlapping but non-matching* workspace IDs. For this small data set,
    //    this would look like:
    //
    //    wt1.ID wt1.PATH | wt2.ID wt2.PATH
    //    3      /tmp3    | 3      /tmp2
    //
    //  - Moving one SELECT out, we use the first pair's ID column to invert the selection,
    //    meaning we now have a list of all the entries for our array, minus overlapping sets,
    //    but including *subsets* of our worktree roots:
    //
    //    ID PATH
    //    1  /tmp
    //    1  /tmp2
    //    2  /tmp
    //
    //  - To trim out the subsets, we can exploit the PRIMARY KEY constraint that there are no
    //    duplicate entries in this table. Using a GROUP BY and a COUNT we can find the subsets of
    //    our keys:
    //
    //    ID num_matching
    //    1  2
    //    2  1
    //
    //  - And with one final WHERE num_matching = $num_of_worktree_roots, we're done! We've found the
    //    matching ID correctly :D
    //
    // Note: due to limitations in SQLite's query binding, we have to generate the prepared
    // statement with string substitution (the {array_bind} below) and then bind the
    // parameters by number.
    let query = format!(
        r#"
            SELECT workspaces.workspace_id, workspaces.dock_anchor, workspaces.dock_visible
            FROM (SELECT workspace_id
                  FROM (SELECT count(workspace_id) as num_matching, workspace_id FROM worktree_roots
                        WHERE worktree_root in {array_bind} AND workspace_id NOT IN
                          (SELECT wt1.workspace_id FROM worktree_roots as wt1
                           JOIN worktree_roots as wt2
                           ON wt1.workspace_id = wt2.workspace_id
                           WHERE wt1.worktree_root NOT in {array_bind} AND wt2.worktree_root in {array_bind})
                        GROUP BY workspace_id)
                  WHERE num_matching = ?) as matching_workspace
            JOIN workspaces ON workspaces.workspace_id = matching_workspace.workspace_id
        "#,
        array_bind = array_binding_stmt
    );
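    // For illustration only: with two worktree roots, every `{array_bind}` above expands to
    // "(?1, ?2)", so the same two path parameters are reused in each IN clause, and the final
    // unnumbered `?` (the num_matching comparison) is assigned the next number, parameter 3.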

    // This will only be called on start up and when root workspaces change, no need to waste memory
    // caching it.
    let mut stmt = connection.prepare(&query)?;

    // Make sure we bound the parameters correctly
    debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count());

    let root_bytes: Vec<&[u8]> = worktree_roots
        .iter()
        .map(|root| root.as_ref().as_os_str().as_bytes())
        .collect();

    let num_of_roots = root_bytes.len();

    stmt.with_bindings((root_bytes, num_of_roots))?
        .maybe_row::<WorkspaceRow>()
}

#[cfg(test)]
mod tests {

    use std::{path::PathBuf, thread::sleep, time::Duration};

    use crate::Db;

    use super::WorkspaceId;

    #[test]
    fn test_new_worktrees_for_roots() {
        env_logger::init();
        let db = Db::open_in_memory("test_new_worktrees_for_roots");

        // Test creation in the zero-roots case
        let workspace_1 = db.workspace_for_roots::<String>(&[]);
        assert_eq!(workspace_1.workspace_id, WorkspaceId(1));

        // Test pulling from recent workspaces
        let workspace_1 = db.workspace_for_roots::<String>(&[]);
        assert_eq!(workspace_1.workspace_id, WorkspaceId(1));

        // Ensure the timestamps are different
        sleep(Duration::from_secs(1));
        db.make_new_workspace::<String>(&[]);

        // Test pulling another value from recent workspaces
        let workspace_2 = db.workspace_for_roots::<String>(&[]);
        assert_eq!(workspace_2.workspace_id, WorkspaceId(2));

        // Ensure the timestamps are different
        sleep(Duration::from_secs(1));

        // Test creating a new workspace that doesn't exist already
        let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));

        // Make sure it's in the recent workspaces...
        let workspace_3 = db.workspace_for_roots::<String>(&[]);
        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));

        // And that it can be pulled out again
        let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
        assert_eq!(workspace_3.workspace_id, WorkspaceId(3));
    }

    #[test]
    fn test_empty_worktrees() {
        let db = Db::open_in_memory("test_empty_worktrees");

        assert_eq!(None, db.workspace::<String>(&[]));

        db.make_new_workspace::<String>(&[]); // ID 1
        db.make_new_workspace::<String>(&[]); // ID 2
        db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);

        // Sanity check
        assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(1));

        db.update_worktrees::<String>(&WorkspaceId(1), &[]);

        // Make sure the 'no worktrees' case fails correctly. Returning [1, 2] from this
        // call would be semantically correct (as those are the workspaces that
        // don't have roots), but I'd prefer this API to return exactly one
        // workspace, or None otherwise.
        assert_eq!(db.workspace::<String>(&[]), None);

        assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(1));

        assert_eq!(
            db.recent_workspaces(2),
            vec![Vec::<PathBuf>::new(), Vec::<PathBuf>::new()],
        )
    }

    #[test]
    fn test_more_workspace_ids() {
        let data = &[
            (WorkspaceId(1), vec!["/tmp1"]),
            (WorkspaceId(2), vec!["/tmp1", "/tmp2"]),
            (WorkspaceId(3), vec!["/tmp1", "/tmp2", "/tmp3"]),
            (WorkspaceId(4), vec!["/tmp2", "/tmp3"]),
            (WorkspaceId(5), vec!["/tmp2", "/tmp3", "/tmp4"]),
            (WorkspaceId(6), vec!["/tmp2", "/tmp4"]),
            (WorkspaceId(7), vec!["/tmp2"]),
        ];

        let db = Db::open_in_memory("test_more_workspace_ids");

        for (workspace_id, entries) in data {
            db.make_new_workspace::<String>(&[]);
            db.update_worktrees(workspace_id, entries);
        }

        assert_eq!(WorkspaceId(1), db.workspace(&["/tmp1"]).unwrap().0);
        assert_eq!(db.workspace(&["/tmp1", "/tmp2"]).unwrap().0, WorkspaceId(2));
        assert_eq!(
            db.workspace(&["/tmp1", "/tmp2", "/tmp3"]).unwrap().0,
            WorkspaceId(3)
        );
        assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(4));
        assert_eq!(
            db.workspace(&["/tmp2", "/tmp3", "/tmp4"]).unwrap().0,
            WorkspaceId(5)
        );
        assert_eq!(db.workspace(&["/tmp2", "/tmp4"]).unwrap().0, WorkspaceId(6));
        assert_eq!(db.workspace(&["/tmp2"]).unwrap().0, WorkspaceId(7));

        assert_eq!(db.workspace(&["/tmp1", "/tmp5"]), None);
        assert_eq!(db.workspace(&["/tmp5"]), None);
        assert_eq!(db.workspace(&["/tmp2", "/tmp3", "/tmp4", "/tmp5"]), None);
    }

    #[test]
    fn test_detect_workspace_id() {
        let data = &[
            (WorkspaceId(1), vec!["/tmp"]),
            (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
            (WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]),
        ];

        let db = Db::open_in_memory("test_detect_workspace_id");

        for (workspace_id, entries) in data {
            db.make_new_workspace::<String>(&[]);
            db.update_worktrees(workspace_id, entries);
        }

        assert_eq!(db.workspace(&["/tmp2"]), None);
        assert_eq!(db.workspace(&["/tmp2", "/tmp3"]), None);
        assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));
        assert_eq!(db.workspace(&["/tmp", "/tmp2"]).unwrap().0, WorkspaceId(2));
        assert_eq!(
            db.workspace(&["/tmp", "/tmp2", "/tmp3"]).unwrap().0,
            WorkspaceId(3)
        );
    }

    #[test]
    fn test_tricky_overlapping_updates() {
        // DB state:
        // (/tmp) -> ID: 1
        // (/tmp, /tmp2) -> ID: 2
        // (/tmp2, /tmp3) -> ID: 3

        // -> User updates 2 to: (/tmp2, /tmp3)

        // DB state:
        // (/tmp) -> ID: 1
        // (/tmp2, /tmp3) -> ID: 2
        // Get rid of 3 for garbage collection

        let data = &[
            (WorkspaceId(1), vec!["/tmp"]),
            (WorkspaceId(2), vec!["/tmp", "/tmp2"]),
            (WorkspaceId(3), vec!["/tmp2", "/tmp3"]),
        ];

        let db = Db::open_in_memory("test_tricky_overlapping_updates");

        // Load in the test data
        for (workspace_id, entries) in data {
            db.make_new_workspace::<String>(&[]);
            db.update_worktrees(workspace_id, entries);
        }

        // Ensure the timestamps are different
        sleep(Duration::from_secs(1));
        // Execute the update
        db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);

        // Make sure that workspace 3 was garbage collected and its roots now map to workspace 2
        assert_eq!(db.workspace(&["/tmp2", "/tmp3"]).unwrap().0, WorkspaceId(2));

        // And that workspace 1 was untouched
        assert_eq!(db.workspace(&["/tmp"]).unwrap().0, WorkspaceId(1));

        // And that workspace 2 is no longer registered under its old roots
        assert_eq!(db.workspace(&["/tmp", "/tmp2"]), None);

        assert_eq!(db.last_workspace_id().unwrap().0, WorkspaceId(2));

        let recent_workspaces = db.recent_workspaces(10);
        assert_eq!(
            recent_workspaces.get(0).unwrap(),
            &vec![PathBuf::from("/tmp2"), PathBuf::from("/tmp3")]
        );
        assert_eq!(
            recent_workspaces.get(1).unwrap(),
            &vec![PathBuf::from("/tmp")]
        );
    }
}