1use assert_cmd::cargo::cargo_bin_cmd;
2use loro::{ExportMode, LoroDoc, VersionVector};
3use predicates::prelude::*;
4
/// `td sync --help` exits successfully and mentions the wormhole code.
#[test]
fn sync_help_shows_usage() {
    cargo_bin_cmd!("td")
        .args(["sync", "--help"])
        .assert()
        .success()
        .stdout(predicate::str::contains("Wormhole code"));
}
13
/// A syntactically invalid wormhole code makes `td sync` exit non-zero.
#[test]
fn sync_invalid_code_format_fails() {
    let home = tempfile::tempdir().unwrap();
    let cwd = tempfile::tempdir().unwrap();

    // A bound project must exist first, so the failure below is caused by
    // the bad code rather than by a missing project.
    cargo_bin_cmd!("td")
        .args(["project", "init", "synctest"])
        .current_dir(cwd.path())
        .env("HOME", home.path())
        .assert()
        .success();

    cargo_bin_cmd!("td")
        .args(["sync", "not-a-valid-code"])
        .current_dir(cwd.path())
        .env("HOME", home.path())
        .assert()
        .failure();
}
32
/// Two peers sync over a real wormhole connection.
///
/// Setup: both stores share the same project_id (simulating a project
/// that was cloned to a second machine). Each side creates a task the
/// other doesn't have. After sync, both should see both tasks.
///
/// NOTE(review): this test mutates the process-wide HOME variable, so it
/// is not safe to run in parallel with other tests that read HOME —
/// confirm the harness runs these serially (or keep `--test-threads=1`).
#[test]
fn sync_exchanges_tasks_between_peers() {
    use std::fs;
    use yatd::db;

    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    // --- Set up peer A: init a project and create a task ---
    std::env::set_var("HOME", home_a.path());
    let store_a = db::init(cwd_a.path(), "shared").unwrap();
    // Only titles are asserted below, so the shared `insert_task` helper's
    // default priority/effort values are fine for both peers.
    insert_task(&store_a, "task from A");

    // --- Set up peer B: clone from A's snapshot, then add its own task ---
    //
    // Copy A's project directory so B has the same project_id and
    // initial state, then create a separate device_id for B.
    let data_a = home_a.path().join(".local/share/td/projects/shared");
    let data_b = home_b.path().join(".local/share/td/projects/shared");
    fs::create_dir_all(data_b.join("changes")).unwrap();
    // Copy only the base snapshot — A's change deltas stay with A.
    fs::copy(data_a.join("base.loro"), data_b.join("base.loro")).unwrap();

    // Write a binding so db::open from cwd_b resolves to "shared".
    let binding_dir = home_b.path().join(".local/share/td");
    fs::create_dir_all(&binding_dir).unwrap();
    let canonical_b = fs::canonicalize(cwd_b.path()).unwrap();
    let bindings = serde_json::json!({
        "bindings": {
            canonical_b.to_string_lossy().to_string(): "shared"
        }
    });
    fs::write(
        binding_dir.join("bindings.json"),
        serde_json::to_string_pretty(&bindings).unwrap(),
    )
    .unwrap();

    std::env::set_var("HOME", home_b.path());
    let store_b = db::open(cwd_b.path()).unwrap();
    insert_task(&store_b, "task from B");

    // Pre-sync: each peer sees exactly its own task. A's task lives in a
    // change delta that was NOT copied to B (only the base snapshot was),
    // so B sees just the task it created itself, and vice versa.
    let a_tasks_before = store_a.list_tasks().unwrap();
    let b_tasks_before = store_b.list_tasks().unwrap();
    assert_eq!(a_tasks_before.len(), 1, "A should have 1 task before sync");
    assert_eq!(b_tasks_before.len(), 1, "B should have 1 task before sync");

    // --- Sync via real wormhole ---
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};
        use yatd::cmd::sync::{exchange, wormhole_config};

        // Peer A creates the mailbox.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();

        // Peer B connects with the code.
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();

        // Both complete SPAKE2 key exchange concurrently.
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b)).unwrap();

        // Run the sync protocol on both sides concurrently.
        let (report_a, report_b) = tokio::try_join!(
            exchange(&store_a, wormhole_a),
            exchange(&store_b, wormhole_b),
        )
        .unwrap();

        assert!(report_a.imported, "A should have imported B's changes");
        assert!(report_b.imported, "B should have imported A's changes");
        assert!(report_a.sent_bytes > 0);
        assert!(report_b.sent_bytes > 0);
    });

    // --- Verify convergence: both peers now see both tasks ---
    for (tasks, who) in [
        (store_a.list_tasks().unwrap(), "A"),
        (store_b.list_tasks().unwrap(), "B"),
    ] {
        assert_eq!(tasks.len(), 2, "{who} should have 2 tasks after sync");
        let titles: Vec<&str> = tasks.iter().map(|t| t.title.as_str()).collect();
        assert!(titles.contains(&"task from A"), "{who} is missing A's task");
        assert!(titles.contains(&"task from B"), "{who} is missing B's task");
    }
}
180
/// With no directory binding and no TD_PROJECT set, `try_open` yields None.
#[test]
fn try_open_returns_none_without_binding() {
    use yatd::db;

    let home = tempfile::tempdir().unwrap();
    let cwd = tempfile::tempdir().unwrap();
    std::env::set_var("HOME", home.path());

    let opened = db::try_open(cwd.path()).unwrap();
    assert!(
        opened.is_none(),
        "expected no store when cwd is unbound and TD_PROJECT is unset"
    );
}
194
/// Bootstrapping a store from a peer's full delta persists a base snapshot
/// and yields a project that can be reopened with the peer's data intact.
#[test]
fn bootstrap_from_peer_creates_openable_store() {
    use yatd::db;

    // Source peer: a real project with one task. The inline task blob was
    // identical to the shared `insert_task` helper, so use the helper.
    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    std::env::set_var("HOME", home_a.path());
    let source = db::init(cwd_a.path(), "shared").unwrap();
    insert_task(&source, "bootstrapped task");

    // Export everything the source has: all updates since the empty version.
    let full_delta = source
        .doc()
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    let home_b = tempfile::tempdir().unwrap();
    let root_b = home_b.path().join(".local/share/td");
    let store_b = db::Store::bootstrap_from_peer(&root_b, "shared", &full_delta).unwrap();

    assert_eq!(store_b.project_name(), "shared");
    assert!(
        root_b.join("projects/shared/base.loro").exists(),
        "bootstrap should persist a base snapshot"
    );

    // Reopen from disk to prove the bootstrapped state was persisted.
    let reopened = db::Store::open(&root_b, "shared").unwrap();
    let tasks = reopened.list_tasks().unwrap();
    assert_eq!(tasks.len(), 1);
    assert_eq!(tasks[0].title, "bootstrapped task");
}
247
/// A peer doc that lacks the project identity in `meta` must be rejected,
/// and no snapshot may be left on disk.
#[test]
fn bootstrap_from_peer_rejects_missing_project_id() {
    use yatd::db;

    // Build a doc with a tasks map and a schema_version but no project id.
    let doc = LoroDoc::new();
    doc.get_map("tasks");
    doc.get_map("meta").insert("schema_version", 1i64).unwrap();
    doc.commit();

    let delta = doc
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    let home = tempfile::tempdir().unwrap();
    let root = home.path().join(".local/share/td");
    let err = db::Store::bootstrap_from_peer(&root, "shared", &delta).unwrap_err();

    assert!(
        err.to_string()
            .contains("missing required project identity"),
        "unexpected error: {err:#}"
    );
    assert!(
        !root.join("projects/shared/base.loro").exists(),
        "bootstrap should not persist snapshot for invalid peer doc"
    );
}
276
277/// Helper: insert a minimal valid task into a doc via apply_and_persist.
278fn insert_task(store: &yatd::db::Store, title: &str) {
279 let id = yatd::model::gen_id();
280 store
281 .apply_and_persist(|doc| {
282 let tasks = doc.get_map("tasks");
283 let task = yatd::db::insert_task_map(&tasks, &id)?;
284 task.insert("title", title)?;
285 task.insert("description", "")?;
286 task.insert("type", "task")?;
287 task.insert("priority", "medium")?;
288 task.insert("status", "open")?;
289 task.insert("effort", "medium")?;
290 task.insert("parent", "")?;
291 task.insert("created_at", yatd::model::now_utc())?;
292 task.insert("updated_at", yatd::model::now_utc())?;
293 task.insert("deleted_at", "")?;
294 task.insert_container("labels", loro::LoroMap::new())?;
295 task.insert_container("blockers", loro::LoroMap::new())?;
296 task.insert_container("logs", loro::LoroMap::new())?;
297 Ok(())
298 })
299 .unwrap();
300}
301
/// Both peers have the same project (same project_id) with no directory
/// binding/selection. SyncAll should discover the shared project and converge
/// both stores to the same state.
#[test]
fn sync_all_exchanges_shared_projects() {
    use std::fs;
    use yatd::cmd::sync::{build_local_manifest, sync_all_exchange, wormhole_config};
    use yatd::db;

    // Per-peer temp HOMEs and working dirs; bound to locals so the
    // directories live until the test ends.
    let home_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    let root_a = home_a.path().join(".local/share/td");
    let root_b = home_b.path().join(".local/share/td");
    fs::create_dir_all(root_a.join("projects")).unwrap();
    fs::create_dir_all(root_b.join("projects")).unwrap();

    // Peer A initialises "shared" and records one task.
    let store_a = db::Store::init(&root_a, "shared").unwrap();
    insert_task(&store_a, "task from A");

    // Peer B clones A's base snapshot (so the project_id matches) and
    // records a task of its own.
    let shared_b = root_b.join("projects/shared");
    fs::create_dir_all(shared_b.join("changes")).unwrap();
    fs::copy(
        root_a.join("projects/shared/base.loro"),
        shared_b.join("base.loro"),
    )
    .unwrap();
    let store_b = db::Store::open(&root_b, "shared").unwrap();
    insert_task(&store_b, "task from B");

    // Manifests come straight from the explicit data roots (HOME-free).
    let manifest_a = build_local_manifest(&root_a).unwrap();
    let manifest_b = build_local_manifest(&root_b).unwrap();
    assert_eq!(manifest_a.len(), 1);
    assert_eq!(manifest_b.len(), 1);
    assert_eq!(
        manifest_a[0].project_id, manifest_b[0].project_id,
        "both sides must share the same project_id"
    );

    let runtime = tokio::runtime::Runtime::new().unwrap();
    let (results_a, results_b) = runtime.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};

        // A allocates the mailbox, B joins with its code, then both finish
        // the SPAKE2 handshake concurrently.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();
        let (wh_a, wh_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b)).unwrap();

        tokio::try_join!(
            sync_all_exchange(cwd_a.path(), &root_a, manifest_a, wh_a),
            sync_all_exchange(cwd_b.path(), &root_b, manifest_b, wh_b),
        )
        .unwrap()
    });

    assert_eq!(results_a.len(), 1, "A should have synced exactly one project");
    assert_eq!(results_b.len(), 1, "B should have synced exactly one project");

    let (synced_a, report_a) = &results_a[0];
    let (synced_b, report_b) = &results_b[0];

    // Each side held a change the other lacked, so both must import.
    assert!(report_a.imported, "A should have imported B's task");
    assert!(report_b.imported, "B should have imported A's task");

    // Convergence: both stores now contain both tasks.
    for (store, who) in [(synced_a, "A"), (synced_b, "B")] {
        let tasks = store.list_tasks().unwrap();
        assert_eq!(tasks.len(), 2, "{who} should have 2 tasks after SyncAll");
        let titles: Vec<&str> = tasks.iter().map(|t| t.title.as_str()).collect();
        assert!(titles.contains(&"task from A"));
        assert!(titles.contains(&"task from B"));
    }
}
398
/// Both peers have projects but no project_ids in common. SyncAll should
/// complete without error and return an empty result on both sides.
#[test]
fn sync_all_no_intersection_is_noop() {
    use std::fs;
    use yatd::cmd::sync::{build_local_manifest, sync_all_exchange, wormhole_config};
    use yatd::db;

    let home_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    let root_a = home_a.path().join(".local/share/td");
    let root_b = home_b.path().join(".local/share/td");
    fs::create_dir_all(root_a.join("projects")).unwrap();
    fs::create_dir_all(root_b.join("projects")).unwrap();

    // Independently initialised projects: distinct names, distinct ids.
    let _ = db::Store::init(&root_a, "alpha").unwrap();
    let _ = db::Store::init(&root_b, "bravo").unwrap();

    let manifest_a = build_local_manifest(&root_a).unwrap();
    let manifest_b = build_local_manifest(&root_b).unwrap();
    assert_eq!(manifest_a.len(), 1);
    assert_eq!(manifest_b.len(), 1);
    assert_ne!(
        manifest_a[0].project_id, manifest_b[0].project_id,
        "projects must have different ids"
    );

    let runtime = tokio::runtime::Runtime::new().unwrap();
    let (results_a, results_b) = runtime.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};

        // Standard rendezvous: A creates, B joins, both complete key exchange.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();
        let (wh_a, wh_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b)).unwrap();

        tokio::try_join!(
            sync_all_exchange(cwd_a.path(), &root_a, manifest_a, wh_a),
            sync_all_exchange(cwd_b.path(), &root_b, manifest_b, wh_b),
        )
        .unwrap()
    });

    // No intersection: the protocol completes but syncs nothing.
    assert!(results_a.is_empty(), "A: no shared projects, result should be empty");
    assert!(results_b.is_empty(), "B: no shared projects, result should be empty");
}