1use assert_cmd::cargo::cargo_bin_cmd;
2use loro::{ExportMode, LoroDoc, VersionVector};
3use predicates::prelude::*;
4
#[test]
fn sync_help_shows_usage() {
    // `sync --help` must mention the wormhole code argument in its usage text.
    cargo_bin_cmd!("td")
        .args(["sync", "--help"])
        .assert()
        .success()
        .stdout(predicate::str::contains("Wormhole code"));
}
13
#[test]
fn sync_invalid_code_format_fails() {
    let home = tempfile::tempdir().unwrap();
    let cwd = tempfile::tempdir().unwrap();

    // A bound project must exist first, so the failure below is caused by
    // the malformed code rather than by a missing project.
    let mut init = cargo_bin_cmd!("td");
    init.args(["project", "init", "synctest"])
        .current_dir(cwd.path())
        .env("HOME", home.path());
    init.assert().success();

    // A code that doesn't match the wormhole format should be rejected.
    cargo_bin_cmd!("td")
        .args(["sync", "not-a-valid-code"])
        .current_dir(cwd.path())
        .env("HOME", home.path())
        .assert()
        .failure();
}
32
/// Two peers sync over a real wormhole connection.
///
/// Setup: both stores share the same project_id (simulating a project
/// that was cloned to a second machine). Each side creates a task the
/// other doesn't have. After sync, both should see both tasks.
#[test]
fn sync_exchanges_tasks_between_peers() {
    use std::fs;
    use yatd::db;

    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    // --- Set up peer A: init a project and create a task ---
    //
    // NOTE(review): std::env::set_var mutates process-wide state, and cargo
    // runs tests in parallel by default, so any concurrently running test
    // that reads HOME could race with this one. Confirm the suite runs
    // these single-threaded, or prefer per-command `.env(...)` as in the
    // CLI tests above.
    std::env::set_var("HOME", home_a.path());
    let store_a = db::init(cwd_a.path(), "shared").unwrap();
    let id_a = db::gen_id();
    store_a
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_a)?;
            task.insert("title", "task from A")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "medium")?;
            task.insert("status", "open")?;
            task.insert("effort", "medium")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // --- Set up peer B: clone from A's snapshot, then add its own task ---
    //
    // Copy A's project directory so B has the same project_id and initial
    // state (presumably db::open then assigns B its own device_id — not
    // visible here; confirm in db).
    let data_a = home_a.path().join(".local/share/td/projects/shared");
    let data_b = home_b.path().join(".local/share/td/projects/shared");
    fs::create_dir_all(data_b.join("changes")).unwrap();
    // Copy only the base snapshot — A's change deltas stay with A, so the
    // task created above exists only on A's side for now.
    fs::copy(data_a.join("base.loro"), data_b.join("base.loro")).unwrap();

    // Write a binding so db::open from cwd_b resolves to "shared".
    let binding_dir = home_b.path().join(".local/share/td");
    fs::create_dir_all(&binding_dir).unwrap();
    let canonical_b = fs::canonicalize(cwd_b.path()).unwrap();
    let bindings = serde_json::json!({
        "bindings": {
            canonical_b.to_string_lossy().to_string(): "shared"
        }
    });
    fs::write(
        binding_dir.join("bindings.json"),
        serde_json::to_string_pretty(&bindings).unwrap(),
    )
    .unwrap();

    std::env::set_var("HOME", home_b.path());
    let store_b = db::open(cwd_b.path()).unwrap();
    let id_b = db::gen_id();
    store_b
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_b)?;
            task.insert("title", "task from B")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "high")?;
            task.insert("status", "open")?;
            task.insert("effort", "low")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // Verify pre-sync: each side sees only the (empty) base snapshot plus
    // its own local delta — A's task hasn't reached B and vice versa — so
    // both stores report exactly one task.
    let a_tasks_before = store_a.list_tasks().unwrap();
    let b_tasks_before = store_b.list_tasks().unwrap();
    assert_eq!(a_tasks_before.len(), 1, "A should have 1 task before sync");
    assert_eq!(b_tasks_before.len(), 1, "B should have 1 task before sync");

    // --- Sync via real wormhole ---
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};
        use yatd::cmd::sync::{exchange, wormhole_config};

        // Peer A creates the mailbox.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();

        // Peer B connects with the code.
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();

        // Both complete SPAKE2 key exchange concurrently.
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b),).unwrap();

        // Run the sync protocol on both sides concurrently.
        let (report_a, report_b) = tokio::try_join!(
            exchange(&store_a, wormhole_a),
            exchange(&store_b, wormhole_b),
        )
        .unwrap();

        // Each side held a delta the other lacked, so both must import
        // and both must have sent a non-empty payload.
        assert!(report_a.imported, "A should have imported B's changes");
        assert!(report_b.imported, "B should have imported A's changes");
        assert!(report_a.sent_bytes > 0);
        assert!(report_b.sent_bytes > 0);
    });

    // --- Verify convergence: both stores now contain both tasks ---
    let a_tasks = store_a.list_tasks().unwrap();
    let b_tasks = store_b.list_tasks().unwrap();

    assert_eq!(a_tasks.len(), 2, "A should have 2 tasks after sync");
    assert_eq!(b_tasks.len(), 2, "B should have 2 tasks after sync");

    let a_titles: Vec<&str> = a_tasks.iter().map(|t| t.title.as_str()).collect();
    let b_titles: Vec<&str> = b_tasks.iter().map(|t| t.title.as_str()).collect();
    assert!(a_titles.contains(&"task from A"));
    assert!(a_titles.contains(&"task from B"));
    assert!(b_titles.contains(&"task from A"));
    assert!(b_titles.contains(&"task from B"));
}
179
#[test]
fn try_open_returns_none_without_binding() {
    use yatd::db;

    let home_dir = tempfile::tempdir().unwrap();
    let work_dir = tempfile::tempdir().unwrap();

    // Fresh HOME means no bindings.json exists; an unbound cwd must
    // resolve to no store rather than an error.
    std::env::set_var("HOME", home_dir.path());
    let maybe_store = db::try_open(work_dir.path()).unwrap();
    assert!(
        maybe_store.is_none(),
        "expected no store when cwd is unbound and TD_PROJECT is unset"
    );
}
193
#[test]
fn bootstrap_from_peer_creates_openable_store() {
    use yatd::db;

    // Source peer: init a project and add one task. Uses the shared
    // `insert_task` helper instead of duplicating the field-by-field
    // insertion block (the inline copy used the identical default values).
    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    std::env::set_var("HOME", home_a.path());
    let source = db::init(cwd_a.path(), "shared").unwrap();
    insert_task(&source, "bootstrapped task");

    // Export the full history from the empty version vector — this is the
    // payload a bootstrapping peer would receive.
    let full_delta = source
        .doc()
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    // Target peer: bootstrap a brand-new data root from that payload.
    let home_b = tempfile::tempdir().unwrap();
    let root_b = home_b.path().join(".local/share/td");
    let store_b = db::Store::bootstrap_from_peer(&root_b, "shared", &full_delta).unwrap();

    assert_eq!(store_b.project_name(), "shared");
    assert!(
        root_b.join("projects/shared/base.loro").exists(),
        "bootstrap should persist a base snapshot"
    );

    // Re-open from disk to prove the persisted snapshot is self-contained.
    let reopened = db::Store::open(&root_b, "shared").unwrap();
    let tasks = reopened.list_tasks().unwrap();
    assert_eq!(tasks.len(), 1);
    assert_eq!(tasks[0].title, "bootstrapped task");
}
245
#[test]
fn bootstrap_from_peer_rejects_missing_project_id() {
    use yatd::db;

    // Build a structurally plausible doc that lacks the project identity
    // metadata a genuine peer would carry.
    let doc = LoroDoc::new();
    doc.get_map("tasks");
    doc.get_map("meta").insert("schema_version", 1i64).unwrap();
    doc.commit();

    let delta = doc
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    let home = tempfile::tempdir().unwrap();
    let root = home.path().join(".local/share/td");
    let err = db::Store::bootstrap_from_peer(&root, "shared", &delta).unwrap_err();

    // The error must name the missing identity, and nothing may have been
    // written to disk for the rejected doc.
    let message = err.to_string();
    assert!(
        message.contains("missing required project identity"),
        "unexpected error: {err:#}"
    );
    assert!(
        !root.join("projects/shared/base.loro").exists(),
        "bootstrap should not persist snapshot for invalid peer doc"
    );
}
274
275/// Helper: insert a minimal valid task into a doc via apply_and_persist.
276fn insert_task(store: &yatd::db::Store, title: &str) {
277 let id = yatd::db::gen_id();
278 store
279 .apply_and_persist(|doc| {
280 let tasks = doc.get_map("tasks");
281 let task = yatd::db::insert_task_map(&tasks, &id)?;
282 task.insert("title", title)?;
283 task.insert("description", "")?;
284 task.insert("type", "task")?;
285 task.insert("priority", "medium")?;
286 task.insert("status", "open")?;
287 task.insert("effort", "medium")?;
288 task.insert("parent", "")?;
289 task.insert("created_at", yatd::db::now_utc())?;
290 task.insert("updated_at", yatd::db::now_utc())?;
291 task.insert("deleted_at", "")?;
292 task.insert_container("labels", loro::LoroMap::new())?;
293 task.insert_container("blockers", loro::LoroMap::new())?;
294 task.insert_container("logs", loro::LoroMap::new())?;
295 Ok(())
296 })
297 .unwrap();
298}
299
/// Both peers have the same project (same project_id) with no directory
/// binding/selection. SyncAll should discover the shared project and converge
/// both stores to the same state.
#[test]
fn sync_all_exchanges_shared_projects() {
    use std::fs;
    use yatd::cmd::sync::{build_local_manifest, sync_all_exchange, wormhole_config};
    use yatd::db;

    let home_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    let data_root_a = home_a.path().join(".local/share/td");
    let data_root_b = home_b.path().join(".local/share/td");
    fs::create_dir_all(data_root_a.join("projects")).unwrap();
    fs::create_dir_all(data_root_b.join("projects")).unwrap();

    // Peer A: init "shared" and add a task.
    let store_a = db::Store::init(&data_root_a, "shared").unwrap();
    insert_task(&store_a, "task from A");

    // Peer B: copy A's base snapshot (same project_id) into B's data root,
    // open it, and add B's own task. Copying only base.loro — not the
    // changes directory — means A's task exists only on A's side for now.
    let proj_b = data_root_b.join("projects/shared");
    fs::create_dir_all(proj_b.join("changes")).unwrap();
    fs::copy(
        data_root_a.join("projects/shared/base.loro"),
        proj_b.join("base.loro"),
    )
    .unwrap();
    let store_b = db::Store::open(&data_root_b, "shared").unwrap();
    insert_task(&store_b, "task from B");

    // Build manifests from disk (HOME-free: uses explicit data_root).
    let manifest_a = build_local_manifest(&data_root_a).unwrap();
    let manifest_b = build_local_manifest(&data_root_b).unwrap();
    assert_eq!(manifest_a.len(), 1);
    assert_eq!(manifest_b.len(), 1);
    assert_eq!(
        manifest_a[0].project_id, manifest_b[0].project_id,
        "both sides must share the same project_id"
    );

    let rt = tokio::runtime::Runtime::new().unwrap();
    let (results_a, results_b) = rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};

        // Rendezvous: A allocates the mailbox, B joins with the code, and
        // both key exchanges are driven concurrently.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b)).unwrap();

        // Run the SyncAll protocol on both sides concurrently; each side
        // returns one (store, report) entry per project it synced.
        tokio::try_join!(
            sync_all_exchange(cwd_a.path(), &data_root_a, manifest_a, wormhole_a),
            sync_all_exchange(cwd_b.path(), &data_root_b, manifest_b, wormhole_b),
        )
        .unwrap()
    });

    assert_eq!(
        results_a.len(),
        1,
        "A should have synced exactly one project"
    );
    assert_eq!(
        results_b.len(),
        1,
        "B should have synced exactly one project"
    );

    let (store_a_synced, report_a) = &results_a[0];
    let (store_b_synced, report_b) = &results_b[0];

    // Each side held a delta the other lacked before the exchange (A's
    // "task from A", B's "task from B"), so both must report an import.
    assert!(report_a.imported, "A should have imported B's task");
    assert!(report_b.imported, "B should have imported A's task");

    let a_tasks = store_a_synced.list_tasks().unwrap();
    let b_tasks = store_b_synced.list_tasks().unwrap();
    assert_eq!(a_tasks.len(), 2, "A should have 2 tasks after SyncAll");
    assert_eq!(b_tasks.len(), 2, "B should have 2 tasks after SyncAll");

    let a_titles: Vec<&str> = a_tasks.iter().map(|t| t.title.as_str()).collect();
    let b_titles: Vec<&str> = b_tasks.iter().map(|t| t.title.as_str()).collect();
    assert!(a_titles.contains(&"task from A"));
    assert!(a_titles.contains(&"task from B"));
    assert!(b_titles.contains(&"task from A"));
    assert!(b_titles.contains(&"task from B"));
}
396
/// Both peers have projects but no project_ids in common. SyncAll should
/// complete without error and return an empty result on both sides.
#[test]
fn sync_all_no_intersection_is_noop() {
    use std::fs;
    use yatd::cmd::sync::{build_local_manifest, sync_all_exchange, wormhole_config};
    use yatd::db;

    let home_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    let root_a = home_a.path().join(".local/share/td");
    let root_b = home_b.path().join(".local/share/td");
    for root in [&root_a, &root_b] {
        fs::create_dir_all(root.join("projects")).unwrap();
    }

    // Independently initialised projects ("alpha" on A, "bravo" on B) get
    // distinct project_ids, so the peers share nothing to sync.
    let _ = db::Store::init(&root_a, "alpha").unwrap();
    let _ = db::Store::init(&root_b, "bravo").unwrap();

    let manifest_a = build_local_manifest(&root_a).unwrap();
    let manifest_b = build_local_manifest(&root_b).unwrap();
    assert_eq!(manifest_a.len(), 1);
    assert_eq!(manifest_b.len(), 1);
    assert_ne!(
        manifest_a[0].project_id, manifest_b[0].project_id,
        "projects must have different ids"
    );

    let runtime = tokio::runtime::Runtime::new().unwrap();
    let (results_a, results_b) = runtime.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};

        // Rendezvous: A allocates the mailbox, B joins with the code, and
        // both key exchanges are driven concurrently.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();
        let (peer_a, peer_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b)).unwrap();

        tokio::try_join!(
            sync_all_exchange(cwd_a.path(), &root_a, manifest_a, peer_a),
            sync_all_exchange(cwd_b.path(), &root_b, manifest_b, peer_b),
        )
        .unwrap()
    });

    // With no overlapping project_ids, the exchange is a clean no-op.
    assert!(
        results_a.is_empty(),
        "A: no shared projects, result should be empty"
    );
    assert!(
        results_b.is_empty(),
        "B: no shared projects, result should be empty"
    );
}