1use assert_cmd::Command;
2use loro::{ExportMode, LoroDoc, VersionVector};
3use predicates::prelude::*;
4
#[test]
fn sync_help_shows_usage() {
    // `td sync --help` must exit successfully and mention the wormhole
    // code argument in its usage text.
    Command::cargo_bin("td")
        .unwrap()
        .args(["sync", "--help"])
        .assert()
        .success()
        .stdout(predicate::str::contains("Wormhole code"));
}
13
#[test]
fn sync_invalid_code_format_fails() {
    let fake_home = tempfile::tempdir().unwrap();
    let workdir = tempfile::tempdir().unwrap();

    // Bind the working directory to a project first, so `sync` gets far
    // enough to look at the wormhole code rather than failing on a
    // missing project.
    Command::cargo_bin("td")
        .unwrap()
        .args(["init", "synctest"])
        .current_dir(workdir.path())
        .env("HOME", fake_home.path())
        .assert()
        .success();

    // A string that is not a valid wormhole code must make sync fail.
    Command::cargo_bin("td")
        .unwrap()
        .args(["sync", "not-a-valid-code"])
        .current_dir(workdir.path())
        .env("HOME", fake_home.path())
        .assert()
        .failure();
}
33
/// Two peers sync over a real wormhole connection.
///
/// Setup: both stores share the same project_id (simulating a project
/// that was cloned to a second machine). Each side creates a task the
/// other doesn't have. After sync, both should see both tasks.
#[test]
fn sync_exchanges_tasks_between_peers() {
    use std::fs;
    use yatd::db;

    // Separate HOME and working directories per peer, so the two stores
    // share no on-disk state except what this test copies explicitly.
    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    // --- Set up peer A: init a project and create a task ---
    //
    // NOTE(review): `set_var` mutates process-global state, and the Rust
    // test harness runs tests on multiple threads by default. This test
    // assumes no other HOME-reading test runs concurrently — confirm, or
    // run with --test-threads=1 / mark these tests serial.
    std::env::set_var("HOME", home_a.path());
    let store_a = db::init(cwd_a.path(), "shared").unwrap();
    let id_a = db::gen_id();
    store_a
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_a)?;
            // Full task schema; empty string doubles as "unset" for
            // optional fields like parent/deleted_at.
            task.insert("title", "task from A")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "medium")?;
            task.insert("status", "open")?;
            task.insert("effort", "medium")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // --- Set up peer B: clone from A's snapshot, then add its own task ---
    //
    // Copy A's project directory so B has the same project_id and
    // initial state, then create a separate device_id for B.
    let data_a = home_a.path().join(".local/share/td/projects/shared");
    let data_b = home_b.path().join(".local/share/td/projects/shared");
    fs::create_dir_all(data_b.join("changes")).unwrap();
    // Copy only the base snapshot — A's change deltas stay with A.
    fs::copy(data_a.join("base.loro"), data_b.join("base.loro")).unwrap();

    // Write a binding so db::open from cwd_b resolves to "shared".
    // The bindings map is keyed by canonicalized directory path.
    let binding_dir = home_b.path().join(".local/share/td");
    fs::create_dir_all(&binding_dir).unwrap();
    let canonical_b = fs::canonicalize(cwd_b.path()).unwrap();
    let bindings = serde_json::json!({
        "bindings": {
            canonical_b.to_string_lossy().to_string(): "shared"
        }
    });
    fs::write(
        binding_dir.join("bindings.json"),
        serde_json::to_string_pretty(&bindings).unwrap(),
    )
    .unwrap();

    std::env::set_var("HOME", home_b.path());
    let store_b = db::open(cwd_b.path()).unwrap();
    let id_b = db::gen_id();
    store_b
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_b)?;
            task.insert("title", "task from B")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "high")?;
            task.insert("status", "open")?;
            task.insert("effort", "low")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // Verify pre-sync: each peer sees exactly one task — its own. The
    // copied base snapshot contains no tasks (A's task lives in A's
    // change delta, which was deliberately NOT copied to B), so neither
    // peer can see the other's task yet.
    let a_tasks_before = store_a.list_tasks().unwrap();
    let b_tasks_before = store_b.list_tasks().unwrap();
    assert_eq!(a_tasks_before.len(), 1, "A should have 1 task before sync");
    assert_eq!(b_tasks_before.len(), 1, "B should have 1 task before sync");

    // --- Sync via real wormhole ---
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};
        use yatd::cmd::sync::{exchange, wormhole_config};

        // Peer A creates the mailbox. The 2 is presumably the wormhole
        // code length (number of words) — confirm against the
        // magic-wormhole API.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();

        // Peer B connects with the code.
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();

        // Both complete SPAKE2 key exchange concurrently; try_join so a
        // failure on either side aborts the test immediately.
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b),).unwrap();

        // Run the sync protocol on both sides concurrently.
        let (report_a, report_b) = tokio::try_join!(
            exchange(&store_a, wormhole_a),
            exchange(&store_b, wormhole_b),
        )
        .unwrap();

        assert!(report_a.imported, "A should have imported B's changes");
        assert!(report_b.imported, "B should have imported A's changes");
        assert!(report_a.sent_bytes > 0);
        assert!(report_b.sent_bytes > 0);
    });

    // --- Verify convergence ---
    let a_tasks = store_a.list_tasks().unwrap();
    let b_tasks = store_b.list_tasks().unwrap();

    assert_eq!(a_tasks.len(), 2, "A should have 2 tasks after sync");
    assert_eq!(b_tasks.len(), 2, "B should have 2 tasks after sync");

    let a_titles: Vec<&str> = a_tasks.iter().map(|t| t.title.as_str()).collect();
    let b_titles: Vec<&str> = b_tasks.iter().map(|t| t.title.as_str()).collect();
    assert!(a_titles.contains(&"task from A"));
    assert!(a_titles.contains(&"task from B"));
    assert!(b_titles.contains(&"task from A"));
    assert!(b_titles.contains(&"task from B"));
}
180
#[test]
fn try_open_returns_none_without_binding() {
    use yatd::db;

    let tmp_home = tempfile::tempdir().unwrap();
    let tmp_cwd = tempfile::tempdir().unwrap();

    std::env::set_var("HOME", tmp_home.path());
    // With no binding for this directory and no TD_PROJECT override,
    // try_open should succeed but yield no store.
    let opened = db::try_open(tmp_cwd.path()).unwrap();
    assert!(
        opened.is_none(),
        "expected no store when cwd is unbound and TD_PROJECT is unset"
    );
}
194
#[test]
fn bootstrap_from_peer_creates_openable_store() {
    use yatd::db;

    // Source peer: a real project containing a single task.
    let source_home = tempfile::tempdir().unwrap();
    let source_cwd = tempfile::tempdir().unwrap();
    std::env::set_var("HOME", source_home.path());
    let source = db::init(source_cwd.path(), "shared").unwrap();

    let task_id = db::gen_id();
    source
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &task_id)?;
            // Scalar string fields of the task schema, in schema order.
            for (field, value) in [
                ("title", "bootstrapped task"),
                ("description", ""),
                ("type", "task"),
                ("priority", "medium"),
                ("status", "open"),
                ("effort", "medium"),
                ("parent", ""),
            ] {
                task.insert(field, value)?;
            }
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            for child in ["labels", "blockers", "logs"] {
                task.insert_container(child, loro::LoroMap::new())?;
            }
            Ok(())
        })
        .unwrap();

    // Export everything the source has, starting from the empty version
    // vector — this is the full history a bootstrapping peer would get.
    let full_delta = source
        .doc()
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    // Destination peer bootstraps a brand-new store from that delta.
    let dest_home = tempfile::tempdir().unwrap();
    let dest_root = dest_home.path().join(".local/share/td");
    let bootstrapped = db::Store::bootstrap_from_peer(&dest_root, "shared", &full_delta).unwrap();

    assert_eq!(bootstrapped.project_name(), "shared");
    assert!(
        dest_root.join("projects/shared/base.loro").exists(),
        "bootstrap should persist a base snapshot"
    );

    // The store must be reopenable from disk and contain the task.
    let reopened = db::Store::open(&dest_root, "shared").unwrap();
    let tasks = reopened.list_tasks().unwrap();
    assert_eq!(tasks.len(), 1);
    assert_eq!(tasks[0].title, "bootstrapped task");
}
246
#[test]
fn bootstrap_from_peer_rejects_missing_project_id() {
    use yatd::db;

    // Build a doc that is structurally plausible (tasks map, meta map
    // with a schema version) but lacks any project identity.
    let doc = LoroDoc::new();
    doc.get_map("tasks");
    doc.get_map("meta").insert("schema_version", 1i64).unwrap();
    doc.commit();

    let delta = doc
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    let tmp_home = tempfile::tempdir().unwrap();
    let data_root = tmp_home.path().join(".local/share/td");
    let err = db::Store::bootstrap_from_peer(&data_root, "shared", &delta).unwrap_err();

    let message = err.to_string();
    assert!(
        message.contains("missing required project identity"),
        "unexpected error: {err:#}"
    );
    // On rejection, nothing may be left on disk.
    assert!(
        !data_root.join("projects/shared/base.loro").exists(),
        "bootstrap should not persist snapshot for invalid peer doc"
    );
}