1use assert_cmd::cargo::cargo_bin_cmd;
2use loro::{ExportMode, LoroDoc, VersionVector};
3use predicates::prelude::*;
4
#[test]
fn sync_help_shows_usage() {
    // `td sync --help` must exit successfully and mention the wormhole
    // code argument in its usage text.
    let mut help = cargo_bin_cmd!("td");
    help.args(["sync", "--help"]);
    let outcome = help.assert().success();
    outcome.stdout(predicate::str::contains("Wormhole code"));
}
13
#[test]
fn sync_invalid_code_format_fails() {
    // Sync needs a bound project before it can even look at the code,
    // so initialize one in an isolated HOME first.
    let fake_home = tempfile::tempdir().unwrap();
    let project_dir = tempfile::tempdir().unwrap();

    let mut init = cargo_bin_cmd!("td");
    init.args(["project", "init", "synctest"])
        .current_dir(project_dir.path())
        .env("HOME", fake_home.path());
    init.assert().success();

    // A malformed wormhole code must produce a non-zero exit status.
    let mut sync = cargo_bin_cmd!("td");
    sync.args(["sync", "not-a-valid-code"])
        .current_dir(project_dir.path())
        .env("HOME", fake_home.path());
    sync.assert().failure();
}
32
/// Two peers sync over a real wormhole connection.
///
/// Setup: both stores share the same project_id (simulating a project
/// that was cloned to a second machine). Each side creates a task the
/// other doesn't have. After sync, both should see both tasks.
///
/// NOTE(review): this test mutates the process-global HOME variable via
/// `std::env::set_var`, so it races with any concurrently running test
/// that also touches HOME — confirm these tests run single-threaded
/// (`--test-threads=1`) or that HOME-dependent paths are otherwise
/// serialized.
#[test]
fn sync_exchanges_tasks_between_peers() {
    use std::fs;
    use yatd::db;

    // Isolated HOME + working directory per peer.
    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    // --- Set up peer A: init a project and create a task ---
    std::env::set_var("HOME", home_a.path());
    let store_a = db::init(cwd_a.path(), "shared").unwrap();
    let id_a = db::gen_id();
    store_a
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_a)?;
            task.insert("title", "task from A")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "medium")?;
            task.insert("status", "open")?;
            task.insert("effort", "medium")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // --- Set up peer B: clone from A's snapshot, then add its own task ---
    //
    // Copy A's project directory so B has the same project_id and
    // initial state; db::open below is expected to mint a separate
    // device_id for B (not directly visible here — TODO confirm).
    let data_a = home_a.path().join(".local/share/td/projects/shared");
    let data_b = home_b.path().join(".local/share/td/projects/shared");
    fs::create_dir_all(data_b.join("changes")).unwrap();
    // Copy only the base snapshot — A's change deltas stay with A, so
    // A's task is deliberately absent from B until the sync below.
    fs::copy(data_a.join("base.loro"), data_b.join("base.loro")).unwrap();

    // Write a binding so db::open from cwd_b resolves to "shared".
    let binding_dir = home_b.path().join(".local/share/td");
    fs::create_dir_all(&binding_dir).unwrap();
    // Canonicalize so the binding key matches the path form db::open
    // presumably uses when looking up the cwd (symlinked tmpdirs on
    // macOS, etc.).
    let canonical_b = fs::canonicalize(cwd_b.path()).unwrap();
    let bindings = serde_json::json!({
        "bindings": {
            canonical_b.to_string_lossy().to_string(): "shared"
        }
    });
    fs::write(
        binding_dir.join("bindings.json"),
        serde_json::to_string_pretty(&bindings).unwrap(),
    )
    .unwrap();

    std::env::set_var("HOME", home_b.path());
    let store_b = db::open(cwd_b.path()).unwrap();
    let id_b = db::gen_id();
    store_b
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_b)?;
            task.insert("title", "task from B")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "high")?;
            task.insert("status", "open")?;
            task.insert("effort", "low")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // Verify pre-sync: each side sees only its own task. Only base.loro
    // was copied to B (A's task lives in a change delta that stayed with
    // A), so B sees the base snapshot plus its own delta, and A sees the
    // base snapshot plus its own delta — one task each.
    let a_tasks_before = store_a.list_tasks().unwrap();
    let b_tasks_before = store_b.list_tasks().unwrap();
    assert_eq!(a_tasks_before.len(), 1, "A should have 1 task before sync");
    assert_eq!(b_tasks_before.len(), 1, "B should have 1 task before sync");

    // --- Sync via real wormhole ---
    // Both halves of the exchange run in one process on a local Tokio
    // runtime; the rendezvous still goes through the real wormhole
    // mailbox server, so this test needs network access.
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};
        use yatd::cmd::sync::{exchange, wormhole_config};

        // Peer A creates the mailbox (code length 2 words).
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();

        // Peer B connects with the code.
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();

        // Both complete SPAKE2 key exchange concurrently.
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b),).unwrap();

        // Run the sync protocol on both sides concurrently.
        let (report_a, report_b) = tokio::try_join!(
            exchange(&store_a, wormhole_a),
            exchange(&store_b, wormhole_b),
        )
        .unwrap();

        assert!(report_a.imported, "A should have imported B's changes");
        assert!(report_b.imported, "B should have imported A's changes");
        assert!(report_a.sent_bytes > 0);
        assert!(report_b.sent_bytes > 0);
    });

    // --- Verify convergence ---
    // After the exchange each store should hold the union of both
    // peers' tasks.
    let a_tasks = store_a.list_tasks().unwrap();
    let b_tasks = store_b.list_tasks().unwrap();

    assert_eq!(a_tasks.len(), 2, "A should have 2 tasks after sync");
    assert_eq!(b_tasks.len(), 2, "B should have 2 tasks after sync");

    let a_titles: Vec<&str> = a_tasks.iter().map(|t| t.title.as_str()).collect();
    let b_titles: Vec<&str> = b_tasks.iter().map(|t| t.title.as_str()).collect();
    assert!(a_titles.contains(&"task from A"));
    assert!(a_titles.contains(&"task from B"));
    assert!(b_titles.contains(&"task from A"));
    assert!(b_titles.contains(&"task from B"));
}
179
#[test]
fn try_open_returns_none_without_binding() {
    use yatd::db;

    // With a fresh HOME there are no bindings at all, so an arbitrary
    // cwd must resolve to no store.
    let fake_home = tempfile::tempdir().unwrap();
    let unbound_cwd = tempfile::tempdir().unwrap();

    std::env::set_var("HOME", fake_home.path());
    let resolved = db::try_open(unbound_cwd.path()).unwrap();
    assert!(
        resolved.is_none(),
        "expected no store when cwd is unbound and TD_PROJECT is unset"
    );
}
193
#[test]
fn bootstrap_from_peer_creates_openable_store() {
    use yatd::db;

    // Build a source project holding one task whose complete update log
    // we can hand to a brand-new peer.
    let src_home = tempfile::tempdir().unwrap();
    let src_cwd = tempfile::tempdir().unwrap();
    std::env::set_var("HOME", src_home.path());
    let src_store = db::init(src_cwd.path(), "shared").unwrap();

    let task_id = db::gen_id();
    src_store
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &task_id)?;
            // Scalar fields, in the same order the schema expects.
            for (field, value) in [
                ("title", "bootstrapped task"),
                ("description", ""),
                ("type", "task"),
                ("priority", "medium"),
                ("status", "open"),
                ("effort", "medium"),
                ("parent", ""),
            ] {
                task.insert(field, value)?;
            }
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            // Nested container fields.
            for container in ["labels", "blockers", "logs"] {
                task.insert_container(container, loro::LoroMap::new())?;
            }
            Ok(())
        })
        .unwrap();

    // Export the entire history (updates since the empty version).
    let full_delta = src_store
        .doc()
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    // A fresh peer bootstraps its store directly from that delta.
    let peer_home = tempfile::tempdir().unwrap();
    let peer_root = peer_home.path().join(".local/share/td");
    let peer_store = db::Store::bootstrap_from_peer(&peer_root, "shared", &full_delta).unwrap();

    assert_eq!(peer_store.project_name(), "shared");
    let snapshot_path = peer_root.join("projects/shared/base.loro");
    assert!(
        snapshot_path.exists(),
        "bootstrap should persist a base snapshot"
    );

    // The bootstrapped store must survive a reopen and expose the task.
    let reopened = db::Store::open(&peer_root, "shared").unwrap();
    let tasks = reopened.list_tasks().unwrap();
    assert_eq!(tasks.len(), 1);
    assert_eq!(tasks[0].title, "bootstrapped task");
}
245
#[test]
fn bootstrap_from_peer_rejects_missing_project_id() {
    use yatd::db;

    // Hand-craft a peer doc that carries a schema version but no
    // project identity; bootstrap must refuse it.
    let doc = LoroDoc::new();
    doc.get_map("tasks");
    doc.get_map("meta").insert("schema_version", 1i64).unwrap();
    doc.commit();

    let delta = doc
        .export(ExportMode::updates(&VersionVector::default()))
        .unwrap();

    let home = tempfile::tempdir().unwrap();
    let root = home.path().join(".local/share/td");
    let err = db::Store::bootstrap_from_peer(&root, "shared", &delta).unwrap_err();

    let message = err.to_string();
    assert!(
        message.contains("missing required project identity"),
        "unexpected error: {err:#}"
    );
    // Rejection must not leave a half-written snapshot behind.
    assert!(
        !root.join("projects/shared/base.loro").exists(),
        "bootstrap should not persist snapshot for invalid peer doc"
    );
}