// cli_sync.rs — integration tests for the `td sync` subcommand.

  1use assert_cmd::Command;
  2use predicates::prelude::*;
  3
  4#[test]
  5fn sync_help_shows_usage() {
  6    let mut cmd = Command::cargo_bin("td").unwrap();
  7    cmd.args(["sync", "--help"]);
  8    cmd.assert()
  9        .success()
 10        .stdout(predicate::str::contains("Wormhole code"));
 11}
 12
 13#[test]
 14fn sync_invalid_code_format_fails() {
 15    let home = tempfile::tempdir().unwrap();
 16    let cwd = tempfile::tempdir().unwrap();
 17
 18    Command::cargo_bin("td")
 19        .unwrap()
 20        .args(["init", "synctest"])
 21        .current_dir(cwd.path())
 22        .env("HOME", home.path())
 23        .assert()
 24        .success();
 25
 26    let mut cmd = Command::cargo_bin("td").unwrap();
 27    cmd.args(["sync", "not-a-valid-code"])
 28        .current_dir(cwd.path())
 29        .env("HOME", home.path());
 30    cmd.assert().failure();
 31}
 32
/// Two peers sync over a real wormhole connection.
///
/// Setup: both stores share the same project_id (simulating a project
/// that was cloned to a second machine).  Each side creates a task the
/// other doesn't have.  After sync, both should see both tasks.
///
/// NOTE(review): this test mutates the process-global `HOME` variable via
/// `std::env::set_var`, which races with any other test that reads or
/// writes the environment in the same process — confirm these tests run
/// serially, or prefer per-command `.env(...)` as the other tests here do.
#[test]
fn sync_exchanges_tasks_between_peers() {
    use std::fs;
    use yatd::db;

    // Independent HOME and working directories per peer so their on-disk
    // stores cannot collide.
    let home_a = tempfile::tempdir().unwrap();
    let cwd_a = tempfile::tempdir().unwrap();
    let home_b = tempfile::tempdir().unwrap();
    let cwd_b = tempfile::tempdir().unwrap();

    // --- Set up peer A: init a project and create a task ---
    std::env::set_var("HOME", home_a.path());
    let store_a = db::init(cwd_a.path(), "shared").unwrap();
    let id_a = db::gen_id();
    // Populate every field the task map carries; empty strings appear to
    // act as "unset" for parent/deleted_at — TODO confirm against db.
    store_a
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_a)?;
            task.insert("title", "task from A")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "medium")?;
            task.insert("status", "open")?;
            task.insert("effort", "medium")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // --- Set up peer B: clone from A's snapshot, then add its own task ---
    //
    // Copy A's project directory so B has the same project_id and
    // initial state, then create a separate device_id for B.
    let data_a = home_a.path().join(".local/share/td/projects/shared");
    let data_b = home_b.path().join(".local/share/td/projects/shared");
    fs::create_dir_all(data_b.join("changes")).unwrap();
    // Copy only the base snapshot — A's change deltas stay with A.
    fs::copy(data_a.join("base.loro"), data_b.join("base.loro")).unwrap();

    // Write a binding so db::open from cwd_b resolves to "shared".
    let binding_dir = home_b.path().join(".local/share/td");
    fs::create_dir_all(&binding_dir).unwrap();
    // Canonicalize first: presumably bindings are keyed by canonical path,
    // and tempdirs can sit behind symlinks (e.g. /tmp on macOS) — verify.
    let canonical_b = fs::canonicalize(cwd_b.path()).unwrap();
    let bindings = serde_json::json!({
        "bindings": {
            canonical_b.to_string_lossy().to_string(): "shared"
        }
    });
    fs::write(
        binding_dir.join("bindings.json"),
        serde_json::to_string_pretty(&bindings).unwrap(),
    )
    .unwrap();

    // Switch HOME to peer B before opening its store (see NOTE above).
    std::env::set_var("HOME", home_b.path());
    let store_b = db::open(cwd_b.path()).unwrap();
    let id_b = db::gen_id();
    store_b
        .apply_and_persist(|doc| {
            let tasks = doc.get_map("tasks");
            let task = db::insert_task_map(&tasks, &id_b)?;
            task.insert("title", "task from B")?;
            task.insert("description", "")?;
            task.insert("type", "task")?;
            task.insert("priority", "high")?;
            task.insert("status", "open")?;
            task.insert("effort", "low")?;
            task.insert("parent", "")?;
            task.insert("created_at", db::now_utc())?;
            task.insert("updated_at", db::now_utc())?;
            task.insert("deleted_at", "")?;
            task.insert_container("labels", loro::LoroMap::new())?;
            task.insert_container("blockers", loro::LoroMap::new())?;
            task.insert_container("logs", loro::LoroMap::new())?;
            Ok(())
        })
        .unwrap();

    // Verify pre-sync: each peer sees only its own task.  A's task was
    // persisted as a change delta, which was deliberately NOT copied to B
    // above (only base.loro was), so B cannot see it yet; B's own delta
    // is likewise local until the sync below.
    let a_tasks_before = store_a.list_tasks().unwrap();
    let b_tasks_before = store_b.list_tasks().unwrap();
    assert_eq!(a_tasks_before.len(), 1, "A should have 1 task before sync");
    assert_eq!(b_tasks_before.len(), 1, "B should have 1 task before sync");

    // --- Sync via real wormhole ---
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        use magic_wormhole::{MailboxConnection, Wormhole};
        use yatd::cmd::sync::{exchange, wormhole_config};

        // Peer A creates the mailbox.
        let mailbox_a = MailboxConnection::create(wormhole_config(), 2)
            .await
            .unwrap();
        let code = mailbox_a.code().clone();

        // Peer B connects with the code.
        let mailbox_b = MailboxConnection::connect(wormhole_config(), code, false)
            .await
            .unwrap();

        // Both complete SPAKE2 key exchange concurrently.
        let (wormhole_a, wormhole_b) =
            tokio::try_join!(Wormhole::connect(mailbox_a), Wormhole::connect(mailbox_b),).unwrap();

        // Run the sync protocol on both sides concurrently.
        let (report_a, report_b) = tokio::try_join!(
            exchange(&store_a, wormhole_a),
            exchange(&store_b, wormhole_b),
        )
        .unwrap();

        // Each side must have imported the peer's changes and sent a
        // non-empty payload of its own.
        assert!(report_a.imported, "A should have imported B's changes");
        assert!(report_b.imported, "B should have imported A's changes");
        assert!(report_a.sent_bytes > 0);
        assert!(report_b.sent_bytes > 0);
    });

    // --- Verify convergence ---
    let a_tasks = store_a.list_tasks().unwrap();
    let b_tasks = store_b.list_tasks().unwrap();

    assert_eq!(a_tasks.len(), 2, "A should have 2 tasks after sync");
    assert_eq!(b_tasks.len(), 2, "B should have 2 tasks after sync");

    // Both peers must end up with the same set of task titles.
    let a_titles: Vec<&str> = a_tasks.iter().map(|t| t.title.as_str()).collect();
    let b_titles: Vec<&str> = b_tasks.iter().map(|t| t.title.as_str()).collect();
    assert!(a_titles.contains(&"task from A"));
    assert!(a_titles.contains(&"task from B"));
    assert!(b_titles.contains(&"task from A"));
    assert!(b_titles.contains(&"task from B"));
}
178}