diff --git a/SKILL.md b/SKILL.md index 00ffe5336d446a7c62885ecce3a41b5ba3674b0d..0e2ad1f508ddd5c64f866e6e83ea8bb48ae02743 100644 --- a/SKILL.md +++ b/SKILL.md @@ -60,6 +60,18 @@ td list -l frontend # by label # Full context on a task td show td-a1b2c3 +# Append notes as you go, not just once at the end. +# Focus on why: what the user asked for, what decision was made, and why. +# Keep file paths as context, not the headline. +td log td-a1b2c3 "User asked for logs in show/export/import but not in list --json. Kept list untouched to avoid breaking existing scripts that parse its output." +td log td-a1b2c3 "User wanted task validation before insert. Added it so 'task not found' is explicit and immediate instead of a less obvious DB failure." +td log td-a1b2c3 "Kept ON DELETE CASCADE only on task_logs because the user scoped labels/blockers cascade changes to a separate task." +td log td-a1b2c3 "Stayed with positional message input (no stdin) because that was explicitly requested and is easier to replay from shell history." +td log td-a1b2c3 "Used timestamp+id ordering so handoff readers get a stable timeline even when two entries land in the same second." +td log td-a1b2c3 "Import replaces logs for the task to keep repeated imports deterministic and consistent with label/blocker import behavior." +td log td-a1b2c3 "CI failed only on migration version assertions after adding 0004; updated expected version and re-ran full suite to confirm no behavioral regressions." +td log td-a1b2c3 "Ran manual round-trip (init -> create -> log -> show -> export/import -> show) to prove logs survive transfer and stay in chronological order." 
+ # Task status or details changed td update td-a1b2c3 -s in_progress td update td-a1b2c3 -p high -e low -t "Revised title" -d "Added context" diff --git a/src/cli.rs b/src/cli.rs index a032c27d49f88b4c66466577324f489669ad33aa..ca6a491193d17c853f6fe5e9898289dde822669e 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -77,6 +77,14 @@ pub enum Command { id: String, }, + /// Append a work log entry to a task + Log { + /// Task ID + id: String, + /// Log entry body + message: String, + }, + /// Update a task Update { /// Task ID diff --git a/src/cmd/export.rs b/src/cmd/export.rs index f212cc70f3087999b930fd5d2cc88a04a6810aaf..1390e934ee5fd56691dab868d0f087ff68be9366 100644 --- a/src/cmd/export.rs +++ b/src/cmd/export.rs @@ -1,8 +1,18 @@ use anyhow::Result; +use serde::Serialize; use std::path::Path; use crate::db; +#[derive(Serialize)] +struct ExportTask { + #[serde(flatten)] + task: db::Task, + labels: Vec<String>, + blockers: Vec<String>, + logs: Vec<db::LogEntry>, +} + pub fn run(root: &Path) -> Result<()> { let conn = db::open(root)?; @@ -18,7 +28,8 @@ pub fn run(root: &Path) -> Result<()> { for t in &tasks { let labels = db::load_labels(&conn, &t.id)?; let blockers = db::load_blockers(&conn, &t.id)?; - let detail = db::TaskDetail { + let logs = db::load_logs(&conn, &t.id)?; + let detail = ExportTask { task: db::Task { id: t.id.clone(), title: t.title.clone(), @@ -33,6 +44,7 @@ pub fn run(root: &Path) -> Result<()> { }, labels, blockers, + logs, }; println!("{}", serde_json::to_string(&detail)?); } diff --git a/src/cmd/import.rs b/src/cmd/import.rs index ad0fe3f2fd2e2f71164958ea7b26327e992e1943..7dc4dd74f58fbb9099a39efa91267100a0d3ea46 100644 --- a/src/cmd/import.rs +++ b/src/cmd/import.rs @@ -27,6 +27,14 @@ struct ImportTask { labels: Vec<String>, #[serde(default)] blockers: Vec<String>, + #[serde(default)] + logs: Vec<ImportLogEntry>, +} + +#[derive(Deserialize)] +struct ImportLogEntry { + timestamp: String, + body: String, } fn default_type() -> String { @@ -96,6 +104,15 @@ pub fn run(root: &Path, file: &str) -> Result<()> { 
[&t.id, blk], )?; } + + // Replace logs. + conn.execute("DELETE FROM task_logs WHERE task_id = ?1", [&t.id])?; + for log in &t.logs { + conn.execute( + "INSERT INTO task_logs (task_id, timestamp, body) VALUES (?1, ?2, ?3)", + rusqlite::params![&t.id, &log.timestamp, &log.body], + )?; + } } eprintln!("info: import complete"); diff --git a/src/cmd/log.rs b/src/cmd/log.rs new file mode 100644 index 0000000000000000000000000000000000000000..e978475cdd31477cabe9dc7c0055eac92439718e --- /dev/null +++ b/src/cmd/log.rs @@ -0,0 +1,39 @@ +use anyhow::{bail, Result}; +use std::path::Path; + +use crate::db; + +pub fn run(root: &Path, id: &str, message: &str, json: bool) -> Result<()> { + let conn = db::open(root)?; + + if !db::task_exists(&conn, id)? { + bail!("task {id} not found"); + } + + let timestamp = db::now_utc(); + conn.execute( + "INSERT INTO task_logs (task_id, timestamp, body) + VALUES (?1, ?2, ?3)", + rusqlite::params![id, timestamp, message], + )?; + let log_id = conn.last_insert_rowid(); + conn.execute( + "UPDATE tasks SET updated = ?1 WHERE id = ?2", + rusqlite::params![db::now_utc(), id], + )?; + + let entry = db::LogEntry { + id: log_id, + task_id: id.to_string(), + timestamp, + body: message.to_string(), + }; + + if json { + println!("{}", serde_json::to_string(&entry)?); + } else { + println!("logged to {id}"); + } + + Ok(()) +} diff --git a/src/cmd/mod.rs b/src/cmd/mod.rs index 9d5cf06da5c7d0e38e898e4c8feec8895b8ca5a8..bfe45d07b66f621d59a83415e2c56d3b882ca392 100644 --- a/src/cmd/mod.rs +++ b/src/cmd/mod.rs @@ -7,6 +7,7 @@ mod import; mod init; mod label; mod list; +mod log; mod next; mod ready; mod reopen; @@ -76,6 +77,10 @@ pub fn dispatch(cli: &Cli) -> Result<()> { let root = require_root()?; show::run(&root, id, cli.json) } + Command::Log { id, message } => { + let root = require_root()?; + log::run(&root, id, message, cli.json) + } Command::Update { id, status, diff --git a/src/cmd/show.rs b/src/cmd/show.rs index 
f5443a6e05d6d13f88f3f6f2b80486fbeb74b952..4d55e117341af4c1c7c38244284983de5500ace2 100644 --- a/src/cmd/show.rs +++ b/src/cmd/show.rs @@ -1,8 +1,18 @@ use anyhow::{bail, Result}; +use serde::Serialize; use std::path::Path; use crate::db; +#[derive(Serialize)] +struct ShowDetail { + #[serde(flatten)] + task: db::Task, + labels: Vec<String>, + blockers: Vec<String>, + logs: Vec<db::LogEntry>, +} + pub fn run(root: &Path, id: &str, json: bool) -> Result<()> { let conn = db::open(root)?; @@ -15,9 +25,16 @@ pub fn run(root: &Path, id: &str, json: bool) -> Result<()> { } let detail = db::load_task_detail(&conn, id)?; + let logs = db::load_logs(&conn, id)?; if json { - println!("{}", serde_json::to_string(&detail)?); + let out = ShowDetail { + task: detail.task, + labels: detail.labels, + blockers: detail.blockers, + logs, + }; + println!("{}", serde_json::to_string(&out)?); return Ok(()); } @@ -82,5 +99,13 @@ pub fn run(root: &Path, id: &str, json: bool) -> Result<()> { // Timestamps at the bottom println!("created {} · updated {}", t.created, t.updated); + if !logs.is_empty() { + println!(); + println!("--- log ---"); + for log in &logs { + println!("[{}] {}", log.timestamp, log.body); + } + } + Ok(()) } diff --git a/src/db.rs b/src/db.rs index e0608a9682312357489db98ed850be561ef44642..34b87032512cc91f8ec99db14e1f842cb88928e9 100644 --- a/src/db.rs +++ b/src/db.rs @@ -49,6 +49,15 @@ pub struct TaskDetail { pub blockers: Vec<String>, } +/// A work log entry attached to a task. +#[derive(Debug, Serialize)] +pub struct LogEntry { + pub id: i64, + pub task_id: String, + pub timestamp: String, + pub body: String, +} + /// Parse a priority label to its integer value. /// /// Accepts "low" (3), "medium" (2), or "high" (1). @@ -132,6 +141,27 @@ pub fn load_blockers(conn: &Connection, task_id: &str) -> Result<Vec<String>> { Ok(blockers) } +/// Load log entries for a task in chronological order. 
+pub fn load_logs(conn: &Connection, task_id: &str) -> Result<Vec<LogEntry>> { + let mut stmt = conn.prepare( + "SELECT id, task_id, timestamp, body + FROM task_logs + WHERE task_id = ?1 + ORDER BY timestamp ASC, id ASC", + )?; + let logs = stmt + .query_map([task_id], |r| { + Ok(LogEntry { + id: r.get("id")?, + task_id: r.get("task_id")?, + timestamp: r.get("timestamp")?, + body: r.get("body")?, + }) + })? + .collect::<Result<Vec<_>, _>>()?; + Ok(logs) +} + /// Load blockers for a task, partitioned by whether they are resolved. /// /// Returns `(open, resolved)` where open blockers have a non-closed status diff --git a/src/migrate.rs b/src/migrate.rs index 2113c8509716376349aa78aadc2bf1ad650dffda..dc1361690593c4fb939db9f789cf09e57cdfedf6 100644 --- a/src/migrate.rs +++ b/src/migrate.rs @@ -39,6 +39,12 @@ static MIGRATIONS: &[Migration] = &[ post_hook_up: None, post_hook_down: None, }, + Migration { + up_sql: include_str!("migrations/0004_task_logs.up.sql"), + down_sql: include_str!("migrations/0004_task_logs.down.sql"), + post_hook_up: None, + post_hook_down: None, + }, ]; /// Read the current schema version from the database. 
@@ -156,6 +162,8 @@ mod tests { .unwrap(); conn.execute_batch("SELECT task_id FROM blockers LIMIT 0") .unwrap(); + conn.execute_batch("SELECT task_id FROM task_logs LIMIT 0") + .unwrap(); } #[test] diff --git a/src/migrations/0004_task_logs.down.sql b/src/migrations/0004_task_logs.down.sql new file mode 100644 index 0000000000000000000000000000000000000000..34b17054be3ea4617d09be5458db855b1932c6a1 --- /dev/null +++ b/src/migrations/0004_task_logs.down.sql @@ -0,0 +1 @@ +DROP TABLE task_logs; diff --git a/src/migrations/0004_task_logs.up.sql b/src/migrations/0004_task_logs.up.sql new file mode 100644 index 0000000000000000000000000000000000000000..0d0c6908d8d868d86296ec94170f3eb814acb90e --- /dev/null +++ b/src/migrations/0004_task_logs.up.sql @@ -0,0 +1,7 @@ +CREATE TABLE task_logs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL, + timestamp TEXT NOT NULL, + body TEXT NOT NULL, + FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE +); diff --git a/tests/cli_log.rs b/tests/cli_log.rs new file mode 100644 index 0000000000000000000000000000000000000000..32b51b9dabdc6d0a6a7b185ef9f0218637d5497e --- /dev/null +++ b/tests/cli_log.rs @@ -0,0 +1,192 @@ +use assert_cmd::Command; +use predicates::prelude::*; +use tempfile::TempDir; + +fn td() -> Command { + Command::cargo_bin("td").unwrap() +} + +fn init_tmp() -> TempDir { + let tmp = TempDir::new().unwrap(); + td().arg("init").current_dir(&tmp).assert().success(); + tmp +} + +fn create_task(dir: &TempDir, title: &str) -> String { + let out = td() + .args(["--json", "create", title]) + .current_dir(dir) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + v["id"].as_str().unwrap().to_string() +} + +#[test] +fn log_human_reports_task_id() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Write docs"); + + td().args(["log", &id, "Drafted command docs"]) + .current_dir(&tmp) + .assert() + .success() + 
.stdout(predicate::str::contains(format!("logged to {id}"))); +} + +#[test] +fn log_json_emits_created_log_entry() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Investigate timeout"); + + let out = td() + .args(["--json", "log", &id, "Collected stack traces"]) + .current_dir(&tmp) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + + assert!(v["id"].is_i64()); + assert_eq!(v["task_id"].as_str().unwrap(), id); + assert_eq!(v["body"].as_str().unwrap(), "Collected stack traces"); + assert!(v["timestamp"].as_str().unwrap().ends_with('Z')); +} + +#[test] +fn log_nonexistent_task_fails() { + let tmp = init_tmp(); + + td().args(["log", "td-nope", "No task"]) + .current_dir(&tmp) + .assert() + .failure() + .stderr(predicate::str::contains("task td-nope not found")); +} + +#[test] +fn show_human_displays_logs_chronologically() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Investigate auth issue"); + + td().args(["log", &id, "First note"]) + .current_dir(&tmp) + .assert() + .success(); + td().args(["log", &id, "Second note"]) + .current_dir(&tmp) + .assert() + .success(); + + let out = td().args(["show", &id]).current_dir(&tmp).output().unwrap(); + let stdout = String::from_utf8(out.stdout).unwrap(); + let first = stdout.find("First note").unwrap(); + let second = stdout.find("Second note").unwrap(); + + assert!(stdout.contains("--- log ---")); + assert!(first < second, "expected logs in insertion order: {stdout}"); +} + +#[test] +fn show_json_includes_logs_array() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Implement parser"); + + td().args(["log", &id, "Mapped grammar rules"]) + .current_dir(&tmp) + .assert() + .success(); + + let out = td() + .args(["--json", "show", &id]) + .current_dir(&tmp) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + + let logs = v["logs"].as_array().unwrap(); + assert_eq!(logs.len(), 1); + 
assert_eq!(logs[0]["body"].as_str().unwrap(), "Mapped grammar rules"); +} + +#[test] +fn multiple_log_entries_are_ordered() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Refactor planner"); + + for msg in ["step one", "step two", "step three"] { + td().args(["log", &id, msg]) + .current_dir(&tmp) + .assert() + .success(); + } + + let out = td() + .args(["--json", "show", &id]) + .current_dir(&tmp) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + let logs = v["logs"].as_array().unwrap(); + + assert_eq!(logs.len(), 3); + assert_eq!(logs[0]["body"].as_str().unwrap(), "step one"); + assert_eq!(logs[1]["body"].as_str().unwrap(), "step two"); + assert_eq!(logs[2]["body"].as_str().unwrap(), "step three"); +} + +#[test] +fn export_import_round_trips_logs() { + let tmp = init_tmp(); + let id = create_task(&tmp, "Port backend"); + td().args(["log", &id, "Measured baseline"]) + .current_dir(&tmp) + .assert() + .success(); + td().args(["log", &id, "Applied optimization"]) + .current_dir(&tmp) + .assert() + .success(); + + let export_out = td().arg("export").current_dir(&tmp).output().unwrap(); + let exported = String::from_utf8(export_out.stdout).unwrap(); + let export_file = tmp.path().join("logs.jsonl"); + std::fs::write(&export_file, &exported).unwrap(); + + let tmp2 = TempDir::new().unwrap(); + td().arg("init").current_dir(&tmp2).assert().success(); + td().args(["import", export_file.to_str().unwrap()]) + .current_dir(&tmp2) + .assert() + .success(); + + let out = td() + .args(["--json", "show", &id]) + .current_dir(&tmp2) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + let logs = v["logs"].as_array().unwrap(); + + assert_eq!(logs.len(), 2); + assert_eq!(logs[0]["body"].as_str().unwrap(), "Measured baseline"); + assert_eq!(logs[1]["body"].as_str().unwrap(), "Applied optimization"); +} + +#[test] +fn list_json_does_not_include_logs() { + let tmp = init_tmp(); 
+ let id = create_task(&tmp, "Keep list lean"); + td().args(["log", &id, "This should not surface in list"]) + .current_dir(&tmp) + .assert() + .success(); + + let out = td() + .args(["--json", "list"]) + .current_dir(&tmp) + .output() + .unwrap(); + let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap(); + + assert!(v[0].get("logs").is_none()); +} diff --git a/tests/cli_migrate.rs b/tests/cli_migrate.rs index 993b760decfd8bbd9d335f4c7d740e89a2fe98df..900cff2019ed932f28c92b005cd6c12db47c7a0b 100644 --- a/tests/cli_migrate.rs +++ b/tests/cli_migrate.rs @@ -20,8 +20,8 @@ fn fresh_init_sets_latest_version() { let version: u32 = conn .pragma_query_value(None, "user_version", |row| row.get(0)) .unwrap(); - // Version should be 3 (migration 0001 + 0002 + 0003). - assert_eq!(version, 3); + // Version should be 4 (migration 0001 + 0002 + 0003 + 0004). + assert_eq!(version, 4); } #[test] @@ -81,7 +81,7 @@ fn legacy_db_is_migrated_on_open() { let version: u32 = conn .pragma_query_value(None, "user_version", |row| row.get(0)) .unwrap(); - assert_eq!(version, 3); + assert_eq!(version, 4); } #[test]