Detailed changes
@@ -60,6 +60,18 @@ td list -l frontend # by label
# Full context on a task
td show td-a1b2c3
+# Append notes as you go, not just once at the end.
+# Focus on why: what the user asked for, what decision was made, and why.
+# Keep file paths as context, not the headline.
+td log td-a1b2c3 "User asked for logs in show/export/import but not in list --json. Kept list untouched to avoid breaking existing scripts that parse its output."
+td log td-a1b2c3 "User wanted task validation before insert. Added it so 'task not found' is explicit and immediate instead of a less obvious DB failure."
+td log td-a1b2c3 "Kept ON DELETE CASCADE only on task_logs because the user scoped labels/blockers cascade changes to a separate task."
+td log td-a1b2c3 "Stayed with positional message input (no stdin) because that was explicitly requested and is easier to replay from shell history."
+td log td-a1b2c3 "Used timestamp+id ordering so handoff readers get a stable timeline even when two entries land in the same second."
+td log td-a1b2c3 "Import replaces logs for the task to keep repeated imports deterministic and consistent with label/blocker import behavior."
+td log td-a1b2c3 "CI failed only on migration version assertions after adding 0004; updated expected version and re-ran full suite to confirm no behavioral regressions."
+td log td-a1b2c3 "Ran manual round-trip (init -> create -> log -> show -> export/import -> show) to prove logs survive transfer and stay in chronological order."
+
# Task status or details changed
td update td-a1b2c3 -s in_progress
td update td-a1b2c3 -p high -e low -t "Revised title" -d "Added context"
@@ -77,6 +77,14 @@ pub enum Command {
id: String,
},
+ /// Append a work log entry to a task
+ Log {
+ /// Task ID
+ id: String,
+ /// Log entry body
+ message: String,
+ },
+
/// Update a task
Update {
/// Task ID
@@ -1,8 +1,18 @@
use anyhow::Result;
+use serde::Serialize;
use std::path::Path;
use crate::db;
+#[derive(Serialize)]
+struct ExportTask {
+ #[serde(flatten)]
+ task: db::Task,
+ labels: Vec<String>,
+ blockers: Vec<String>,
+ logs: Vec<db::LogEntry>,
+}
+
pub fn run(root: &Path) -> Result<()> {
let conn = db::open(root)?;
@@ -18,7 +28,8 @@ pub fn run(root: &Path) -> Result<()> {
for t in &tasks {
let labels = db::load_labels(&conn, &t.id)?;
let blockers = db::load_blockers(&conn, &t.id)?;
- let detail = db::TaskDetail {
+ let logs = db::load_logs(&conn, &t.id)?;
+ let detail = ExportTask {
task: db::Task {
id: t.id.clone(),
title: t.title.clone(),
@@ -33,6 +44,7 @@ pub fn run(root: &Path) -> Result<()> {
},
labels,
blockers,
+ logs,
};
println!("{}", serde_json::to_string(&detail)?);
}
@@ -27,6 +27,14 @@ struct ImportTask {
labels: Vec<String>,
#[serde(default)]
blockers: Vec<String>,
+ #[serde(default)]
+ logs: Vec<ImportLogEntry>,
+}
+
+#[derive(Deserialize)]
+struct ImportLogEntry {
+ timestamp: String,
+ body: String,
}
fn default_type() -> String {
@@ -96,6 +104,15 @@ pub fn run(root: &Path, file: &str) -> Result<()> {
[&t.id, blk],
)?;
}
+
+ // Replace logs.
+ conn.execute("DELETE FROM task_logs WHERE task_id = ?1", [&t.id])?;
+ for log in &t.logs {
+ conn.execute(
+ "INSERT INTO task_logs (task_id, timestamp, body) VALUES (?1, ?2, ?3)",
+ rusqlite::params![&t.id, &log.timestamp, &log.body],
+ )?;
+ }
}
eprintln!("info: import complete");
@@ -0,0 +1,39 @@
+use anyhow::{bail, Result};
+use std::path::Path;
+
+use crate::db;
+
+pub fn run(root: &Path, id: &str, message: &str, json: bool) -> Result<()> {
+ let conn = db::open(root)?;
+
+ if !db::task_exists(&conn, id)? {
+ bail!("task {id} not found");
+ }
+
+ let timestamp = db::now_utc();
+ conn.execute(
+ "INSERT INTO task_logs (task_id, timestamp, body)
+ VALUES (?1, ?2, ?3)",
+ rusqlite::params![id, timestamp, message],
+ )?;
+ let log_id = conn.last_insert_rowid();
+ conn.execute(
+ "UPDATE tasks SET updated = ?1 WHERE id = ?2",
+ rusqlite::params![db::now_utc(), id],
+ )?;
+
+ let entry = db::LogEntry {
+ id: log_id,
+ task_id: id.to_string(),
+ timestamp,
+ body: message.to_string(),
+ };
+
+ if json {
+ println!("{}", serde_json::to_string(&entry)?);
+ } else {
+ println!("logged to {id}");
+ }
+
+ Ok(())
+}
@@ -7,6 +7,7 @@ mod import;
mod init;
mod label;
mod list;
+mod log;
mod next;
mod ready;
mod reopen;
@@ -76,6 +77,10 @@ pub fn dispatch(cli: &Cli) -> Result<()> {
let root = require_root()?;
show::run(&root, id, cli.json)
}
+ Command::Log { id, message } => {
+ let root = require_root()?;
+ log::run(&root, id, message, cli.json)
+ }
Command::Update {
id,
status,
@@ -1,8 +1,18 @@
use anyhow::{bail, Result};
+use serde::Serialize;
use std::path::Path;
use crate::db;
+#[derive(Serialize)]
+struct ShowDetail {
+ #[serde(flatten)]
+ task: db::Task,
+ labels: Vec<String>,
+ blockers: Vec<String>,
+ logs: Vec<db::LogEntry>,
+}
+
pub fn run(root: &Path, id: &str, json: bool) -> Result<()> {
let conn = db::open(root)?;
@@ -15,9 +25,16 @@ pub fn run(root: &Path, id: &str, json: bool) -> Result<()> {
}
let detail = db::load_task_detail(&conn, id)?;
+ let logs = db::load_logs(&conn, id)?;
if json {
- println!("{}", serde_json::to_string(&detail)?);
+ let out = ShowDetail {
+ task: detail.task,
+ labels: detail.labels,
+ blockers: detail.blockers,
+ logs,
+ };
+ println!("{}", serde_json::to_string(&out)?);
return Ok(());
}
@@ -82,5 +99,13 @@ pub fn run(root: &Path, id: &str, json: bool) -> Result<()> {
// Timestamps at the bottom
    println!("created {} · updated {}", t.created, t.updated);
+ if !logs.is_empty() {
+ println!();
+ println!("--- log ---");
+ for log in &logs {
+ println!("[{}] {}", log.timestamp, log.body);
+ }
+ }
+
Ok(())
}
@@ -49,6 +49,15 @@ pub struct TaskDetail {
pub blockers: Vec<String>,
}
+/// A work log entry attached to a task.
+#[derive(Debug, Serialize)]
+pub struct LogEntry {
+ pub id: i64,
+ pub task_id: String,
+ pub timestamp: String,
+ pub body: String,
+}
+
/// Parse a priority label to its integer value.
///
/// Accepts "low" (3), "medium" (2), or "high" (1).
@@ -132,6 +141,27 @@ pub fn load_blockers(conn: &Connection, task_id: &str) -> Result<Vec<String>> {
Ok(blockers)
}
+/// Load log entries for a task in chronological order.
+pub fn load_logs(conn: &Connection, task_id: &str) -> Result<Vec<LogEntry>> {
+ let mut stmt = conn.prepare(
+ "SELECT id, task_id, timestamp, body
+ FROM task_logs
+ WHERE task_id = ?1
+ ORDER BY timestamp ASC, id ASC",
+ )?;
+ let logs = stmt
+ .query_map([task_id], |r| {
+ Ok(LogEntry {
+ id: r.get("id")?,
+ task_id: r.get("task_id")?,
+ timestamp: r.get("timestamp")?,
+ body: r.get("body")?,
+ })
+ })?
+ .collect::<rusqlite::Result<Vec<LogEntry>>>()?;
+ Ok(logs)
+}
+
/// Load blockers for a task, partitioned by whether they are resolved.
///
/// Returns `(open, resolved)` where open blockers have a non-closed status
@@ -39,6 +39,12 @@ static MIGRATIONS: &[Migration] = &[
post_hook_up: None,
post_hook_down: None,
},
+ Migration {
+ up_sql: include_str!("migrations/0004_task_logs.up.sql"),
+ down_sql: include_str!("migrations/0004_task_logs.down.sql"),
+ post_hook_up: None,
+ post_hook_down: None,
+ },
];
/// Read the current schema version from the database.
@@ -156,6 +162,8 @@ mod tests {
.unwrap();
conn.execute_batch("SELECT task_id FROM blockers LIMIT 0")
.unwrap();
+ conn.execute_batch("SELECT task_id FROM task_logs LIMIT 0")
+ .unwrap();
}
#[test]
@@ -0,0 +1 @@
+DROP TABLE task_logs;
@@ -0,0 +1,7 @@
+CREATE TABLE task_logs (
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
+ task_id TEXT NOT NULL,
+ timestamp TEXT NOT NULL,
+ body TEXT NOT NULL,
+ FOREIGN KEY (task_id) REFERENCES tasks(id) ON DELETE CASCADE
+);
@@ -0,0 +1,192 @@
+use assert_cmd::Command;
+use predicates::prelude::*;
+use tempfile::TempDir;
+
+fn td() -> Command {
+ Command::cargo_bin("td").unwrap()
+}
+
+fn init_tmp() -> TempDir {
+ let tmp = TempDir::new().unwrap();
+ td().arg("init").current_dir(&tmp).assert().success();
+ tmp
+}
+
+fn create_task(dir: &TempDir, title: &str) -> String {
+ let out = td()
+ .args(["--json", "create", title])
+ .current_dir(dir)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+ v["id"].as_str().unwrap().to_string()
+}
+
+#[test]
+fn log_human_reports_task_id() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Write docs");
+
+ td().args(["log", &id, "Drafted command docs"])
+ .current_dir(&tmp)
+ .assert()
+ .success()
+ .stdout(predicate::str::contains(format!("logged to {id}")));
+}
+
+#[test]
+fn log_json_emits_created_log_entry() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Investigate timeout");
+
+ let out = td()
+ .args(["--json", "log", &id, "Collected stack traces"])
+ .current_dir(&tmp)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+
+ assert!(v["id"].is_i64());
+ assert_eq!(v["task_id"].as_str().unwrap(), id);
+ assert_eq!(v["body"].as_str().unwrap(), "Collected stack traces");
+ assert!(v["timestamp"].as_str().unwrap().ends_with('Z'));
+}
+
+#[test]
+fn log_nonexistent_task_fails() {
+ let tmp = init_tmp();
+
+ td().args(["log", "td-nope", "No task"])
+ .current_dir(&tmp)
+ .assert()
+ .failure()
+ .stderr(predicate::str::contains("task td-nope not found"));
+}
+
+#[test]
+fn show_human_displays_logs_chronologically() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Investigate auth issue");
+
+ td().args(["log", &id, "First note"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+ td().args(["log", &id, "Second note"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+
+ let out = td().args(["show", &id]).current_dir(&tmp).output().unwrap();
+ let stdout = String::from_utf8(out.stdout).unwrap();
+ let first = stdout.find("First note").unwrap();
+ let second = stdout.find("Second note").unwrap();
+
+ assert!(stdout.contains("--- log ---"));
+ assert!(first < second, "expected logs in insertion order: {stdout}");
+}
+
+#[test]
+fn show_json_includes_logs_array() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Implement parser");
+
+ td().args(["log", &id, "Mapped grammar rules"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+
+ let out = td()
+ .args(["--json", "show", &id])
+ .current_dir(&tmp)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+
+ let logs = v["logs"].as_array().unwrap();
+ assert_eq!(logs.len(), 1);
+ assert_eq!(logs[0]["body"].as_str().unwrap(), "Mapped grammar rules");
+}
+
+#[test]
+fn multiple_log_entries_are_ordered() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Refactor planner");
+
+ for msg in ["step one", "step two", "step three"] {
+ td().args(["log", &id, msg])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+ }
+
+ let out = td()
+ .args(["--json", "show", &id])
+ .current_dir(&tmp)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+ let logs = v["logs"].as_array().unwrap();
+
+ assert_eq!(logs.len(), 3);
+ assert_eq!(logs[0]["body"].as_str().unwrap(), "step one");
+ assert_eq!(logs[1]["body"].as_str().unwrap(), "step two");
+ assert_eq!(logs[2]["body"].as_str().unwrap(), "step three");
+}
+
+#[test]
+fn export_import_round_trips_logs() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Port backend");
+ td().args(["log", &id, "Measured baseline"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+ td().args(["log", &id, "Applied optimization"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+
+ let export_out = td().arg("export").current_dir(&tmp).output().unwrap();
+ let exported = String::from_utf8(export_out.stdout).unwrap();
+ let export_file = tmp.path().join("logs.jsonl");
+ std::fs::write(&export_file, &exported).unwrap();
+
+ let tmp2 = TempDir::new().unwrap();
+ td().arg("init").current_dir(&tmp2).assert().success();
+ td().args(["import", export_file.to_str().unwrap()])
+ .current_dir(&tmp2)
+ .assert()
+ .success();
+
+ let out = td()
+ .args(["--json", "show", &id])
+ .current_dir(&tmp2)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+ let logs = v["logs"].as_array().unwrap();
+
+ assert_eq!(logs.len(), 2);
+ assert_eq!(logs[0]["body"].as_str().unwrap(), "Measured baseline");
+ assert_eq!(logs[1]["body"].as_str().unwrap(), "Applied optimization");
+}
+
+#[test]
+fn list_json_does_not_include_logs() {
+ let tmp = init_tmp();
+ let id = create_task(&tmp, "Keep list lean");
+ td().args(["log", &id, "This should not surface in list"])
+ .current_dir(&tmp)
+ .assert()
+ .success();
+
+ let out = td()
+ .args(["--json", "list"])
+ .current_dir(&tmp)
+ .output()
+ .unwrap();
+ let v: serde_json::Value = serde_json::from_slice(&out.stdout).unwrap();
+
+ assert!(v[0].get("logs").is_none());
+}
@@ -20,8 +20,8 @@ fn fresh_init_sets_latest_version() {
let version: u32 = conn
.pragma_query_value(None, "user_version", |row| row.get(0))
.unwrap();
- // Version should be 3 (migration 0001 + 0002 + 0003).
- assert_eq!(version, 3);
+ // Version should be 4 (migration 0001 + 0002 + 0003 + 0004).
+ assert_eq!(version, 4);
}
#[test]
@@ -81,7 +81,7 @@ fn legacy_db_is_migrated_on_open() {
let version: u32 = conn
.pragma_query_value(None, "user_version", |row| row.get(0))
.unwrap();
- assert_eq!(version, 3);
+ assert_eq!(version, 4);
}
#[test]