From 610cc1b1385e3ef4ca5e12f8783aeb6feda25b2f Mon Sep 17 00:00:00 2001 From: Agus Zubiaga Date: Fri, 12 Dec 2025 09:43:16 -0300 Subject: [PATCH] edit prediction cli: Cargo-style progress output (#44675) Release Notes: - N/A --- Cargo.lock | 1 + crates/edit_prediction_cli/Cargo.toml | 1 + .../edit_prediction_cli/src/format_prompt.rs | 8 +- .../edit_prediction_cli/src/load_project.rs | 49 ++- crates/edit_prediction_cli/src/main.rs | 47 ++- crates/edit_prediction_cli/src/predict.rs | 46 ++- crates/edit_prediction_cli/src/progress.rs | 372 ++++++++++++++++++ .../src/retrieve_context.rs | 37 +- crates/edit_prediction_cli/src/score.rs | 5 + 9 files changed, 509 insertions(+), 57 deletions(-) create mode 100644 crates/edit_prediction_cli/src/progress.rs diff --git a/Cargo.lock b/Cargo.lock index 7631b8f4c5d46437452ca1b42dfc1a2609cb0c54..2447303bacc666324a99c54247ab70f950d3bb0c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5179,6 +5179,7 @@ dependencies = [ "language_model", "language_models", "languages", + "libc", "log", "node_runtime", "paths", diff --git a/crates/edit_prediction_cli/Cargo.toml b/crates/edit_prediction_cli/Cargo.toml index 14f146a122b55b5a05529d4a32302a6dd65825d7..61e55e09a3b0b46a7d6ad0338be3ab76c1e08401 100644 --- a/crates/edit_prediction_cli/Cargo.toml +++ b/crates/edit_prediction_cli/Cargo.toml @@ -34,6 +34,7 @@ language_extension.workspace = true language_model.workspace = true language_models.workspace = true languages = { workspace = true, features = ["load-grammars"] } +libc.workspace = true log.workspace = true node_runtime.workspace = true paths.workspace = true diff --git a/crates/edit_prediction_cli/src/format_prompt.rs b/crates/edit_prediction_cli/src/format_prompt.rs index 598d98fdb7646585641dd9fc47668506935644f4..2225f1d294144753408968c6f464988378e2691d 100644 --- a/crates/edit_prediction_cli/src/format_prompt.rs +++ b/crates/edit_prediction_cli/src/format_prompt.rs @@ -3,6 +3,7 @@ use crate::{ example::{Example, ExamplePrompt}, headless::EpAppState, load_project::run_load_project, + progress::{Progress, Step}, retrieve_context::run_context_retrieval, }; use edit_prediction::{ @@ -17,9 +18,12 @@ pub async fn run_format_prompt( example: &mut Example, prompt_format: PromptFormat, app_state: Arc, + progress: Arc, mut cx: AsyncApp, ) { - run_context_retrieval(example, app_state.clone(), cx.clone()).await; + run_context_retrieval(example, app_state.clone(), progress.clone(), cx.clone()).await; + + let _step_progress = progress.start(Step::FormatPrompt, &example.name); match prompt_format { PromptFormat::Teacher => { @@ -31,7 +35,7 @@ pub async fn run_format_prompt( }); } PromptFormat::Zeta2 => { - run_load_project(example, app_state, cx.clone()).await; + run_load_project(example, app_state, progress.clone(), cx.clone()).await; let ep_store = cx .update(|cx| EditPredictionStore::try_global(cx).unwrap()) diff --git a/crates/edit_prediction_cli/src/load_project.rs b/crates/edit_prediction_cli/src/load_project.rs index 3e0b34241164801a30f959f759e1c0419ba324ff..895105966713f653a0ce8277387276a0ae40a4bc 100644 --- a/crates/edit_prediction_cli/src/load_project.rs +++ b/crates/edit_prediction_cli/src/load_project.rs @@ -2,6 +2,7 @@ use crate::{ example::{Example, ExampleBuffer, ExampleState}, headless::EpAppState, paths::{REPOS_DIR, WORKTREES_DIR}, + progress::{InfoStyle, Progress, Step, StepProgress}, }; use anyhow::{Result, anyhow}; use collections::HashMap; @@ -24,30 +25,47 @@ use std::{ use util::{paths::PathStyle, rel_path::RelPath}; use zeta_prompt::CURSOR_MARKER; -pub 
async fn run_load_project(example: &mut Example, app_state: Arc, mut cx: AsyncApp) { +pub async fn run_load_project( + example: &mut Example, + app_state: Arc, + progress: Arc, + mut cx: AsyncApp, +) { if example.state.is_some() { return; } - let project = setup_project(example, &app_state, &mut cx).await; + let progress = progress.start(Step::LoadProject, &example.name); + + let project = setup_project(example, &app_state, &progress, &mut cx).await; let _open_buffers = apply_edit_history(example, &project, &mut cx) .await .unwrap(); let (buffer, cursor_position) = cursor_position(example, &project, &mut cx).await; - example.buffer = buffer + let (example_buffer, language_name) = buffer .read_with(&cx, |buffer, _cx| { let cursor_point = cursor_position.to_point(&buffer); - Some(ExampleBuffer { - content: buffer.text(), - cursor_row: cursor_point.row, - cursor_column: cursor_point.column, - cursor_offset: cursor_position.to_offset(&buffer), - }) + let language_name = buffer + .language() + .map(|l| l.name().to_string()) + .unwrap_or_else(|| "Unknown".to_string()); + ( + ExampleBuffer { + content: buffer.text(), + cursor_row: cursor_point.row, + cursor_column: cursor_point.column, + cursor_offset: cursor_position.to_offset(&buffer), + }, + language_name, + ) }) .unwrap(); + progress.set_info(language_name, InfoStyle::Normal); + + example.buffer = Some(example_buffer); example.state = Some(ExampleState { buffer, project, @@ -131,13 +149,14 @@ async fn cursor_position( async fn setup_project( example: &mut Example, app_state: &Arc, + step_progress: &Arc, cx: &mut AsyncApp, ) -> Entity { let ep_store = cx .update(|cx| EditPredictionStore::try_global(cx).unwrap()) .unwrap(); - let worktree_path = setup_worktree(example).await; + let worktree_path = setup_worktree(example, step_progress).await; if let Some(project) = app_state.project_cache.get(&example.repository_url) { ep_store @@ -158,7 +177,7 @@ async fn setup_project( .update(cx, |buffer, cx| buffer.reload(cx)) .unwrap() .await - .unwrap(); + .ok(); } return project; } @@ -208,7 +227,7 @@ async fn setup_project( project } -pub async fn setup_worktree(example: &Example) -> PathBuf { +async fn setup_worktree(example: &Example, step_progress: &Arc) -> PathBuf { let (repo_owner, repo_name) = example.repo_name().expect("failed to get repo name"); let repo_dir = REPOS_DIR.join(repo_owner.as_ref()).join(repo_name.as_ref()); let worktree_path = WORKTREES_DIR @@ -217,7 +236,7 @@ pub async fn setup_worktree(example: &Example) -> PathBuf { let repo_lock = lock_repo(&repo_dir).await; if !repo_dir.is_dir() { - eprintln!("Cloning repository {}", example.repository_url); + step_progress.set_substatus(format!("cloning {}", repo_name)); fs::create_dir_all(&repo_dir).unwrap(); run_git(&repo_dir, &["init"]).await.unwrap(); run_git( @@ -237,6 +256,7 @@ pub async fn setup_worktree(example: &Example) -> PathBuf { let revision = if let Ok(revision) = revision { revision } else { + step_progress.set_substatus("fetching"); if run_git( &repo_dir, &["fetch", "--depth", "1", "origin", &example.revision], @@ -253,6 +273,7 @@ pub async fn setup_worktree(example: &Example) -> PathBuf { }; // Create the worktree for this example if needed. + step_progress.set_substatus("preparing worktree"); if worktree_path.is_dir() { run_git(&worktree_path, &["clean", "--force", "-d"]) .await @@ -288,6 +309,7 @@ pub async fn setup_worktree(example: &Example) -> PathBuf { // Apply the uncommitted diff for this example. 
if !example.uncommitted_diff.is_empty() { + step_progress.set_substatus("applying diff"); let mut apply_process = smol::process::Command::new("git") .current_dir(&worktree_path) .args(&["apply", "-"]) @@ -314,6 +336,7 @@ pub async fn setup_worktree(example: &Example) -> PathBuf { } } + step_progress.clear_substatus(); worktree_path } diff --git a/crates/edit_prediction_cli/src/main.rs b/crates/edit_prediction_cli/src/main.rs index 1091f0acfa182b95ed18bc6d560aaf7bca6225c7..b053af128c82c1aeefb35756ec28bc22a3ff2387 100644 --- a/crates/edit_prediction_cli/src/main.rs +++ b/crates/edit_prediction_cli/src/main.rs @@ -7,6 +7,7 @@ mod load_project; mod metrics; mod paths; mod predict; +mod progress; mod retrieve_context; mod score; @@ -15,8 +16,6 @@ use edit_prediction::EditPredictionStore; use gpui::Application; use reqwest_client::ReqwestClient; use serde::{Deserialize, Serialize}; -use std::sync::atomic::AtomicUsize; -use std::sync::atomic::Ordering::SeqCst; use std::{path::PathBuf, sync::Arc}; use crate::distill::run_distill; @@ -24,6 +23,7 @@ use crate::example::{group_examples_by_repo, read_examples, write_examples}; use crate::format_prompt::run_format_prompt; use crate::load_project::run_load_project; use crate::predict::run_prediction; +use crate::progress::Progress; use crate::retrieve_context::run_context_retrieval; use crate::score::run_scoring; @@ -112,7 +112,7 @@ impl EpArgs { } fn main() { - zlog::init(); + let _ = zlog::try_init(Some("error".into())); zlog::init_output_stderr(); let args = EpArgs::parse(); @@ -151,33 +151,41 @@ fn main() { predict::sync_batches(&args.provider).await }; - let example_count = examples.len(); - let example_ix = AtomicUsize::new(0); - let mut grouped_examples = group_examples_by_repo(&mut examples); + let total_examples = examples.len(); + let progress = Progress::new(total_examples); + let mut grouped_examples = group_examples_by_repo(&mut examples); let example_batches = grouped_examples.chunks_mut(args.max_parallelism); + for example_batch in example_batches { let futures = example_batch.into_iter().map(|repo_examples| async { for example in repo_examples.iter_mut() { - eprintln!( - "Processing example: {}/{}", - example_ix.load(SeqCst) + 1, - example_count - ); - example_ix.fetch_add(1, SeqCst); match &command { Command::ParseExample => {} Command::LoadProject => { - run_load_project(example, app_state.clone(), cx.clone()).await; + run_load_project( + example, + app_state.clone(), + progress.clone(), + cx.clone(), + ) + .await; } Command::Context => { - run_context_retrieval(example, app_state.clone(), cx.clone()).await; + run_context_retrieval( + example, + app_state.clone(), + progress.clone(), + cx.clone(), + ) + .await; } Command::FormatPrompt(args) => { run_format_prompt( example, args.prompt_format, app_state.clone(), + progress.clone(), cx.clone(), ) .await; @@ -188,6 +196,7 @@ fn main() { Some(args.provider), args.repetitions, app_state.clone(), + progress.clone(), cx.clone(), ) .await; @@ -196,7 +205,14 @@ fn main() { run_distill(example).await; } Command::Score(args) | Command::Eval(args) => { - run_scoring(example, &args, app_state.clone(), cx.clone()).await; + run_scoring( + example, + &args, + app_state.clone(), + progress.clone(), + cx.clone(), + ) + .await; } Command::Clean => { unreachable!() @@ -206,6 +222,7 @@ fn main() { }); futures::future::join_all(futures).await; } + progress.clear(); if args.output.is_some() || !matches!(command, Command::Eval(_)) { write_examples(&examples, output.as_ref()); diff --git 
a/crates/edit_prediction_cli/src/predict.rs b/crates/edit_prediction_cli/src/predict.rs
index 4ff3e1d947fd886633108cbba0d32909f72304e4..14628a896273f7ff11166a1daac248598e198847 100644
--- a/crates/edit_prediction_cli/src/predict.rs
+++ b/crates/edit_prediction_cli/src/predict.rs
@@ -6,6 +6,7 @@ use crate::{
     headless::EpAppState,
     load_project::run_load_project,
     paths::{LATEST_EXAMPLE_RUN_DIR, RUN_DIR},
+    progress::{InfoStyle, Progress, Step},
     retrieve_context::run_context_retrieval,
 };
 use edit_prediction::{DebugEvent, EditPredictionStore};
@@ -24,29 +25,41 @@ pub async fn run_prediction(
     example: &mut Example,
     provider: Option<PredictionProvider>,
     repetition_count: usize,
     app_state: Arc<EpAppState>,
+    progress: Arc<Progress>,
     mut cx: AsyncApp,
 ) {
     if !example.predictions.is_empty() {
         return;
     }
 
-    run_context_retrieval(example, app_state.clone(), cx.clone()).await;
-
     let provider = provider.unwrap();
 
+    run_context_retrieval(example, app_state.clone(), progress.clone(), cx.clone()).await;
+
     if matches!(
         provider,
         PredictionProvider::Teacher | PredictionProvider::TeacherNonBatching
     ) {
+        let _step_progress = progress.start(Step::Predict, &example.name);
+
         if example.prompt.is_none() {
-            run_format_prompt(example, PromptFormat::Teacher, app_state.clone(), cx).await;
+            run_format_prompt(
+                example,
+                PromptFormat::Teacher,
+                app_state.clone(),
+                progress,
+                cx,
+            )
+            .await;
         }
 
         let batched = matches!(provider, PredictionProvider::Teacher);
         return predict_anthropic(example, repetition_count, batched).await;
     }
 
-    run_load_project(example, app_state.clone(), cx.clone()).await;
+    run_load_project(example, app_state.clone(), progress.clone(), cx.clone()).await;
+
+    let _step_progress = progress.start(Step::Predict, &example.name);
 
     if matches!(
         provider,
@@ -181,18 +194,31 @@ pub async fn run_prediction(
             .await
             .unwrap();
 
+        let actual_patch = prediction
+            .and_then(|prediction| {
+                let prediction = prediction.prediction.ok()?;
+                prediction.edit_preview.as_unified_diff(&prediction.edits)
+            })
+            .unwrap_or_default();
+
+        let has_prediction = !actual_patch.is_empty();
+
         updated_example
             .lock()
             .unwrap()
             .predictions
             .last_mut()
             .unwrap()
-            .actual_patch = prediction
-            .and_then(|prediction| {
-                let prediction = prediction.prediction.ok()?;
-                prediction.edit_preview.as_unified_diff(&prediction.edits)
-            })
-            .unwrap_or_default();
+            .actual_patch = actual_patch;
+
+        if ix == repetition_count - 1 {
+            let (info, style) = if has_prediction {
+                ("predicted", InfoStyle::Normal)
+            } else {
+                ("no prediction", InfoStyle::Warning)
+            };
+            _step_progress.set_info(info, style);
+        }
     }
 
     ep_store
diff --git a/crates/edit_prediction_cli/src/progress.rs b/crates/edit_prediction_cli/src/progress.rs
new file mode 100644
index 0000000000000000000000000000000000000000..5cd906d89a20813676b09af0d2cbeca532c5ba12
--- /dev/null
+++ b/crates/edit_prediction_cli/src/progress.rs
@@ -0,0 +1,372 @@
+use std::{
+    borrow::Cow,
+    collections::HashMap,
+    io::{IsTerminal, Write},
+    sync::{Arc, Mutex},
+    time::{Duration, Instant},
+};
+
+pub struct Progress {
+    inner: Mutex<ProgressInner>,
+}
+
+struct ProgressInner {
+    completed: Vec<CompletedTask>,
+    in_progress: HashMap<String, InProgressTask>,
+    is_tty: bool,
+    terminal_width: usize,
+    max_example_name_len: usize,
+    status_lines_displayed: usize,
+    total_examples: usize,
+}
+
+#[derive(Clone)]
+struct InProgressTask {
+    step: Step,
+    started_at: Instant,
+    substatus: Option<String>,
+    info: Option<(String, InfoStyle)>,
+}
+
+struct CompletedTask {
+    step: Step,
+    example_name: String,
+    duration: Duration,
+    info: Option<(String, InfoStyle)>,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum Step {
+ LoadProject, + Context, + FormatPrompt, + Predict, + Score, +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum InfoStyle { + Normal, + Warning, +} + +impl Step { + pub fn label(&self) -> &'static str { + match self { + Step::LoadProject => "Load", + Step::Context => "Context", + Step::FormatPrompt => "Format", + Step::Predict => "Predict", + Step::Score => "Score", + } + } + + fn color_code(&self) -> &'static str { + match self { + Step::LoadProject => "\x1b[33m", + Step::Context => "\x1b[35m", + Step::FormatPrompt => "\x1b[34m", + Step::Predict => "\x1b[32m", + Step::Score => "\x1b[31m", + } + } +} + +const RIGHT_MARGIN: usize = 4; + +impl Progress { + pub fn new(total_examples: usize) -> Arc { + Arc::new(Self { + inner: Mutex::new(ProgressInner { + completed: Vec::new(), + in_progress: HashMap::new(), + is_tty: std::io::stderr().is_terminal(), + terminal_width: get_terminal_width(), + max_example_name_len: 0, + status_lines_displayed: 0, + total_examples, + }), + }) + } + + pub fn start(self: &Arc, step: Step, example_name: &str) -> Arc { + { + let mut inner = self.inner.lock().unwrap(); + + Self::clear_status_lines(&mut inner); + + inner.max_example_name_len = inner.max_example_name_len.max(example_name.len()); + + inner.in_progress.insert( + example_name.to_string(), + InProgressTask { + step, + started_at: Instant::now(), + substatus: None, + info: None, + }, + ); + + Self::print_status_lines(&mut inner); + } + + Arc::new(StepProgress { + progress: self.clone(), + step, + example_name: example_name.to_string(), + }) + } + + pub fn finish(&self, step: Step, example_name: &str) { + let mut inner = self.inner.lock().unwrap(); + + let task = inner.in_progress.remove(example_name); + if let Some(task) = task { + if task.step == step { + inner.completed.push(CompletedTask { + step: task.step, + example_name: example_name.to_string(), + duration: task.started_at.elapsed(), + info: task.info, + }); + + Self::clear_status_lines(&mut inner); + Self::print_completed(&inner, inner.completed.last().unwrap()); + Self::print_status_lines(&mut inner); + } else { + inner.in_progress.insert(example_name.to_string(), task); + } + } + } + + fn clear_status_lines(inner: &mut ProgressInner) { + if inner.is_tty && inner.status_lines_displayed > 0 { + // Move up and clear each line we previously displayed + for _ in 0..inner.status_lines_displayed { + eprint!("\x1b[A\x1b[K"); + } + let _ = std::io::stderr().flush(); + inner.status_lines_displayed = 0; + } + } + + fn print_completed(inner: &ProgressInner, task: &CompletedTask) { + let duration = format_duration(task.duration); + let name_width = inner.max_example_name_len; + + if inner.is_tty { + let reset = "\x1b[0m"; + let bold = "\x1b[1m"; + let dim = "\x1b[2m"; + + let yellow = "\x1b[33m"; + let info_part = task + .info + .as_ref() + .map(|(s, style)| { + if *style == InfoStyle::Warning { + format!("{yellow}{s}{reset}") + } else { + s.to_string() + } + }) + .unwrap_or_default(); + + let prefix = format!( + "{bold}{color}{label:>12}{reset} {name:12} {name: = inner.in_progress.iter().collect(); + tasks.sort_by_key(|(name, _)| *name); + + let mut lines_printed = 0; + + for (name, task) in tasks.iter() { + let elapsed = format_duration(task.started_at.elapsed()); + let substatus_part = task + .substatus + .as_ref() + .map(|s| truncate_with_ellipsis(s, 30)) + .unwrap_or_default(); + + let step_label = task.step.label(); + let step_color = task.step.color_code(); + let name_width = inner.max_example_name_len; + + let prefix = format!( + 
"{bold}{step_color}{step_label:>12}{reset} {name:, + step: Step, + example_name: String, +} + +impl StepProgress { + pub fn set_substatus(&self, substatus: impl Into>) { + let mut inner = self.progress.inner.lock().unwrap(); + if let Some(task) = inner.in_progress.get_mut(&self.example_name) { + task.substatus = Some(substatus.into().into_owned()); + Progress::clear_status_lines(&mut inner); + Progress::print_status_lines(&mut inner); + } + } + + pub fn clear_substatus(&self) { + let mut inner = self.progress.inner.lock().unwrap(); + if let Some(task) = inner.in_progress.get_mut(&self.example_name) { + task.substatus = None; + Progress::clear_status_lines(&mut inner); + Progress::print_status_lines(&mut inner); + } + } + + pub fn set_info(&self, info: impl Into, style: InfoStyle) { + let mut inner = self.progress.inner.lock().unwrap(); + if let Some(task) = inner.in_progress.get_mut(&self.example_name) { + task.info = Some((info.into(), style)); + } + } +} + +impl Drop for StepProgress { + fn drop(&mut self) { + self.progress.finish(self.step, &self.example_name); + } +} + +#[cfg(unix)] +fn get_terminal_width() -> usize { + unsafe { + let mut winsize: libc::winsize = std::mem::zeroed(); + if libc::ioctl(libc::STDERR_FILENO, libc::TIOCGWINSZ, &mut winsize) == 0 + && winsize.ws_col > 0 + { + winsize.ws_col as usize + } else { + 80 + } + } +} + +#[cfg(not(unix))] +fn get_terminal_width() -> usize { + 80 +} + +fn strip_ansi_len(s: &str) -> usize { + let mut len = 0; + let mut in_escape = false; + for c in s.chars() { + if c == '\x1b' { + in_escape = true; + } else if in_escape { + if c == 'm' { + in_escape = false; + } + } else { + len += 1; + } + } + len +} + +fn truncate_with_ellipsis(s: &str, max_len: usize) -> String { + if s.len() <= max_len { + s.to_string() + } else { + format!("{}…", &s[..max_len.saturating_sub(1)]) + } +} + +fn format_duration(duration: Duration) -> String { + const MINUTE_IN_MILLIS: f32 = 60. 
* 1000.;
+
+    let millis = duration.as_millis() as f32;
+    if millis < 1000.0 {
+        format!("{}ms", millis)
+    } else if millis < MINUTE_IN_MILLIS {
+        format!("{:.1}s", millis / 1_000.0)
+    } else {
+        format!("{:.1}m", millis / MINUTE_IN_MILLIS)
+    }
+}
diff --git a/crates/edit_prediction_cli/src/retrieve_context.rs b/crates/edit_prediction_cli/src/retrieve_context.rs
index 0ef7a4676e30189f1417c0a8c339e8ac7f76e0ef..83b5906e976ca3a1a6bdff6a96c36713eef08058 100644
--- a/crates/edit_prediction_cli/src/retrieve_context.rs
+++ b/crates/edit_prediction_cli/src/retrieve_context.rs
@@ -2,6 +2,7 @@ use crate::{
     example::{Example, ExampleContext},
     headless::EpAppState,
     load_project::run_load_project,
+    progress::{InfoStyle, Progress, Step, StepProgress},
 };
 use collections::HashSet;
 use edit_prediction::{DebugEvent, EditPredictionStore};
@@ -9,18 +10,22 @@ use futures::{FutureExt as _, StreamExt as _, channel::mpsc};
 use gpui::{AsyncApp, Entity};
 use language::Buffer;
 use project::Project;
-use std::{sync::Arc, time::Duration};
+use std::sync::Arc;
+use std::time::Duration;
 
 pub async fn run_context_retrieval(
     example: &mut Example,
     app_state: Arc<EpAppState>,
+    progress: Arc<Progress>,
     mut cx: AsyncApp,
 ) {
     if example.context.is_some() {
         return;
     }
 
-    run_load_project(example, app_state.clone(), cx.clone()).await;
+    run_load_project(example, app_state.clone(), progress.clone(), cx.clone()).await;
+
+    let step_progress = progress.start(Step::Context, &example.name);
 
     let state = example.state.as_ref().unwrap();
     let project = state.project.clone();
@@ -30,7 +35,7 @@ pub async fn run_context_retrieval(
             project.register_buffer_with_language_servers(&state.buffer, cx)
         })
         .unwrap();
-    wait_for_language_servers_to_start(example, &project, &state.buffer, &mut cx).await;
+    wait_for_language_servers_to_start(&project, &state.buffer, &step_progress, &mut cx).await;
 
     let ep_store = cx
         .update(|cx| EditPredictionStore::try_global(cx).unwrap())
@@ -58,19 +63,20 @@ pub async fn run_context_retrieval(
         .update(&mut cx, |store, cx| store.context_for_project(&project, cx))
         .unwrap();
 
+    let excerpt_count: usize = context_files.iter().map(|f| f.excerpts.len()).sum();
+    step_progress.set_info(format!("{} excerpts", excerpt_count), InfoStyle::Normal);
+
     example.context = Some(ExampleContext {
         files: context_files,
     });
 }
 
 async fn wait_for_language_servers_to_start(
-    example: &Example,
     project: &Entity<Project>,
     buffer: &Entity<Buffer>,
+    step_progress: &Arc<StepProgress>,
     cx: &mut AsyncApp,
 ) {
-    let log_prefix = format!("{} | ", example.name);
-
     let lsp_store = project
         .read_with(cx, |project, _| project.lsp_store())
         .unwrap();
@@ -89,11 +95,7 @@ async fn wait_for_language_servers_to_start(
         })
         .unwrap_or_default();
 
-    eprintln!(
-        "{}⏵ Waiting for {} language servers",
-        log_prefix,
-        language_server_ids.len()
-    );
+    step_progress.set_substatus(format!("waiting for {} LSPs", language_server_ids.len()));
 
     let timeout = cx
         .background_executor()
@@ -102,10 +104,10 @@ async fn wait_for_language_servers_to_start(
     let (mut tx, mut rx) = mpsc::channel(language_server_ids.len());
 
     let added_subscription = cx.subscribe(project, {
-        let log_prefix = log_prefix.clone();
+        let step_progress = step_progress.clone();
         move |_, event, _| match event {
             project::Event::LanguageServerAdded(language_server_id, name, _) => {
-                eprintln!("{}+ Language server started: {}", log_prefix, name);
+                step_progress.set_substatus(format!("LSP started: {}", name));
                 tx.try_send(*language_server_id).ok();
             }
             _ => {}
@@ -137,7 +139,7 @@ async fn wait_for_language_servers_to_start(
     let (mut tx, mut rx) = mpsc::channel(language_server_ids.len());
     let subscriptions = [
         cx.subscribe(&lsp_store, {
-            let log_prefix = log_prefix.clone();
+            let step_progress = step_progress.clone();
             move |_, event, _| {
                 if let project::LspStoreEvent::LanguageServerUpdate {
                     message:
@@ -150,12 +152,12 @@ async fn wait_for_language_servers_to_start(
                     ..
                 } = event
                 {
-                    eprintln!("{}⟲ {message}", log_prefix)
+                    step_progress.set_substatus(message.clone());
                 }
             }
         }),
         cx.subscribe(project, {
-            let log_prefix = log_prefix.clone();
+            let step_progress = step_progress.clone();
             move |_, event, cx| match event {
                 project::Event::DiskBasedDiagnosticsFinished { language_server_id } => {
                     let lsp_store = lsp_store.read(cx);
@@ -163,7 +165,7 @@ async fn wait_for_language_servers_to_start(
                         .language_server_adapter_for_id(*language_server_id)
                         .unwrap()
                         .name();
-                    eprintln!("{}⚑ Language server idle: {}", log_prefix, name);
+                    step_progress.set_substatus(format!("LSP idle: {}", name));
                     tx.try_send(*language_server_id).ok();
                 }
                 _ => {}
@@ -192,4 +194,5 @@ async fn wait_for_language_servers_to_start(
     }
 
     drop(subscriptions);
+    step_progress.clear_substatus();
 }
diff --git a/crates/edit_prediction_cli/src/score.rs b/crates/edit_prediction_cli/src/score.rs
index 88ec5d5831c763b604c53d762a1ea9722e7279cb..23086dcc6e9279820216961ef0fe9fc65c3ea3eb 100644
--- a/crates/edit_prediction_cli/src/score.rs
+++ b/crates/edit_prediction_cli/src/score.rs
@@ -4,6 +4,7 @@ use crate::{
     headless::EpAppState,
     metrics::{self, ClassificationMetrics},
     predict::run_prediction,
+    progress::{Progress, Step},
 };
 use edit_prediction::udiff::DiffLine;
 use gpui::AsyncApp;
@@ -13,6 +14,7 @@ pub async fn run_scoring(
     example: &mut Example,
     args: &PredictArgs,
     app_state: Arc<EpAppState>,
+    progress: Arc<Progress>,
     cx: AsyncApp,
 ) {
     run_prediction(
@@ -20,10 +22,13 @@ pub async fn run_scoring(
         Some(args.provider),
         args.repetitions,
         app_state,
+        progress.clone(),
         cx,
     )
     .await;
 
+    let _progress = progress.start(Step::Score, &example.name);
+
     let expected_patch = parse_patch(&example.expected_patch);
 
     let mut scores = vec![];
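
For reference, the sketch below shows how a pipeline step is expected to drive the progress API added in progress.rs above. It is a minimal illustration, assuming it lives inside the edit_prediction_cli crate; the wrapper function name and the hard-coded substatus and info strings are made up for the example, while start, set_substatus, set_info, and the drop-to-finish behavior are the calls introduced by this patch.

    use std::sync::Arc;

    use crate::progress::{InfoStyle, Progress, Step};

    fn load_one_example(progress: Arc<Progress>, example_name: &str) {
        // `start` registers the task as in progress, redraws the status
        // lines, and returns a guard tied to this step and example.
        let step = progress.start(Step::LoadProject, example_name);

        // Transient detail shown on the in-progress status line.
        step.set_substatus("cloning repository");

        // ... clone the repository, prepare the worktree, open the buffer ...

        // Extra info stored on the task and echoed on its completed line.
        step.set_info("Rust", InfoStyle::Normal);

        // Dropping the guard calls Progress::finish, which moves the task to
        // the completed list and prints a Cargo-style line with the step
        // label, example name, and elapsed time.
    }

Because finishing happens in Drop, a step is reported as completed even on early returns, which is why the call sites above only need to hold the guard in a binding such as _step_progress.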