mod anthropic_client;
mod distill;
mod example;
mod format_prompt;
mod headless;
mod load_project;
mod metrics;
mod paths;
mod predict;
mod retrieve_context;
mod score;

use clap::{Args, CommandFactory, Parser, Subcommand, ValueEnum};
use edit_prediction::EditPredictionStore;
use gpui::Application;
use reqwest_client::ReqwestClient;
use serde::{Deserialize, Serialize};
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;
use std::{path::PathBuf, sync::Arc};

use crate::distill::run_distill;
use crate::example::{group_examples_by_repo, read_examples, write_examples};
use crate::format_prompt::run_format_prompt;
use crate::load_project::run_load_project;
use crate::predict::run_prediction;
use crate::retrieve_context::run_context_retrieval;
use crate::score::run_scoring;
#[derive(Parser, Debug)]
#[command(name = "ep")]
struct EpArgs {
    #[arg(long, default_value_t = false)]
    printenv: bool,
    #[arg(long, default_value_t = 10)]
    max_parallelism: usize,
    #[command(subcommand)]
    command: Option<Command>,
    #[arg(global = true)]
    inputs: Vec<PathBuf>,
    #[arg(long, short, global = true)]
    output: Option<PathBuf>,
    #[arg(long, short, global = true)]
    in_place: bool,
}

#[derive(Subcommand, Debug)]
enum Command {
    /// Parse markdown examples and output a combined .jsonl file
    ParseExample,
    /// Create git worktrees for each example and load file contents
    LoadProject,
    /// Retrieve context for input examples
    Context,
    /// Generate a prompt string for a specific model
    FormatPrompt(FormatPromptArgs),
    /// Run edit prediction
    Predict(PredictArgs),
    /// Compute a score based on actual and expected patches
    Score(PredictArgs),
    /// Prepare a distillation dataset by copying expected outputs to
    /// predicted outputs and removing actual outputs and prompts
    Distill,
    /// Print aggregated scores
    Eval(PredictArgs),
    /// Remove git repositories and worktrees
    Clean,
}

#[derive(Debug, Args)]
struct FormatPromptArgs {
    #[arg(long)]
    prompt_format: PromptFormat,
}
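
/// Prompt formats the `format-prompt` subcommand can emit.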
#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
enum PromptFormat {
    Teacher,
    Zeta2,
}

#[derive(Debug, Args)]
struct PredictArgs {
    #[arg(long)]
    provider: PredictionProvider,
    #[arg(long, default_value_t = 1)]
    repetitions: usize,
}
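
/// Backends that can generate edit predictions.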
#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
enum PredictionProvider {
    Sweep,
    Mercury,
    Zeta1,
    Zeta2,
    Teacher,
    TeacherNonBatching,
}

impl EpArgs {
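    /// Resolves where results are written: `--in-place` writes back to the
    /// single input file; otherwise `--output` is used, if provided.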
    fn output_path(&self) -> Option<PathBuf> {
        if self.in_place {
            if self.inputs.len() == 1 {
                self.inputs.first().cloned()
            } else {
                panic!("--in-place requires exactly one input file")
            }
        } else {
            self.output.clone()
        }
    }
}

fn main() {
    zlog::init();
    zlog::init_output_stderr();
    let args = EpArgs::parse();

    if args.printenv {
        ::util::shell_env::print_env();
        return;
    }
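
    // Resolve the output path before `args.command` is moved out of `args`.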
    let output = args.output_path();
    let command = match args.command {
        Some(cmd) => cmd,
        None => {
            EpArgs::command().print_help().unwrap();
            return;
        }
    };

    if let Command::Clean = &command {
        std::fs::remove_dir_all(&*paths::DATA_DIR).unwrap();
        return;
    }
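
    // Read every input example up front; processing happens inside the
    // headless GPUI application below.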
    let mut examples = read_examples(&args.inputs);
    let http_client = Arc::new(ReqwestClient::new());
    let app = Application::headless().with_http_client(http_client);

    app.run(move |cx| {
        let app_state = Arc::new(headless::init(cx));
        EditPredictionStore::global(&app_state.client, &app_state.user_store, cx);
        cx.spawn(async move |cx| {
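            // For prediction runs, sync provider batches before processing;
            // the same call runs again after all examples finish.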
            if let Command::Predict(args) = &command {
                predict::sync_batches(&args.provider).await
            }

            let example_count = examples.len();
            let example_ix = AtomicUsize::new(0);
            let mut grouped_examples = group_examples_by_repo(&mut examples);
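
            // Bounded parallelism: up to `max_parallelism` repository groups
            // run concurrently, while examples within a group run sequentially.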
            let example_batches = grouped_examples.chunks_mut(args.max_parallelism);
            for example_batch in example_batches {
                let futures = example_batch.iter_mut().map(|repo_examples| async {
                    for example in repo_examples.iter_mut() {
                        // Claim the index atomically so concurrent groups never
                        // report duplicate progress numbers.
                        let ix = example_ix.fetch_add(1, SeqCst);
                        eprintln!("Processing example: {}/{}", ix + 1, example_count);
                        match &command {
                            Command::ParseExample => {}
                            Command::LoadProject => {
                                run_load_project(example, app_state.clone(), cx.clone()).await;
                            }
                            Command::Context => {
                                run_context_retrieval(example, app_state.clone(), cx.clone()).await;
                            }
                            Command::FormatPrompt(args) => {
                                run_format_prompt(
                                    example,
                                    args.prompt_format,
                                    app_state.clone(),
                                    cx.clone(),
                                )
                                .await;
                            }
                            Command::Predict(args) => {
                                run_prediction(
                                    example,
                                    Some(args.provider),
                                    args.repetitions,
                                    app_state.clone(),
                                    cx.clone(),
                                )
                                .await;
                            }
                            Command::Distill => {
                                run_distill(example).await;
                            }
                            Command::Score(args) | Command::Eval(args) => {
                                run_scoring(example, args, app_state.clone(), cx.clone()).await;
                            }
                            Command::Clean => {
                                unreachable!("handled before the app runs")
                            }
                        }
                    }
                });
                futures::future::join_all(futures).await;
            }
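
            // Persist the processed examples unless this is an eval run with
            // no explicit output path.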
            if args.output.is_some() || !matches!(command, Command::Eval(_)) {
                write_examples(&examples, output.as_ref());
            }
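
            // Predict syncs the batches created above with the provider;
            // Eval prints the aggregated score report.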
            match &command {
                Command::Predict(args) => predict::sync_batches(&args.provider).await,
                Command::Eval(_) => score::print_report(&examples),
                _ => (),
            }

            let _ = cx.update(|cx| cx.quit());
        })
        .detach();
    });
}