mod anthropic_client;
mod distill;
mod example;
mod format_prompt;
mod headless;
mod load_project;
mod metrics;
mod paths;
mod predict;
mod progress;
mod retrieve_context;
mod score;

use clap::{Args, CommandFactory, Parser, Subcommand, ValueEnum};
use edit_prediction::EditPredictionStore;
use gpui::Application;
use reqwest_client::ReqwestClient;
use serde::{Deserialize, Serialize};
use std::{path::PathBuf, sync::Arc};

use crate::distill::run_distill;
use crate::example::{group_examples_by_repo, read_examples, write_examples};
use crate::format_prompt::run_format_prompt;
use crate::load_project::run_load_project;
use crate::predict::run_prediction;
use crate::progress::Progress;
use crate::retrieve_context::run_context_retrieval;
use crate::score::run_scoring;

#[derive(Parser, Debug)]
#[command(name = "ep")]
struct EpArgs {
    /// Print the shell environment and exit.
    #[arg(long, default_value_t = false)]
    printenv: bool,
    /// Maximum number of repository groups processed concurrently.
    #[arg(long, default_value_t = 10, global = true)]
    max_parallelism: usize,
    #[command(subcommand)]
    command: Option<Command>,
    /// Input example files.
    #[arg(global = true)]
    inputs: Vec<PathBuf>,
    #[arg(long, short, global = true)]
    output: Option<PathBuf>,
    /// Write results back to the single input file.
    #[arg(long, short, global = true)]
    in_place: bool,
}

#[derive(Subcommand, Debug)]
enum Command {
    /// Parse markdown examples and output a combined .jsonl file
    ParseExample,
    /// Create git worktrees for each example and load file contents
    LoadProject,
    /// Retrieve context for input examples
    Context,
    /// Generate a prompt string for a specific model
    FormatPrompt(FormatPromptArgs),
    /// Run edit prediction
    Predict(PredictArgs),
    /// Compute a score based on actual and expected patches
    Score(PredictArgs),
    /// Prepare a distillation dataset by copying expected outputs to
    /// predicted outputs and removing actual outputs and prompts
    Distill,
    /// Print aggregated scores
    Eval(PredictArgs),
    /// Remove git repositories and worktrees
    Clean,
}
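
// The subcommands form a rough pipeline over the same example files:
// parse-example -> load-project -> context -> format-prompt -> predict ->
// score/eval. A hypothetical session (file names are illustrative only, not
// defined anywhere in this crate):
//
//   ep parse-example examples/*.md -o examples.jsonl
//   ep load-project examples.jsonl --in-place
//   ep context examples.jsonl --in-place
//   ep predict --provider zeta2 examples.jsonl --in-place
//   ep eval --provider zeta2 examples.jsonl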

#[derive(Debug, Args)]
struct FormatPromptArgs {
    #[arg(long)]
    prompt_format: PromptFormat,
}

#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
enum PromptFormat {
    Teacher,
    Zeta2,
}

#[derive(Debug, Args)]
struct PredictArgs {
    #[arg(long)]
    provider: PredictionProvider,
    #[arg(long, default_value_t = 1)]
    repetitions: usize,
}

#[derive(Clone, Copy, Debug, ValueEnum, Serialize, Deserialize)]
enum PredictionProvider {
    Sweep,
    Mercury,
    Zeta1,
    Zeta2,
    Teacher,
    TeacherNonBatching,
}
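
// With clap's default `ValueEnum` naming, variants are written in kebab-case
// on the command line, so e.g. `--provider teacher-non-batching` selects
// `TeacherNonBatching`. (Illustrative flag usage; not referenced elsewhere in
// this file.)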

impl EpArgs {
    /// Resolve the effective output path: `--in-place` writes back to the
    /// single input file; otherwise the explicit `--output`, if any.
    fn output_path(&self) -> Option<PathBuf> {
        if self.in_place {
            if self.inputs.len() == 1 {
                self.inputs.first().cloned()
            } else {
                panic!("--in-place requires exactly one input file")
            }
        } else {
            self.output.clone()
        }
    }
}

fn main() {
    let args = EpArgs::parse();

    if args.printenv {
        ::util::shell_env::print_env();
        return;
    }

    let output = args.output_path();
    let command = match args.command {
        Some(cmd) => cmd,
        None => {
            EpArgs::command().print_help().unwrap();
            return;
        }
    };

    // `clean` needs no examples or app setup; handle it up front.
    if matches!(command, Command::Clean) {
        std::fs::remove_dir_all(&*paths::DATA_DIR).unwrap();
        return;
    }

    let mut examples = read_examples(&args.inputs);
    let http_client = Arc::new(ReqwestClient::new());
    let app = Application::headless().with_http_client(http_client);

    app.run(move |cx| {
        let app_state = Arc::new(headless::init(cx));
        EditPredictionStore::global(&app_state.client, &app_state.user_store, cx);

        cx.spawn(async move |cx| {
            if let Command::Predict(args) = &command {
                predict::sync_batches(&args.provider).await
            }

            let total_examples = examples.len();
            Progress::global().set_total_examples(total_examples);

            let mut grouped_examples = group_examples_by_repo(&mut examples);
            let example_batches = grouped_examples.chunks_mut(args.max_parallelism);

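            // Each repository's examples run sequentially within one future,
            // with up to `max_parallelism` repository groups in flight at a
            // time (presumably to avoid concurrent git operations on the same
            // repo).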
            for example_batch in example_batches {
                let futures = example_batch.iter_mut().map(|repo_examples| async {
                    for example in repo_examples.iter_mut() {
                        match &command {
                            Command::ParseExample => {}
                            Command::LoadProject => {
                                run_load_project(example, app_state.clone(), cx.clone()).await;
                            }
                            Command::Context => {
                                run_context_retrieval(example, app_state.clone(), cx.clone())
                                    .await;
                            }
                            Command::FormatPrompt(args) => {
                                run_format_prompt(
                                    example,
                                    args.prompt_format,
                                    app_state.clone(),
                                    cx.clone(),
                                )
                                .await;
                            }
                            Command::Predict(args) => {
                                run_prediction(
                                    example,
                                    Some(args.provider),
                                    args.repetitions,
                                    app_state.clone(),
                                    cx.clone(),
                                )
                                .await;
                            }
                            Command::Distill => {
                                run_distill(example).await;
                            }
                            Command::Score(args) | Command::Eval(args) => {
                                run_scoring(example, args, app_state.clone(), cx.clone()).await;
                            }
                            Command::Clean => {
                                unreachable!()
                            }
                        }
                    }
                });
                futures::future::join_all(futures).await;
            }
            Progress::global().clear();

            // `eval` is read-only unless an explicit `--output` was given;
            // every other command writes the examples back.
            if args.output.is_some() || !matches!(command, Command::Eval(_)) {
                write_examples(&examples, output.as_ref());
            }

            match &command {
                Command::Predict(args) => predict::sync_batches(&args.provider).await,
                Command::Eval(_) => score::print_report(&examples),
                _ => (),
            }

            let _ = cx.update(|cx| cx.quit());
        })
        .detach();
    });
}
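
// A minimal sketch of `output_path`'s contract as unit tests. The struct is
// built directly rather than parsed from a command line, and the file names
// are placeholders.
#[cfg(test)]
mod tests {
    use super::*;

    fn base_args() -> EpArgs {
        EpArgs {
            printenv: false,
            max_parallelism: 10,
            command: None,
            inputs: vec![PathBuf::from("a.jsonl")],
            output: None,
            in_place: false,
        }
    }

    #[test]
    fn in_place_uses_the_single_input() {
        let args = EpArgs {
            in_place: true,
            ..base_args()
        };
        assert_eq!(args.output_path(), Some(PathBuf::from("a.jsonl")));
    }

    #[test]
    fn otherwise_the_explicit_output_wins() {
        let args = EpArgs {
            output: Some(PathBuf::from("out.jsonl")),
            ..base_args()
        };
        assert_eq!(args.output_path(), Some(PathBuf::from("out.jsonl")));
    }

    #[test]
    #[should_panic]
    fn in_place_rejects_multiple_inputs() {
        let args = EpArgs {
            in_place: true,
            inputs: vec![PathBuf::from("a.jsonl"), PathBuf::from("b.jsonl")],
            ..base_args()
        };
        let _ = args.output_path();
    }
}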