//! Quality assessment of predictions using LLM-as-a-judge.
//!
//! This module uses LLM Batch APIs to evaluate prediction quality.
//! Caching is handled by the underlying client.
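//!
//! A rough usage sketch; `load_examples` is a hypothetical stand-in for
//! however `Example` values are produced elsewhere in the crate:
//!
//! ```ignore
//! let mut examples: Vec<Example> = load_examples("examples.jsonl")?; // hypothetical loader
//! let args = QaArgs { no_batch: false, wait: true, backend: BatchProvider::Anthropic };
//! run_qa(&mut examples, &args, Some(&PathBuf::from("qa.jsonl"))).await?;
//! ```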

use crate::BatchProvider;
use crate::anthropic_client::AnthropicClient;
use crate::example::Example;
use crate::format_prompt::extract_cursor_excerpt_from_example;
use crate::openai_client::OpenAiClient;
use crate::paths::LLM_CACHE_DB;
use crate::word_diff::unified_to_word_diff;
use anyhow::Result;
use serde::{Deserialize, Serialize};
use std::io::{BufWriter, Write};
use std::path::PathBuf;

const PROMPT_TEMPLATE: &str = include_str!("prompts/qa.md");

/// Arguments for the QA command.
#[derive(Debug, Clone, clap::Args)]
pub struct QaArgs {
    /// Use synchronous API instead of batch
    #[clap(long)]
    pub no_batch: bool,

    /// Wait for batch to complete (polls every 30s)
    #[clap(long)]
    pub wait: bool,

    /// Which LLM provider to use (anthropic or openai)
    #[clap(long, default_value = "anthropic")]
    pub backend: BatchProvider,
}

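/// Judge model to use for each provider.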
fn model_for_backend(backend: BatchProvider) -> &'static str {
    match backend {
        BatchProvider::Anthropic => "claude-sonnet-4-5",
        BatchProvider::Openai => "gpt-5.2",
    }
}

/// Result of QA evaluation for a single prediction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QaResult {
    /// Free-form reasoning from the judge.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reasoning: Option<String>,

    /// Does the prediction undo/revert changes the user intentionally made?
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reverts_edits: Option<bool>,

    /// Confidence score (1-5) for user acceptance likelihood.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub confidence: Option<u8>,

    /// The raw response from the model.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub response: Option<String>,

    /// Error message if parsing or request failed.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}

/// Build the assessment prompt for an example.
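///
/// Fills the `{edit_history}`, `{cursor_excerpt}`, and `{actual_patch_word_diff}`
/// placeholders in `prompts/qa.md`. Returns `None` if the example has no
/// prediction, no actual patch, no prompt inputs, or no extractable cursor excerpt.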
pub fn build_prompt(example: &Example) -> Option<String> {
    let prediction = example.predictions.first()?;
    let actual_patch = prediction.actual_patch.as_ref()?;
    let prompt_inputs = example.prompt_inputs.as_ref()?;

    let actual_patch_word_diff = unified_to_word_diff(actual_patch);

    // Format cursor excerpt (reuse from format_prompt)
    let cursor_excerpt = extract_cursor_excerpt_from_example(example)?;

    let mut edit_history = String::new();
    for event in &prompt_inputs.edit_history {
        match event.as_ref() {
            zeta_prompt::Event::BufferChange {
                path,
                old_path,
                diff,
                predicted: _,
                in_open_source_repo: _,
            } => {
                edit_history.push_str(&format!("--- a{}\n", old_path.display()));
                edit_history.push_str(&format!("+++ b{}\n", path.display()));
                let diff_word_diff = unified_to_word_diff(diff);
                edit_history.push_str(&diff_word_diff);
                edit_history.push_str("\n\n");
            }
        }
    }

    Some(
        PROMPT_TEMPLATE
            .replace("{edit_history}", &edit_history)
            .replace("{cursor_excerpt}", &cursor_excerpt)
            .replace("{actual_patch_word_diff}", &actual_patch_word_diff),
    )
}

/// Extract a code block from a response.
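///
/// Returns the contents of the first fenced block (the opening fence line
/// itself is skipped). If the fence is never closed, everything after the
/// opening fence is returned.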
fn extract_codeblock(response: &str) -> Option<String> {
    let lines: Vec<&str> = response.lines().collect();
    for (i, line) in lines.iter().enumerate() {
        if line.starts_with("```") {
            let start = i + 1;
            for (j, end_line) in lines[start..].iter().enumerate() {
                if end_line.starts_with("```") {
                    return Some(lines[start..start + j].join("\n"));
                }
            }
            return Some(lines[start..].join("\n"));
        }
    }
    None
}

/// Parse the LLM response into a QaResult.
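///
/// The judge is expected to reply with a JSON object, optionally wrapped in a
/// Markdown code fence, containing `reasoning` (string), `reverts_edits`
/// (bool), and `confidence` (integer 1-5); any field may be absent. The prompt
/// template defines the authoritative format.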
fn parse_response(response_text: &str) -> QaResult {
    let codeblock = extract_codeblock(response_text);

    // Try parsing codeblock first, then fall back to raw response
    for text_to_parse in [codeblock.as_deref(), Some(response_text.trim())] {
        let Some(text) = text_to_parse else {
            continue;
        };

        if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(text) {
            return QaResult {
                reasoning: parsed
                    .get("reasoning")
                    .and_then(|v| v.as_str())
                    .map(|s| s.to_string()),
                reverts_edits: parsed.get("reverts_edits").and_then(|v| v.as_bool()),
                confidence: parsed
                    .get("confidence")
                    .and_then(|v| v.as_u64())
                    .map(|v| v as u8),
                response: Some(response_text.to_string()),
                error: None,
            };
        }
    }

    // If all parsing attempts fail, return a result with the error field set
    QaResult {
        reasoning: Some(response_text.to_string()),
        reverts_edits: None,
        confidence: None,
        response: Some(response_text.to_string()),
        error: Some("Could not parse JSON from response".to_string()),
    }
}

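/// Thin wrapper over the two provider clients that normalizes their responses
/// to plain text.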
enum QaClient {
    Anthropic(AnthropicClient),
    OpenAi(OpenAiClient),
}

impl QaClient {
    async fn generate(&self, model: &str, max_tokens: u64, prompt: &str) -> Result<Option<String>> {
        match self {
            QaClient::Anthropic(client) => {
                let messages = vec![anthropic::Message {
                    role: anthropic::Role::User,
                    content: vec![anthropic::RequestContent::Text {
                        text: prompt.to_string(),
                        cache_control: None,
                    }],
                }];
                let response = client.generate(model, max_tokens, messages).await?;
                Ok(response.map(|r| {
                    r.content
                        .iter()
                        .filter_map(|c| match c {
                            anthropic::ResponseContent::Text { text } => Some(text.as_str()),
                            _ => None,
                        })
                        .collect::<Vec<_>>()
                        .join("")
                }))
            }
            QaClient::OpenAi(client) => {
                let messages = vec![open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::Plain(prompt.to_string()),
                }];
                let response = client.generate(model, max_tokens, messages).await?;
                Ok(response.map(|r| {
                    r.choices
                        .into_iter()
                        .filter_map(|choice| match choice.message {
                            open_ai::RequestMessage::Assistant { content, .. } => {
                                content.map(|c| match c {
                                    open_ai::MessageContent::Plain(text) => text,
                                    open_ai::MessageContent::Multipart(parts) => parts
                                        .into_iter()
                                        .filter_map(|p| match p {
                                            open_ai::MessagePart::Text { text } => Some(text),
                                            _ => None,
                                        })
                                        .collect::<Vec<_>>()
                                        .join(""),
                                })
                            }
                            _ => None,
                        })
                        .collect::<Vec<_>>()
                        .join("")
                }))
            }
        }
    }

    async fn sync_batches(&self) -> Result<()> {
        match self {
            QaClient::Anthropic(client) => client.sync_batches().await,
            QaClient::OpenAi(client) => client.sync_batches().await,
        }
    }
}

/// Run the QA evaluation on a set of examples.
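///
/// Builds a judge prompt per example, sends the requests either synchronously
/// or through the provider's batch API, and writes each judged example as a
/// JSON line to `output_path` (or stdout). In batch mode without `--wait`,
/// results that are not ready yet are left pending for a later run.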
pub async fn run_qa(
    examples: &mut [Example],
    args: &QaArgs,
    output_path: Option<&PathBuf>,
) -> Result<()> {
    let model = model_for_backend(args.backend);
    let client = match args.backend {
        BatchProvider::Anthropic => {
            if args.no_batch {
                QaClient::Anthropic(AnthropicClient::plain()?)
            } else {
                QaClient::Anthropic(AnthropicClient::batch(&LLM_CACHE_DB)?)
            }
        }
        BatchProvider::Openai => {
            if args.no_batch {
                QaClient::OpenAi(OpenAiClient::plain()?)
            } else {
                QaClient::OpenAi(OpenAiClient::batch(&LLM_CACHE_DB)?)
            }
        }
    };

    eprintln!(
        "Using model: {}, backend: {:?}, batching: {}",
        model, args.backend, !args.no_batch
    );

    // First pass: build prompts (the client handles caching internally when requests are sent)
    let mut prompts: Vec<(usize, String)> = Vec::new();
    let mut skipped_count = 0;

    for (idx, example) in examples.iter().enumerate() {
        let Some(prompt) = build_prompt(example) else {
            skipped_count += 1;
            continue;
        };
        prompts.push((idx, prompt));
    }

    if skipped_count > 0 {
        eprintln!(
            "Skipping {} items with missing prediction, patch, or prompt data",
            skipped_count
        );
    }

    eprintln!("{} items to process", prompts.len());

    // Process all items
    let mut results: Vec<(usize, Option<QaResult>)> = Vec::new();

    if args.no_batch {
        // Synchronous processing
        for (i, (idx, prompt)) in prompts.iter().enumerate() {
            eprint!("\rProcessing {}/{}", i + 1, prompts.len());

            let response = client.generate(model, 1024, prompt).await?;
            let result = response.map(|text| parse_response(&text));
            results.push((*idx, result));
        }
        eprintln!();
    } else {
        // Queue all for batching
        for (idx, prompt) in &prompts {
            let response = client.generate(model, 1024, prompt).await?;
            let result = response.map(|text| parse_response(&text));
            results.push((*idx, result));
        }

        // Sync batches (upload pending, download finished)
        client.sync_batches().await?;

        if args.wait {
            eprintln!("Waiting for batch to complete...");
            loop {
                // Poll every 30 seconds until every queued request has a result.
                std::thread::sleep(std::time::Duration::from_secs(30));
                client.sync_batches().await?;

                // Re-check all items that don't have results yet
                let mut all_done = true;
                for (result_idx, (idx, prompt)) in prompts.iter().enumerate() {
                    if results[result_idx].1.is_none() {
                        let response = client.generate(model, 1024, prompt).await?;
                        if let Some(text) = response {
                            results[result_idx] = (*idx, Some(parse_response(&text)));
                        } else {
                            all_done = false;
                        }
                    }
                }

                let done_count = results.iter().filter(|(_, r)| r.is_some()).count();
                if all_done {
                    break;
                }
                eprintln!("Still waiting... {}/{} results", done_count, prompts.len());
            }
        } else {
            let pending_count = results.iter().filter(|(_, r)| r.is_none()).count();
            if pending_count > 0 {
                eprintln!(
                    "Batch submitted. {} pending. Run again later to retrieve results.",
                    pending_count
                );
            }
        }
    }

    // Build results map by index
    let mut results_by_idx: std::collections::HashMap<usize, QaResult> =
        std::collections::HashMap::new();
    for (idx, result) in results {
        if let Some(r) = result {
            results_by_idx.insert(idx, r);
        }
    }

    // Output results
    let mut writer: Box<dyn Write> = if let Some(path) = output_path {
        Box::new(BufWriter::new(std::fs::File::create(path)?))
    } else {
        Box::new(std::io::stdout())
    };

    let mut num_total = 0;
    let mut num_reverts_edits = 0;

    for (idx, example) in examples.iter_mut().enumerate() {
        // Skip examples that couldn't be processed
        if build_prompt(example).is_none() {
            continue;
        }

        let result = results_by_idx.get(&idx).cloned();

        if result.as_ref().and_then(|r| r.reverts_edits) == Some(true) {
            num_reverts_edits += 1;
        }
        num_total += 1;

        // Populate QA results for each prediction (currently only first prediction is evaluated)
        example.qa = example
            .predictions
            .iter()
            .enumerate()
            .map(|(i, _)| if i == 0 { result.clone() } else { None })
            .collect();

        writeln!(writer, "{}", serde_json::to_string(&example)?)?;
    }

    if let Some(path) = output_path {
        eprintln!("Results written to {}", path.display());
    }

    eprintln!("Processed: {} items", num_total);
    if num_total > 0 {
        eprintln!(
            "Reverts edits: {} ({:.2}%)",
            num_reverts_edits,
            num_reverts_edits as f64 / num_total as f64 * 100.0
        );
    }

    Ok(())
}
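
// A few sanity checks for the pure parsing helpers above. The JSON payload
// mirrors the fields `parse_response` reads; the authoritative judge output
// format is defined by prompts/qa.md.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn extracts_fenced_codeblock() {
        let response = "Reasoning first.\n```json\n{\"confidence\": 3}\n```\ntrailing text";
        assert_eq!(
            extract_codeblock(response).as_deref(),
            Some("{\"confidence\": 3}")
        );
    }

    #[test]
    fn parses_judge_response_from_codeblock() {
        let response =
            "```json\n{\"reasoning\": \"ok\", \"reverts_edits\": false, \"confidence\": 4}\n```";
        let result = parse_response(response);
        assert_eq!(result.reasoning.as_deref(), Some("ok"));
        assert_eq!(result.reverts_edits, Some(false));
        assert_eq!(result.confidence, Some(4));
        assert!(result.error.is_none());
    }

    #[test]
    fn reports_error_for_unparseable_response() {
        let result = parse_response("not json at all");
        assert!(result.error.is_some());
        assert_eq!(result.confidence, None);
    }
}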