ep: Include diagnostics in `ZetaPromptInput` (#51141)

Ben Kunkle created

Closes #ISSUE

Before you mark this PR as ready for review, make sure that you have:
- [ ] Added a solid test coverage and/or screenshots from doing manual
testing
- [ ] Done a self-review taking into account security and performance
aspects
- [ ] Aligned any UI changes with the [UI
checklist](https://github.com/zed-industries/zed/blob/main/CONTRIBUTING.md#uiux-checklist)

Release Notes:

- Improved edit predictions by including diagnostics from the active buffer in the prediction prompt context.

Change summary

crates/edit_prediction/src/cursor_excerpt.rs        | 166 +++++++++
crates/edit_prediction/src/edit_prediction_tests.rs | 268 +++++++++-----
crates/edit_prediction/src/fim.rs                   |   1 
crates/edit_prediction/src/mercury.rs               |   1 
crates/edit_prediction/src/prediction.rs            |   1 
crates/edit_prediction/src/sweep_ai.rs              |   1 
crates/edit_prediction/src/zeta.rs                  |  53 ++
crates/edit_prediction_cli/src/load_project.rs      |   1 
crates/edit_prediction_cli/src/reversal_tracking.rs |   1 
crates/zeta_prompt/src/zeta_prompt.rs               |  16 
10 files changed, 413 insertions(+), 96 deletions(-)

Detailed changes

crates/edit_prediction/src/cursor_excerpt.rs 🔗

@@ -122,6 +122,172 @@ pub fn compute_syntax_ranges(
     ranges
 }
 
+/// Expands context by first trying to reach syntax boundaries,
+/// then expanding line-wise only if no syntax expansion occurred.
+pub fn expand_context_syntactically_then_linewise(
+    snapshot: &BufferSnapshot,
+    editable_range: Range<Point>,
+    context_token_limit: usize,
+) -> Range<Point> {
+    let mut start_row = editable_range.start.row;
+    let mut end_row = editable_range.end.row;
+    let mut remaining_tokens = context_token_limit;
+    let mut did_syntax_expand = false;
+
+    // Phase 1: Try to expand to containing syntax boundaries, picking the largest that fits.
+    for (boundary_start, boundary_end) in containing_syntax_boundaries(snapshot, start_row, end_row)
+    {
+        let tokens_for_start = if boundary_start < start_row {
+            estimate_tokens_for_rows(snapshot, boundary_start, start_row)
+        } else {
+            0
+        };
+        let tokens_for_end = if boundary_end > end_row {
+            estimate_tokens_for_rows(snapshot, end_row + 1, boundary_end + 1)
+        } else {
+            0
+        };
+
+        let total_needed = tokens_for_start + tokens_for_end;
+
+        if total_needed <= remaining_tokens {
+            if boundary_start < start_row {
+                start_row = boundary_start;
+            }
+            if boundary_end > end_row {
+                end_row = boundary_end;
+            }
+            remaining_tokens = remaining_tokens.saturating_sub(total_needed);
+            did_syntax_expand = true;
+        } else {
+            break;
+        }
+    }
+
+    // Phase 2: Only expand line-wise if no syntax expansion occurred.
+    if !did_syntax_expand {
+        (start_row, end_row, _) =
+            expand_linewise_biased(snapshot, start_row, end_row, remaining_tokens, true);
+    }
+
+    let start = Point::new(start_row, 0);
+    let end = Point::new(end_row, snapshot.line_len(end_row));
+    start..end
+}
+
+/// Returns an iterator of (start_row, end_row) for successively larger syntax nodes
+/// containing the given row range. Smallest containing node first.
+fn containing_syntax_boundaries(
+    snapshot: &BufferSnapshot,
+    start_row: u32,
+    end_row: u32,
+) -> impl Iterator<Item = (u32, u32)> {
+    let range = Point::new(start_row, 0)..Point::new(end_row, snapshot.line_len(end_row));
+    let mut current = snapshot.syntax_ancestor(range);
+    let mut last_rows: Option<(u32, u32)> = None;
+
+    std::iter::from_fn(move || {
+        while let Some(node) = current.take() {
+            let node_start_row = node.start_position().row as u32;
+            let node_end_row = node.end_position().row as u32;
+            let rows = (node_start_row, node_end_row);
+
+            current = node.parent();
+
+            // Skip nodes that don't extend beyond our range.
+            if node_start_row >= start_row && node_end_row <= end_row {
+                continue;
+            }
+
+            // Skip if same as last returned (some nodes have same span).
+            if last_rows == Some(rows) {
+                continue;
+            }
+
+            last_rows = Some(rows);
+            return Some(rows);
+        }
+        None
+    })
+}
+
+/// Expands line-wise with a bias toward one direction.
+/// Returns (start_row, end_row, remaining_tokens).
+fn expand_linewise_biased(
+    snapshot: &BufferSnapshot,
+    mut start_row: u32,
+    mut end_row: u32,
+    mut remaining_tokens: usize,
+    prefer_up: bool,
+) -> (u32, u32, usize) {
+    loop {
+        let can_expand_up = start_row > 0;
+        let can_expand_down = end_row < snapshot.max_point().row;
+
+        if remaining_tokens == 0 || (!can_expand_up && !can_expand_down) {
+            break;
+        }
+
+        let mut expanded = false;
+
+        // Try preferred direction first.
+        if prefer_up {
+            if can_expand_up {
+                let next_row = start_row - 1;
+                let line_tokens = line_token_count(snapshot, next_row);
+                if line_tokens <= remaining_tokens {
+                    start_row = next_row;
+                    remaining_tokens = remaining_tokens.saturating_sub(line_tokens);
+                    expanded = true;
+                }
+            }
+            if can_expand_down && remaining_tokens > 0 {
+                let next_row = end_row + 1;
+                let line_tokens = line_token_count(snapshot, next_row);
+                if line_tokens <= remaining_tokens {
+                    end_row = next_row;
+                    remaining_tokens = remaining_tokens.saturating_sub(line_tokens);
+                    expanded = true;
+                }
+            }
+        } else {
+            if can_expand_down {
+                let next_row = end_row + 1;
+                let line_tokens = line_token_count(snapshot, next_row);
+                if line_tokens <= remaining_tokens {
+                    end_row = next_row;
+                    remaining_tokens = remaining_tokens.saturating_sub(line_tokens);
+                    expanded = true;
+                }
+            }
+            if can_expand_up && remaining_tokens > 0 {
+                let next_row = start_row - 1;
+                let line_tokens = line_token_count(snapshot, next_row);
+                if line_tokens <= remaining_tokens {
+                    start_row = next_row;
+                    remaining_tokens = remaining_tokens.saturating_sub(line_tokens);
+                    expanded = true;
+                }
+            }
+        }
+
+        if !expanded {
+            break;
+        }
+    }
+
+    (start_row, end_row, remaining_tokens)
+}
+
+/// Estimates token count for rows in range [start_row, end_row).
+fn estimate_tokens_for_rows(snapshot: &BufferSnapshot, start_row: u32, end_row: u32) -> usize {
+    let mut tokens = 0;
+    for row in start_row..end_row {
+        tokens += line_token_count(snapshot, row);
+    }
+    tokens
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;

crates/edit_prediction/src/edit_prediction_tests.rs 🔗

@@ -17,7 +17,10 @@ use gpui::{
     http_client::{FakeHttpClient, Response},
 };
 use indoc::indoc;
-use language::{Anchor, Buffer, CursorShape, Operation, Point, Selection, SelectionGoal};
+use language::{
+    Anchor, Buffer, CursorShape, Diagnostic, DiagnosticEntry, DiagnosticSet, DiagnosticSeverity,
+    Operation, Point, Selection, SelectionGoal,
+};
 use lsp::LanguageServerId;
 use parking_lot::Mutex;
 use pretty_assertions::{assert_eq, assert_matches};
@@ -25,7 +28,10 @@ use project::{FakeFs, Project};
 use serde_json::json;
 use settings::SettingsStore;
 use std::{path::Path, sync::Arc, time::Duration};
-use util::path;
+use util::{
+    path,
+    test::{TextRangeMarker, marked_text_ranges_by},
+};
 use uuid::Uuid;
 use zeta_prompt::ZetaPromptInput;
 
@@ -1656,97 +1662,172 @@ async fn test_rejections_flushing(cx: &mut TestAppContext) {
     assert_eq!(reject_request.rejections[1].request_id, "retry-2");
 }
 
-// Skipped until we start including diagnostics in prompt
-// #[gpui::test]
-// async fn test_request_diagnostics(cx: &mut TestAppContext) {
-//     let (ep_store, mut req_rx) = init_test_with_fake_client(cx);
-//     let fs = FakeFs::new(cx.executor());
-//     fs.insert_tree(
-//         "/root",
-//         json!({
-//             "foo.md": "Hello!\nBye"
-//         }),
-//     )
-//     .await;
-//     let project = Project::test(fs, vec![path!("/root").as_ref()], cx).await;
-
-//     let path_to_buffer_uri = lsp::Uri::from_file_path(path!("/root/foo.md")).unwrap();
-//     let diagnostic = lsp::Diagnostic {
-//         range: lsp::Range::new(lsp::Position::new(1, 1), lsp::Position::new(1, 5)),
-//         severity: Some(lsp::DiagnosticSeverity::ERROR),
-//         message: "\"Hello\" deprecated. Use \"Hi\" instead".to_string(),
-//         ..Default::default()
-//     };
-
-//     project.update(cx, |project, cx| {
-//         project.lsp_store().update(cx, |lsp_store, cx| {
-//             // Create some diagnostics
-//             lsp_store
-//                 .update_diagnostics(
-//                     LanguageServerId(0),
-//                     lsp::PublishDiagnosticsParams {
-//                         uri: path_to_buffer_uri.clone(),
-//                         diagnostics: vec![diagnostic],
-//                         version: None,
-//                     },
-//                     None,
-//                     language::DiagnosticSourceKind::Pushed,
-//                     &[],
-//                     cx,
-//                 )
-//                 .unwrap();
-//         });
-//     });
-
-//     let buffer = project
-//         .update(cx, |project, cx| {
-//             let path = project.find_project_path(path!("root/foo.md"), cx).unwrap();
-//             project.open_buffer(path, cx)
-//         })
-//         .await
-//         .unwrap();
-
-//     let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
-//     let position = snapshot.anchor_before(language::Point::new(0, 0));
-
-//     let _prediction_task = ep_store.update(cx, |ep_store, cx| {
-//         ep_store.request_prediction(&project, &buffer, position, cx)
-//     });
-
-//     let (request, _respond_tx) = req_rx.next().await.unwrap();
-
-//     assert_eq!(request.diagnostic_groups.len(), 1);
-//     let value = serde_json::from_str::<serde_json::Value>(request.diagnostic_groups[0].0.get())
-//         .unwrap();
-//     // We probably don't need all of this. TODO define a specific diagnostic type in predict_edits_v3
-//     assert_eq!(
-//         value,
-//         json!({
-//             "entries": [{
-//                 "range": {
-//                     "start": 8,
-//                     "end": 10
-//                 },
-//                 "diagnostic": {
-//                     "source": null,
-//                     "code": null,
-//                     "code_description": null,
-//                     "severity": 1,
-//                     "message": "\"Hello\" deprecated. Use \"Hi\" instead",
-//                     "markdown": null,
-//                     "group_id": 0,
-//                     "is_primary": true,
-//                     "is_disk_based": false,
-//                     "is_unnecessary": false,
-//                     "source_kind": "Pushed",
-//                     "data": null,
-//                     "underline": true
-//                 }
-//             }],
-//             "primary_ix": 0
-//         })
-//     );
-// }
+#[gpui::test]
+fn test_active_buffer_diagnostics_fetching(cx: &mut TestAppContext) {
+    let diagnostic_marker: TextRangeMarker = ('«', '»').into();
+    let search_range_marker: TextRangeMarker = ('[', ']').into();
+
+    let (text, mut ranges) = marked_text_ranges_by(
+        indoc! {r#"
+            fn alpha() {
+                let «first_value» = 1;
+            }
+
+            [fn beta() {
+                let «second_value» = 2;
+                let third_value = second_value + missing_symbol;
+            }ˇ]
+
+            fn gamma() {
+                let «fourth_value» = missing_other_symbol;
+            }
+        "#},
+        vec![diagnostic_marker.clone(), search_range_marker.clone()],
+    );
+
+    let diagnostic_ranges = ranges.remove(&diagnostic_marker).unwrap_or_default();
+    let search_ranges = ranges.remove(&search_range_marker).unwrap_or_default();
+
+    let buffer = cx.new(|cx| Buffer::local(&text, cx));
+
+    buffer.update(cx, |buffer, cx| {
+        let snapshot = buffer.snapshot();
+        let diagnostics = DiagnosticSet::new(
+            diagnostic_ranges
+                .iter()
+                .enumerate()
+                .map(|(index, range)| DiagnosticEntry {
+                    range: snapshot.offset_to_point_utf16(range.start)
+                        ..snapshot.offset_to_point_utf16(range.end),
+                    diagnostic: Diagnostic {
+                        severity: match index {
+                            0 => DiagnosticSeverity::WARNING,
+                            1 => DiagnosticSeverity::ERROR,
+                            _ => DiagnosticSeverity::HINT,
+                        },
+                        message: match index {
+                            0 => "first warning".to_string(),
+                            1 => "second error".to_string(),
+                            _ => "third hint".to_string(),
+                        },
+                        group_id: index + 1,
+                        is_primary: true,
+                        source_kind: language::DiagnosticSourceKind::Pushed,
+                        ..Diagnostic::default()
+                    },
+                }),
+            &snapshot,
+        );
+        buffer.update_diagnostics(LanguageServerId(0), diagnostics, cx);
+    });
+
+    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
+    let search_range = snapshot.offset_to_point(search_ranges[0].start)
+        ..snapshot.offset_to_point(search_ranges[0].end);
+
+    let active_buffer_diagnostics = zeta::active_buffer_diagnostics(&snapshot, search_range, 100);
+
+    assert_eq!(
+        active_buffer_diagnostics,
+        vec![zeta_prompt::ActiveBufferDiagnostic {
+            severity: Some(1),
+            message: "second error".to_string(),
+            snippet: text,
+            snippet_buffer_row_range: 5..5,
+            diagnostic_range_in_snippet: 61..73,
+        }]
+    );
+
+    let buffer = cx.new(|cx| {
+        Buffer::local(
+            indoc! {"
+                one
+                two
+                three
+                four
+                five
+            "},
+            cx,
+        )
+    });
+
+    buffer.update(cx, |buffer, cx| {
+        let snapshot = buffer.snapshot();
+        let diagnostics = DiagnosticSet::new(
+            vec![
+                DiagnosticEntry {
+                    range: text::PointUtf16::new(0, 0)..text::PointUtf16::new(0, 3),
+                    diagnostic: Diagnostic {
+                        severity: DiagnosticSeverity::ERROR,
+                        message: "row zero".to_string(),
+                        group_id: 1,
+                        is_primary: true,
+                        source_kind: language::DiagnosticSourceKind::Pushed,
+                        ..Diagnostic::default()
+                    },
+                },
+                DiagnosticEntry {
+                    range: text::PointUtf16::new(2, 0)..text::PointUtf16::new(2, 5),
+                    diagnostic: Diagnostic {
+                        severity: DiagnosticSeverity::WARNING,
+                        message: "row two".to_string(),
+                        group_id: 2,
+                        is_primary: true,
+                        source_kind: language::DiagnosticSourceKind::Pushed,
+                        ..Diagnostic::default()
+                    },
+                },
+                DiagnosticEntry {
+                    range: text::PointUtf16::new(4, 0)..text::PointUtf16::new(4, 4),
+                    diagnostic: Diagnostic {
+                        severity: DiagnosticSeverity::INFORMATION,
+                        message: "row four".to_string(),
+                        group_id: 3,
+                        is_primary: true,
+                        source_kind: language::DiagnosticSourceKind::Pushed,
+                        ..Diagnostic::default()
+                    },
+                },
+            ],
+            &snapshot,
+        );
+        buffer.update_diagnostics(LanguageServerId(0), diagnostics, cx);
+    });
+
+    let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
+
+    let active_buffer_diagnostics =
+        zeta::active_buffer_diagnostics(&snapshot, Point::new(2, 0)..Point::new(4, 0), 100);
+
+    assert_eq!(
+        active_buffer_diagnostics
+            .iter()
+            .map(|diagnostic| (
+                diagnostic.severity,
+                diagnostic.message.clone(),
+                diagnostic.snippet.clone(),
+                diagnostic.snippet_buffer_row_range.clone(),
+                diagnostic.diagnostic_range_in_snippet.clone(),
+            ))
+            .collect::<Vec<_>>(),
+        vec![
+            (
+                Some(2),
+                "row two".to_string(),
+                "one\ntwo\nthree\nfour\nfive\n".to_string(),
+                2..2,
+                8..13,
+            ),
+            (
+                Some(3),
+                "row four".to_string(),
+                "one\ntwo\nthree\nfour\nfive\n".to_string(),
+                4..4,
+                19..23,
+            ),
+        ]
+    );
+}
 
 // Generate a model response that would apply the given diff to the active file.
 fn model_response(request: &PredictEditsV3Request, diff_to_apply: &str) -> PredictEditsV3Response {
@@ -1885,6 +1966,7 @@ async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) {
         inputs: ZetaPromptInput {
             events: Default::default(),
             related_files: Default::default(),
+            active_buffer_diagnostics: vec![],
             cursor_path: Path::new("").into(),
             cursor_excerpt: "".into(),
             cursor_offset_in_excerpt: 0,

crates/edit_prediction/src/fim.rs 🔗

@@ -82,6 +82,7 @@ pub fn request_prediction(
         let inputs = ZetaPromptInput {
             events,
             related_files: Some(Vec::new()),
+            active_buffer_diagnostics: Vec::new(),
             cursor_offset_in_excerpt: cursor_offset - excerpt_offset_range.start,
             cursor_path: full_path.clone(),
             excerpt_start_row: Some(excerpt_point_range.start.row),

crates/edit_prediction/src/mercury.rs 🔗

@@ -101,6 +101,7 @@ impl Mercury {
                 excerpt_start_row: Some(excerpt_point_range.start.row),
                 excerpt_ranges,
                 syntax_ranges: Some(syntax_ranges),
+                active_buffer_diagnostics: vec![],
                 in_open_source_repo: false,
                 can_collect_data: false,
                 repo_url: None,

crates/edit_prediction/src/prediction.rs 🔗

@@ -157,6 +157,7 @@ mod tests {
             inputs: ZetaPromptInput {
                 events: vec![],
                 related_files: Some(vec![]),
+                active_buffer_diagnostics: vec![],
                 cursor_path: Path::new("path.txt").into(),
                 cursor_offset_in_excerpt: 0,
                 cursor_excerpt: "".into(),

crates/edit_prediction/src/sweep_ai.rs 🔗

@@ -213,6 +213,7 @@ impl SweepAi {
             let ep_inputs = zeta_prompt::ZetaPromptInput {
                 events: inputs.events,
                 related_files: Some(inputs.related_files.clone()),
+                active_buffer_diagnostics: vec![],
                 cursor_path: full_path.clone(),
                 cursor_excerpt: request_body.file_contents.clone().into(),
                 cursor_offset_in_excerpt: request_body.cursor_position,

crates/edit_prediction/src/zeta.rs 🔗

@@ -2,7 +2,7 @@ use crate::{
     CurrentEditPrediction, DebugEvent, EditPredictionFinishedDebugEvent, EditPredictionId,
     EditPredictionModelInput, EditPredictionStartedDebugEvent, EditPredictionStore, StoredEvent,
     ZedUpdateRequiredError,
-    cursor_excerpt::{compute_cursor_excerpt, compute_syntax_ranges},
+    cursor_excerpt::{self, compute_cursor_excerpt, compute_syntax_ranges},
     prediction::EditPredictionResult,
 };
 use anyhow::Result;
@@ -12,11 +12,12 @@ use cloud_llm_client::{
 use edit_prediction_types::PredictedCursorPosition;
 use gpui::{App, AppContext as _, Entity, Task, WeakEntity, prelude::*};
 use language::{
-    Buffer, BufferSnapshot, ToOffset as _, language_settings::all_language_settings, text_diff,
+    Buffer, BufferSnapshot, DiagnosticSeverity, OffsetRangeExt as _, ToOffset as _,
+    language_settings::all_language_settings, text_diff,
 };
 use release_channel::AppVersion;
 use settings::EditPredictionPromptFormat;
-use text::{Anchor, Bias};
+use text::{Anchor, Bias, Point};
 use ui::SharedString;
 use workspace::notifications::{ErrorMessagePrompt, NotificationId, show_app_notification};
 use zeta_prompt::{ParsedOutput, ZetaPromptInput};
@@ -43,6 +44,7 @@ pub fn request_prediction_with_zeta(
         debug_tx,
         trigger,
         project,
+        diagnostic_search_range,
         can_collect_data,
         is_open_source,
         ..
@@ -115,6 +117,7 @@ pub fn request_prediction_with_zeta(
                 &snapshot,
                 related_files,
                 events,
+                diagnostic_search_range,
                 excerpt_path,
                 cursor_offset,
                 preferred_experiment,
@@ -479,10 +482,50 @@ fn handle_api_response<T>(
     }
 }
 
+pub(crate) fn active_buffer_diagnostics(
+    snapshot: &language::BufferSnapshot,
+    diagnostic_search_range: Range<Point>,
+    additional_context_token_count: usize,
+) -> Vec<zeta_prompt::ActiveBufferDiagnostic> {
+    snapshot
+        .diagnostics_in_range::<Point, Point>(diagnostic_search_range, false)
+        .map(|entry| {
+            let severity = match entry.diagnostic.severity {
+                DiagnosticSeverity::ERROR => Some(1),
+                DiagnosticSeverity::WARNING => Some(2),
+                DiagnosticSeverity::INFORMATION => Some(3),
+                DiagnosticSeverity::HINT => Some(4),
+                _ => None,
+            };
+            let diagnostic_point_range = entry.range.clone();
+            let snippet_point_range = cursor_excerpt::expand_context_syntactically_then_linewise(
+                snapshot,
+                diagnostic_point_range.clone(),
+                additional_context_token_count,
+            );
+            let snippet = snapshot
+                .text_for_range(snippet_point_range.clone())
+                .collect::<String>();
+            let snippet_start_offset = snippet_point_range.start.to_offset(snapshot);
+            let diagnostic_offset_range = diagnostic_point_range.to_offset(snapshot);
+            zeta_prompt::ActiveBufferDiagnostic {
+                severity,
+                message: entry.diagnostic.message.clone(),
+                snippet,
+                snippet_buffer_row_range: diagnostic_point_range.start.row
+                    ..diagnostic_point_range.end.row,
+                diagnostic_range_in_snippet: diagnostic_offset_range.start - snippet_start_offset
+                    ..diagnostic_offset_range.end - snippet_start_offset,
+            }
+        })
+        .collect()
+}
+
 pub fn zeta2_prompt_input(
     snapshot: &language::BufferSnapshot,
     related_files: Vec<zeta_prompt::RelatedFile>,
     events: Vec<Arc<zeta_prompt::Event>>,
+    diagnostic_search_range: Range<Point>,
     excerpt_path: Arc<Path>,
     cursor_offset: usize,
     preferred_experiment: Option<String>,
@@ -504,6 +547,9 @@ pub fn zeta2_prompt_input(
         &syntax_ranges,
     );
 
+    let active_buffer_diagnostics =
+        active_buffer_diagnostics(snapshot, diagnostic_search_range, 100);
+
     let prompt_input = zeta_prompt::ZetaPromptInput {
         cursor_path: excerpt_path,
         cursor_excerpt,
@@ -511,6 +557,7 @@ pub fn zeta2_prompt_input(
         excerpt_start_row: Some(excerpt_point_range.start.row),
         events,
         related_files: Some(related_files),
+        active_buffer_diagnostics,
         excerpt_ranges,
         syntax_ranges: Some(syntax_ranges),
         experiment: preferred_experiment,

crates/edit_prediction_cli/src/load_project.rs 🔗

@@ -103,6 +103,7 @@ pub async fn run_load_project(
                 excerpt_start_row: Some(excerpt_point_range.start.row),
                 events,
                 related_files: existing_related_files,
+                active_buffer_diagnostics: vec![],
                 excerpt_ranges,
                 syntax_ranges: Some(syntax_ranges),
                 in_open_source_repo: false,

crates/edit_prediction_cli/src/reversal_tracking.rs 🔗

@@ -669,6 +669,7 @@ mod tests {
             excerpt_start_row,
             events,
             related_files: Some(Vec::new()),
+            active_buffer_diagnostics: Vec::new(),
             excerpt_ranges: ExcerptRanges {
                 editable_150: 0..content.len(),
                 editable_180: 0..content.len(),

crates/zeta_prompt/src/zeta_prompt.rs 🔗

@@ -34,6 +34,8 @@ pub struct ZetaPromptInput {
     pub events: Vec<Arc<Event>>,
     #[serde(default)]
     pub related_files: Option<Vec<RelatedFile>>,
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    pub active_buffer_diagnostics: Vec<ActiveBufferDiagnostic>,
     /// These ranges let the server select model-appropriate subsets.
     pub excerpt_ranges: ExcerptRanges,
     /// Byte offset ranges within `cursor_excerpt` for all syntax nodes that
@@ -168,6 +170,15 @@ pub fn write_event(prompt: &mut String, event: &Event) {
     }
 }
 
+#[derive(Clone, Debug, PartialEq, Hash, Serialize, Deserialize)]
+pub struct ActiveBufferDiagnostic {
+    pub severity: Option<i32>,
+    pub message: String,
+    pub snippet: String,
+    pub snippet_buffer_row_range: Range<u32>,
+    pub diagnostic_range_in_snippet: Range<usize>,
+}
+
 #[derive(Clone, Debug, PartialEq, Hash, Serialize, Deserialize)]
 pub struct RelatedFile {
     pub path: Arc<Path>,
@@ -3881,6 +3892,7 @@ mod tests {
             excerpt_start_row: None,
             events: events.into_iter().map(Arc::new).collect(),
             related_files: Some(related_files),
+            active_buffer_diagnostics: vec![],
             excerpt_ranges: ExcerptRanges {
                 editable_150: editable_range.clone(),
                 editable_180: editable_range.clone(),
@@ -3911,6 +3923,7 @@ mod tests {
             excerpt_start_row: None,
             events: vec![],
             related_files: Some(vec![]),
+            active_buffer_diagnostics: vec![],
             excerpt_ranges: ExcerptRanges {
                 editable_150: editable_range.clone(),
                 editable_180: editable_range.clone(),
@@ -4495,6 +4508,7 @@ mod tests {
             excerpt_start_row: Some(0),
             events: vec![Arc::new(make_event("other.rs", "-old\n+new\n"))],
             related_files: Some(vec![]),
+            active_buffer_diagnostics: vec![],
             excerpt_ranges: ExcerptRanges {
                 editable_150: 15..41,
                 editable_180: 15..41,
@@ -4559,6 +4573,7 @@ mod tests {
             excerpt_start_row: Some(10),
             events: vec![],
             related_files: Some(vec![]),
+            active_buffer_diagnostics: vec![],
             excerpt_ranges: ExcerptRanges {
                 editable_150: 0..28,
                 editable_180: 0..28,
@@ -4618,6 +4633,7 @@ mod tests {
             excerpt_start_row: Some(0),
             events: vec![],
             related_files: Some(vec![]),
+            active_buffer_diagnostics: vec![],
             excerpt_ranges: ExcerptRanges {
                 editable_150: editable_range.clone(),
                 editable_180: editable_range.clone(),